blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6d4ce26d97b48219accdd9f009ef6e961f997b79
|
b984267f3a264d76e3392bbc576fec630bda7050
|
/cachematrix.R
|
aa34bace229a2a9f3b625fecf09806b06253197b
|
[] |
no_license
|
berlincarrie/ProgrammingAssignment2
|
28ad71e6cb7f7d45eb08b91d1ce937e3e02818ae
|
50dc06d1cdf28735fb5480ae2fee9f29dfab5837
|
refs/heads/master
| 2022-12-07T07:10:54.564559
| 2020-09-06T20:22:58
| 2020-09-06T20:22:58
| 293,340,627
| 0
| 0
| null | 2020-09-06T18:42:49
| 2020-09-06T18:42:48
| null |
WINDOWS-1252
|
R
| false
| false
| 1,646
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
# makeCacheMatrix is a function that returns a list of functions
# Its purpose is to store a matrix and a cached value of the inverse of the
# matrix. It contains a list of 4 functions.
# * setMatrix set the value of a matrix
# * getMatrix get the value of a matrix
# * cacheInverse set the cached value (inverse of the matrix)
# * getInverse get the cached value (inverse of the matrix)
## Write a short comment describing this function
#makeCacheMatrix is a function that creates a special matrix object that can
#cache its inverse.
#Initially the function determines if the matrix is already cached.
# Create a special "matrix" object that can cache its inverse.
#
# Returns a list of four accessor functions closing over this call's
# environment:
#   set(y)          store a new matrix (and drop any cached inverse)
#   get()           retrieve the stored matrix
#   setInverse(i)   cache a value as the inverse
#   getInverse()    return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  # No inverse has been computed yet.
  inv <- NULL

  set <- function(y) {
    # Replace the stored matrix and invalidate the stale cache.
    x <<- y
    inv <<- NULL
  }

  get <- function() x

  setInverse <- function(inverse) inv <<- inverse

  getInverse <- function() inv

  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## Write a short comment describing this function
#cacheSolve: This function computes the inverse of the special “matrix” returned by makeCacheMatrix above.
## Return the inverse of the special "matrix" created with
## makeCacheMatrix.
##
## If an inverse is already cached it is returned immediately (with a
## message); otherwise the inverse is computed with solve(), stored in
## the cache via x$setInverse(), and returned.  Extra arguments in
## `...` are forwarded to solve().
##
## (Bug fix: the original had a stray extra `}` after the function,
## which made the file unparseable.)
cacheSolve <- function(x, ...) {
  inv <- x$getInverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setInverse(inv)
  inv
}
|
8cb57d86382575786ac8cc936ad050d1c1c21684
|
93d1fcc7758e5e99927be0529fb9d681db71e70c
|
/man/organize_database.Rd
|
648a5d6104a64667eb810773901360ee6c25810a
|
[] |
no_license
|
psychmeta/psychmeta
|
ef4319169102b43fd87caacd9881014762939e33
|
b790fac3f2a4da43ee743d06de51b7005214e279
|
refs/heads/master
| 2023-08-17T20:42:48.778862
| 2023-08-14T01:22:19
| 2023-08-14T01:22:19
| 100,509,679
| 37
| 15
| null | 2023-08-14T01:06:53
| 2017-08-16T16:23:28
|
R
|
UTF-8
|
R
| false
| true
| 2,473
|
rd
|
organize_database.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrangle_data.R
\name{organize_database}
\alias{organize_database}
\title{Organize a database of multi-construct or moderated information}
\usage{
organize_database(
es_data,
sample_id = NULL,
citekey = NULL,
construct_x = NULL,
construct_y = NULL,
facet_x = NULL,
facet_y = NULL,
measure_x = NULL,
measure_y = NULL,
data_x = NULL,
data_y = NULL,
moderators = NULL,
use_as_x = NULL,
use_as_y = NULL,
construct_order = NULL,
cat_moderators = TRUE,
moderator_levels = NULL
)
}
\arguments{
\item{es_data}{Matrix of effect-size data to be used in meta-analyses.}
\item{sample_id}{Optional vector of identification labels for studies in the meta-analysis.}
\item{citekey}{Optional vector of bibliographic citation keys for samples/studies in the meta-analysis (if multiple citekeys pertain to a given effect size, combine them into a single string entry with comma delimiters, e.g., "citekey1,citekey2").}
\item{construct_x}{Vector of construct names for construct initially designated as X.}
\item{construct_y}{Vector of construct names for construct initially designated as Y.}
\item{facet_x}{Vector of facet names for construct initially designated as X.}
\item{facet_y}{Vector of facet names for construct initially designated as Y.}
\item{data_x}{Additional data (e.g., artifact information) specific to the variables originally designated as X.}
\item{data_y}{Additional data (e.g., artifact information) specific to the variables originally designated as Y.}
\item{moderators}{Matrix, dataframe, or vector of moderators.}
\item{use_as_x}{Vector of construct names to be categorized as X constructs - cannot overlap with the contents of 'use_as_y'.}
\item{use_as_y}{Vector of construct names to be categorized as Y constructs - cannot overlap with the contents of 'use_as_x'.}
\item{construct_order}{Vector indicating the order in which variables should be arranged, with variables listed earlier in the vector being preferred for designation as X.}
\item{cat_moderators}{Logical vector identifying whether each variable in moderators is a categorical variable (TRUE) or a continuous variable (FALSE).}
\item{moderator_levels}{Optional list of factor levels to be applied to the categorical moderators.}
}
\value{
A reorganized list of study data
}
\description{
Organize a database of multi-construct or moderated information
}
\keyword{internal}
|
498e1112d25aa7cb30c7ce37e071a1e8eaf229f7
|
5e0f953d7499fc1750bcd87a34524023046e1160
|
/src/2-summarise/functions/plot-example-trial.R
|
ce7d21c38aba4f780ef16b549013d5f9b867078d
|
[] |
no_license
|
seunggookim/pdec-analysis-2
|
d16a45b70e13974136c6a30885330e2d33470b4f
|
63264ad78d988645c408d9c25e872a573541b56d
|
refs/heads/master
| 2023-03-18T11:19:12.094275
| 2020-07-09T11:31:53
| 2020-07-09T11:31:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,582
|
r
|
plot-example-trial.R
|
# Plot a single example trial.
#
# Drills into a nested results table: selects the "L4 + exp.decay"
# model row, then the condition with alphabet size 10 and 50 ms tones,
# extracts the first trial-level analysis object, and plots it via the
# generic plot() (S3 dispatch on the object's class) with the lag
# annotation suppressed.
# NOTE(review): assumes `x` carries list-columns `res` and `detail`
# holding nested result tables -- confirm against the caller.
plot_example_trial <- function(x) {
filter(x, label == "L4 + exp.decay") %>%
# first nested result table for the selected model
{.$res[[1]]} %>%
filter(alphabet_size == 10 & tone_len_ms == 50) %>%
# first trial-level analysis object inside the first detail entry
{.$detail[[1]]$res[[1]]} %>%
plot(lag = FALSE)
}
# S3 plot method for "trial_analysis" objects.
#
# Builds a faceted ggplot of three per-tone series derived from
# x$profile: log tone frequency, information content, and the
# change-point statistic, each in its own facet with a free y scale.
# A secondary x-axis maps tone number to seconds by spline
# interpolation of x$profile$time.  Vertical reference lines mark the
# nominal/effective transition (when x$info$trial$transition is not NA)
# and the detected change point (when x$change_point$change_detected).
# When `lag` is TRUE the title reports the detection lag in tones.
# Returns the ggplot object (not yet printed).
plot.trial_analysis <- function(x, lag = TRUE, ...) {
# Two colours from the viridis palette: points vs. reference lines.
palette <- viridis::viridis_pal(end = 0.7)(2)
# Reshape the profile to long form (one row per tone x variable) and
# relabel the variables for facet strips.
p <- x$profile %>%
mutate(cp_stat = x$change_point$statistic,
freq = x$info$alphabet[symbol],
log_freq = log(freq)) %>%
select(pos, information_content, cp_stat, log_freq) %>%
gather(var, value, - pos) %>%
na.omit() %>%
mutate(var = recode_factor(var,
log_freq = "Frequency (logarithm)",
information_content = "Information content (bits)",
cp_stat = "Change-point statistic"
)) %>%
ggplot(aes(x = pos, y = value)) +
geom_point(size = 1, colour = palette[1]) +
scale_x_continuous("Tone number",
sec.axis = sec_axis(~ spline(x$profile$pos,
x$profile$time,
xout = .,
method = "natural")$y,
name = "Time (seconds)")) +
scale_y_continuous("Value") +
facet_wrap(~ var, ncol = 1, scales = "free_y") +
# theme_bw() +
ggpubr::theme_pubr() +
theme(panel.grid = element_blank(),
strip.background = element_rect(colour = "white"),
strip.text = element_text(hjust = 0),
legend.key.size = unit(1, 'cm'),
legend.key.width = unit(3.0, "cm"),
legend.spacing.x = unit(1.0, 'cm'),
legend.position = "bottom")
# Optional title reporting the detection lag.
if (lag) p <- p + ggtitle(glue("Lag = {x$change_point$lag_tones} tones"))
# Mark the nominal transition and the effective transition (nominal
# shifted by the alphabet size), when a transition exists.
if (!is.na(x$info$trial$transition)) {
# NOTE(review): `f` is defined but never used; left untouched in this
# documentation-only pass.
f <- function(x) factor(x, levels = c("Nominal transition",
"Effective transition",
"Detection of transition"))
p <- p +
geom_vline(aes(xintercept = x$info$trial$transition,
linetype = "Nominal transition",
colour = "Nominal transition")) +
geom_vline(aes(xintercept = x$info$trial$transition + x$info$trial$alphabet_size,
linetype = "Effective transition",
colour = "Effective transition"))
}
# Mark the position at which the change point was detected, if any.
if (x$change_point$change_detected)
p <- p + geom_vline(aes(xintercept = x$change_point$pos_when_change_detected,
colour = "Detection of transition",
linetype = "Detection of transition"))
# Shared legends: distinct line types, one colour for all three lines.
p <- p + scale_linetype_manual("", values = c(`Nominal transition` = "solid",
`Effective transition` = "dashed",
`Detection of transition` = "dotted"),
guide = guide_legend(reverse = TRUE,
label.position = "bottom"))
p <- p + scale_colour_manual("", values = c(`Nominal transition` = palette[2],
`Effective transition` = palette[2],
`Detection of transition` = palette[2]),
guide = guide_legend(reverse = TRUE,
label.position = "bottom"))
p
}
|
d4c8b1a02cfa5d502c3cc1418ef4241b555e363a
|
046902684f911ccbc54ae7dd8c3f28e1379a4f50
|
/man/apply.ECOVSF.cal.Rd
|
f0ae7c918842d50dfb08ac2a80001edc336088a0
|
[] |
no_license
|
belasi01/Riops
|
1632e9fa5aa94d73cb59dae47cdd9b0a3acaa5db
|
59b3fe0229b24d90c6720bb7705dfe97f4206f07
|
refs/heads/master
| 2023-03-15T12:53:17.142723
| 2021-06-21T16:07:25
| 2021-06-21T16:07:25
| 73,815,809
| 0
| 4
| null | 2023-03-09T21:34:21
| 2016-11-15T13:23:56
|
R
|
UTF-8
|
R
| false
| true
| 283
|
rd
|
apply.ECOVSF.cal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apply.ECOVSF.cal.R
\name{apply.ECOVSF.cal}
\alias{apply.ECOVSF.cal}
\title{Apply ECOVSF calibration}
\usage{
apply.ECOVSF.cal(eco, dev.file = NA, dark.file = NA)
}
\description{
Apply ECOVSF calibration
}
|
8d3d4cb30bea0196f072edd9e7ee59e6c2a99a21
|
b945a76daddff0988dccaf4dcf4af8c296c296d7
|
/plot3.R
|
0fba3314644a51fe518b3fc4e81c06b32ffb2a7d
|
[] |
no_license
|
pooimun/coursera-01_exploratory_data_analysis_course_project_1
|
f5cc0e21a81eedbd16d61565b33bccbbd8b01912
|
d18742628e777dc0990853adc27a2d21ddd202ce
|
refs/heads/master
| 2020-05-20T10:08:15.058561
| 2019-05-08T03:22:09
| 2019-05-08T03:22:09
| 185,519,181
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 828
|
r
|
plot3.R
|
#Read data
# Read the UCI household power consumption file (';'-delimited).
# NOTE(review): missing values are coded "?" in this file, which is why
# Voltage is compared against '?' below and the sub-metering columns
# are coerced with as.numeric().
data <- read.csv('household_power_consumption.txt', header = TRUE, sep=';',stringsAsFactors = FALSE,dec = '.')
# Keep only 1 Feb 2007 and 2 Feb 2007 (dates stored as d/m/yyyy text).
data2 <- subset(data,data$Date == '1/2/2007'|data$Date == '2/2/2007')
# Drop rows whose measurements are missing ("?").
data3 <- subset(data2,data2$Voltage !='?')
#Plot 3
# Coerce the three sub-metering series from text to numeric.
Sub_metering_1 <- as.numeric(data3$Sub_metering_1)
Sub_metering_2 <- as.numeric(data3$Sub_metering_2)
Sub_metering_3 <- as.numeric(data3$Sub_metering_3)
# Build timestamps from the separate Date and Time columns.
datetime <- strptime(paste(data3$Date, data3$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Write the chart to a 480x480 PNG.
png("plot3.png",width = 480,height = 480)
plot(datetime, Sub_metering_1,type='l',xlab='',ylab='Energy sub metering')
lines(datetime, Sub_metering_2,type='l',col='red')
lines(datetime, Sub_metering_3,type='l',col='blue')
legend('topright',c('Sub_metering_1','Sub_metering_2','Sub_metering_3'),lty=1,lwd=2.5,col=c('black','red','blue'))
# Close the device so the file is flushed to disk.
dev.off()
|
c1203ad2631c04786c553fabe0039a924bf953f1
|
f07e2ce68624b33053d5c3ad997e7806d9df4bb2
|
/secretrabbitau/secretrabbitau.r
|
a8ea2ef3aff8a7c5a788b9c7f331c40b165c7a92
|
[
"BSD-2-Clause"
] |
permissive
|
kevin--/stretchfix
|
b2d079e6c0219ba7e2c611dce00293cdfdb8326b
|
16a097ca5668a5cb57f2f6b2e8516320ea6fb429
|
refs/heads/master
| 2021-01-10T08:21:15.652411
| 2020-10-03T06:51:46
| 2020-10-03T06:51:46
| 72,315,023
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,031
|
r
|
secretrabbitau.r
|
/****
SecretRabbit Varispeed - SecretRabbitCode / libsamplerate AudioUnit wrapper, implementing a Varispeed
Copyright (C) 2008 Kevin C. Dixon
http://yano.wasteonline.net/software/srvs/
http://www.mega-nerd.com/SRC/
****/
/*
secretrabbitau.r
SecretRabbitCode sample rate conversion Audio Unit
*/
// NOTE(review): despite the ".r" extension this is a Rez resource
// description for the Audio Unit component registration, not R code.
#include <AudioUnit/AudioUnit.r>
#include "secretrabbitauVersion.h"
// Note that resource IDs must be spaced 2 apart for the 'STR ' name and description
#define kAudioUnitResID_secretrabbitau 1000
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ secretrabbitau~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#define RES_ID kAudioUnitResID_secretrabbitau
// Registered as an offline-effect Audio Unit component.
#define COMP_TYPE kAudioUnitType_OfflineEffect
#define COMP_SUBTYPE secretrabbitau_COMP_SUBTYPE
#define COMP_MANUF secretrabbitau_COMP_MANF
#define VERSION ksecretrabbitauVersion
#define NAME "Yano Signal Processing: SRC Varispeed"
#define DESCRIPTION "Varispeed (libsamplerate/SecretRabbitCode)"
#define ENTRY_POINT "SecretRabbitAUEntry"
#include "AUResources.r"
|
afcc49538fd7ccdbdb075b21bf5896a470002b6b
|
d0f5623feadaad07540301d0fe2c64440ec02e39
|
/tenxutils/man/g_legend.Rd
|
0631c25daeb0883efcffc8295dfa187e712939d9
|
[
"MIT"
] |
permissive
|
sansomlab/tenx
|
81d386f4f593af88565cb7103c4f9c8af57b074a
|
1bfd53aaa3b86df1e35912e1a4749dcb76c4912d
|
refs/heads/master
| 2023-07-25T22:31:32.999625
| 2023-07-12T11:11:17
| 2023-07-12T11:11:17
| 136,856,953
| 54
| 18
|
MIT
| 2022-03-13T15:05:54
| 2018-06-11T00:53:52
|
R
|
UTF-8
|
R
| false
| true
| 279
|
rd
|
g_legend.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Plot.R
\name{g_legend}
\alias{g_legend}
\title{Extract a legend from a ggplot}
\usage{
g_legend(a.ggplot)
}
\arguments{
\item{a.ggplot}{A ggplot object}
}
\description{
Extract a legend from a ggplot
}
|
610672c82207eb2e58c108f73f4c9255ee047e56
|
f18b8f49aeb0c881aa32329abb83d0813c30608f
|
/plot4.R
|
31d809e977623ea2d7675ccedc393546466b31fd
|
[] |
no_license
|
VimalaNandakumar/ExData_Plotting1
|
1c0b56a292c4f8d43d27a4225c74046f34288177
|
e553c8581aa70beb3f2b51f62d0f1d7053c10444
|
refs/heads/master
| 2022-12-14T10:11:01.812571
| 2020-08-15T10:27:32
| 2020-08-15T10:27:32
| 287,064,377
| 0
| 0
| null | 2020-08-12T16:36:45
| 2020-08-12T16:36:44
| null |
UTF-8
|
R
| false
| false
| 1,740
|
r
|
plot4.R
|
library(data.table)
getwd()
# Set working directory to store the output (machine-specific path).
setwd('E:\\02 Vimala\\Graphs')
# Read the ';'-delimited power-consumption file; "?" marks missing values.
ucidata <- read.table("household_power_consumption.txt", sep=";", header =TRUE, na.strings = "?",colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
# Combine Date and Time into one POSIXct timestamp column.
# (Bug fixes: the original pasted `ucidata$date` -- the column is `Date`,
# and R is case-sensitive -- then re-parsed with the invalid format
# string "%dd/%mm/%YY", which produced only NA values.)
ucidata$DateTime <- as.POSIXct(paste(ucidata$Date, ucidata$Time), format = "%d/%m/%Y %H:%M:%S")
# Keep 1-2 Feb 2007 only.  The Date column is d/m/Y text, so it must be
# parsed with an explicit format before comparison; as.Date("01/02/2007")
# without a format is not a standard unambiguous date and fails.
obs_date <- as.Date(ucidata$Date, format = "%d/%m/%Y")
ucidata <- ucidata[!is.na(obs_date) & obs_date >= as.Date("2007-02-01") & obs_date <= as.Date("2007-02-02"), ]
# head(ucidata)
# Open the PNG device with the required dimensions.
png("plot4.png", width = 480, height = 480)
# 2x2 panel layout.
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(ucidata, {
plot(ucidata$Global_active_power~ucidata$DateTime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(ucidata$Voltage~ucidata$DateTime, type="l",
ylab="Voltage (volt)", xlab="")
# (Bug fix: this panel shows energy sub-metering; the original ylab was
# a copy-paste of the global-active-power label.)
plot(ucidata$Sub_metering_1~ucidata$DateTime, type="l",
ylab="Energy sub metering", xlab="")
lines(ucidata$Sub_metering_2~ucidata$DateTime,col='Red')
lines(ucidata$Sub_metering_3~ucidata$DateTime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# ("Rective" typo fixed in the axis label.)
plot(ucidata$Global_reactive_power~ucidata$DateTime, type="l",
ylab="Global Reactive Power (kilowatts)",xlab="")
})
# Close the device so the file is written.
dev.off()
|
0b90571da0f472d1bb3fb3700c1d2c6cf18aa7a3
|
985125c768ca67f655fddb99318379bc60861e9c
|
/man/recordRoutines.Rd
|
3ce5b77d5b6a095dd9b05a547be64db0612f8127
|
[] |
no_license
|
TWilliamBell/routines
|
0bf6ce5afcfe279a74c43372a57aee2d1dc3ddf2
|
b7113eace54b8f1e36bd74b9a84d943cde0f8b75
|
refs/heads/master
| 2020-07-13T06:23:02.896238
| 2019-09-13T15:55:39
| 2019-09-13T15:55:39
| 205,016,135
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 431
|
rd
|
recordRoutines.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recordRoutines.R
\name{recordRoutines}
\alias{recordRoutines}
\title{Record Completed Routines}
\usage{
recordRoutines(routinesCompleted, completed = T, directory = getwd())
}
\description{
If you've completed an activity (or an activity was left incomplete) and you'd like to record that you've finished it (or left it unfinished), then log it here.
}
|
15fd27ab0943ea451ee38ce443b4206de07506d7
|
9a59934f0c7350f8dfdff104e05092a7ae5423dc
|
/R/calcmrnafracgeneral.R
|
95c62c83fcf5f455a920a887472a6781b76fb0a5
|
[] |
no_license
|
usnistgov/mixturesolutions
|
f6866311804dfdefc536c10d72b285d333aef5f3
|
68af7bc28f3c4dd59d25c61db96b6932d115aa04
|
refs/heads/master
| 2021-01-17T21:53:59.819251
| 2016-07-29T17:00:43
| 2016-07-29T17:00:43
| 52,917,700
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,407
|
r
|
calcmrnafracgeneral.R
|
# Estimate the mRNA fraction of each sample from spike-in counts.
#
# dat                matrix-like data.frame with numeric count columns
#                    (one per sample) and optional non-numeric
#                    annotation columns; spike-in rows are identified
#                    by rowname prefix or by pattern in the first
#                    annotation column.
# spikeID            prefix/pattern that uniquely identifies spike-ins.
# spikemassfraction  targeted spike mass fraction; scalar, or a vector
#                    with one entry per count column.
#
# Returns a named numeric vector (one entry per count column):
#   spikemassfraction * (non-spike counts) / (spike counts).
# Values are not normalised to sum to one.
#
# (Bug fix: the original computed `!(1:n) %in% ercc` where `ercc` held
# character rownames, so the integer/character comparison was always
# FALSE and spike rows were included in the "non-spike" sum.
# Generalisation: the prefix test used a hard-coded substring width of
# 5 instead of nchar(spikeID); drop = FALSE now keeps single-count-
# column inputs working.)
calcmrnafracgeneral <-
function(dat, spikeID = "ERCC-", spikemassfraction = .1) {
  # Identify which columns are counts (numeric) vs. annotation.
  col_classes <- vapply(dat, function(col) class(col)[1], character(1))
  countcolumns <- which(unname(col_classes == "numeric"))
  annotcolumn <- which(unname(col_classes != "numeric"))
  # Locate spike-in rows: first by rowname prefix...
  spike_idx <- which(substr(rownames(dat), 1, nchar(spikeID)) == spikeID)
  # ...otherwise by pattern match in the FIRST annotation column.
  if (length(spike_idx) == 0 && length(annotcolumn) > 0) {
    spike_idx <- grep(spikeID, dat[, annotcolumn[1]])
  }
  if (length(spike_idx) == 0) {
    stop("I can't identify the spike-ins within your count table. The spikeID variable should be set to something which uniquely identifies spike-ins. Rownames are first checked for names, then if there are non-numeric columns, only the FIRST is checked for gene names. ")
  }
  # Everything that is not a spike-in is endogenous mRNA.
  nonspike_idx <- setdiff(seq_len(nrow(dat)), spike_idx)
  # Per-sample totals for spike and non-spike rows.
  spike_counts <- colSums(dat[spike_idx, countcolumns, drop = FALSE])
  sample_counts <- colSums(dat[nonspike_idx, countcolumns, drop = FALSE])
  # mRNA fraction from the targeted spike mass fraction.
  mRNA.frac <- spikemassfraction * sample_counts / spike_counts
  return(mRNA.frac)
}
|
55171fb1798208996665d6f11eb34a8b30335bdd
|
576f09b3d1564ed04df333e009675fc6b193b58c
|
/Plot4.R
|
ce9498357a35207c918548711edb874dadc652ce
|
[] |
no_license
|
sgausden/ExData_Plotting1
|
3a3e09f0e0725167e28262871a593699517100fe
|
11d82c4f7712b832d3b0487ffec712d2935130af
|
refs/heads/master
| 2021-01-13T06:26:57.341108
| 2015-10-11T17:56:13
| 2015-10-11T17:56:13
| 43,899,289
| 0
| 0
| null | 2015-10-11T17:56:13
| 2015-10-08T15:38:18
| null |
UTF-8
|
R
| false
| false
| 1,642
|
r
|
Plot4.R
|
# Load packages -------------------------------------------------------
# Only install lubridate when it is actually missing (the original
# unconditionally re-installed it on every run).
if (!requireNamespace("lubridate", quietly = TRUE)) {
  install.packages("lubridate")
}
library("lubridate")
# Working directory / data download -----------------------------------
setwd("./R/Coursera Data Science/Exploratory Data Analysis")
destfile<-file.path(getwd(),"PowerData.zip")
sourcedata<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(sourcedata,destfile)
unzip("PowerData.zip")
PowerData<-read.table("household_power_consumption.txt", TRUE,";")
# Coerce column classes -----------------------------------------------
PowerData$Date<-dmy(PowerData$Date)
PowerData$Time<-hms(PowerData$Time)
# Measurement columns 3:9 arrive as text; convert via character to
# numeric.  (Bug fix: the original read "xfor (i in 3:9)", a typo that
# made the whole script unparseable.)
for (i in 3:9)
{
PowerData[,i]<-as.numeric(as.character(PowerData[,i]))
}
# Subset to 1-2 Feb 2007 ----------------------------------------------
startdate<-ymd("2007-02-01")
enddate<-ymd("2007-02-02")
PD.sub<-PowerData[PowerData$Date<=enddate,]
PD.sub<-PD.sub[PD.sub$Date>=startdate,]
# Combine date and time into a single timestamp -----------------------
PD.sub$date.time<-PD.sub$Date+PD.sub$Time
# Plot 4: 2x2 panel ----------------------------------------------------
png("Plot4.png")
par(mfrow=c(2,2))
plot(y=PD.sub$Global_active_power,x=PD.sub$date.time,
ylab="Global Active Power (kilowatts)",xlab='',type="l")
# (Bug fix: a stray `lines(PD.sub$Global_active_power, PD.sub$date.time)`
# call re-drew this series with x and y swapped; plot() above already
# draws the line, so the call was removed.)
plot(y= PD.sub$Voltage,x=PD.sub$date.time,
ylab="Voltage",xlab='datetime',type="l")
plot(y=PD.sub$Sub_metering_1, x=PD.sub$date.time,
ylab="Energy sub metering",xlab='',type="l")
lines(x=PD.sub$date.time,y=PD.sub$Sub_metering_2,col="red")
lines(x=PD.sub$date.time,y=PD.sub$Sub_metering_3,col="blue")
# (Bug fixes: `bty = 1` is not a valid legend box type -- legend()
# expects "o" or "n"; a stray interactive `?legend` help call was also
# removed.)
legend("topright", c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"),bty="n",col=c("black", "red", "blue"), lwd=2)
plot(y= PD.sub$Global_reactive_power,x=PD.sub$date.time,
ylab="Global_reactive_power",xlab='datetime',type="l")
dev.off()
|
ed1a79811a39a3c504e83ab5fd7e820977f1696c
|
c87a2a48db316a31d69794efba9b96a2ddee31aa
|
/tximport_deseq2_script_multi.R
|
e131e9aab889d007fda225a824d1587ca53822c7
|
[] |
no_license
|
rdoresz/MSc-thesis-project
|
be48d4a75a655d47f391227a2873b2e2ebcb7c84
|
efe8081c4dd650eec8b6bcf27a6bac2eeba94ff6
|
refs/heads/master
| 2022-12-06T03:13:49.794617
| 2020-08-31T00:55:42
| 2020-08-31T00:55:42
| 282,692,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,505
|
r
|
tximport_deseq2_script_multi.R
|
# This script was used to import quantification files into the R environment via tximport and
# to do differential gene expression analysis (DEG) on them via deseq2.
# This script was used for analysing all 12 samples.
# Author(s): Dorottya Ralbovszki
# Created:2020.01.20.
rm(list = ls()) #Clear workspace (interactive-use convenience; kept as-is)
# importing sample info of samples
sampleinfo <- read.delim("metadataall.txt")
View(sampleinfo)
sampleinfo
# importing quantification files (one quant.sf per sample directory)
dir <- list.files("salmon_data/all/")
quant_files <- list.files("salmon_data/all/", pattern = "quant.sf", recursive = TRUE, full.names = TRUE)
names(quant_files) <- dir
quant_files
# checking the imported files
library(readr)
quants <- read_tsv(quant_files[1])
head(quants)
# creating transcript database from the gencode mouse genome
library(GenomicFeatures)
txdb <- makeTxDbFromGFF(file="gencode.vM24.annotation.gtf",
organism="Mus musculus")
k <- keys(txdb, keytype = "TXNAME")
tx2gene <- select(txdb, keys = k, columns = "GENEID", keytype = "TXNAME")
head(tx2gene)
# importing and summarizing quantification files into matrix using tximport
library(tximport)
write.csv(tx2gene, file = "tx2gene_multi.csv", row.names = FALSE, quote = FALSE)
txi <- tximport(quant_files, type = "salmon", tx2gene = tx2gene, ignoreTxVersion = FALSE, ignoreAfterBar = TRUE, countsFromAbundance = "lengthScaledTPM")
# (Bug fix: `tx_map` was never defined; the transcript-to-gene table is `tx2gene`.)
table(tx2gene$TXNAME %in% quants$Name)
names(txi)
head(txi$counts)
# DEG analysis using deseq2
library(DESeq2)
# (Bug fix: the design formula is now passed as the named `design`
# argument; the original `design <- ~ ...` only worked by positional accident.)
dds1 <- DESeqDataSetFromTximport(txi, colData = sampleinfo, design = ~ Genotype + Strain + Genotype:Strain)
# exporting TPM
# (Bug fix: `txi$abudance` was a typo; the tximport list element is `abundance`.)
tpm <- txi$abundance
write.csv(tpm, file = "tmp_values_multi.csv", quote = FALSE)
# checking in how many samples genes are expressed
# (Bug fix: the script never defines `dds`; before DESeq() runs the
# dataset object is `dds1`, afterwards the fitted object is `dds2`.)
is_expressed <- assay(dds1) >= 5
head(is_expressed)
sum(is_expressed[1,])
sum(is_expressed[2,])
hist(rowSums(is_expressed),main="Number of samples a gene is expressed in",xlab="Sample Count")
# filtering out genes that had a lower read number than 5 when the read number of all samples (6) was summarized
keep <- rowSums(counts(dds1)) >= 5
dds1 <- dds1[keep,]
# visualising count distributions
boxplot(assay(dds1))
boxplot(log10(assay(dds1)))
# setting the reference levels so comparisons are against wildtype / SWISSOF1
dds1$Genotype <- relevel(dds1$Genotype,"wildtype")
dds1$Strain <- relevel(dds1$Strain, "SWISSOF1")
# DEG with the new comparison settings
dds2 <- DESeq(dds1)
# extracting result table from DEG analysis
res_multi <- results(dds2, contrast=list(c("Genotype_mutant_vs_wildtype",
"Genotypemutant.StrainC57")))
# checking if comparison condition was right
resultsNames(dds2)
# plot counts of smallest p value (requires the fitted object `dds2`)
plotCounts(dds2, gene=which.min(res_multi$padj), intgroup=c("Strain", "Genotype"))
# remove string after period to get actual ENSEMBL ID
tmp <- gsub("\\..*","",row.names(res_multi))
row.names(res_multi) <- tmp
head(row.names(res_multi))
# order the results by p values
res_multiOrdered <- res_multi[order(res_multi$pvalue),]
# save separately the down-, and upregulated genes
resup_multi <- subset(res_multi, log2FoldChange>0)
resdown_multi <- subset(res_multi, log2FoldChange<0)
# getting the number of significant genes
sum(res_multi$padj < 0.05, na.rm=TRUE)
# summary of analysis
summary(res_multi)
res05 <- results(dds2, alpha=0.05)
summary(res05)
# filtering out too high log2fold changes which means that a gene was only expressed in 1 sample/strain
keep_logfold_p <- res_multiOrdered$log2FoldChange <= 10
res_multiOrdered <- res_multiOrdered[keep_logfold_p,]
keep_logfold_n <- res_multiOrdered$log2FoldChange >= -10
res_multiOrdered <- res_multiOrdered[keep_logfold_n,]
# checking filtered result table
res_multiOrdered
# export results into a CSV file
write.csv( as.data.frame(res_multiOrdered), file="res_multi0508.csv" )
# getting the number of significant genes after filtering
sum(res_multiOrdered$padj < 0.05, na.rm=TRUE)
# annotating result table with gene symbol, entrez ID and gene name
library("AnnotationDbi")
library("org.Mm.eg.db")
annots_symbol <- select(org.Mm.eg.db, keys = rownames(res_multiOrdered), column = "SYMBOL", keytype = "ENSEMBL")
annots_entrez <- select(org.Mm.eg.db, keys = rownames(res_multiOrdered), column = "ENTREZID", keytype = "ENSEMBL")
# (Bug fix: `res_multirdered` was a typo for `res_multiOrdered`.)
annots_name <- select(org.Mm.eg.db, keys = rownames(res_multiOrdered), column = "GENENAME", keytype = "ENSEMBL")
# exporting annotated results into a csv file
write.csv( as.data.frame(annots_name), file="annots_name_c57.csv" )
write.csv( as.data.frame(annots_entrez), file="annots_entrez_c57.csv" )
write.csv( as.data.frame(annots_symbol), file="annots_symbol_multi0507.csv" )
# log fold change shrinkage for visualization and ranking
# (Bug fix: `dds` was undefined; use the fitted object `dds2`.)
resultsNames(dds2)
resLFC <- lfcShrink(dds2, coef="Strain_C57_vs_SWISSOF1", type="apeglm")
resLFC
# MA-plot showing the log2 fold changes attributable to a given variable over the mean of normalized counts for all the samples
# coloured points showing p-value less than 0.1
# comparing the raw and the log fold change shrinkage
plotMA(res_multi, ylim=c(-2,2))
plotMA(resLFC, ylim=c(-2,2))
# variance stabilizing transformations (VST)
vsd <- vst(dds2, blind=FALSE)
# plot PCA of the transformed data
plotPCA(vsd, intgroup=c("Strain", "Genotype"))
# creating significant genes heatmap
# only worked if sample metadata was created this way
sampleTable <- data.frame(Genotype = factor(rep(c("WT", "MUT", "WT", "MUT"),
each = 3)), Strain = factor(rep(c("SWISSOF1", "C57"),
each = 6)))
# testing heatmap plotting
rownames(sampleTable) <- colnames(txi$counts)
sampleTable
# extracting test genes
expmatrix_DESeq <- DESeq2::rlog(dds2, fitType="local")
expmatrix <- SummarizedExperiment::assay(expmatrix_DESeq)
# (Fix: renamed the local `select` -- it shadowed AnnotationDbi::select
# used above, which breaks re-runs of the annotation block.)
top_genes <- order(rowMeans(expmatrix),decreasing=TRUE)[1:30]
library(gplots)
heatmap(expmatrix[top_genes,])
genes_interest <- expmatrix[c("ENSMUSG00000069917.7",
"ENSMUSG00000073940.3",
"ENSMUSG00000069919.7",
"ENSMUSG00000052305.6",
"ENSMUSG00000038600.12",
"ENSMUSG00000016427.7",
"ENSMUSG00000040280.10",
"ENSMUSG00000053930.13",
"ENSMUSG00000020950.10",
"ENSMUSG00000038738.15")
,]
rownames(genes_interest)<- c("Hba-a2","Hbb-bt","Hba-a1","Hbb-bs",
"Atp6v0a4","Ndufa1","Ndufa4l2","Shisa6",
"Foxg1","Shank1")
# ordering test genes
genes_interest <- genes_interest[order(rowMeans(genes_interest),
decreasing = TRUE),]
# plotting test heatmap
pheatmap::pheatmap(genes_interest,
cluster_rows=FALSE,
show_rownames=TRUE,
show_colnames = TRUE,
cluster_cols=TRUE,
annotation_col = sampleTable,
clustering_method = "median")
# my heatmap containing significant genes from the 4 comparisons
genes_interest <- expmatrix[c("ENSMUSG00000000058.6",
"ENSMUSG00000000120.6",
"ENSMUSG00000000308.14",
"ENSMUSG00000000384.15",
"ENSMUSG00000000805.18",
"ENSMUSG00000001027.7",
"ENSMUSG00000001240.13",
"ENSMUSG00000001300.16",
"ENSMUSG00000001930.17",
"ENSMUSG00000001946.14",
"ENSMUSG00000002980.14",
"ENSMUSG00000003476.16",
"ENSMUSG00000003929.11",
"ENSMUSG00000004221.16",
"ENSMUSG00000004698.11",
"ENSMUSG00000004885.5",
"ENSMUSG00000004895.9",
"ENSMUSG00000004936.8",
"ENSMUSG00000005299.6",
"ENSMUSG00000005447.12",
"ENSMUSG00000005672.12",
"ENSMUSG00000005892.4",
"ENSMUSG00000005973.6",
"ENSMUSG00000007034.15",
"ENSMUSG00000009734.18",
"ENSMUSG00000012483.4",
"ENSMUSG00000012519.14",
"ENSMUSG00000013523.13",
"ENSMUSG00000016427.7",
"ENSMUSG00000017929.13",
"ENSMUSG00000017978.18",
"ENSMUSG00000018470.8",
"ENSMUSG00000018698.15",
"ENSMUSG00000019828.13",
"ENSMUSG00000019865.9",
"ENSMUSG00000019874.11",
"ENSMUSG00000019890.4",
"ENSMUSG00000020000.7",
"ENSMUSG00000020142.12",
"ENSMUSG00000020173.17",
"ENSMUSG00000020218.11",
"ENSMUSG00000020253.15",
"ENSMUSG00000020524.16",
"ENSMUSG00000020723.3",
"ENSMUSG00000020728.17",
"ENSMUSG00000020836.15",
"ENSMUSG00000020908.14",
"ENSMUSG00000020950.10",
"ENSMUSG00000020953.17",
"ENSMUSG00000021070.6",
"ENSMUSG00000021193.10",
"ENSMUSG00000021319.7",
"ENSMUSG00000021337.8",
"ENSMUSG00000021567.15",
"ENSMUSG00000021587.5",
"ENSMUSG00000021647.7",
"ENSMUSG00000021732.14",
"ENSMUSG00000021867.16",
"ENSMUSG00000021872.8",
"ENSMUSG00000021880.7",
"ENSMUSG00000021969.8",
"ENSMUSG00000022048.8",
"ENSMUSG00000022090.10",
"ENSMUSG00000022103.10",
"ENSMUSG00000022342.6",
"ENSMUSG00000022667.18",
"ENSMUSG00000023439.11",
"ENSMUSG00000023571.4",
"ENSMUSG00000024014.8",
"ENSMUSG00000024044.19",
"ENSMUSG00000024140.10",
"ENSMUSG00000024526.9",
"ENSMUSG00000024565.10",
"ENSMUSG00000024681.11",
"ENSMUSG00000024713.16",
"ENSMUSG00000024734.8",
"ENSMUSG00000024942.17",
"ENSMUSG00000024985.20",
"ENSMUSG00000025013.15",
"ENSMUSG00000025329.3",
"ENSMUSG00000025362.6",
"ENSMUSG00000025789.9",
"ENSMUSG00000025790.14",
"ENSMUSG00000025804.5",
"ENSMUSG00000025870.10",
"ENSMUSG00000026018.12",
"ENSMUSG00000026048.16",
"ENSMUSG00000026098.13",
"ENSMUSG00000026113.17",
"ENSMUSG00000026147.16",
"ENSMUSG00000026156.8",
"ENSMUSG00000026185.8",
"ENSMUSG00000026237.5",
"ENSMUSG00000026413.12",
"ENSMUSG00000026516.8",
"ENSMUSG00000026638.15",
"ENSMUSG00000026688.5",
"ENSMUSG00000026730.12",
"ENSMUSG00000026765.12",
"ENSMUSG00000026787.3",
"ENSMUSG00000026969.3",
"ENSMUSG00000027168.21",
"ENSMUSG00000027217.13",
"ENSMUSG00000027224.14",
"ENSMUSG00000027270.14",
"ENSMUSG00000027274.16",
"ENSMUSG00000027400.11",
"ENSMUSG00000027570.15",
"ENSMUSG00000027577.14",
"ENSMUSG00000027678.17",
"ENSMUSG00000027792.11",
"ENSMUSG00000027859.10",
"ENSMUSG00000027985.14",
"ENSMUSG00000027996.13",
"ENSMUSG00000028003.6",
"ENSMUSG00000028023.16",
"ENSMUSG00000028194.15",
"ENSMUSG00000028234.6",
"ENSMUSG00000028298.10",
"ENSMUSG00000028370.7",
"ENSMUSG00000028487.18",
"ENSMUSG00000028558.14",
"ENSMUSG00000028584.3",
"ENSMUSG00000028602.12",
"ENSMUSG00000028635.7",
"ENSMUSG00000028656.14",
"ENSMUSG00000028757.4",
"ENSMUSG00000028841.14",
"ENSMUSG00000028862.6",
"ENSMUSG00000028883.17",
"ENSMUSG00000028901.13",
"ENSMUSG00000029005.4",
"ENSMUSG00000029193.7",
"ENSMUSG00000029288.11",
"ENSMUSG00000029361.18",
"ENSMUSG00000029428.13",
"ENSMUSG00000029552.19",
"ENSMUSG00000029608.10",
"ENSMUSG00000029754.13",
"ENSMUSG00000029765.12",
"ENSMUSG00000029917.15",
"ENSMUSG00000030123.15",
"ENSMUSG00000030235.17",
"ENSMUSG00000030270.11",
"ENSMUSG00000030279.15",
"ENSMUSG00000030413.7",
"ENSMUSG00000030532.6",
"ENSMUSG00000030551.14",
"ENSMUSG00000030677.8",
"ENSMUSG00000030761.16",
"ENSMUSG00000030792.8",
"ENSMUSG00000031212.3",
"ENSMUSG00000031297.14",
"ENSMUSG00000031391.18",
"ENSMUSG00000031548.7",
"ENSMUSG00000031558.15",
"ENSMUSG00000031738.14",
"ENSMUSG00000031767.13",
"ENSMUSG00000031772.17",
"ENSMUSG00000031997.9",
"ENSMUSG00000032076.20",
"ENSMUSG00000032259.8",
"ENSMUSG00000032271.13",
"ENSMUSG00000032368.14",
"ENSMUSG00000032643.12",
"ENSMUSG00000032679.12",
"ENSMUSG00000032854.12",
"ENSMUSG00000033597.9",
"ENSMUSG00000033808.16",
"ENSMUSG00000033960.6",
"ENSMUSG00000034055.16",
"ENSMUSG00000034243.17",
"ENSMUSG00000034652.12",
"ENSMUSG00000034723.11",
"ENSMUSG00000034758.12",
"ENSMUSG00000034796.14",
"ENSMUSG00000034892.8",
"ENSMUSG00000035277.15",
"ENSMUSG00000035329.7",
"ENSMUSG00000035513.19",
"ENSMUSG00000035726.8",
"ENSMUSG00000035929.11",
"ENSMUSG00000036131.12",
"ENSMUSG00000036526.8",
"ENSMUSG00000036545.9",
"ENSMUSG00000036902.11",
"ENSMUSG00000037025.11",
"ENSMUSG00000037143.17",
"ENSMUSG00000037362.8",
"ENSMUSG00000037400.17",
"ENSMUSG00000037526.7",
"ENSMUSG00000037600.16",
"ENSMUSG00000037771.11",
"ENSMUSG00000037784.14",
"ENSMUSG00000037962.7",
"ENSMUSG00000037990.18",
"ENSMUSG00000038007.14",
"ENSMUSG00000038173.15",
"ENSMUSG00000038257.9",
"ENSMUSG00000038291.16",
"ENSMUSG00000038600.12",
"ENSMUSG00000038738.15",
"ENSMUSG00000039106.6",
"ENSMUSG00000039126.10",
"ENSMUSG00000039231.18",
"ENSMUSG00000039252.11",
"ENSMUSG00000039474.13",
"ENSMUSG00000039488.15",
"ENSMUSG00000039579.15",
"ENSMUSG00000039672.12",
"ENSMUSG00000039706.11",
"ENSMUSG00000039714.9",
"ENSMUSG00000039735.16",
"ENSMUSG00000039977.16",
"ENSMUSG00000040118.15",
"ENSMUSG00000040312.14",
"ENSMUSG00000040543.16",
"ENSMUSG00000040998.18",
"ENSMUSG00000041380.13",
"ENSMUSG00000041449.16",
"ENSMUSG00000041559.7",
"ENSMUSG00000041607.17",
"ENSMUSG00000041736.7",
"ENSMUSG00000041773.8",
"ENSMUSG00000041911.3",
"ENSMUSG00000041959.14",
"ENSMUSG00000041975.17",
"ENSMUSG00000042369.8",
"ENSMUSG00000042379.8",
"ENSMUSG00000042501.12",
"ENSMUSG00000042514.11",
"ENSMUSG00000042589.18",
"ENSMUSG00000042770.8",
"ENSMUSG00000042772.15",
"ENSMUSG00000043091.9",
"ENSMUSG00000043671.14",
"ENSMUSG00000044068.7",
"ENSMUSG00000044566.15",
"ENSMUSG00000044708.5",
"ENSMUSG00000044816.10",
"ENSMUSG00000045573.9",
"ENSMUSG00000046410.10",
"ENSMUSG00000046480.6",
"ENSMUSG00000046500.8",
"ENSMUSG00000046610.15",
"ENSMUSG00000046922.7",
"ENSMUSG00000046999.2",
"ENSMUSG00000047182.6",
"ENSMUSG00000047586.4",
"ENSMUSG00000047746.14",
"ENSMUSG00000047766.15",
"ENSMUSG00000047810.9",
"ENSMUSG00000047904.6",
"ENSMUSG00000048027.9",
"ENSMUSG00000048251.15",
"ENSMUSG00000049336.16",
"ENSMUSG00000049630.6",
"ENSMUSG00000049744.15",
"ENSMUSG00000049928.15",
"ENSMUSG00000050148.9",
"ENSMUSG00000050447.15",
"ENSMUSG00000050505.7",
"ENSMUSG00000050558.13",
"ENSMUSG00000050711.7",
"ENSMUSG00000051246.3",
"ENSMUSG00000051397.5",
"ENSMUSG00000051747.15",
"ENSMUSG00000052305.6",
"ENSMUSG00000052926.16",
"ENSMUSG00000053310.11",
"ENSMUSG00000053930.13",
"ENSMUSG00000054409.5",
"ENSMUSG00000054457.5",
"ENSMUSG00000055202.11",
"ENSMUSG00000055301.8",
"ENSMUSG00000055675.6",
"ENSMUSG00000055775.16",
"ENSMUSG00000056158.14",
"ENSMUSG00000056306.5",
"ENSMUSG00000056418.3",
"ENSMUSG00000056596.8",
"ENSMUSG00000056608.14",
"ENSMUSG00000057123.14",
"ENSMUSG00000057729.12",
"ENSMUSG00000057818.8",
"ENSMUSG00000058174.7",
"ENSMUSG00000058400.13",
"ENSMUSG00000058443.5",
"ENSMUSG00000058897.18",
"ENSMUSG00000059040.5",
"ENSMUSG00000059203.10",
"ENSMUSG00000059325.14",
"ENSMUSG00000059327.9",
"ENSMUSG00000059839.9",
"ENSMUSG00000060063.9",
"ENSMUSG00000060550.16",
"ENSMUSG00000060860.8",
"ENSMUSG00000061414.8",
"ENSMUSG00000061762.12",
"ENSMUSG00000061859.17",
"ENSMUSG00000063171.4",
"ENSMUSG00000063260.2",
"ENSMUSG00000063698.9",
"ENSMUSG00000063887.13",
"ENSMUSG00000064215.13",
"ENSMUSG00000064329.13",
"ENSMUSG00000064330.9",
"ENSMUSG00000066361.3",
"ENSMUSG00000066363.12",
"ENSMUSG00000066438.6",
"ENSMUSG00000066705.7",
"ENSMUSG00000067288.13",
"ENSMUSG00000067870.5",
"ENSMUSG00000068117.10",
"ENSMUSG00000068396.9",
"ENSMUSG00000068523.12",
"ENSMUSG00000068697.7",
"ENSMUSG00000069072.9",
"ENSMUSG00000069132.3",
"ENSMUSG00000069917.7",
"ENSMUSG00000069919.7",
"ENSMUSG00000070056.6",
"ENSMUSG00000070583.1",
"ENSMUSG00000070605.4",
"ENSMUSG00000070880.10",
"ENSMUSG00000071369.11",
"ENSMUSG00000071379.2",
"ENSMUSG00000071470.4",
"ENSMUSG00000071489.1",
"ENSMUSG00000072437.4",
"ENSMUSG00000072812.4",
"ENSMUSG00000073876.3",
"ENSMUSG00000073940.3",
"ENSMUSG00000073982.11",
"ENSMUSG00000074269.10",
"ENSMUSG00000074577.9",
"ENSMUSG00000074695.3",
"ENSMUSG00000074731.3",
"ENSMUSG00000074735.2",
"ENSMUSG00000075296.5",
"ENSMUSG00000075330.4",
"ENSMUSG00000075705.12",
"ENSMUSG00000076498.2",
"ENSMUSG00000078503.9",
"ENSMUSG00000078591.1",
"ENSMUSG00000078735.4",
"ENSMUSG00000078952.9",
"ENSMUSG00000078954.9",
"ENSMUSG00000079017.3",
"ENSMUSG00000079018.10",
"ENSMUSG00000079499.9",
"ENSMUSG00000079588.3",
"ENSMUSG00000079641.3",
"ENSMUSG00000079685.10",
"ENSMUSG00000082361.6",
"ENSMUSG00000086017.1",
"ENSMUSG00000086365.2",
"ENSMUSG00000086600.8",
"ENSMUSG00000087369.1",
"ENSMUSG00000089679.1",
"ENSMUSG00000090223.2",
"ENSMUSG00000091705.8",
"ENSMUSG00000092116.1",
"ENSMUSG00000093483.1",
"ENSMUSG00000094686.1",
"ENSMUSG00000095595.2",
"ENSMUSG00000096449.2",
"ENSMUSG00000096995.2",
"ENSMUSG00000097039.8",
"ENSMUSG00000097431.2",
"ENSMUSG00000097462.7",
"ENSMUSG00000097622.2",
"ENSMUSG00000097785.2",
"ENSMUSG00000099061.1",
"ENSMUSG00000100241.1",
"ENSMUSG00000100627.6",
"ENSMUSG00000101028.1",
"ENSMUSG00000101969.2",
"ENSMUSG00000102422.1",
"ENSMUSG00000102644.5",
"ENSMUSG00000104178.1",
"ENSMUSG00000105960.1",
"ENSMUSG00000105987.4",
"ENSMUSG00000111785.1",
"ENSMUSG00000113771.1",
"ENSMUSG00000115529.1",
"ENSMUSG00000116819.1",
"ENSMUSG00000117655.1")
,]
# Order the selected genes from highest to lowest mean expression so the
# heatmap displays the most abundant genes at the top.
mean_expression <- rowMeans(genes_interest)
genes_interest <- genes_interest[order(mean_expression, decreasing = TRUE), ]

# Clustered heatmap: columns (samples) are clustered and annotated from
# sampleTable; rows (genes) keep the mean-expression ordering above.
pheatmap::pheatmap(
  genes_interest,
  cluster_rows = FALSE,
  show_rownames = FALSE,
  show_colnames = TRUE,
  cluster_cols = TRUE,
  annotation_col = sampleTable
)

# Export the raw count matrix of the DESeq2 object to CSV.
write.csv(as.data.frame(counts(dds2)), file = "dds_multi0507.csv")
|
6ce81d4b9d2732ec45588766b3413ed7ecb747a5
|
24b43cee70de15f2c30369da2afcfc9e5781f7a3
|
/r/1_analysis-bisbing.R
|
66a5cf0ef07cc2c75fc9194d73f83d2dbfe17879
|
[] |
no_license
|
esdarling/sci-twitter
|
40c01082ff7f5222876c1b5e150380af953f2cd5
|
d2498aeadd4ab4524b5d5eda11cb2cd5d2a59863
|
refs/heads/master
| 2020-06-18T02:04:09.666767
| 2018-03-19T15:18:49
| 2018-03-19T15:18:49
| 74,957,101
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,868
|
r
|
1_analysis-bisbing.R
|
## =================
# code for Twitter followers
# created: 28 Nov 2016
# where? Paris!
## =================
library(dplyr)
library(reshape2)
library(stringr)
library(ggplot2)
library(ggrepel)
library(RColorBrewer)
library(vegan)
library(readxl)
#install.packages("readxl")
## =================
# load 110 scientists info
## =================
# NOTE(review): setwd() hard-codes a user-specific path, making the script
# machine-specific; consider a project-relative file.path() instead.
setwd("/Users/emilydarling/Dropbox/1-On the go/Twitter_Followers/data/sent to NUVI")
# Profile table for the 110 randomly sampled scientist Twitter handles.
scis110 <- read.csv("random 110 handles_16April2015.csv",
header = TRUE, strip.white = TRUE, stringsAsFactors = FALSE)
head(scis110)
# Quick interactive sanity checks on follower counts of the sample.
min(scis110$Followers)
filter(scis110, Followers < 150)
## =================
# load long followers data
## =================
setwd("/Users/emilydarling/Dropbox/1-On the go/Twitter_Followers/data")
# One row per follower profile (long format) across the sampled handles.
d <- read_excel("110 profiles_long.xlsx", sheet = 1)
head(d)
# presumably a 5% sample split between two coders -- TODO confirm intent
length(unique(d$Username)) * 0.05 / 2
levels(as.factor(d$handle))
#translate from multiple languages into English -- another time
## =================
# basic string cleaning
## =================
#change all to lower case
head(d)
d$bio <- tolower(d$Bio)
d$full_name <- tolower(d$full_name)
d$Username <- tolower(d$Username)
#remove lists with / and replace with " " (e.g., wife/phd/friend)
d$bio <- gsub("\\/", " ", d$bio)
# forward slash -- escape with a double backslash
sub("\\/", " ", "Peace/Love")
#[1] "Peace Love"
length(unique(d$full_name))
#remove punctuations
# Character class of punctuation to strip from bios before pattern matching.
punct <- '[]\\?!\"\'#$%&(){}+*/:;,._`|~\\[<=>@\\^-]'
d$bio <- gsub(punct, "", d$bio)
# Sort by handle id, followers with the greatest Reach first.
d <- d %>%
arrange(id,desc(Reach))
############
#make foreign category
##############
#identify bios with special characters (suggests another languages)
# ??
#remove special characters
# iconv with sub = "" silently deletes any non-ASCII characters.
d$bio <- iconv(d$bio, "UTF-8", "ASCII", sub = "")
#remove extra whitespaces
d$bio <- str_replace_all(d$bio, pattern = "\\s+", " ")
head(d$bio)
## =================
# test with @redlipblenny
## =================
# NOTE(review): the header says @redlipblenny but the code below filters
# @SarahBisbing -- confirm which handle this test run is meant to use.
head(d)
unique(d$handle)
bisbing <- filter(d, handle == "@SarahBisbing")
bisbing
## =================
# code to assign categories
## =================
# Each section below adds a 0/1 indicator column to `bisbing`. Categories are
# assigned in priority order; `check` (a row sum over already-assigned
# columns) keeps a follower out of more than one category.
## =================
# science faculty
## =================
#lectur*, prof*
pat3 <- "\\blectur+|\\bprof\\b|\\bprofessor+|\\bresearch chair\\b|\\bcrccrc\\b|\\bdean\\b|\\bfaculty\\b"
# Small hand-built vectors exercise each pattern before it is applied.
test3 <- c("lecturer in marine","university professor",
"journal of the","university of X biology phd student",
"project","professora")
grepl(pat3,test3)
names(bisbing)
bisbing$sci.faculty <- ifelse(grepl(pat3,bisbing$bio) , 1, 0)
test <- filter(bisbing, sci.faculty == 1)
test$Username
test$bio
## =================
# science student
## =================
#BS, BSc, MSc, PhD, DPhil, postdoc or posdoc, fellow,
#(student and
#grad*, *ologist or *ology or science)
# student = ("student-ish" word AND a degree/field word) OR an explicit
# postdoc/masters/undergrad term, and not already flagged as faculty.
pat0 <- "(stud+|\\bcandidate\\b)"
# NOTE(review): "\\masters", "\\undergraduate" and "\\grad+" appear to be
# missing the "b" of the "\\b" word boundary, and a leading "*" (as in
# "*ologist") is not a valid quantifier position -- confirm the intended
# patterns before relying on pat1.
pat1 <- "(\\bbsc?\\b|\\bmsc?\\b|\\bphd|\\bdphil\\b|\\bdoctoral\\b|\\masters\\b|\\bgraduate school\\b|\\undergraduate\\b|\\grad+|*ologist|*ology\\b|*ography\\b|*biome|systems)"
pat2 <- "(\\bpost?doc|fellow|\\bpost doc\\b|\\bgradschool\\b|\\bstudying\\b|\\bundergrad|\\bmasters\\b)"
test1 <- c("ms student","doctoral candidate","bsc studying","with a phd",
"postdoc","entomology grad student", "student entomologist",
"science and biology student","undergrad")
(grepl(pat0,test1) & grepl(pat1,test1)) | grepl(pat2,test1)
names(bisbing)
bisbing$sci.student <- ifelse(bisbing$sci.faculty == 0 &
((grepl(pat0,bisbing$bio) & grepl(pat1,bisbing$bio)) |
grepl(pat2,bisbing$bio)), 1, 0)
test <- filter(bisbing, sci.student == 1)
test$bio
## =================
# universities, field stations, museums, zoos, aquariums
## =================
# NOTE(review): "\\botanical gardens" parses as \b + "otanical gardens", so
# it can never match "botanical gardens"; probably meant "\\bbotanical".
pat6 <- "(museum|zoo|aquarium|\\botanical gardens\\b|\\bcurator\\b|\\bcitizen ?science\\b)"
names(bisbing)
# check > 0 means already classed as faculty or student (columns 9:10).
# NOTE(review): these positional column indices (9:10, c(9:10,12), ...) are
# fragile if columns are added or reordered; verify against names(bisbing).
bisbing$check <- rowSums(bisbing[9:10])
hist(bisbing$check)
# Organisations are matched on username and full name as well as the bio.
bisbing$mza <- ifelse(bisbing$check == 0 &
(grepl(pat6,bisbing$Username) |
grepl(pat6,bisbing$full_name)|
grepl(pat6,bisbing$bio)), 1, 0)
test <- filter(bisbing, mza > 0)
test$bio
## =================
# other scientists
## =================
#code to find other individual scientists
# NOTE(review): "\\lab manager" lacks the "b" of "\\b"; "\\l" is treated as a
# literal "l" by R's default regex engine, so this matches mid-word too.
pat4 <- "(\\btechnician\\b|\\bacademic\\b|\\bdr\\b|\\bresearch associate\\b|\\bresearch scientist\\b|\\lab manager\\b|\\bphd\\b|\\bresearcher\\b|*ographer\\b|chemist\\b)"
pat5 <- "*ologist\\b|*icist\\b|*tician\\b|\\bscientist\\b"
test <- c("research scientist", "marine biologist","biology association")
test2 <- c("documentary filmmaker amp digital media strategist i help share stories that matter opinions mine reachingblue","specialist")
grepl(pat4,test2) | grepl(pat5,test2)
#only if not in another science category already
names(bisbing)
bisbing$check <- rowSums(bisbing[c(9:10,12)])
hist(bisbing$check)
# "director" only counts when "research" also appears in the same bio.
bisbing$other.sci <- ifelse(bisbing$check == 0 &
(grepl(pat4,bisbing$bio) | grepl(pat5,bisbing$bio) |
(grepl("director",bisbing$bio) &
grepl("research",bisbing$bio))), 1,0)
test <- filter(bisbing, other.sci == 1)
test$bio
## =================
# educators and outreach -- individuals
## ================
pat10 <- "\\beducator|\\bteach+|classrooms"
test <- c("educator","outreach","teaching the world high school")
grepl(pat10,test)
names(bisbing)
bisbing$check <- rowSums(bisbing[c(9:10,12:13)])
hist(bisbing$check)
bisbing$outreach <- ifelse(bisbing$check == 0 &
grepl(pat10,bisbing$bio), 1, 0)
test <- filter(bisbing, outreach == 1)
test$bio
## =================
# scientific associations
## =================
# association = (research/science word AND organisation word) OR an explicit
# venue word (pat3) OR (meeting/society/chapter AND an "olog" field word).
# NOTE(review): pat1/pat2/pat3 reuse names from earlier sections (already
# consumed there, so behavior is unchanged, but it is easy to misread).
pat1 <- "(\\bresearch\\b|\\bscien+)"
pat2 <- "(\\bassociation\\b|\\bsynthesis|\\binterdisciplinary\\b|\\bnetwork\\b|\\bsociet\\b|\\bdept\\b|\\bdepartment\\b|\\blab+|\\balliance\\b|\\bcentre|\\bcenter|\\balliance\\b|\\binitiative\\b|\\bacademicians\\b|\\brepository\\b)"
#OR
pat3 <- "(observator|\\bsymposi|\\bpeer review\\b|\\bjournal\\b|\\bconference\\b|\\bresearch group\\b|\\bfield station|\\buniversity\\b)"
pat3b <- "(\\bmeeting|\\bsociety|chapter)"
###
#fix -- not "conference call"
###
test <- c("conference call","research center","research centre",
"hakai field station", "university of british columbia",
"canadian society for ecology",
"chapter fishery biologists", "nonprofit journal")
# NOTE(review): the leading "*" in "*olog" is not a valid quantifier
# position; its handling is implementation-defined -- confirm the intent
# (likely just "olog").
(grepl(pat1,test) & grepl(pat2,test)) | grepl(pat3,test) |
(grepl(pat3b,test) &
grepl("*olog",test))
names(bisbing)
bisbing$check <- rowSums(bisbing[c(9:10,12:13)])
hist(bisbing$check)
# & binds tighter than |, so each of the three alternatives is individually
# guarded by check == 0.
bisbing$sci.assoc<- ifelse(bisbing$check == 0 &
(grepl(pat1,bisbing$bio) & grepl(pat2,bisbing$bio)) |
bisbing$check == 0 & grepl(pat3,bisbing$bio) |
bisbing$check == 0 & (grepl(pat3b,bisbing$bio) &
grepl("*olog",bisbing$bio)), 1, 0)
test <- filter(bisbing, sci.assoc == 1)
test$bio
## =================
# media
## =================
#let media include people within other scientists, students and profs
pat11 <- "(\\bwriter\\b|\\bjournalis|\\bblog|\\bpublisher\\b|\\bcorresponden|\\bcomms\\b|\\communicator\\b|scicomm|\\bauthor\\b|\\bproducer|\\bproduction|\\baudio\\b|\\bradio\\b|\\bpodcast+|\\bdocumentar+|\\bfilm+|\\bphotographer\\b|\\breport|\\bshow\\b|movie\\b|\\bcopyeditor\\b|\\bbroadcast|\\btelevision\\b|\\bcommunications\\b|\\bfreelance\\b|\\bvideograph+|\\beditor\\b|\\bfoto+|\\bpublish+)"
test <- c("author", "blogger","journalist","photojournalist podcaster",
"covemovieops", "commissioning editor","products", "filmmakers",
"environmental reporting")
grepl(pat11,test)
names(bisbing)
# Note: media deliberately excludes outreach (col 13) from the guard, per the
# comment above -- scientists who also do media keep both flags possible.
bisbing$check <- rowSums(bisbing[c(9,10,12,14:15)])
hist(bisbing$check)
bisbing$media <- ifelse(bisbing$check == 0 & grepl(pat11, bisbing$bio), 1, 0)
test <- filter(bisbing, media > 0 )
test$bio
## =================
# applied
## =================
# Applied conservation / NGO organisations and advocates, matched in the bio
# (pat8) or in the username itself (pat8b, e.g. wwfcanada, wcsfiji).
pat8 <- "(\\bedf\\b|\\bfund\\b|\\bfoundation\\b|\\bwwf+|\\bwcs\\b|\\bsociety\\b|\\btrust|\\bngo\\b|\\biucn\\b|\\bpew|\\bnonprofit\\b|\\bnon ?profit\\b|\\bgreenpeace\\b|\\bphilanthropy\\b|\\bconservation scientist\\b|\\bconservation biologist|\\badvoca+|\\bstewardship\\b|\\busaid\\b|\\bpolicy officer\\b|\\bcapacity development\\b|\\international development|\\bsanctuar|\\bpaul ?g ?allen\\b|\\bthe ?nature ?conservancy\\b|\\btnc\\b|\\bintergovernmental\\b|\\bwildaid\\b|\\bzsl\\b|\\bnonpartisan\\b|\\bcommunity organi(s|z)ation\\b|\\bactivis|\\bthink ?tank\\b|\\bvisual\\b|\\bblue ?ventures|\\bwildlife ?conservation\\b)"
pat8b <- "(\\bwwf+|\\bwcs+)"
test <- c("un wfp", "pewenvironment","nonprofit","organisation","wwfcanada",
"conservation scientist","conserving nature",
"wcsfiji", "advocacy","paulgallen","community organization",
"blueventures")
grepl(pat8,test) | grepl(pat8b,test)
names(bisbing)
bisbing$check <- rowSums(bisbing[c(9:10,12,14:15)])
hist(bisbing$check)
bisbing$applied <- ifelse(bisbing$check == 0 & grepl(pat8,bisbing$bio) |
bisbing$check == 0 & grepl(pat8b,bisbing$Username), 1, 0)
test <- filter(bisbing, applied == 1)
test$bio
## =================
# politicians, decision makers
## =================
##start here
#check canadian MP acounts, US senators, congress
pat12 <- "\\bpublic servant\\b|\\bgovernment agency\\b"
# US federal agency handles (forest service, fish & wildlife, geological
# survey) are matched in the username.
pat13 <- "(usfs|usfws|usgs)"
test <- c("usfwspacific", "usgs","usfs", "mpenvironment")
grepl(pat12,test) | grepl(pat13,test)
names(bisbing)
bisbing$check <- rowSums(bisbing[c(9:10,12:17)])
hist(bisbing$check)
bisbing$politician <- ifelse(bisbing$check == 0 & (grepl(pat12,bisbing$bio) |
grepl(pat13,bisbing$Username)), 1, 0)
test <- filter(bisbing, politician > 0 )
test$bio
## =================
# unknown
## =================
# Accounts with no bio text at all cannot be classified.
bisbing$unknown <- ifelse(is.na(bisbing$bio), 1,0)
test <- filter(bisbing, unknown == 1)
test$bio
## =================
# general public
## =================
# public = everyone not captured by any category above (check over all
# indicator columns is zero).
names(bisbing)
bisbing$check <- rowSums(bisbing[c(9:10,12:19)])
hist(bisbing$check)
bisbing$public <- ifelse(bisbing$check == 0, 1, 0)
test <- filter(bisbing, public == 1)
test$bio
unique(bisbing$public)
## =================
# last sweep for leftover scientists
## =================
# Reclassify bios still in "public" that contain science words; pat15 adds a
# student qualifier for the sci.student sweep.
pat14 <- "(\\bscien+|*olog|*systems|evolution|\\bgrad+|\\bmsc\\b|\\bphd\\b|academi)"
pat15 <- "(\\bmajor\\b|\\bstudent\\b)"
test <- c("biology major", "scientist", "ubc global systems student",
"marine biology enthusiast","evolutionary","oceanography graduate",
"the national marine sanctuary system is a network of special places preserving and protecting americas ocean and great lakes", "specialist")
grepl(pat14, test)
names(bisbing)
# NOTE(review): a public bio matching pat14 AND pat15 is flagged by BOTH
# sweeps below (sci.student and other.sci) -- confirm that double-flagging
# is intended.
bisbing$sci.student <- ifelse(bisbing$public == 1 &
grepl(pat14,bisbing$bio) & grepl(pat15,bisbing$bio),
1,bisbing$sci.student)
bisbing$other.sci <- ifelse(bisbing$public == 1 &
grepl(pat14,bisbing$bio),
1,bisbing$other.sci)
## =================
# last sweep for leftover conservation?
## =================
# Note: pat15 is reassigned here; the student pattern above is now gone.
pat15 <- "(\\bconservation\\b)"
names(bisbing)
bisbing$applied <- ifelse(bisbing$public == 1 &
grepl(pat15,bisbing$bio),
1,bisbing$applied)
# Recompute "public" after the sweeps so it again means "no category".
names(bisbing)
bisbing$check <- rowSums(bisbing[c(9:10,12:19)])
hist(bisbing$check)
bisbing$public <- ifelse(bisbing$check == 0, 1, 0)
unique(bisbing$public)
#dump extra columns for checking
names(bisbing)
bisbing2 <- bisbing[,c(2:3,8:20)]
# NOTE(review): PROJHOME is not defined in this script -- presumably set by
# the surrounding project environment (e.g. an .Rprofile); verify before use.
write.csv(bisbing2, file.path(PROJHOME, "outputs","output - bisbing test.csv"),
row.names = FALSE)
#could try wordclouds of each of the categories
#check handling and processing of strings in R
|
d8bdae3d80e5c99505c471a21d9bc61d40c6343f
|
d5013aeb19664e104c3fcbc65ab2910194b3cccd
|
/plot3.R
|
99651a5c55101b3e0ddf863aa899ebce9d3a1ba2
|
[] |
no_license
|
ricardorac/ExData_Plotting1
|
e55e6342e2b56cc581547bed40dc5334577bc213
|
55d21d4b8734de995c3080bc7798108c2635cb91
|
refs/heads/master
| 2022-04-22T03:10:12.411303
| 2020-04-20T16:18:30
| 2020-04-20T16:18:30
| 256,484,633
| 0
| 0
| null | 2020-04-17T11:30:14
| 2020-04-17T11:30:13
| null |
UTF-8
|
R
| false
| false
| 763
|
r
|
plot3.R
|
library(dplyr)

# Read the full household power data set; "?" marks missing values.
hpc <- read.csv("household_power_consumption.txt",
                na.strings = "?", stringsAsFactors = FALSE, sep = ";")

# Parse dates and keep only the two days of interest (2007-02-01/02).
hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y")
feb_days <- filter(hpc, between(Date, as.Date("2007-02-01"), as.Date("2007-02-02")))

# Combine date and time into one timestamp for the x-axis.
feb_days$Time <- strptime(paste(feb_days$Date, feb_days$Time, sep = " "),
                          "%Y-%m-%d %H:%M:%S")

# Draw the three sub-metering series to a 480x480 PNG.
png("plot3.png", width = 480, height = 480)
plot(feb_days$Time, feb_days$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(feb_days$Time, feb_days$Sub_metering_2, type = "l", col = "red")
lines(feb_days$Time, feb_days$Sub_metering_3, type = "l", col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1, cex = 0.8)
dev.off()
|
5be3e11dfc6192e1a3c8bce243c30a4757f83466
|
65622a1c57ecd181a6927944135e298183d5f062
|
/src/1_main.R
|
9d2f2f4caa6ebba594cfd042e210a4b85fe4cc64
|
[] |
no_license
|
gabrielodom/IamComparison
|
238334c676e0aef1b94d9ef29f8728c66b6eb118
|
27fb1bfa56670d1f23a77f22b0bfe8f7db36b371
|
refs/heads/master
| 2021-07-08T03:57:53.081174
| 2020-09-08T17:46:57
| 2020-09-08T17:46:57
| 184,143,571
| 0
| 0
| null | 2019-04-29T21:00:45
| 2019-04-29T21:00:44
| null |
UTF-8
|
R
| false
| false
| 6,716
|
r
|
1_main.R
|
###############################################################################
## Project: IntMethodCompPublication ##
## 1_main.R ##
## ##
###############################################################################
# Start every run from a clean workspace; all.names = TRUE also removes the
# dot-prefixed configuration objects defined below.
rm(list = ls(all.names = TRUE))
# Please set path to the Project folder
# .project.dir = "D:/Development/IntMethodCompPublication"
.project.dir = "C:/Users/gjo15/Documents/GitHub/IamComparison"
# Please enter the name of the list of datasets and
# put the R-Object into .project.dir/data/raw
.dataset.list = "tcga_brca.RData"
#.dataset.list = "tcga_luad.RData"
#.dataset.list = "tcga_kirc.RData"
#.dataset.list = "tcga_coad.RData"
# choose a name for the current run
# all relative paths to project subfolders are set automatically
.current.run = "GeneExp_Met_2DS"
#.current.run = "LUAD_GeneExp_Met"
#.current.run = "KIRC_GeneExp_Met"
#.current.run = "COAD_GeneExp_Met"
# Sets the project sub-directory paths (e.g. .src.dir used below).
source(file.path(.project.dir, "src/2_setDirPath.R"))
# TCGA Assembler directory
# Please download this from https://github.com/compgenome365/TCGA-Assembler-2
.TCGA.assembler.dir = "F:/TCGA-Assembler-2/"
# Please edit the parameter files for synthetic and biological data
# The parameters are used for the current run
# If you execute this code on Windows or Macintosh machines, set this parameter
# to FALSE
# MALA is only run automatically on Linux (see the note above).
if(Sys.info()["sysname"] == "Linux"){
.useMALA_logi <- TRUE
} else {
.useMALA_logi <- FALSE
}
#
###### install required packages ############################################
### Bioconductor
#source("https://bioconductor.org/biocLite.R")
#biocLite()
#biocLite("impute") # biological data, sCCA
#biocLite("org.Hs.eg.db") # comparison
#biocLite("GOstats") # comparison
#biocLite("graphite") # comparison
#biocLite("genefu") # comparison
#biocLite("SPIA")
### CRAN
#install.packages("httr") # TCGA Assembler
#install.packages("HGNChelper") # TCGA Assembler
#install.packages("RCurl") # TCGA Assembler
#install.packages("rjson") # TCGA Assembler
#install.packages("stringr") # TCGA Assembler
#install.pakcages("data.table") # TCGA Assembler
#install.packages("gplots") # synthetic data
#install.packages("PMA") # sCCA
#install.packages("abind") # MALA
#install.packages("pROC") # drawROC
#install.packages("VennDiagram") # comparison
#install.packages("xtable") # comparison
#install.packages("gridExtra") # comparison
#install.packages("scales") # comparison
#install.packages("reshape2") # comparison
#install.packages("ggplot2") # comparison
#install.packages("Cairo") # Comparison
###### run comparison on biological datasets ################################
# set parameters as specified in the parameter file
source(file.path(.src.dir, "3a_biologParameter.R"))
# starts TCGA data download using TCGA Assembler tool
source(file.path(.src.dir, "3b_downloadTCGAData.R"))
# starts TCGA data preprocessing
source(file.path(.src.dir, "3c_preprocTCGAData.R"))
# biological data exploration and transformation + sample reduction
source(file.path(.src.dir, "3d_biologicalData.R"))
# do sCCA (sparse canonical correlation analysis)
source(file.path(.src.dir, "4_sCCA.R"))
# do NMF on biological data (preprocess, factorize, postprocess)
source(file.path(.src.dir, "5a_preprocForNMF.R"))
source(file.path(.src.dir, "5b_NMF.R"))
source(file.path(.src.dir, "5c_postprocOfNMF.R"))
# do pre- and postprocessing for MALA on biological datasets
# it is recommended to run MALA in a computationally more powerful
# linux environment due to the large size of the datasets
source(file.path(.src.dir, "6a_preprocForMALA.R"))
# source(file.path(.src.dir, "6b_MALA_linux.R"))
# # version to apply MALA to biologic data - run MALA only (not in project
# # framework) on linux
source(file.path(.src.dir, "6c_postprocOfMALA.R"))
# Compare result of each method (Venn diagrams, tables, ...)
source(file.path(.src.dir, "7_methodComparison.R"))
###### run comparison on synthetic datasets #################################
# set parameters as specified in the parameter file
source(file.path(.src.dir, "8a_synthetParameter.R"))
# generate synthetic datasets (timed; historical runtimes recorded below)
system.time(
source(file.path(.src.dir, "8b_syntheticDataES.R"))
)
# about 30-ish minutes(?) for 10 repetitions. This script cleans the global
# environment before execution, so "a" was removed. I'm going to delete these
# results.
# 24.03667, 25.44217 min for 10 reps
# The gene expression data is 100 x 1600; the methylation data is 100 x 2400
# 53.41383 min for 100 replicates
# For 4 x 3 design, this takes 67.2555 min for 100 reps
# For 16 design points and 100 replicates: 86.22083 min
# do sCCA
system.time(
source(file.path(.src.dir, "4_sCCA.R"))
)
# 17.8418 hrs for 10 repetitions (9 design points). 36.19844 hrs for 15 reps by
# 12 design points.
# 140.3611 hours for 1:43 reps by 16 design points; 145.0278 hours for 44:86.
# sCCA TIME (one rep, in minutes)
(17.8418 / 90) * 60
# do NMF on synthetic data
system.time(
source(file.path(.src.dir, "5a_preprocForNMF.R"))
)
# 7.067167 min (9 design points). 8.718 min for 15 reps by 12 design points.
# 31.7375, 30.79133 min for 43 reps by 16 design points
system.time(
source(file.path(.src.dir, "5b_NMF.R"))
)
# 15.59804 hrs for 10 repetitions (9 design points). 33.98197 hrs for 15 reps
# by 12 design points
# 123.162 hours for 43 reps by 16 design points
system.time(
source(file.path(.src.dir, "5c_postprocOfNMF.R"))
)
# 3.747 min (9 design points). 15.99167 min for 100 reps X 16 design points
# NMF TIME (one rep, in minutes):
(7.067167 / 9) + (15.59804 * 60 / 90) + (3.747 / 9)
# Run MALA only when enabled in the setup section (Linux only).
useMALA <- .useMALA_logi
if(useMALA){
# do pre- and postprocessing for MALA on synthetic datasets
# MALA is run in a computationally more powerful
# linux environment due to the large size of the datasets
source(file.path(.src.dir, "6a_preprocForMALA.R"))
# 9.077 min
source(file.path(.src.dir, "6b_MALA.R"))
# version to apply MALA to multiple synthetic datasets - run on a linux
# cluster
source(file.path(.src.dir, "6c_postprocOfMALA.R"))
}
# Compare results of each method (Venn diagrams, boxplots, ...)
source(file.path(.src.dir, "9_synthetComparison.R"))
|
a7ccbc550ff56b733348e6f0007fbe89914f28ab
|
63538ef67364d53ae169c7501ae9a95c874eef34
|
/man/SEA-package.Rd
|
0c2e0ff9502dc85f850b6ca47ef132d07a415391
|
[] |
no_license
|
cran/SEA
|
b9e658c8fd61ebcc7d84fb308d57bdbd32df0fcc
|
ef7fc9f003eb6c097ea659d3dbc632183b0b2673
|
refs/heads/master
| 2022-05-03T11:22:27.582739
| 2022-03-30T06:30:12
| 2022-03-30T06:30:12
| 134,713,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,582
|
rd
|
SEA-package.Rd
|
\name{SEA-package}
\alias{SEA-package}
\alias{SEA}
\docType{package}
\title{
Segregation Analysis
}
\description{
A few major genes and a series of polygenes are responsible for each quantitative trait. Major genes are individually identified while the polygenes are collectively detected. This is mixed major-gene plus polygene inheritance analysis, or segregation analysis (SEA). In the SEA, phenotypes from a single or multiple bi-parental segregating populations along with their parents are used to fit all the possible models, and the best model for the population phenotypic distributions is viewed as the model of the trait. There are fourteen types of population combinations available. Zhang Yuan-Ming, Gai Jun-Yi, Yang Yong-Hua (2003, <doi:10.1017/S0016672303006141>), and Wang Jing-Tian, Zhang Ya-Wen, Du Ying-Wen, Ren Wen-Long, Li Hong-Fu, Sun Wen-Xian, Ge Chao, and Zhang Yuan-Ming (2022, <doi:10.3724/SP.J.1006.2022.14088>)
}
\details{
\tabular{ll}{
Package: \tab SEA\cr
Type: \tab Package\cr
Version: \tab 2.0.1\cr
Date: \tab 2022-03-28\cr
Depends: \tab shiny,MASS,doParallel,foreach\cr
Imports: \tab KScorrect,kolmim,utils,stats,grDevices,graphics,data.table\cr
License: \tab GPL(>=2)\cr
LazyLoad: \tab yes\cr
}
Users can use 'SEA()' to start the GUI.
}
\author{
Wang Jing-Tian, Zhang Ya-Wen, and Zhang Yuan-Ming \cr
Maintainer: Yuanming Zhang<soyzhang@mail.hzau.edu.cn>
}
\references{
The EIM algorithm in the joint segregation analysis of quantitative traits.
Zhang Yuan-Ming*, Gai Jun-Yi, Yang Yong-Hua (2003).
}
\examples{
\dontrun{ SEA() }
}
|
c4881b939811d0e2fe84337e8386baaf42b789c8
|
259d9ec20951d12ada2e1200aece8ffafbc9f9d1
|
/man/rates.Rd
|
536668fd95de754071bf7cb3af3c8dd3fd08b58b
|
[] |
no_license
|
rajsingh7/R-fixedincome
|
febf57b0f3ef0dca7955aa05e9518fe27da88c5b
|
9eb216dbd0332050541725bdd3c7e0e2e1399b37
|
refs/heads/master
| 2021-06-20T18:59:28.945694
| 2014-08-31T19:45:09
| 2014-08-31T19:45:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 813
|
rd
|
rates.Rd
|
\name{rates.compounding}
\alias{rates}
\alias{rates.compounding}
\alias{rates.spotrate}
\title{Return the numeric rates}
\usage{
\method{rates}{compounding}(obj, value, term, ...)
\method{rates}{spotrate}(obj, ...)
rates(obj, ...)
}
\arguments{
\item{value}{a numeric value representing a compounding
factor}
\item{term}{a \code{\link{term-class}} instance}
\item{obj}{See Details}
\item{...}{extra arguments}
}
\value{
a numeric value
}
\description{
Return a numeric value which represents spot rates.
}
\details{
If the \code{obj} argument is a
\code{\link{compounding-class}} the function
\code{rates.compounding} computes the implied rate for the
given compounding and term.
If the \code{obj} argument is a \code{spotrate} instance it
returns a \code{numeric} representing the spot rates.
}
|
3426fc710a3c9244e07e462b7e477a82c0e3cfe1
|
21ccd44440e0b618072e771ca0b3c684a816c6c8
|
/man/SquareBurst.Rd
|
17c313bc6db9b53376a933bbad5fd27664c4ddd9
|
[] |
no_license
|
cran/OscillatorGenerator
|
425e1732915f0abda30871a799c6364ccf0d5313
|
5f4b205c505f35d159f9b6d06a557a6c0fb9326f
|
refs/heads/master
| 2020-03-16T02:36:44.881404
| 2018-05-07T12:47:53
| 2018-05-07T12:47:53
| 132,468,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,184
|
rd
|
SquareBurst.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SquareBurst.R
\name{SquareBurst}
\alias{SquareBurst}
\title{Generation of a Square-wave Burst Signal}
\usage{
SquareBurst(baseline, peak, period, duty_cycle, sec_duty_cycle, sec_peak, trend,
duration, resolution)
}
\arguments{
\item{baseline}{minimal oscillation value}
\item{peak}{maximal oscillation value}
\item{period}{oscillation period of the oscillating species (reciprocal of the frequency)}
\item{duty_cycle}{ratio of the active phase (oscillator above baseline) to the total oscillation period}
\item{sec_duty_cycle}{ratio of the primary active phase (time interval from cycle start till reaching of the secondary peak level) to the total active phase}
\item{sec_peak}{intermediary value reached after the end of the primary active phase}
\item{trend}{percental decrease or increase in the peak and secondary peak values for the successive oscillation cycles; if set to 1, values remain unchanged}
\item{duration}{duration of the generated time course}
\item{resolution}{temporal resolution of the generated time course}
}
\value{
Returns a matrix with two columns: a time vector and an oscillator abundance vector.
}
\description{
This function takes in numeric arguments for a customizable, square-wave burst shape. Each oscillation cycle is separated into three phases: a primary active phase, in which the oscillator resides at peak concentration, a secondary active phase, in which the oscillator stays at secondary peak concentration and an inactive phase, in which the oscillator is fixed to baseline concentration. A discretized time course is returned.
}
\details{
Standards:
\itemize{
\item{\code{peak} and \code{sec_peak} must be larger than \code{baseline}}
\item{\code{duration} must be larger than \code{resolution}}
\item{\code{duration} must be a multiple of the \code{resolution}}
\item{\code{period} must be a multiple of \code{resolution}}
\item{\code{duration}, \code{resolution}, \code{peak}, \code{sec_peak} and \code{period} must be larger than 0}
\item{\code{baseline} must be larger or equal to 0}
\item{\code{duty_cycle} must be larger than 0 and smaller or equal to 1}
\item{\code{sec_duty_cycle} must be larger than 0 and smaller or equal to 1}
\item{\code{trend} must be larger than 0}
}
}
\examples{
# test effect of changes in period
m1 = SquareBurst(baseline = 200, peak = 1000, period = 50, duty_cycle = 0.6,
sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
m2 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,
sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
m3 = SquareBurst(baseline = 200, peak = 1000, period = 200, duty_cycle = 0.6,
sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
par(mfrow = c(3,1))
plot(m1, type = "l", xlab = "time", ylab = "abundance")
plot(m2, type = "l", xlab = "time", ylab = "abundance")
plot(m3, type = "l", xlab = "time", ylab = "abundance")
# test effect of changes in duty_cycle
m1 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.3,
sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
m2 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,
sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
m3 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.9,
sec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
par(mfrow = c(3,1))
plot(m1, type = "l", xlab = "time", ylab = "abundance")
plot(m2, type = "l", xlab = "time", ylab = "abundance")
plot(m3, type = "l", xlab = "time", ylab = "abundance")
# test effect of changes in sec_duty_cycle
m1 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,
sec_duty_cycle = 0.3, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
m2 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,
sec_duty_cycle = 0.6, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
m3 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,
sec_duty_cycle = 0.9, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
par(mfrow = c(3,1))
plot(m1, type = "l", xlab = "time", ylab = "abundance")
plot(m2, type = "l", xlab = "time", ylab = "abundance")
plot(m3, type = "l", xlab = "time", ylab = "abundance")
# test effect of changes in trend
m1 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,
sec_duty_cycle = 0.6, sec_peak = 700, trend = 0.7, duration = 500, resolution = 0.1)
m2 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,
sec_duty_cycle = 0.6, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)
m3 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,
sec_duty_cycle = 0.6, sec_peak = 700, trend = 1.3, duration = 500, resolution = 0.1)
par(mfrow = c(3,1))
plot(m1, type = "l", xlab = "time", ylab = "abundance")
plot(m2, type = "l", xlab = "time", ylab = "abundance")
plot(m3, type = "l", xlab = "time", ylab = "abundance")
}
|
a25161733e7893bccf918046541ba8ada12d7818
|
79c0097a6317bfa517472a08008a68ce57152b3d
|
/plot2.R
|
66df054bab6e2cc45ff36ff0e3932c8bd9026c83
|
[] |
no_license
|
pradeeppeddineni/ExData_Plotting1
|
e911c74e8612c70eb3e2e4ec687b02d3a1f435c5
|
aa54c850fb2a0f32048c882dd9823021c05d45bf
|
refs/heads/master
| 2020-06-16T18:32:59.170900
| 2016-11-30T12:27:09
| 2016-11-30T12:27:09
| 75,076,729
| 0
| 0
| null | 2016-11-29T11:54:01
| 2016-11-29T11:54:00
| null |
UTF-8
|
R
| false
| false
| 684
|
r
|
plot2.R
|
## plot2.R -- Global active power over time for 2007-02-01/02.
## Reads the UCI household power consumption data set, restricts it to the
## two target days, and draws a line plot of global active power.

## Read the full data set: semicolon-separated, "?" encodes missing values.
## (TRUE/FALSE spelled out instead of T/F, which are reassignable.)
d_f <- read.csv("household_power_consumption.txt", header = TRUE, sep = ';',
                na.strings = "?", nrows = 2075259, check.names = FALSE,
                stringsAsFactors = FALSE, comment.char = "", quote = '\"')

## Convert the date column (stored as day/month/year) to Date.
d_f$Date <- as.Date(d_f$Date, format = "%d/%m/%Y")

## Keep only the two days of interest, then free the full table.
d <- subset(d_f, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(d_f)

## Combine date and time into a single POSIXct timestamp for the x-axis.
datetime <- paste(as.Date(d$Date), d$Time)
d$Datetime <- as.POSIXct(datetime)

## Plot 2: global active power (kilowatts) as a line over time.
plot(d$Global_active_power ~ d$Datetime, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
|
67af01617022a56fb59543e05d2b954e327fa245
|
3eefcbaa7faaff48f1335a3a3e4dc56e114c1ab0
|
/familyCliques_runTADratioDown.R
|
2c42f1cb30db38753a6203bfcf39c475fd47f709
|
[] |
no_license
|
marzuf/v2_Yuanlong_Cancer_HiC_data_TAD_DA
|
9a435c08a9064d127a86d9909042bb4ff59ad82d
|
e33a0683ac7a9afe21cfec06320c82251d3ba0d5
|
refs/heads/master
| 2021-06-16T15:57:30.182879
| 2021-05-18T08:36:44
| 2021-05-18T08:36:44
| 202,159,949
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,895
|
r
|
familyCliques_runTADratioDown.R
|
#!/usr/bin/Rscript

# Deprecated script: abort immediately and point to the corrected version
# before any work (or package loading side effects) happens.
stop("-- use: _runTADmeanCorrRatioDown.R - corrected version\n")

startTime <- Sys.time()

# Rscript familyCliques_runTADratioDown.R
# (typo fixed: was "familyCliquees_...", inconsistent with the actual file name)
script_name <- "familyCliques_runTADratioDown.R"

source("../Cancer_HiC_data_TAD_DA/utils_fct.R")

# Plot device settings.
plotType <- "svg"
myHeight <- 5
myWidth <- 7

# library() errors loudly when a package is missing; require() only warns.
library(doMC)
library(foreach)
registerDoMC(40)
library(reshape2)
library(igraph)

# Input/output locations and clique-filtering thresholds.
runFolder <- "."
pipFolder <- file.path(runFolder, "PIPELINE", "OUTPUT_FOLDER")
familyVar <- "hgnc_family_short"
withDiag <- FALSE
minCmpntSize <- 3   # minimum number of genes in a family clique
minGenes <- 3       # minimum genes retained per clique after filtering
maxSameTAD <- 0.5   # max. fraction of a clique allowed in a single TAD
nMaxSize <- 1
outFolder <- file.path("FAMILYCLIQUES_RUNTADRATIODOWN_V2", nMaxSize)
inFolder <- file.path("WRONG_PREPFAMILYCLIQUES", nMaxSize)

# All Hi-C data sets, excluding randomized/permuted controls.
all_hicds <- list.files("PIPELINE/OUTPUT_FOLDER")
# all_hicds=all_hicds[1]
# all_hicds=all_hicds[2:length(all_hicds)]
all_hicds <- all_hicds[!grepl("RANDOM", all_hicds) & !grepl("PERMUT", all_hicds)]
all_exprds <- sapply(all_hicds, function(x) list.files(file.path(pipFolder, x)))
# hicds = "Barutcu_MCF-10A_40kb"
# all_hicds=all_hicds[1:2]
exprds <- "TCGAbrca_lum_bas"
# Main computation: for every (Hi-C dataset, expression dataset) pair, compute
# the "ratioDown" (fraction of down-regulated genes) per gene-family clique and
# compare it to the observed per-TAD ratioDown.  Set buildData <- FALSE to
# reload a previously saved all_ratioDown.Rdata instead of recomputing.
buildData <- TRUE
if(buildData){
all_ratioDown <- foreach(hicds = all_hicds) %do%{
cat(paste0("... start: ", hicds, "\n"))
exprds_ratioDown <- foreach(exprds = all_exprds[[paste0(hicds)]]) %do% {
cat(paste0("... start: ", hicds," - ", exprds, "\n"))
# retrieve file
# Family-clique assignments prepared upstream; each element carries a
# "fam_cl_dt" data frame (entrezID -> clique).
famMod_file <- file.path(inFolder, hicds, exprds, "all_fams_dt.Rdata")
stopifnot(file.exists(famMod_file))
fam_data <- get(load(famMod_file))
fam_dt <- do.call(rbind, lapply(fam_data, function(x) x[["fam_cl_dt"]]))
fam_dt$entrezID <- as.character(fam_dt$entrezID)
fam_dt$clique <- as.character(fam_dt$clique)
# INPUT DATA
# Gene-to-TAD assignment table; only "_TAD" regions are kept below.
gene2tadDT_file <- file.path(hicds, "genes2tad", "all_genes_positions.txt")
stopifnot(file.exists(gene2tadDT_file))
gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names = c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = F)
gene2tadDT$entrezID <- as.character(gene2tadDT$entrezID)
all_gene2tadDT <- gene2tadDT
gene2tadDT <- gene2tadDT[grepl("_TAD", gene2tadDT$region),]
stopifnot(fam_dt$entrezID %in% gene2tadDT$entrezID)
# Pipeline gene lists and differential-expression results from earlier steps.
pipeline_geneList <- get(load(file.path(pipFolder, hicds, exprds, "0_prepGeneData", "pipeline_geneList.Rdata")))
rna_geneList <- get(load(file.path(pipFolder, hicds, exprds, "0_prepGeneData", "rna_geneList.Rdata")))
de_DT <- get(load(file.path(pipFolder, hicds, exprds, "1_runGeneDE", "DE_topTable.Rdata")))
# stopifnot(names(rna_geneList) %in% de_DT$genes) FALSE
# stopifnot(de_DT$genes %in% rna_geneList ) # FALSE
stopifnot(de_DT$genes %in% names(rna_geneList) )
# Map DE-table gene IDs to entrez IDs via rna_geneList.
de_DT$genes2 <- rna_geneList[de_DT$genes]
stopifnot(de_DT$genes2 %in% rna_geneList)
# stopifnot(de_DT$genes %in% all_gene2tadDT$entrezID) # not TRUE
stopifnot(de_DT$genes2 %in% all_gene2tadDT$entrezID) # here I have genes from TADs in de_DT
stopifnot(!is.na(de_DT$genes2))
# stopifnot(fam_dt$entrezID %in% de_DT$genes2) # not true because de_DT has
# which(! fam_dt$entrezID %in% de_DT$genes2)
stopifnot(sum(fam_dt$entrezID %in% de_DT$genes2) >= sum(fam_dt$entrezID %in% de_DT$genes))
# The following sums are exploratory sanity checks only (results discarded).
sum(fam_dt$entrezID %in% names(pipeline_geneList)) # 2493
sum(fam_dt$entrezID %in% pipeline_geneList) # 2495
sum(fam_dt$entrezID %in% names(rna_geneList)) # 7045
sum(fam_dt$entrezID %in% rna_geneList) # 7059
sum(names(rna_geneList) %in% de_DT$genes)
sum((rna_geneList) %in% de_DT$genes)
# de_DT <- de_DT[de_DT$genes %in% names(rna_geneList),]
# nrow(de_DT)
# rna_geneList <- rna_geneList[names(rna_geneList) %in% de_DT$genes]
#
# stopifnot(de_DT$genes %in% names(rna_geneList) )
# stopifnot(rna_geneList %in% rownames(norm_rnaseqDT)) # ! wrong
# stopifnot(names(rna_geneList) %in% rownames(norm_rnaseqDT))
# reorder
# norm_rnaseqDT <- norm_rnaseqDT[names(rna_geneList),]
stopifnot(fam_dt$entrezID %in% gene2tadDT$entrezID) ### I took only genes from TADs !!!!
# stopifnot(fam_dt$entrezID %in% names(pipeline_geneList)) ### NOT TRUE !!! I took only genes from TADs !!!!
# Per-clique ratioDown, parallelized over cliques.  A clique is discarded
# (returning a short status string instead of a list) when it is too
# concentrated in one TAD or has too few genes with DE data.
all_famCls <- unique(fam_dt$clique)
famCpt = all_famCls[1]
all_ratioDown_famCls <- foreach(famCpt=all_famCls) %dopar% {
cl_genes <- fam_dt$entrezID[as.character(fam_dt$clique) == as.character(famCpt)]
stopifnot(length(cl_genes) >= minCmpntSize)
# ADDED 14.05
stopifnot(cl_genes %in% gene2tadDT$entrezID)
cl_gene2tad_dt <- gene2tadDT[gene2tadDT$entrezID %in% cl_genes &
gene2tadDT$entrezID %in% de_DT$genes2 ,
] # need to subset here for then next if keptTADs !
keptTADs <- cl_gene2tad_dt$region
if(max(table(cl_gene2tad_dt$region)/nrow(cl_gene2tad_dt)) > maxSameTAD) return(paste0("sameTAD>", maxSameTAD))
stopifnot(cl_gene2tad_dt$entrezID %in% de_DT$genes2)
cl_de_DT <- de_DT[de_DT$genes2 %in% cl_gene2tad_dt$entrezID,]
stopifnot(nrow(cl_de_DT) == nrow(cl_gene2tad_dt))
if(nrow(cl_de_DT) < minGenes) return(paste0("<", minGenes, "genes"))
# ratioDown = fraction of the clique's genes with negative log fold change.
cl_ratioDown <- sum(sign(cl_de_DT$logFC) == -1)/nrow(cl_de_DT)
list(
ratioDown=cl_ratioDown,
keptGenes=cl_de_DT$genes2,
keptTADs=keptTADs
)
}
cat(paste0("... end intra-cpt ratioDown\n"))
names(all_ratioDown_famCls) <- all_famCls
stopifnot(length(all_ratioDown_famCls) == length(all_famCls))
# Save per-dataset clique results.
outFile <- file.path(outFolder, hicds, exprds, "all_ratioDown_famCls.Rdata")
dir.create(dirname(outFile), recursive = TRUE)
save(all_ratioDown_famCls, file= outFile)
cat(paste0("... written: ", outFile, "\n"))
# famRatioDown_data <- get(load("FAMILYMODULES_RUNMEANTADCORR/Barutcu_MCF-10A_40kb/TCGAbrca_lum_bas/all_ratioDown_famCls.Rdata"))
# Keep only cliques that produced a full 3-element result (i.e. were not
# discarded above), then compare their ratioDown to the observed TAD values.
famRatioDown_data <- all_ratioDown_famCls
famRatioDown_dataF <- famRatioDown_data[lengths(famRatioDown_data) == 3]
famRatioDown <- unlist(lapply(famRatioDown_dataF, function(x) x[["ratioDown"]]))
obsRatioDown <- get(load(file.path("PIPELINE", "OUTPUT_FOLDER", hicds, exprds, "8cOnlyRatioDownFastSave_runAllDown", "all_obs_ratioDown.Rdata" )))
# Per-dataset density plot: clique ratioDown vs observed TAD ratioDown.
# plot_multiDens() is presumably defined in the sourced utils_fct.R -- verify.
outFile <- file.path(outFolder, hicds, exprds, paste0(hicds, "_", exprds, "_obs_famCl_ratioDown_density.", plotType))
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
plot_multiDens(
list(famCmpnt_ratioDown = famRatioDown,
obsTAD_ratioDown = obsRatioDown),
plotTit = paste0(hicds, " - ", exprds)
)
mtext(side=3, text = paste0("minCmpntSize=", minCmpntSize, "; minGenes=", minGenes, "; maxSameTAD=", maxSameTAD), font=3)
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
# Return value for this (hicds, exprds) pair.
list(famCmpnt_ratioDown = famRatioDown,
obsTAD_ratioDown = obsRatioDown
)
}
names(exprds_ratioDown) <- all_exprds[[paste0(hicds)]]
exprds_ratioDown
}
names(all_ratioDown) <- all_hicds
# Persist the full nested result list (version=2 for older-R compatibility).
outFile <- file.path(outFolder, "all_ratioDown.Rdata")
dir.create(dirname(outFile), recursive = TRUE)
save(all_ratioDown, file= outFile, version=2)
cat(paste0("... written: ", outFile, "\n"))
} else {
# Reload previously computed results instead of rebuilding.
outFile <- file.path(outFolder, "all_ratioDown.Rdata")
all_ratioDown <- get(load(outFile))
}
# Pool the clique and observed-TAD ratioDown values across all datasets and
# draw one combined density plot.
all_fam_ratioDown <- lapply(all_ratioDown, function(sublist) lapply(sublist, function(x) x[["famCmpnt_ratioDown"]]))
all_obs_ratioDown <- lapply(all_ratioDown, function(sublist) lapply(sublist, function(x) x[["obsTAD_ratioDown"]]))
# Number of (hicds, exprds) dataset pairs, for the plot title.
nDS <- length(unlist(all_fam_ratioDown, recursive = FALSE))
outFile <- file.path(outFolder, paste0("allDS_obs_famCl_ratioDown_density.", plotType))
# Open the device named by plotType (e.g. svg()) on outFile.
do.call(plotType, list(outFile, height=myHeight, width=myWidth))
plot_multiDens(
list(famCmpnt_ratioDown = unlist(all_fam_ratioDown),
obsTAD_ratioDown = unlist(all_obs_ratioDown)),
my_xlab = paste0("intra-TAD/component ratioDown"),
plotTit = paste0( "famCliques - ratioDown - all datasets - n =", nDS )
)
# Annotate with the filtering thresholds used during the build.
mtext(side=3, text = paste0("minCmpntSize=", minCmpntSize, "; minGenes=", minGenes, "; maxSameTAD=", maxSameTAD), font=3)
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
cat(paste0("*** DONE: ", script_name, "\n"))
#
#
#
# #!/usr/bin/Rscript
#
# startTime <- Sys.time()
#
# ################ USE THE FOLLOWING FILES FROM PREVIOUS STEPS
# # - script0: pipeline_regionList.Rdata
# # - script0: rna_geneList.Rdata
# # - script0: pipeline_geneList.Rdata
# # - script0: rna_madnorm_rnaseqDT.Rdata
# # - script1: DE_topTable.Rdata
# # - script1: DE_geneList.Rdata
# ################################################################################
#
# ################ OUTPUT
# # - /all_meanLogFC_TAD.Rdata
# ################################################################################
#
# SSHFS <- F
# setDir <- ifelse(SSHFS, "/media/electron", "")
#
# args <- commandArgs(trailingOnly = TRUE)
# stopifnot(length(args) == 1)
# settingF <- args[1]
# stopifnot(file.exists(settingF))
#
# pipScriptDir <- paste0(setDir, "/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2")
#
# script0_name <- "0_prepGeneData"
# script1_name <- "1_runGeneDE"
# script_name <- "3_runMeanTADLogFC"
# stopifnot(file.exists(paste0(pipScriptDir, "/", script_name, ".R")))
# cat(paste0("> START ", script_name, "\n"))
#
# source("main_settings.R")
# source(settingF)
# source(paste0(pipScriptDir, "/", "TAD_DE_utils.R"))
# suppressPackageStartupMessages(library(doMC, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
# suppressPackageStartupMessages(library(foreach, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
# suppressPackageStartupMessages(library(dplyr, warn.conflicts = FALSE, quietly = TRUE, verbose = FALSE))
#
# # create the directories
# curr_outFold <- paste0(pipOutFold, "/", script_name)
# system(paste0("mkdir -p ", curr_outFold))
#
# pipLogFile <- paste0(pipOutFold, "/", format(Sys.time(), "%Y%d%m%H%M%S"),"_", script_name, "_logFile.txt")
# system(paste0("rm -f ", pipLogFile))
#
# registerDoMC(ifelse(SSHFS, 2, nCpu)) # from main_settings.R
#
# # ADDED 16.11.2018 to check using other files
# txt <- paste0("inputDataType\t=\t", inputDataType, "\n")
# printAndLog(txt, pipLogFile)
# txt <- paste0("gene2tadDT_file\t=\t", gene2tadDT_file, "\n")
# printAndLog(txt, pipLogFile)
# txt <- paste0("TADpos_file\t=\t", TADpos_file, "\n")
# printAndLog(txt, pipLogFile)
# txt <- paste0("settingF\t=\t", settingF, "\n")
# printAndLog(txt, pipLogFile)
#
# ################################***********************************************************************************
# ############ LOAD INPUT DATA
# ################################***********************************************************************************
# gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names = c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = F)
# gene2tadDT$entrezID <- as.character(gene2tadDT$entrezID)
#
# DE_topTable <- eval(parse(text = load(paste0(pipOutFold, "/", script1_name, "/DE_topTable.Rdata"))))
# DE_geneList <- eval(parse(text = load(paste0(pipOutFold, "/", script1_name, "/DE_geneList.Rdata"))))
#
# pipeline_geneList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/pipeline_geneList.Rdata"))))
# pipeline_regionList <- eval(parse(text = load(paste0(pipOutFold, "/", script0_name, "/pipeline_regionList.Rdata"))))
#
# if(useTADonly) {
# if(any(grepl("_BOUND", pipeline_regionList))) {
# stop("! data were not prepared for \"useTADonly\" !")
# }
# }
#
# stopifnot(all(DE_topTable$genes %in% names(DE_geneList)))
# stopifnot(!any(duplicated(names(DE_geneList))))
#
# entrezList <- unlist(sapply(DE_topTable$genes, function(x) DE_geneList[x]))
# names(entrezList) <- DE_topTable$genes
# stopifnot(length(entrezList) == length(DE_topTable$genes))
#
# # replace the gene symbol rownames by ensemblID rownames
# logFC_DT <- data.frame(entrezID = entrezList,
# logFC = DE_topTable$logFC, stringsAsFactors = F)
#
# rownames(logFC_DT) <- NULL
# initNrow <- nrow(logFC_DT)
# logFC_DT <- logFC_DT[logFC_DT$entrezID %in% pipeline_geneList,]
# txt <- paste0(toupper(script_name), "> Take only filtered genes: ", nrow(logFC_DT), "/", initNrow, "\n")
# printAndLog(txt, pipLogFile)
#
# ### take only the filtered data according to initial settings
# gene2tadDT <- gene2tadDT[gene2tadDT$entrezID %in% as.character(pipeline_geneList),]
# initLen <- length(unique(gene2tadDT$region))
# gene2tadDT <- gene2tadDT[gene2tadDT$region %in% pipeline_regionList,]
# txt <- paste0(toupper(script_name), "> Take only filtered regions: ", length(unique(gene2tadDT$region)), "/", initLen, "\n")
# printAndLog(txt, pipLogFile)
#
# ################################***********************************************************************************
# ################################********************************************* get observed logFC for all regions
# ################################***********************************************************************************
#
# cat(paste0("... start computing mean logFC by TAD \n"))
#
# head(logFC_DT)
#
# mergedDT <- left_join(logFC_DT, gene2tadDT[,c("entrezID", "region")], by="entrezID")
#
#
# save(mergedDT, file="mergedDT.Rdata")
# save(logFC_DT, file="logFC_DT.Rdata")
# save(gene2tadDT, file="gene2tadDT.Rdata")
#
# stopifnot(nrow(mergedDT) == nrow(na.omit(mergedDT)))
#
# mean_DT <- aggregate(logFC ~ region, data=mergedDT, FUN=mean)
# all_meanLogFC_TAD <- setNames(mean_DT$logFC, mean_DT$region)
# stopifnot(length(all_meanLogFC_TAD) == length(unique(gene2tadDT$region)))
# txt <- paste0(toupper(script_name), "> Number of regions for which mean logFC computed: ", length(all_meanLogFC_TAD), "\n")
# printAndLog(txt, pipLogFile)
#
# if(useTADonly) {
# initLen <- length(all_meanLogFC_TAD)
# all_meanLogFC_TAD <- all_meanLogFC_TAD[grep("_TAD", names(all_meanLogFC_TAD))]
# txt <- paste0(toupper(script_name), "> Take only the TAD regions: ", length(all_meanLogFC_TAD),"/", initLen, "\n")
# printAndLog(txt, pipLogFile)
# }
#
# save(all_meanLogFC_TAD, file= paste0(curr_outFold, "/all_meanLogFC_TAD.Rdata"))
# cat(paste0("... written: ", curr_outFold, "/all_meanLogFC_TAD.Rdata", "\n"))
#
# txt <- paste0(startTime, "\n", Sys.time(), "\n")
# printAndLog(txt, pipLogFile)
#
# cat(paste0("*** DONE: ", script_name, "\n"))
#
|
3ce911566cfba8d693c158c5db10e3849a951f34
|
94fc45cde7d78272fdc86b4645d5811cf3d70b5c
|
/04_01_analysis_main.R
|
701b038ffe757fd8b2dff6173871ec2663e025c3
|
[] |
no_license
|
DavidKretschmer/covid-cohorting-code
|
4227a876e1836f2d5bd004cd17f5bdd6ce2b6bbc
|
b7d6dd9bd2dc0edd6c2670c10ed40be8b8dc0eef
|
refs/heads/master
| 2023-03-13T02:26:09.257023
| 2021-03-01T17:43:52
| 2021-03-01T17:43:52
| 343,501,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,533
|
r
|
04_01_analysis_main.R
|
###############################################################
### Main Analysis for Main Text, Extended Data Figures ###
### and Supplementary Material A: Country-specific results ###
###############################################################

##############################
### Load relevant packages ###
##############################
library(ggh4x)
library(ggpubr)
library(tidyverse)

# NOTE(review): setwd() makes the script location-dependent; kept to stay
# compatible with the existing folder layout.
setwd("transmission_main")

############################
### Load all of the data ###
############################

# Colour mapping shared by all cohorting-strategy figures below.
color_values <- c(
  "Optimized cohorting" = "#d7191c",
  "Network chain cohorting" = "#fdae61",
  "Gender-split cohorting" = "#abd9e9",
  "Random cohorting" = "#2c7bb6",
  "No cohorting" = "#a6611a"
)

### Names of the results folders
names_est <- c(
  "2021-02-28___21-15-09_sim_0.2"
)

# Load every folder's res_all_data.RData (each defines `res_all`) and stack
# them into one tibble.  Collecting into a preallocated list and binding once
# avoids the quadratic grow-by-bind_rows pattern; the loop variable is also
# renamed from `names`, which shadowed base::names().
res_list <- vector("list", length(names_est))
for (i in seq_along(names_est)) {
  load(paste0(names_est[i], "/", "res_all_data.RData"))
  res_list[[i]] <- res_all
}
res_complete <- bind_rows(res_list)

dim(res_complete)
options(width = 200)

########################
### Prepare the data ###
########################
# Where to save the results?
folder <- "results"
dir.create(folder, showWarnings = FALSE)  # silent if it already exists (re-runs)
setwd(folder)
# Prepare the data
# Recode the raw simulation output: derive country from classid ranges,
# human-readable labels for mode/type, numeric + label versions of the
# susceptibility and subclinical-share parameters, and the three named
# transmission scenarios (low/medium/high).
res_analysis <- res_complete %>%
mutate(
share_qua = share_qua - .5 * share_symptomatic,
groups_affected = groups_affected - 1,
country = case_when(
classid > 400000~"SW",
classid > 300000~"NL",
classid > 200000~"GE",
classid > 100000~"EN",
TRUE~NA_character_
),
classid = as.factor(classid),
mode = ifelse(mode == "parallel", "Same-day instruction", "Weekly rota-system"),
susceptibility_num = susceptibility,
susceptibility = paste0("Baseline probability of\ninfection upon contact: ", susceptibility),
share_subclinical_num = (1 - share_symptomatic) %>% round(2),
share_subclinical = paste0("Prop.\nsubclinical:\n", 1 - share_symptomatic),
scenario = case_when(
susceptibility_num == .05 & share_subclinical_num == .2~"Transmission\ndynamics:\nlow",
susceptibility_num == .15 & share_subclinical_num == .5~"Transmission\ndynamics:\nmedium",
susceptibility_num == .25 & share_subclinical_num == .8~"Transmission\ndynamics:\nhigh",
TRUE~NA_character_
) %>% fct_relevel("Transmission\ndynamics:\nlow", "Transmission\ndynamics:\nmedium"),
type = case_when(
type == "chain"~"Network chain cohorting",
type == "random"~"Random cohorting",
type == "gender"~"Gender-split cohorting",
type == "minimal"~"Optimized cohorting",
type == "all"~"No cohorting",
TRUE~type
),
type = fct_relevel(type, "No cohorting", "Random cohorting", "Gender-split cohorting", "Network chain cohorting")
)
# Collect information on largest outbreaks
# Keep only simulation runs above the within-group 95th percentile of the
# infected share (the per-group 99th percentile is also stored for later use).
res_dist <- res_analysis %>%
filter(!is.na(scenario)) %>%
filter(type != "No cohorting") %>%
group_by(mode, type, inf_asymptomatic, susceptibility_num, susceptibility, share_subclinical_num, share_subclinical, pr_out_of_school, scenario) %>%
mutate(
`5% Largest Outbreaks` = quantile(share_inf, .95),
`1% Largest Outbreaks` = quantile(share_inf, .99)
) %>%
filter(share_inf > `5% Largest Outbreaks`)
# Summarize data at classroom level
# Average the three outcome indicators per classroom x condition, then pivot
# to long format (one row per indicator).
res_unclustered_classroom <- res_analysis %>%
group_by(classid, country, mode, type, inf_asymptomatic, susceptibility_num, susceptibility, share_subclinical_num, share_subclinical, pr_out_of_school, scenario) %>%
summarize(
`Proportion infected` = mean(share_inf),
`Excess proportion quarantined` = mean(share_qua),
`Proportion of spread across cohorts` = mean(groups_affected),
) %>%
pivot_longer(
cols = c("Proportion infected", "Excess proportion quarantined", "Proportion of spread across cohorts"),
names_to = "indicator",
values_to = "value"
) %>%
mutate(
indicator = fct_relevel(indicator, "Proportion of spread across cohorts", "Excess proportion quarantined")
)
# Summarize data across classroom and countries
# Pooled means with 95% t-based confidence intervals over classrooms.
res_unclustered <- res_unclustered_classroom %>%
ungroup() %>%
group_by(indicator, mode, type, inf_asymptomatic, susceptibility_num, susceptibility, share_subclinical_num, share_subclinical, pr_out_of_school, scenario) %>%
summarize(
conf.low = t.test(value)$conf.int[1],
conf.high = t.test(value)$conf.int[2],
value = mean(value)
)
# Summarize data across classroom within a given country
# Same as res_unclustered, but additionally stratified by country.
res_unclustered_country <- res_unclustered_classroom %>%
ungroup() %>%
group_by(indicator, country, mode, type, inf_asymptomatic, susceptibility_num, susceptibility, share_subclinical_num, share_subclinical, pr_out_of_school, scenario) %>%
summarize(
conf.low = t.test(value)$conf.int[1],
conf.high = t.test(value)$conf.int[2],
value = mean(value)
)
#######################################
## No cohorting vs. random cohorting ##
#######################################
# Colour mapping for the three no-/random-cohorting conditions shown below.
color_values_no <- c(
"Random cohorting:\nWeekly rota-system" = "#abd9e9",
"Random cohorting:\nSame-day instruction" = "#2c7bb6",
"No cohorting" = "#a6611a"
)
### Results across all classrooms and countries ###
# Figure 3: proportion infected for no cohorting vs. random cohorting
# (same-day and rota-system), for the three named transmission scenarios only.
res_unclustered %>%
filter(!is.na(scenario)) %>%
filter(indicator %in% c("Proportion infected")) %>%
filter(type %in% c("No cohorting", "Random cohorting")) %>%
mutate(
type_helper = case_when(
type == "No cohorting" & mode == "Same-day instruction"~"No cohorting",
type == "Random cohorting" & mode == "Same-day instruction"~"Random cohorting:\nSame-day instruction",
type == "Random cohorting" & mode == "Weekly rota-system"~"Random cohorting:\nWeekly rota-system",
TRUE~NA_character_
)
) %>%
filter(!is.na(type_helper)) %>%
ggplot(aes(x = type_helper, y = value, fill = type_helper, color = type_helper)) +
geom_col() +
geom_errorbar(aes(ymax = conf.high, ymin = conf.low), color = "black", alpha = .7, width = .3, size = .3) +
labs(
x = "Type of intervention",
y = "Proportion infected",
fill = "",
color = "",
caption = "Note: Proportions and 95% confidence intervals. Results across entire parameter space
are in Extended Data Figure 1."
) +
scale_color_manual(values = color_values_no) +
scale_fill_manual(values = color_values_no) +
theme_classic() +
theme(
legend.position = "bottom",
axis.text.x = element_blank(),
axis.ticks.x = element_blank()
) +
facet_nested(~scenario, scales = "free")
ggsave(filename = "Fig-3-Random-Cohorting.jpg", width = 5, height = 4)
### Extended Results across all classrooms and countries ###
# Extended Data Figure 1: same comparison across the full parameter space
# (all susceptibility x subclinical-share combinations, not just scenarios).
res_unclustered %>%
filter(indicator %in% c("Proportion infected")) %>%
filter(type %in% c("No cohorting", "Random cohorting")) %>%
mutate(
type_helper = case_when(
type == "No cohorting" & mode == "Same-day instruction"~"No cohorting",
type == "Random cohorting" & mode == "Same-day instruction"~"Random cohorting:\nSame-day instruction",
type == "Random cohorting" & mode == "Weekly rota-system"~"Random cohorting:\nWeekly rota-system",
TRUE~NA_character_
)
) %>%
filter(!is.na(type_helper)) %>%
ggplot(aes(x = type_helper, y = value, fill = type_helper, color = type_helper)) +
geom_col() +
geom_errorbar(aes(ymax = conf.high, ymin = conf.low), color = "black", alpha = .7, width = .3, size = .3) +
labs(
x = "Type of intervention",
y = "Proportion infected",
fill = "",
color = "",
caption = "Note: Proportions and 95% confidence intervals."
) +
scale_color_manual(values = color_values_no) +
scale_fill_manual(values = color_values_no) +
theme_classic() +
theme(
legend.position = "bottom",
axis.text.x = element_blank(),
axis.ticks.x = element_blank()
) +
facet_nested(~susceptibility + share_subclinical, scales = "free")
ggsave(filename = "Ext-Fig-1-Random-Cohorting-Space.jpg", width = 10, height = 4)
### Results across all classrooms, by country ###
# Supplementary Figure A1: same comparison, stratified by country.
res_unclustered_country %>%
filter(!is.na(scenario)) %>%
filter(indicator %in% c("Proportion infected")) %>%
filter(type %in% c("No cohorting", "Random cohorting")) %>%
mutate(
type_helper = case_when(
type == "No cohorting" & mode == "Same-day instruction"~"No cohorting",
type == "Random cohorting" & mode == "Same-day instruction"~"Random cohorting:\nSame-day instruction",
type == "Random cohorting" & mode == "Weekly rota-system"~"Random cohorting:\nWeekly rota-system",
TRUE~NA_character_
)
) %>%
filter(!is.na(type_helper)) %>%
ggplot(aes(x = type_helper, y = value, fill = type_helper, color = type_helper)) +
geom_col() +
geom_errorbar(aes(ymax = conf.high, ymin = conf.low), color = "black", alpha = .7, width = .3, size = .3) +
labs(
x = "Type of intervention",
y = "Proportion infected",
fill = "",
color = "",
caption = "Proportions and 95% confidence intervals."
) +
scale_color_manual(values = color_values_no) +
scale_fill_manual(values = color_values_no) +
theme_classic() +
theme(
legend.position = "bottom",
axis.text.x = element_blank(),
axis.ticks.x = element_blank()
) +
facet_nested(~scenario + country, scales = "free")
ggsave(filename = "Supp-A-Fig-1-Random-Cohorting-Countries.jpg", width = 8, height = 4)
####################################
## Comparing cohorting strategies ##
####################################
### Results across all classrooms and countries ###
# Get information on minimal share quarantined
add_quarantine <- res_unclustered %>%
filter(
!is.na(scenario),
indicator == "Excess proportion quarantined",
type == "Gender-split cohorting",
) %>%
group_by(mode, scenario, type, indicator) %>%
summarize(
mean_prob = .125,
max_qua = max(value),
label = paste0("+", mean((1 - share_subclinical_num)/2))
)
res_unclustered %>%
ungroup() %>%
filter(!is.na(scenario)) %>%
filter(type %in% c("Random cohorting", "Gender-split cohorting", "Network chain cohorting", "Optimized cohorting")) %>%
ggplot(aes(x = type, y = value, fill = type, color = type)) +
geom_col(width = 1, position = position_dodge(0.5)) +
geom_errorbar(aes(ymax = conf.high, ymin = conf.low), color = "black", alpha = .7, width = .3) +
geom_text(data = add_quarantine, aes(x = type, y = max_qua, label = label), color = "black", size = 2.5, vjust = -1.5, hjust = 0) +
labs(
x = "Type of intervention",
y = "",
fill = "",
color = "",
caption = "Note: Proportions and 95% confidence intervals. Numbers above excess proportion quarantined indicate
proportion to be added to obtain total proportion quarantined (+ 1/2 of Proportion clinical).
Results across entire parameter space are in Extended Data Figure 2."
) +
scale_color_manual(values = color_values) +
scale_fill_manual(values = color_values) +
theme_classic() +
theme(
legend.position = "bottom",
axis.text.x = element_blank(),
axis.ticks.x = element_blank()
) +
facet_nested(indicator~mode + scenario, scales = "free")
ggsave(filename = "Fig-5-Cohorting-Strategies.jpg", width = 7, height = 10)
### Extended Results across all classrooms and countries ###
add_quarantine <- res_unclustered %>%
filter(
indicator == "Excess proportion quarantined",
type == "Gender-split cohorting",
) %>%
group_by(mode, susceptibility, share_subclinical, type, indicator) %>%
summarize(
mean_prob = .125,
max_qua = max(value),
label = paste0("+", mean((1 - share_subclinical_num)/2))
)
res_unclustered %>%
ungroup() %>%
filter(type %in% c("Random cohorting", "Gender-split cohorting", "Network chain cohorting", "Optimized cohorting")) %>%
ggplot(aes(x = type, y = value, fill = type, color = type)) +
geom_col(width = 1, position = position_dodge(0.5)) +
geom_errorbar(aes(ymax = conf.high, ymin = conf.low), color = "black", alpha = .7, width = .3) +
geom_text(data = add_quarantine, aes(x = type, y = max_qua, label = label), color = "black", size = 2.5, vjust = -1.5, hjust = 0) +
labs(
x = "Type of intervention",
y = "",
fill = "",
color = "",
caption = "Note: Proportions and 95% confidence intervals. Numbers above excess proportion quarantined indicate proportion to be added to obtain total proportion quarantined (+ 1/2 of Proportion clinical)."
) +
scale_color_manual(values = color_values) +
scale_fill_manual(values = color_values) +
theme_classic() +
theme(
legend.position = "bottom",
axis.text.x = element_blank(),
axis.ticks.x = element_blank()
) +
facet_nested(indicator~mode + susceptibility + share_subclinical, scales = "free")
ggsave(filename = "Ext-Fig-2-Cohorting-Strategies-Space.jpg", width = 14, height = 10)
### Results across all classrooms, by country ###
# Per-country annotation data (mirrors the pooled `add_quarantine` above):
# one "+x" label and its vertical position for each country/mode/scenario
# facet, anchored on the gender-split cohorting strategy.
add_quarantine <- res_unclustered_country %>%
  filter(!is.na(scenario)) %>%
  filter(indicator == "Excess proportion quarantined") %>%
  filter(type == "Gender-split cohorting") %>%
  group_by(country, mode, scenario, type, indicator) %>%
  summarize(
    mean_prob = 0.125,
    max_qua = max(value),
    label = paste0("+", mean((1 - share_subclinical_num) / 2))
  )
# Supplementary Figure 2: same cohorting-strategy comparison as above, but
# broken down by country (facet columns) in addition to mode/scenario.
res_unclustered_country %>%
ungroup() %>%
filter(!is.na(scenario)) %>%
filter(type %in% c("Random cohorting", "Gender-split cohorting", "Network chain cohorting", "Optimized cohorting")) %>%
ggplot(aes(x = type, y = value, fill = type, color = type)) +
geom_col(width = 1, position = position_dodge(0.5)) +
geom_errorbar(aes(ymax = conf.high, ymin = conf.low), color = "black", alpha = .7, width = .3) +
# Annotation layer: `add_quarantine` here must be the per-country version
# built immediately above this plot.
geom_text(data = add_quarantine, aes(x = type, y = max_qua, label = label), color = "black", size = 2.5, vjust = -1.5, hjust = 0) +
labs(
x = "Type of intervention",
y = "",
fill = "",
color = "",
caption = "Note: Proportions and 95% confidence intervals. Numbers above excess proportion quarantined indicate proportion to be added to obtain total proportion quarantined (+ 1/2 of Proportion clinical)."
) +
scale_color_manual(values = color_values) +
scale_fill_manual(values = color_values) +
theme_classic() +
theme(
legend.position = "bottom",
axis.text.x = element_blank(),
axis.ticks.x = element_blank()
) +
facet_nested(mode + indicator~scenario + country, scales = "free")
ggsave(filename = "Supp-A-Fig-2-Cohorting-Strategies-Countries.jpg", width = 12, height = 16)
#####################################
## Distribution of Large Outbreaks ##
#####################################
# Figure 6: density of outbreak sizes among the largest outbreaks.
# The data is duplicated into two panels: all rows labelled "5% Largest
# Outbreaks", plus the subset exceeding the `1% Largest Outbreaks` threshold
# column labelled "1% Largest Outbreaks". fct_relevel puts the 5% panel first.
res_dist %>%
mutate(quant = "5% Largest\nOutbreaks") %>%
bind_rows(
res_dist %>%
filter(share_inf > `1% Largest Outbreaks`) %>%
mutate(quant = "1% Largest\nOutbreaks")
) %>%
mutate(
quant = fct_relevel(quant, "5% Largest\nOutbreaks")
) %>%
ggplot(aes(x = share_inf, color = type, fill = type)) +
geom_density(alpha = .2) +
facet_nested(quant~mode+scenario) +
labs(
x = "Proportion infected",
y = "Density",
fill = "",
color = ""
) +
# Proportions are bounded, so clamp the x axis to [0, 1].
lims(
x = c(0, 1)
) +
scale_color_manual(values = color_values) +
scale_fill_manual(values = color_values) +
theme_classic() +
theme(
legend.position = "bottom",
axis.text.x = element_text(angle = 45, hjust = 1)
)
ggsave(filename = "Fig-6-Large-Outbreaks.jpg", width = 10, height = 5)
# Dump the full (non-NA-scenario) results table to a text file by redirecting
# console output. NOTE(review): if the print() errors, the sink is never
# closed and subsequent console output is silently swallowed; consider
# capture.output(..., file = ...) or on.exit(sink()) in a wrapper.
sink("All-Results-Summary.txt")
res_unclustered %>%
filter(!is.na(scenario)) %>%
arrange(indicator, mode, scenario, type) %>% print(n = Inf)
sink()
|
a71ed44d074236960ebf940c77c2421f6143317f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/seleniumPipes/examples/remoteDr.Rd.R
|
d7f8f72769d8980e61cef4673bb6c890ada47597
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 416
|
r
|
remoteDr.Rd.R
|
# Auto-extracted example script for seleniumPipes::remoteDr (generated from
# the package's Rd documentation; the ##D lines are the \dontrun example body).
library(seleniumPipes)
### Name: remoteDr
### Title: Create a remote driver
### Aliases: remoteDr
### ** Examples
## Not run:
##D # assume a server is available at the default location.
##D remDr <- remoteDr()
##D remDr %>% go("http://www.google.com") %>%
##D findElement("name", "q") %>%
##D elementSendKeys("R project", key = "enter")
##D # close our browser
##D remDr %>% deleteSession
## End(Not run)
|
67773d85a3c79db04e78885269a0dd3314a64673
|
72cc5f154465b5cac48a934f46f90e5d4eb85927
|
/man/read_10x_data.Rd
|
5c9e2995a10bac7edda17d03ba02dc4df878d038
|
[
"MIT"
] |
permissive
|
asmagen/robustSingleCell
|
3f570ec16b9d04a1ea1ddfc0748f48517dbf48cf
|
f56f0de6307cdd5bab432df896b0e2661b086591
|
refs/heads/master
| 2023-07-19T21:11:40.210850
| 2023-07-16T20:49:55
| 2023-07-16T20:49:55
| 163,871,827
| 16
| 3
|
MIT
| 2020-06-05T16:33:43
| 2019-01-02T17:50:43
|
R
|
UTF-8
|
R
| false
| true
| 379
|
rd
|
read_10x_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_data.R
\name{read_10x_data}
\alias{read_10x_data}
\title{Read 10X Data}
\usage{
read_10x_data(path)
}
\arguments{
\item{path}{Path to directory containing matrix.mtx, genes.tsv, and barcodes.tsv}
}
\value{
a matrix of genes by cells
}
\description{
Load sparse data matrices from 10X genomics.
}
|
55dccdeb03c0c686f7270a71269047112a39a58c
|
497a6fa06fb167f53e531ff546f75cea2ff5ca72
|
/man/loaloa.Rd
|
476846a7860f10a1007b3e1a94f2debfdc4fb6af
|
[] |
no_license
|
cran/geostatsp
|
8ffd90b15240476ec6e12ecc2f3fe629040178d0
|
8a707e53004f5e587df3c7f5813fdd954306781d
|
refs/heads/master
| 2021-10-14T07:08:10.607607
| 2021-10-05T07:10:08
| 2021-10-05T07:10:08
| 17,696,363
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,564
|
rd
|
loaloa.Rd
|
\name{loaloa}
\alias{loaloa}
\alias{elevationLoa}
\alias{eviLoa}
\alias{ltLoa}
\alias{tempLoa}
\docType{data}
\title{
Loaloa prevalence data from 197 village surveys
}
\description{
Location and prevalence data from villages, elevation an vegetation index for the study region.
}
\usage{data("loaloa")}
\format{
\code{loaloa} is a SpatialPolygonsDataFrame of the data, with columns \code{N} being the number
of individuals tested and \code{y} being the number of positives.
\code{elevationLoa} is a raster of elevation data.
\code{eviLoa} is a raster of vegetation index for a specific date.
\code{ltLoa} is a raster of land types, a categorical variable with categories 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, and 15.
\code{tempLoa} is a raster of average temperature in degrees C.
}
\source{
\url{http://www.leg.ufpr.br/doku.php/pessoais:paulojus:mbgbook:datasets} for the loaloa data,
\url{https://lpdaac.usgs.gov/product_search/?collections=Combined+MODIS&collections=Terra+MODIS&collections=Aqua+MODIS&view=list} for EVI and land type and
\url{https://srtm.csi.cgiar.org} for the elevation data.
}
\examples{
data("loaloa")
plot(loaloa, main="loaloa villages")
# elevation
plot(elevationLoa, col=terrain.colors(100), main="elevation")
points(loaloa)
# vegetation index
plot(eviLoa, main="evi")
points(loaloa)
plot(tempLoa, main="temperature")
points(loaloa)
# land type, a categorical variable
plot(ltLoa)
if(requireNamespace("mapmisc")){
mapmisc::legendBreaks("bottomleft",ltLoa, bty='n')
}
points(loaloa)
}
\keyword{datasets}
|
ac48c0cae4bebacbfd7e360f320e4f1b7c608d07
|
a10f9853480343c8fde837f4043e2aca5cd6f50f
|
/SIF Plots Project/SIF Plots for Separate Files.R
|
69c96fccd7ed877ef4bf3b690293a386d3dd22ec
|
[] |
no_license
|
mikaylamurphy/imperial-geophysics
|
9fd01a34f512dbb9d250b67e424772aa25db0db2
|
7556231b1b33d4c5a5a2a46dda1618c5c3bade67
|
refs/heads/master
| 2020-04-06T03:38:47.591478
| 2016-08-18T11:12:00
| 2016-08-18T11:12:00
| 63,054,830
| 2
| 0
| null | 2016-08-18T11:04:07
| 2016-07-11T09:28:20
|
R
|
UTF-8
|
R
| false
| false
| 12,120
|
r
|
SIF Plots for Separate Files.R
|
# Read raw fracture stress-intensity-factor (SIF) output from `filepath`
# (expects surface_areas.txt and fracture_sif_data_raw.txt in that directory),
# derive per-fracture geometry and maximum K values, and write a multi-page
# PDF of diagnostic plots: raw KI/KII/KIII/G per tip, their means/mins/maxs,
# and their differences from step 0 and from the previous step.
#
# NOTE(review): this function has several apparent defects, flagged inline:
# it parses `all_sif_data_raw` but then operates on an object named `data`
# that is never assigned from it, and it references `filename` although the
# only parameter is `filepath`. As written it only runs if `data`/`filename`
# happen to exist in the calling environment -- confirm intent before use.
SIF_plot <- function(filepath){
# NOTE(review): prefer library(); require() merely returns FALSE on failure.
require(scales)
# Converts raw surface area and fracture_sif_data_raw .txt files to dataframes.
all_SA_data <- read.table(paste(filepath,'surface_areas.txt', sep = ""), fill = TRUE, header = FALSE)
all_sif_data_raw <- read.table(paste(filepath, 'fracture_sif_data_raw.txt', sep = ""), fill = TRUE, header = FALSE)
# Removes rows with header names, extraneous starting data, and phi/psi columns from raw SIF data dataframe.
# First pass: drop everything up to and including the first "Step" header row;
# second pass: drop the remaining repeated header rows and columns 11-12.
header_rows <- which(apply(all_sif_data_raw, 1, function(x) any(grepl("Step", x))))
all_sif_data_raw <- all_sif_data_raw[-(1:header_rows[1]) ,]
header_rows <- which(apply(all_sif_data_raw, 1, function(x) any(grepl("Step", x))))
all_sif_data_raw <- all_sif_data_raw[-header_rows, -c(11,12)]
# Set column names for raw sif data and surface area dataframes.
colnames(all_sif_data_raw) <- c('Step', 'FractureName', 'TipNr', 'TipX', 'TipY', 'TipZ', 'KI', 'KII', 'KIII', 'G')
# NOTE(review): assigning c() sets the column names to NULL (removes them),
# despite the comment above suggesting names are being set.
colnames(all_SA_data) <- c()
print(head(all_sif_data_raw,120))
# Creating column with fracture number as integers.
# NOTE(review): `data` is not defined in this function -- the parsed table is
# `all_sif_data_raw`. This (and all uses of `data` below) will fail unless a
# `data` object exists in the enclosing/global environment. Presumably
# `data <- all_sif_data_raw` was intended here -- confirm.
data$'FractureNum' <- as.numeric(gsub("[^0-9]", "", data$'FractureName'))
# Converting factor values in data frame to numeric values.
data[,-2] <- lapply(data[,-2], function(x) as.numeric(as.character(x)))
# Making steps consecutive and starting at zero.
uniqueSteps <- unique(data$Step)
data$'Step' <- sapply(data$'Step', function(x) {match(x, uniqueSteps) - 1})
# Creates identifier for each fracture and tip number.
data$'FractureTipID' <- paste(data$'FractureNum', data$'TipNr')
# Calculating total number of fractures and steps.
num_of_fractures <- length(unique(data$'FractureNum'))
num_of_steps <- length(unique(data$'Step'))
# Calculating max number of tips per fracture at final step.
max_tips_per_fracture <- data[data$Step == (num_of_steps - 1),]
max_tips_per_fracture <- aggregate(max_tips_per_fracture[,3], by = list(FractureNum = max_tips_per_fracture$'FractureNum'), FUN = "max")
colnames(max_tips_per_fracture) <- c('FractureNum', 'MaxTipNr')
# Calculating maximum radius of fracture at final step.
# Tip pairs 0/1 are differenced to get the chord between opposite tips; half
# that chord over sin(angle between adjacent tips) gives the radius.
fractureRadii <- data[data$Step == (num_of_steps - 1) & data$TipNr < 2,]
fractureRadii <- aggregate(fractureRadii[,c(-2, -14)], by = list(FractureNum = fractureRadii$'FractureNum'), FUN = "diff")
fractureRadii$'HalfDistance' <- sqrt(fractureRadii$'TipX'^2 + fractureRadii$'TipY'^2 + fractureRadii$'TipZ'^2) / 2
fractureRadii$'Angle'[fractureRadii$'FractureNum' %in% max_tips_per_fracture$'FractureNum'] <- (2 * pi)/ max_tips_per_fracture$'MaxTipNr'
fractureRadii$'Radius' <- fractureRadii$'HalfDistance' / sin(fractureRadii$'Angle')
# Calculating max K value based on formula from Nejati's thesis.
fractureRadii$'MaxKValue' <- 2 * 10^11 * sqrt(fractureRadii$'Radius' / pi)
# Removing all columns except FractureNum and Max K Value.
fractureRadii <- fractureRadii[ ,c(1,17)]
# Adding max K values to data dataframe as numeric values.
data$'MaxKValue' <- 0
# NOTE(review): the x-1 row lookup assumes fracture numbers start at 2 and are
# consecutive; confirm against the FractureName numbering in the input files.
data$'MaxKValue' <- sapply(data$'FractureNum', function(x) {fractureRadii[x-1,2]})
#data <- merge(fractureRadii, data, by= 'FractureNum', sort = FALSE)
data[,15] <- lapply(data[,15], function(x) as.numeric(as.character(x)))
print(data[720:730,])
# data <- subset(data, abs(data$'KI') < maxKvalue & abs(data$'KII') < maxKvalue & abs(data$'KIII') < maxKvalue & abs(data$'G' < maxKvalue))
# Preparing symbols and their sizes for plot.
# One plotting symbol per fracture, recycled if there are more than 8.
pchvalue <- c(20, 3, 2, 17, 8, 15, 18, 1)
pchvalue <- rep(pchvalue, length.out = num_of_fractures)
# NOTE(review): this dev.off() errors ("cannot shut down device 1") when no
# graphics device is open, e.g. on a fresh session -- confirm it is needed.
dev.off()
# Making points from the first step blue and the last step red.
colours <- rep('black', num_of_steps)
colours[1] <- 'blue'
colours[num_of_steps] <- 'red'
# Saves plots as pdf with title as original file name.
# NOTE(review): `filename` is not defined anywhere in this function; the
# parameter is `filepath`. This line will fail unless `filename` exists in
# the calling environment -- likely `filepath` was intended.
filename_no_ext <- substr(filename, 1, nchar(filename)-4)
pdf_name <- paste(filename_no_ext, '_plots.pdf')
pdf(file = pdf_name, title = pdf_name)
plot_name <- unlist(strsplit(pdf_name, '/'))
plot_name <- plot_name[length(plot_name)]
# Creates 2x2 matrix for four raw data figures drawn below (Type I, II, III, and G values).
par(mfrow=c(2,2), oma=c(0,0,3,0))
# Raw data graphs.
plot(data$'TipNr', data$'KI', main = paste("KI"), xlab = 'Tip Number', ylab= "KI SIF Value", pch= pchvalue[data$'FractureNum'], col = colours[data$Step + 1], cex = (data$'Step' + 4)/(num_of_steps+1))
plot(data$'TipNr', data$'KII', main = paste("KII"), xlab = 'Tip Number', ylab= "KII SIF Value", pch= pchvalue[data$'FractureNum'], col = colours[data$Step + 1], cex = (data$'Step' + 4)/(num_of_steps+1))
plot(data$'TipNr', data$'KIII', main = paste("KIII"), xlab = 'Tip Number', ylab= "KIII SIF Value", pch= pchvalue[data$'FractureNum'], col = colours[data$Step + 1], cex = (data$'Step' + 4)/(num_of_steps+1))
plot(data$'TipNr', data$'G', main = paste("G"), xlab = 'Tip Number', ylab= "G Value", pch= pchvalue[data$'FractureNum'], col = colours[data$Step + 1], cex = (data$'Step' + 4)/(num_of_steps+1))
mtext(plot_name, adj=0.5, side=3, outer=TRUE)
# Calculating data statistics (mean, min, max) per fracture/tip combination.
data_means <- aggregate(data[,c(-3, -14)], by = list(TipNr = data$'TipNr', FractureNum = data$'FractureNum'), FUN = "mean")
data_mins <- aggregate(data[,c(-3, -14)], by = list(TipNr = data$'TipNr', FractureNum = data$'FractureNum'), FUN = "min")
data_maxs <- aggregate(data[,c(-3,-14)], by = list(TipNr = data$'TipNr', FractureNum = data$'FractureNum'), FUN = "max")
# Creates 2x2 matrix for four mean figures drawn below (Type I, II, III, and G values).
par(mfrow=c(2,2))
# Mean graphs.
plot(data_means$'TipNr', data_means$'KI', main = paste("KI Mean"), xlab = 'Tip Number', ylab= "KI Avg SIF Value", pch= pchvalue[data_means$'FractureNum'])
plot(data_means$'TipNr', data_means$'KII', main = paste("KII Mean"), xlab = 'Tip Number', ylab= "KII Avg SIF Value", pch= pchvalue[data_means$'FractureNum'])
plot(data_means$'TipNr', data_means$'KIII', main = paste("KIII Mean"), xlab = 'Tip Number', ylab= "KIII Avg SIF Value", pch= pchvalue[data_means$'FractureNum'])
plot(data_means$'TipNr', data_means$'G', main = paste("G Mean"), xlab = 'Tip Number', ylab= "Avg G Value", pch= pchvalue[data_means$'FractureNum'])
# Creates 2x2 matrix for four min figures drawn below (Type I, II, III, and G values).
par(mfrow=c(2,2))
# Min graphs.
plot(data_mins$'TipNr', data_mins$'KI', main = paste("KI Minimum Value"), xlab = 'Tip Number', ylab= "KI Min SIF Value", pch= pchvalue[data_mins$'FractureNum'])
plot(data_mins$'TipNr', data_mins$'KII', main = paste("KII Minimum Value"), xlab = 'Tip Number', ylab= "KII Min SIF Value", pch= pchvalue[data_mins$'FractureNum'])
plot(data_mins$'TipNr', data_mins$'KIII', main = paste("KIII Minimum Value"), xlab = 'Tip Number', ylab= "KIII Min SIF Value", pch= pchvalue[data_mins$'FractureNum'])
plot(data_mins$'TipNr', data_mins$'G', main = paste("G Minimum Value"), xlab = 'Tip Number', ylab= "Min G Value", pch= pchvalue[data_mins$'FractureNum'])
# Creates 2x2 matrix for four max figures drawn below (Type I, II, III, and G values).
par(mfrow=c(2,2))
# Max graphs.
plot(data_maxs$'TipNr', data_maxs$'KI', main = paste("KI Max Value"), xlab = 'Tip Number', ylab= "KI Max SIF Value", pch= pchvalue[data_maxs$'FractureNum'])
plot(data_maxs$'TipNr', data_maxs$'KII', main = paste("KII Max Value"), xlab = 'Tip Number', ylab= "KII Max SIF Value", pch= pchvalue[data_maxs$'FractureNum'])
plot(data_maxs$'TipNr', data_maxs$'KIII', main = paste("KIII Max Value"), xlab = 'Tip Number', ylab= "KIII Max SIF Value", pch= pchvalue[data_maxs$'FractureNum'])
plot(data_maxs$'TipNr', data_maxs$'G', main = paste("Max G Value"), xlab = 'Tip Number', ylab= "Max G Value", pch= pchvalue[data_maxs$'FractureNum'])
# Calculating difference from first step at which fracture + tip appear (generally step 0).
data_diff_from_step_0 <- within(data, KI <- ave(KI, list(FractureTipID), FUN=function(x) x-x[1]))
data_diff_from_step_0 <- within(data_diff_from_step_0, KII <- ave(KII, list(FractureTipID), FUN=function(x) x-x[1]))
data_diff_from_step_0 <- within(data_diff_from_step_0, KIII <- ave(KIII, list(FractureTipID), FUN=function(x) x-x[1]))
data_diff_from_step_0 <- within(data_diff_from_step_0, G <- ave(G, list(FractureTipID), FUN=function(x) x-x[1]))
# Creates 2x2 matrix for four difference from step 0 figures drawn below (Type I, II, III, and G values).
par(mfrow=c(2,2))
# Difference in KI, KII, KIII, and G values for each step from step 0 at each tip graphs.
plot(data_diff_from_step_0$'TipNr', data_diff_from_step_0$'KI', main = paste("Difference in KI values from Step 0"), xlab = 'Tip Number', ylab= "Delta KI SIF Value", pch= pchvalue[data_diff_from_step_0$'FractureNum'], col = colours[data_diff_from_step_0$Step + 1], cex = (data_diff_from_step_0$'Step' + 4)/(num_of_steps+1))
plot(data_diff_from_step_0$'TipNr', data_diff_from_step_0$'KII', main = paste("Difference in KII values from Step 0"), xlab = 'Tip Number', ylab= "Delta KII SIF Value", pch= pchvalue[data_diff_from_step_0$'FractureNum'], col = colours[data_diff_from_step_0$Step + 1], cex = (data_diff_from_step_0$'Step' + 4)/(num_of_steps+1))
plot(data_diff_from_step_0$'TipNr', data_diff_from_step_0$'KIII', main = paste("Difference in KIII values from Step 0"), xlab = 'Tip Number', ylab= "Delta KIII SIF Value", pch= pchvalue[data_diff_from_step_0$'FractureNum'], col = colours[data_diff_from_step_0$Step + 1], cex = (data_diff_from_step_0$'Step' + 4)/(num_of_steps+1))
plot(data_diff_from_step_0$'TipNr', data_diff_from_step_0$'G', main = paste("Difference in G values from Step 0"), xlab = 'Tip Number', ylab= "Delta G Value", pch= pchvalue[data_diff_from_step_0$'FractureNum'], col = colours[data_diff_from_step_0$Step + 1], cex = (data_diff_from_step_0$'Step' + 4)/(num_of_steps+1))
# Calculating difference from previous step (first step of each tip gets 0).
data_diff_from_prev_step <- within(data, KI <- ave(KI, list(FractureTipID), FUN=function(x) c(0, diff(x))))
data_diff_from_prev_step<- within(data_diff_from_prev_step, KII <- ave(KII, list(FractureTipID), FUN=function(x) c(0, diff(x))))
data_diff_from_prev_step<- within(data_diff_from_prev_step, KIII <- ave(KIII, list(FractureTipID), FUN=function(x) c(0, diff(x))))
data_diff_from_prev_step <- within(data_diff_from_prev_step, G <- ave(G, list(FractureTipID), FUN=function(x) c(0, diff(x))))
# Creates 2x2 matrix for four difference from previous step figures drawn below (Type I, II, III, and G values).
par(mfrow=c(2,2))
# Difference in KI, KII, KIII, and G values for each step from previous step at each tip graphs.
plot(data_diff_from_prev_step$'TipNr', data_diff_from_prev_step$'KI', main = paste("Difference in KI values from \nPrevious Step"), xlab = 'Tip Number', ylab= "Delta KI SIF Value", pch= pchvalue[data_diff_from_prev_step$'FractureNum'], col = colours[data_diff_from_prev_step$Step + 1], cex = (data_diff_from_prev_step$'Step' + 4)/(num_of_steps+1))
plot(data_diff_from_prev_step$'TipNr', data_diff_from_prev_step$'KII', main = paste("Difference in KII values from \nPrevious Step"), xlab = 'Tip Number', ylab= "Delta KII SIF Value", pch= pchvalue[data_diff_from_prev_step$'FractureNum'], col = colours[data_diff_from_prev_step$Step + 1], cex = (data_diff_from_prev_step$'Step' + 4)/(num_of_steps+1))
plot(data_diff_from_prev_step$'TipNr', data_diff_from_prev_step$'KIII', main = paste("Difference in KIII values from \nPrevious Step"), xlab = 'Tip Number', ylab= "Delta KIII SIF Value", pch= pchvalue[data_diff_from_prev_step$'FractureNum'], col = colours[data_diff_from_prev_step$Step + 1], cex = (data_diff_from_prev_step$'Step' + 4)/(num_of_steps+1))
plot(data_diff_from_prev_step$'TipNr', data_diff_from_prev_step$'G', main = paste("Difference in G values from \nPrevious Step"), xlab = 'Tip Number', ylab= "Delta G Value", pch= pchvalue[data_diff_from_prev_step$'FractureNum'], col = colours[data_diff_from_prev_step$Step + 1], cex = (data_diff_from_prev_step$'Step' + 4)/(num_of_steps+1))
# Stops writing to pdf.
dev.off()
}
|
c49f6465c27616c85eb0c45f163bb09aa12de6f9
|
f0c6cb1107da4697db0bbb8786c8adc0211a7a04
|
/r-scripts/plotMethodFreqPolyByMetricsToPngFiles.R
|
24b65e1f8805ab979b8145266e9b1545af069b46
|
[] |
no_license
|
alexil-ferreira/SmellRafactored
|
fb857f1c6aa1f2f2e162f8917baa5de816413fcd
|
9efd2521a91bb4a3230a723729d7a829bf185721
|
refs/heads/master
| 2020-09-07T08:56:18.340667
| 2020-04-12T09:17:34
| 2020-04-12T09:17:34
| 220,729,915
| 0
| 0
| null | 2019-12-29T21:14:28
| 2019-11-10T02:14:20
| null |
UTF-8
|
R
| false
| false
| 389
|
r
|
plotMethodFreqPolyByMetricsToPngFiles.R
|
# Driver script: source shared helpers relative to the active RStudio document,
# set up the working directory, and generate frequency-polygon PNGs per metric.
# NOTE(review): rm(list = ls()) wipes the user's global environment; this is
# generally discouraged in scripts.
rm(list = ls())
# rstudioapi is used to locate this script's own directory; the script
# therefore only works when run inside RStudio.
library(rstudioapi)
source(paste(dirname(getActiveDocumentContext()$path), "/common.R", sep="", collapse=NULL))
source(paste(dirname(getActiveDocumentContext()$path), "/plotMethodFreqPolyByMetricsToPngFile-function.R", sep="", collapse=NULL))
# setupWorkDir()/getWorkDir() come from the sourced common.R.
setupWorkDir()
deepenForDesignRole <- FALSE
# NOTE(review): "Metricsit" in the function name looks like a typo, but it may
# match the definition in the sourced file -- confirm against that file.
plotMethodFreqPolyByMetricsitFromDirToPngFiles(getWorkDir(), deepenForDesignRole)
|
b63c4e0134a029298917d2fff1f578f6eeca67d6
|
0ac26fb6235ef0d7b25ef7b003822f08f1ffe9e7
|
/man/classify.Rd
|
6ffba51e18dbe4bc340993ccb3e8b67e4ae5ecab
|
[] |
no_license
|
rscherrer/nmgc
|
5e9fbc0d6eeb0c6fdad1a38b45dfe3b37e919371
|
55bb75eab4c62b74c598d17ead6f448bd53907f8
|
refs/heads/master
| 2023-02-09T10:04:14.770149
| 2020-12-29T18:18:18
| 2020-12-29T18:18:18
| 261,246,435
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,141
|
rd
|
classify.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classify.R
\name{classify}
\alias{classify}
\title{Classification analysis}
\usage{
classify(
data,
variables,
grouping,
nesting = NULL,
method = "SVM",
k = 5,
nrep = 1,
nperm = 0,
minsize = 5,
seed = NULL,
importance = FALSE,
getmachine = FALSE,
verbose = TRUE,
pb = TRUE,
digest = TRUE,
topcomp = NULL,
pccenter = TRUE,
pcscale = TRUE,
showconf = TRUE,
showbinom = TRUE,
showpval = TRUE,
cnorm = 2,
clims = c(0, 1),
clow = "white",
chigh = "darkgreen",
hbins = 30,
hfill = "seagreen",
halpha = 0.5,
cxlim = c(0.75, 1),
cylim = c(0.3, 0.85),
blty = 1,
prounding = 4,
ptoshow = "prandom",
psignif = 0.05,
px = 1,
py = 0.9,
phjust = 1,
psize = 3
)
}
\arguments{
\item{data}{A data frame}
\item{variables}{The variables used to classify}
\item{grouping}{Name of the grouping variable (the labels)}
\item{nesting}{Optional nesting variable, if the analysis must be conducted separately on different subsets of the data}
\item{method}{The data mining model used. Currently supports "SVM" and "LDA".}
\item{k}{Number of bins for the k-fold cross-validation procedure}
\item{nrep}{Number of replicate analyses (i.e. number of k-fold cross validations)}
\item{nperm}{Number of permutations in the randomization test. Use 0 to not conduct a randomization test.}
\item{minsize}{Minimum size required per group for a training data set}
\item{seed}{Optional random seed to reset at the beginning}
\item{importance}{Whether to perform sensitivity analysis on the input (takes a while)}
\item{getmachine}{Whether to return the machines (takes space)}
\item{verbose}{Whether to display messages}
\item{pb}{Whether to display progress bars}
\item{digest}{Whether to return the results in a summarized format. If FALSE, returns the raw results for each machine.}
\item{topcomp}{Variable to perform PCA on}
\item{pccenter}{Center the PCA}
\item{pcscale}{Scale the PCA}
\item{showconf}{Whether to show confusion matrices as insets on accuracy histograms}
\item{showbinom}{Whether to show a binomial null distribution on accuracy histograms}
\item{showpval}{Whether to show P-values on accuracy histograms}
\item{cnorm}{Integer indicating whether to normalize the confusion matrices on display so as to make rows sum to one (1), or columns (2), or neither (0).}
\item{clims}{Limits of the range of frequencies displayed in the confusion matrices}
\item{clow}{Color associated with the lowest frequency in confusion matrix heatmaps}
\item{chigh}{Color associated with the highest frequency in confusion matrix heatmaps}
\item{hbins}{Number of bins in the histogram of accuracy scores}
\item{hfill}{Color of the histogram of accuracy scores}
\item{halpha}{Transparency of the histogram of accuracy scores}
\item{cxlim}{Vector of two values containing the bounds of the inset confusion matrices along the horizontal axis}
\item{cylim}{Vector of two values containing the bounds of the inset confusion matrices along the vertical axis (in proportion of the height of the plot)}
\item{blty}{Line type for displaying the null binomial distribution}
\item{prounding}{Number of decimal places to round P-values on display}
\item{ptoshow}{What P-value to show on the histogram plots (either of "pbinom" for the binomial test or "prandom" for the randomization test)}
\item{psignif}{Significance level for P-values on display. An asterisk will be added to each significant P-value. Use zero to avoid displaying any asterisk.}
\item{px}{Horizontal location of the P-values}
\item{py}{Vertical location of the P-values (in proportion of the height of the plot)}
\item{phjust}{Horizontal justification of the P-values (e.g. 1 to align them to the right, 0 to the left and 0.5 to center them)}
\item{psize}{Font size of the P-values on display}
}
\value{
If \code{digest} is FALSE, this function returns a nested list of raw classification results on three levels. The first level is for each separate plot, or nesting level, in the nested analysis. The second level is for each replicate analysis within each plot. The third level is for each machine, i.e. each cross-validation bin within each replicate. This third level is itself a list with for each machine, the confusion matrix from the classification (\code{conf}), a vector of importance scores for each variable from the sensitivity analysis (\code{imp}, only if \code{importance} is TRUE) and the trained machine itself (\code{machine}, only if \code{getmachine} is TRUE). These are the raw results for each machine. If \code{digest} is TRUE, however, the function returns a summarized version of the results. The output is then a list with three fields. The first field is a summary table (\code{summary}) of the results with, for each nesting level, the mean accuracy score (\code{accu}), the sample size (\code{n}, the total number of points tested within each replicate), the proportion of the data used for testing (\code{ptest}, which depends on \code{k}), the number of points tested by each machine (\code{ntest}), the P-value from a binomial test assessing the significance of the average accuracy score (\code{pbinom}) and the P-value from an equivalent randomization test (\code{prandom}), where the null distribution is computed by training \code{nperm} replicates on permuted data. There are three additional list-columns with, for each nesting level, the average confusion matrix over all replicates (\code{conf}), a data frame of importance scores (\code{imp}) for each variable (in columns) for each machine (in rows), and a vector of acccuracy scores (\code{accus}) where the \code{nrep} first values are for the replicates and the remaining \code{nperm} were measured on randomized data. 
Note that accuracy scores are measured by summing the confusion matrices of all cross-validation bins into one, yielding one score per replicate.
}
\description{
Perform a replicated classification analysis of a multivariate dataset into categorical labels using machine learning tools and k-fold cross validation
}
|
eee67feddc49a563b67d3a5a050a5ac2c02064dc
|
81c4acf23d5db8910522cdc0caab8e6a7ba5cc31
|
/Random Forest_Final.R
|
6a7404555ec33b6a3857aaf3bcef9682996d8ae4
|
[] |
no_license
|
ruhulali/R_Codes
|
ff2d12dc6450ae1da748c4df6ab51600dd48e7aa
|
e2b3b3f090e7fd8a43746ed29e750b023035b3f1
|
refs/heads/master
| 2021-06-08T06:44:39.003256
| 2021-04-23T16:21:16
| 2021-04-23T16:21:16
| 158,611,318
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,947
|
r
|
Random Forest_Final.R
|
# Segmentation analysis script: tune and fit a random forest predicting Q2,
# export variable-importance tables, then fit conditional-inference and CART
# trees for comparison.
# NOTE(review): setwd() with an absolute machine-specific path makes the
# script non-portable; consider project-relative paths.
setwd("Z:/CT-Mum/Cello Health/170223 Segmentation/Working/CHAID and Random Forest")
# --------------------------------Data Preprocessing ------------------------------
# NOTE(review): header=T uses the reassignable alias T; prefer header = TRUE.
data3 <- read.csv("UC+CD for JAK.csv", header=T, sep = ",")
# Convert the outcome and the p* predictors to factors for classification.
data3$Q2 = as.factor(data3$Q2)
data3$p0 = as.factor(data3$p0)
data3$p1 = as.factor(data3$p1)
data3$p2 = as.factor(data3$p2)
data3$p21a = as.factor(data3$p21a)
str(data3)
# Drop columns 2-18 (not used as predictors -- confirm against the CSV layout).
data3 <- data3[,-c(2:18)]
# --------------------------------- Random Forest-------------------------
# Installing Required Packages
# install.packages("party")
# install.packages("randomForest")
# Load the party package. It will automatically load other required packages.
library(party)
library(randomForest)
#Find the optimal mtry value #Select mtry value with minimum out of bag(OOB) error.
mtry <- tuneRF(data3[-1],data3$Q2, ntreeTry=1000,stepFactor=1.5,improve=0.01, trace=TRUE, plot=TRUE)
best.m <- mtry[mtry[, 2] == min(mtry[, 2]), 1]
print(mtry)
print(best.m)
# Plotting both Test Error and Out of Bag Error
#matplot(1:mtry , cbind(oob.err,test.err), pch=19 , col=c("red","blue"),type="b",ylab="Mean Squared Error",xlab="Number of Predictors Considered at each Split")
#legend("topright",legend=c("Out of Bag Error","Test Error"),pch=19, col=c("red","blue"))
# Creating the forest
# NOTE(review): mtry is hard-coded to 15 here rather than using best.m from
# the tuning step above -- confirm this is intentional.
output.forest <- randomForest(Q2 ~ ., ntree = 1000,importance=TRUE, data = data3, mtry=15)
#getTree(output.forest, 1)
# Plot
plot(output.forest)
# View the forest results.
print(output.forest)
# Importance of each predictor.
# type = 2: mean decrease in Gini; type = 1: mean decrease in accuracy.
print(importance(output.forest,type = 2))
print(importance(output.forest,type = 1))
# Variable Importance Plot
varImpPlot(output.forest,sort = T,main="Variable Importance", n.var=15)
# Variable Importance Table
# MeanGini
var.imp1 <- data.frame(importance(output.forest,type=2))
var.imp1$Variables <- row.names(var.imp1)
Mean_Gini = var.imp1[order(var.imp1$MeanDecreaseGini,decreasing = T),]
capture.output(Mean_Gini, file = "Mean_Gini_excluding_q3.csv", append = FALSE)
# MeanAccuracy
var.imp2 <- data.frame(importance(output.forest,type=1))
var.imp2$Variables <- row.names(var.imp2)
Mean_Accuracy = var.imp2[order(var.imp2$MeanDecreaseAccuracy,decreasing = T),]
capture.output(Mean_Accuracy, file = "Mean_Accuracy_excluding_q3.csv", append = FALSE)
# ------------------ Not CHAID -- this is CART ------------------
library(rpart)
library(rpart.plot)
library(RColorBrewer)
library(party)
library(partykit)
library(caret)
library(grid)
set.seed(123)
# Conditional inference tree with custom stopping criteria.
ctrl<- ctree_control(mincriterion = 0.05, minsplit = 50, minbucket = 25)
fit <- ctree(Q2~ ., data=data3, control=ctrl)
print(fit)
plot(fit,main="Conditional Inference Tree")
#Tree using rpart
tree.1 <- rpart(Q2~ .,data=data3,control=rpart.control(minsplit=50, minbucket = 25,cp=0))
plot(tree.1)
text(tree.1)
prp(tree.1)
|
a041b44ca573c8b8195bda743975f5f210fc5fc2
|
864de5871194247f7ec4319afed1f6b413601db1
|
/man/is_adjust.Rd
|
f993ad90ee6e08f415b3ba9ef13e982d24504fbb
|
[
"MIT"
] |
permissive
|
han-tun/g2r
|
d3762b82277cdf5d397aa8016608b892f41914bd
|
a48baf1fcceacef5c9f960b52d6054f5fa8d5c70
|
refs/heads/master
| 2023-07-26T07:38:34.951377
| 2021-09-06T19:57:30
| 2021-09-06T19:57:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 427
|
rd
|
is_adjust.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adjust.R
\name{is_adjust}
\alias{is_adjust}
\title{Adjust Check}
\usage{
is_adjust(x)
}
\arguments{
\item{x}{Object to check.}
}
\value{
A boolean.
}
\description{
Checks whether the object is of class \code{adjust},
as returned by \code{\link[=adjust]{adjust()}}.
}
\examples{
\dontrun{
is_adjust(1)
is_adjust(adj("stack"))
}
}
\keyword{internal}
|
97c79f9212de5ff8d6d7ce1ff9bbaf41c7035ab7
|
24f85e94fd44a3648663c2e21ae8f3dd7b4834e0
|
/examples/relationships.R
|
db623e9d67573329245e2ffba7988e69a183ed9a
|
[
"Apache-2.0"
] |
permissive
|
rosette-api/R-Binding
|
ec86e7ff8a2cb35417421b42022aadccf20ed482
|
83900247dd91c7c38ae4369126423cd4cf6a5cac
|
refs/heads/develop
| 2023-07-09T16:07:18.564894
| 2023-06-23T17:48:51
| 2023-06-23T17:48:51
| 57,313,317
| 5
| 10
|
Apache-2.0
| 2023-03-30T15:01:19
| 2016-04-28T15:39:42
|
R
|
UTF-8
|
R
| false
| false
| 1,368
|
r
|
relationships.R
|
# Rosette API "relationships" example: send a block of English text to the
# relationships endpoint and print the JSON header and content of the reply.
# Depends on the api() helper defined in ../R/Api.R.
source("../R/Api.R")
library(jsonlite)
library(optparse)
# Command-line options: -k/--key (API key, required by the service) and
# -u/--url (optional alternate endpoint; defaults to the public API).
option_list <- list(
  make_option(c("-k", "--key"),
              action = "store", default = NA, type = "character",
              help = "Rosette API key"),
  make_option(c("-u", "--url"),
              action = "store", default = NA, type = "character",
              help = "Rosette API url"))
opt_parser <- OptionParser(option_list = option_list)
opt <- parse_args(opt_parser)
# Sample text containing several extractable entity relationships.
relationships_text_data <- "FLIR Systems is headquartered in Oregon and produces thermal imaging, night vision, and infrared cameras and sensor systems. According to the SEC’s order instituting a settled administrative proceeding, FLIR entered into a multi-million dollar contract to provide thermal binoculars to the Saudi government in November 2008. Timms and Ramahi were the primary sales employees responsible for the contract, and also were involved in negotiations to sell FLIR’s security cameras to the same government officials. At the time, Timms was the head of FLIR’s Middle East office in Dubai."
parameters <- list()
parameters[["content"]] <- relationships_text_data
# Use the custom URL only when one was supplied on the command line.
if (is.na(opt$url)) {
  result <- api(opt$key, "relationships", parameters)
} else {
  result <- api(opt$key, "relationships", parameters, NULL, NULL, opt$url)
}
print(jsonlite::toJSON(result$header, pretty = TRUE))
print(jsonlite::toJSON(result$content, pretty = TRUE))
|
d6805590368cfe41b671c352ad0e23c9532e6e74
|
83bfc2ffa4b4e28c1c6ea877c204931980a3e99d
|
/reports/proposed_GCTA_paper/est_var_analysis/est_combined_data/test_result_sparse_decorr.R
|
127290b1a33b7b07053e0ad9d3cf812ed0298e6e
|
[] |
no_license
|
wal615/prime_project
|
0d555626292a713d94700e565363681e2e2e514e
|
8a85b47ecbcaf4419ca33588fd607019226bf3ca
|
refs/heads/master
| 2022-07-04T20:58:33.789355
| 2020-05-05T20:13:16
| 2020-05-05T20:13:16
| 111,431,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,038
|
r
|
test_result_sparse_decorr.R
|
# Testing the result of the second decorrelation method
#
# Workflow: load NHANES PCB data, decorrelate a small subsample using the
# inverse square root of the full-data covariance, then test whether adding
# a sparse (graphical-lasso) covariance estimate at several penalties (rho)
# further reduces residual correlation. Results are compared via histograms
# of off-diagonal sample correlations.
#
# Depends on project helpers sourced below (std_fn, add_inter, invsqrt,
# offdiag, dgpGLASSO_method) -- not defined in this file.
## load the dateset
library(R.utils)
library(MASS)
library(tidyverse)
library(foreach)
library(doRNG)
library(doParallel)
library(gtools) # for rbind based on columns
options(warn = 1, error = bettertrace::stacktrace)
setwd("~/dev/projects/Chen_environmental_study/")
sourceDirectory("./R_code/main_fn/",modifiedOnly = FALSE, recursive = TRUE)
sourceDirectory("./R_code/main_fn/method/",modifiedOnly = FALSE, recursive = TRUE)
source("./R_code/simulation_proposed_GCTA/local_helpers.R")
X_orignal <- read.csv("~/dev/projects/Chen_environmental_study/R_code/data/real_data/NHANES/PCB_99_14/clean/individual/PCB_1999_2004_common.csv", header = T, stringsAsFactors = F)
# Standardize and add interaction terms; cov_h is the "historical" covariance
# estimated from the full dataset.
X_total <- X_orignal %>% std_fn(.) %>% add_inter(.)
cov_h <- cov(X_total)
set.seed(1234)
par(mfrow=c(1,1))
# Small subsample (n = 150) to mimic a data-poor setting.
X_sample <- X_orignal[sample(1:nrow(X_total), 150, replace = F),] %>% std_fn(.) %>% add_inter(.)
cor(X_sample) %>% offdiag(.) %>% hist(., nclass = 40, main = "Histogram of correlations of PCBs with sample size 150")
# First-stage decorrelation: whiten with the full-data covariance.
X_decor1 <- X_sample %*% invsqrt(cov_h)
cor_sample <- (X_decor1) %>% cor(.)
par(mfrow=c(2,2))
cor_sample %>% offdiag(.) %>% hist(., nclass = 40, main = "historical decor")
cor_sample %>% offdiag(.) %>% summary(.)
# adding sparse covariance eistmation
# Second stage: graphical-lasso whitening, penalty rho = 0.1.
X_decor2 <- dgpGLASSO_method(X_decor1, rho = 0.1)$uncorr_data
cor_sample <- (X_decor2) %>% cor(.)
cor_sample %>% offdiag(.) %>% hist(., nclass = 40, main = "sparse decor 0.1")
cor_sample %>% offdiag(.) %>% summary(.)
# adding sparse covariance eistmation
# Same, weaker penalty rho = 0.01.
X_decor2 <- dgpGLASSO_method(X_decor1, rho = 0.01)$uncorr_data
cor_sample <- (X_decor2) %>% cor(.)
cor_sample %>% offdiag(.) %>% hist(., nclass = 40, main = "sparse decor 0.01")
cor_sample %>% offdiag(.) %>% summary(.)
# adding sparse covariance eistmation
# Same, weakest penalty rho = 0.005.
X_decor2 <- dgpGLASSO_method(X_decor1, rho = 0.005)$uncorr_data
cor_sample <- (X_decor2) %>% cor(.)
cor_sample %>% offdiag(.) %>% hist(., nclass = 40, main = "sparse decor 0.005")
cor_sample %>% offdiag(.) %>% summary(.)
|
af4d0073ee2c48030fefd9df28baa94007a80a26
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ggfortify/examples/fortify.spec.Rd.R
|
52aed9ebc6a1a910378a81adde85e3646bdf81b7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 254
|
r
|
fortify.spec.Rd.R
|
# Examples for ggfortify::fortify.spec: convert stats::spec objects
# (spectral density estimates) into plotting-ready data frames.
library(ggfortify)
### Name: fortify.spec
### Title: Convert 'stats::spec' to 'data.frame'
### Aliases: fortify.spec
### ** Examples
fortify(spectrum(AirPassengers))
fortify(stats::spec.ar(AirPassengers))
fortify(stats::spec.pgram(AirPassengers))
|
96dca08864c7f177dd340e0422f9ad6a3df70707
|
50342b2c958d45d1b011c06a35927f41f27a08ab
|
/R/colorio-package.R
|
870b84dca7ae7e47758dae858ba6b8a60a3b4113
|
[
"MIT"
] |
permissive
|
ijlyttle/colorio
|
dcb04d06c753f9cf956116849ddd283edba30902
|
9a97109a9ede1ed2636d778739fd20eefef503d2
|
refs/heads/master
| 2023-06-15T19:37:57.949424
| 2021-05-31T17:49:09
| 2021-05-31T17:49:09
| 305,543,997
| 3
| 0
|
NOASSERTION
| 2021-07-12T22:51:23
| 2020-10-20T00:12:47
|
R
|
UTF-8
|
R
| false
| false
| 462
|
r
|
colorio-package.R
|
#' colorio: package to wrap the colorio Python package
#'
#' This package offers low-level access to the
#' [colorio](https://github.com/nschloe/colorio) Python package, using the
#' [reticulate](https://rstudio.github.io/reticulate) package.
#'
#' The immediate motivation is to provide R users with access to the wide
#' array of color spaces available in colorio, including many newer ones like
#' CIECAM02, CAM16, and Jzazbz.
#'
#' @name colorio-package
#'
NULL
|
f1efbb2473e9a85e48b31e96d7c7e5c244f4320a
|
9aff6353f925fbe673f73dffbecd5b7519595211
|
/R/dbFrame-methods.R
|
206be9ecea7def729c371ab2be2e0633f6677fde
|
[] |
no_license
|
lmweber/CATALYST
|
476aecf91d0917cde99459b6a6c973e34d6acf4a
|
ff2ec01779a66068e79204b7d7e003d01dcd7af0
|
refs/heads/master
| 2020-03-12T23:47:29.003394
| 2018-04-24T08:26:32
| 2018-04-24T08:26:32
| 130,873,760
| 1
| 0
| null | 2018-04-24T15:14:04
| 2018-04-24T15:14:04
| null |
UTF-8
|
R
| false
| false
| 7,300
|
r
|
dbFrame-methods.R
|
# ==============================================================================
# Accessor and replacement methods for class dbFrame
# ------------------------------------------------------------------------------
#' @rdname dbFrame-methods
#' @title
#' Extraction and replacement methods for objects of class \code{dbFrame}
#' @aliases
#' dbFrame-methods bc_key bc_ids deltas normed_bcs mhl_dists
#' sep_cutoffs sep_cutoffs<- mhl_cutoff mhl_cutoff<- counts yields
#'
#' @description
#' Methods for replacing and accessing slots in a \code{\link{dbFrame}}.
#' @return
#' \describe{
#' \item{\code{exprs}}{extracts the raw data intensities.}
#' \item{\code{bc_key}}{extracts the barcoding scheme.}
#' \item{\code{bc_ids}}{extracts currently made event assignments.}
#' \item{\code{deltas}}{extracts barcode separations computed from normalized
#' intensities. \code{sep_cutoffs} apply to these values
#' (see \code{\link{applyCutoffs}}).}
#' \item{\code{normed_bcs}}{extracts normalized barcode intensities
#' (see \code{\link{assignPrelim}}).}
#' \item{\code{sep_cutoffs}, \code{sep_cutoffs<-}}{extracts or replaces
#' separation cutoffs. If option \code{sep_cutoffs} is not specified, these will
#' be used by \code{\link{applyCutoffs}}. Replacement value must be a non-
#' negative numeric with length one or same length as the number of barcodes.}
#' \item{\code{mhl_cutoff}, \code{mhl_cutoff<-}}{extracts or replaces the
#' Mahalanobis distance threshold above which events are to be unassigned.
#' Replacement value must be a single non-negative and non-zero numeric.}
#' \item{\code{counts}}{extract the counts matrix (see \code{\link{dbFrame}}).}
#' \item{\code{yields}}{extract the yields matrix (see \code{\link{dbFrame}}).}
#' }
#' @param x,object a \code{\link{dbFrame}}.
#' @param value the replacement value.
#'
#' @author Helena Lucia Crowell \email{crowellh@student.ethz.ch}
#'
#' @examples
#' data(sample_ff, sample_key)
#' re <- assignPrelim(x = sample_ff, y = sample_key)
#'
#' # set global cutoff parameter
#' sep_cutoffs(re) <- 0.4
#' re <- applyCutoffs(x = re)
#'
#' # subset a specific population, e.g. A1: 111000
#' a1 <- bc_ids(re) == "A1"
#' head(exprs(sample_ff[a1, ]))
#'
#' # subset unassigned events
#' unassigned <- bc_ids(re) == 0
#' head(exprs(sample_ff[unassigned, ]))
# ------------------------------------------------------------------------------
setMethod(f="exprs",
signature="dbFrame",
definition=function(object) return(object@exprs))
#' @rdname dbFrame-methods
setMethod(f="bc_key",
signature="dbFrame",
definition=function(x) return(x@bc_key))
#' @rdname dbFrame-methods
setMethod(f="bc_ids",
signature="dbFrame",
definition=function(x) return(x@bc_ids))
#' @rdname dbFrame-methods
setMethod(f="deltas",
signature="dbFrame",
definition=function(x) return(x@deltas))
#' @rdname dbFrame-methods
setMethod(f="normed_bcs",
signature="dbFrame",
definition=function(x) return(x@normed_bcs))
#' @rdname dbFrame-methods
setMethod(f="mhl_dists",
signature="dbFrame",
definition=function(x) return(x@mhl_dists))
#' @rdname dbFrame-methods
setMethod(f="sep_cutoffs",
signature="dbFrame",
definition=function(x) return(x@sep_cutoffs))
#' @rdname dbFrame-methods
setMethod(f="mhl_cutoff",
signature="dbFrame",
definition=function(x) return(x@mhl_cutoff))
#' @rdname dbFrame-methods
setMethod(f="counts",
signature="dbFrame",
definition=function(x) return(x@counts))
#' @rdname dbFrame-methods
setMethod(f="yields",
signature="dbFrame",
definition=function(x) return(x@yields))
# ==============================================================================
# Replace method for slot 'bc_ids' (only used internally)
# ------------------------------------------------------------------------------
# Valid IDs are 0 ("unassigned") or any rowname of the bc_key; anything else
# raises an error listing the offending value(s).
# NOTE(review): the check uses !any(value %in% valid_ids), so it only errors
# when NO value is valid, yet it then reports "invalid" entries -- an
# all-invalid vector triggers it but a partially invalid one does not;
# confirm whether !all(...) was intended.
setReplaceMethod(f="bc_ids",
    signature=signature(x="dbFrame"),
    definition=function(x, value) {
        valid_ids <- c(0, rownames(bc_key(x)))
        if (!any(value %in% valid_ids)) {
            invalid <- value[!value %in% valid_ids]
            if (length(invalid) == 1)
                stop("\n", invalid, " is not a valid barcode ID.",
                    "\n'bc_ids' should be either 0 = \"unassigned\"",
                    "\nor occur as rownames in the 'bc_key'.")
            if (length(invalid) > 1)
                stop("\nBarcode IDs ", paste0(invalid, collapse=", "),
                    " are invalid.\n'bc_ids' should be either 0 = \"",
                    "unassigned\"\nor occur as rownames in the 'bc_key'.")
        }
        x@bc_ids <- value
        return(x)
    })
# ==============================================================================
# Replace method for slot 'mhl_dists' (only used internally)
# ------------------------------------------------------------------------------
setReplaceMethod(f="mhl_dists",
signature=signature(x="dbFrame", value="numeric"),
definition=function(x, value) {
x@mhl_dists <- value
return(x)
})
# ==============================================================================
# Replace method for slot 'mhl_cutoff'
# ------------------------------------------------------------------------------
#' @rdname dbFrame-methods
#' @export
setReplaceMethod(f="mhl_cutoff",
signature=signature(x="dbFrame", value="numeric"),
definition=function(x, value) {
if (length(value) != 1)
stop("Replacement value must be of length one.")
if (any(value < 0))
stop("Replacement value must be non-negative.")
if (value == 0)
stop("Applying this cutoff will have all events unassigned.")
x@mhl_cutoff <- value
return(x)
})
#' @rdname dbFrame-methods
#' @export
setReplaceMethod(f="mhl_cutoff",
signature=signature(x="dbFrame", value="ANY"),
definition=function(x, value) {
stop("Replacement value must be a non-negative numeric of length one.")
})
# ==============================================================================
# Replace method for slot 'sep_cutoffs'
# ------------------------------------------------------------------------------
#' @rdname dbFrame-methods
#' @export
setReplaceMethod(f="sep_cutoffs",
signature=signature(x="dbFrame", value="numeric"),
definition=function(x, value) {
if (any(value < 0))
stop("Replacement value(s) must be non-negative.")
if (length(value) == 1) {
x@sep_cutoffs <- rep(value, nrow(bc_key(x)))
} else if (length(value) == nrow(bc_key(x))) {
x@sep_cutoffs <- value
} else {
stop("'Replacement value' must be of length one\n or same length",
" as the number of rows in the 'bc_key'.")
}
names(x@sep_cutoffs) <- rownames(bc_key(x))
return(x)
})
#' @rdname dbFrame-methods
#' @export
setReplaceMethod(f="sep_cutoffs",
signature=signature(x="dbFrame", value="ANY"),
definition=function(x, value) {
stop("Replacement value must be a non-negative numeric with length one",
"\n or same length as the number of rows in the 'bc_key'.")
})
|
5c6c7d38beaf387da682c78d05d72a4ad5d59f32
|
bcaf8ba8ae9c6edef2716abef39c1103b2e94f73
|
/get_poem.R
|
4bb457bd6f308f317c9f1ecaa77a11776bdd44ef
|
[
"MIT"
] |
permissive
|
Broccolito/Keyword_poet
|
1dc9a35e2fd0fc90bc266c2ae0cd36863fe635c8
|
74e02d95b3057e3340cab8c46dc563712825c55b
|
refs/heads/master
| 2020-04-13T05:59:08.864233
| 2019-01-07T15:46:12
| 2019-01-07T15:46:12
| 163,008,614
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,029
|
r
|
get_poem.R
|
# Search gushiwen.org for classical Chinese poems matching `keyword`.
# Returns the first match, or the full vector of matches when multiple = TRUE.
# NOTE(review): this scrapes live HTML; any change to the site's markup (or to
# the hard-coded character offset below) silently breaks the parsing.
get_poem = function(keyword, multiple = FALSE){
  keyword = as.character(keyword)
  # Attach rvest, installing it on first use.
  if(!require("rvest")){
    install.packages("rvest")
    library("rvest")
  }
  # Extract a delimiter character from a known search page; index 317 is the
  # position of that character in the page's first <textarea> -- presumably a
  # Chinese punctuation mark used to split poems. TODO confirm offset is stable.
  get_pozhe = function(){
    return(
      unlist(strsplit(
        (html_nodes(read_html("https://so.gushiwen.org/search.aspx?value=%E7%A7%8B%E5%A4%A9"),
                  "textarea")[1] %>% as.character())
      , ""))[317]
    )
  }
  pozhe = get_pozhe()
  base_url = paste0("https://so.gushiwen.org/search.aspx?value=", keyword)
  poem_nodes = html_nodes(read_html(base_url), "textarea")
  poems = vector()
  # For each result node: take the text after the first '>', drop anything
  # from "https" onward, then keep the text before the delimiter. Parse
  # failures are swallowed and leave poems[i] unset (NA).
  for(i in 1:length(poem_nodes)){
    tryCatch({
      poem_node = as.character(poem_nodes[i])
      temp = unlist(strsplit(poem_node, ">"))[2]
      temp = unlist(strsplit(temp, "https"))[1]
      poems[i] = unlist(strsplit(temp, pozhe))[1]
    }, error = function(e){
      return(NULL)
    })
  }
  if(multiple){
    return(poems[])
  }else{
    return(poems[1])
  }
}
|
0db6e442eb8dd10afed5d6115a2b4b3f2dd9bfc0
|
717c5e4b503c3cbc0349d359885253b8f98fca61
|
/adam2.r
|
943e202b360377760cf0b68f31b4471dd9094f8c
|
[] |
no_license
|
kwende/RScripts
|
b28f67e1b3c20dee974efdc57e482bc98080e9c4
|
ea8773aaf6cea0eb27abbdeaad8606aa729f2d36
|
refs/heads/master
| 2016-09-06T11:47:49.956693
| 2014-12-13T22:07:59
| 2014-12-13T22:07:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,766
|
r
|
adam2.r
|
library(bbmle)
# Four-parameter sigmoid evaluated at x, constrained to return a value in
# [0, 1]. `a` and `d` are the left/right asymptote levels (clamped to [0, 1]),
# `b` the slope (clamped to be non-negative), `x0` the inflection point.
# Out-of-range results are clamped and the (clamped) parameters printed.
sigmoid <- function(x, a, d, b, x0) {
  # Clamp shape parameters to their valid ranges before evaluating.
  a <- min(max(a, 0), 1)
  d <- min(max(d, 0), 1)
  b <- max(b, 0)
  value <- ((a - d) * (1 + exp(-b * x0))) / (1 + exp(b * (x - x0))) + d
  # Guard against numerical escape outside [0, 1]; log the offending params.
  if (value < 0) {
    print(c(a, d, b, x0))
    value <- 0
  }
  if (value > 1) {
    print(c(a, d, b, x0))
    value <- 1
  }
  return(value)
}
# Double-sigmoid performance model: the product of a rising sigmoid
# (inflection x01) and a falling sigmoid (inflection x02), scaled by the
# peak height `c`. Returns 0 when the inflection points are out of order.
PM3 <- function(x, a1, b1, x01, d2, b2, x02, c) {
  if (x01 >= x02) {
    return(0)
  }
  rising <- sigmoid(x, a1, 1, b1, x01)
  falling <- sigmoid(x, 1, d2, b2, x02)
  c * rising * falling
}
# Single-sigmoid performance model: a thin wrapper around sigmoid().
PM2 <- function(x, a, d, b, x0) {
  sigmoid(x, a, d, b, x0)
}
# Negative log10-likelihood of Bernoulli observations under the PM3 model.
# `values` holds 0/1 outcomes, `data` the matching covariates; the remaining
# arguments are PM3 parameters. Probabilities outside (0, 1) receive huge
# fixed penalties.
# NOTE(review): the penalty is +1e10 for p <= 0 but -1e10 (a reward) for
# p >= 1 -- this asymmetry looks suspicious; confirm it is intended.
PM3Likelihood = function(values,data,a1,b1,x01,d2,b2,x02,c){
  sum = 0
  for(i in 1:length(values)){
    environ = data[i];
    isFound = values[i]
    prob = PM3(environ,a1,b1,x01,d2,b2,x02,c);
    val = 0;
    if(prob>0 && prob<1){
      # Bernoulli negative log-likelihood term (base-10 log).
      val = -log10(prob^isFound * (1-prob)^(1-isFound))
    }
    else if(prob <= 0){
      val = 10000000000
    }
    else if(prob >=1){
      val = -10000000000
    }
    sum = sum + val
  }
  return(sum)
}
# Negative log10-likelihood of Bernoulli observations under the PM2 model.
# Probabilities outside (0, 1) receive a large fixed penalty.
PM2Likelihood <- function(values, data, a, d, b, x0) {
  total <- 0
  for (i in 1:length(values)) {
    p <- PM2(data[i], a, d, b, x0)
    found <- values[i]
    if (p > 0 && p < 1) {
      # Bernoulli negative log-likelihood term (base-10 log).
      contribution <- -log10(p^found * (1 - p)^(1 - found))
    } else {
      # Degenerate probability: heavily penalize these parameters.
      contribution <- 10000000000
    }
    total <- total + contribution
  }
  total
}
# Fit the double-sigmoid PM3 model by maximum likelihood using bbmle::mle2
# with box constraints (L-BFGS-B). `v` = binary outcomes, `d` = covariate.
# Returns the fitted mle2 object.
PM3MLE = function(v,d){
  # Optimizer starting values.
  leftHandInflectionSlope = .5 #b1
  rightHandInfectionSlope = .5 #b2
  leftHandAsymptote = 0 #a1
  rightHandAsymptote = 0 #d2
  leftHandInflectionPoint = 50 #x01
  rightHandInflectionPoint = 100 #x02
  peak = 1
  # Box constraints keep x01 < x02 (disjoint search ranges) and all other
  # parameters within their natural [0, 1] / non-negative ranges.
  leftHandInflectionMin = 1
  leftHandInflectionMax = 75
  rightHandInflectionMin = 76
  rightHandInflectionMax = 150
  r = mle2(minuslogl = PM3Likelihood,
           start = list(x01=leftHandInflectionPoint,
                        x02=rightHandInflectionPoint,
                        a1=leftHandAsymptote,
                        b1=leftHandInflectionSlope,
                        d2=rightHandAsymptote,
                        b2=rightHandInfectionSlope,
                        c=peak),
           data = list(values=v,data=d),
           lower = c(x01=leftHandInflectionMin,x02=rightHandInflectionMin, a1=0, b1=0, d2=0, b2=0, c=0),
           upper = c(x01=leftHandInflectionMax, x02=rightHandInflectionMax, a1=1, b1=1, d2=1, b2=1, c=1),
           method="L-BFGS-B")
  return(r);
}
# Fit the single-sigmoid PM2 model by maximum likelihood using bbmle::mle2
# with box constraints (L-BFGS-B). `v` = binary outcomes, `d` = covariate.
# Returns the fitted mle2 object.
PM2MLE = function(v,d){
  # Optimizer starting values.
  inflectionPointSlope = .3 #b
  leftHandAsymptote = 0 #a
  rightHandAsymptote = 0 #d
  inflectionPoint = 20 #x0
  # Box constraints for the inflection point; other params bounded in [0, 1].
  inflectionPointMin = 1
  inflectionPointMax = 100
  r = mle2(minuslogl = PM2Likelihood,
           start = list(x0=inflectionPoint,
                        a=leftHandAsymptote,
                        b=inflectionPointSlope,
                        d=rightHandAsymptote),
           data = list(values=v,data=d),
           lower = c(x0=inflectionPointMin, a=0, b=0, d=0),
           upper = c(x0=inflectionPointMax, a=1, b=1, d=1),
           method="L-BFGS-B")
  return(r);
}
# ---------------------------------------------------------------------------
# Fit the PM3 model to the thresholds data and plot the fitted curve.
# Column 9 is taken as the binary outcome, column 12 as the covariate
# -- TODO confirm against thresholds.csv.
# ---------------------------------------------------------------------------
csv = read.csv(file="thresholds.csv",head=TRUE,sep=",")
v = csv[,9]
d = csv[,12]
r = PM3MLE(v,d);
print(r)
# Extract fitted coefficients once, outside the loop.
a1 <- r@coef["a1"]
b1 <- r@coef["b1"]
x01 <- r@coef["x01"]
d2 <- r@coef["d2"]
b2 <- r@coef["b2"]
x02 <- r@coef["x02"]
c <- r@coef["c"]
# Evaluate the fitted model over the observed covariate range.
# BUG FIX: the original looped `for (i in 0:length(y))`, which silently
# dropped the i = 0 assignment (R has no index 0) and evaluated PM3 at i
# while x[i] = i - 1, shifting the plotted curve by one unit.
x <- 0:which.max(d)
y <- vapply(x, function(xi) PM3(xi, a1, b1, x01, d2, b2, x02, c), numeric(1))
plot(x, y, xlab="X", ylab="Prob", type="o")
|
942f15745b2cb78fd265c15e95ce80cddc643259
|
d87f9ef68bd905f243faa970e394848edc724f9a
|
/src/e1071naiveBayes.R
|
e21fa3bd9fea259af2c3bb33b89f7bb780db219d
|
[] |
no_license
|
lukaszpochrzest/rules
|
d3938f1d7f68789e1166d820cd860f3f3f2b1d74
|
c2ee696ee86e91101fea56a7c7f625a946eddad0
|
refs/heads/master
| 2021-05-30T13:21:24.560763
| 2016-02-27T19:36:02
| 2016-02-27T19:36:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,097
|
r
|
e1071naiveBayes.R
|
# Directory used as a private library location for installed packages.
# NOTE(review): hard-coded Windows path -- consider .libPaths() instead.
libDir <- "D:/TMP/RShit/"
#setwd( workingDir )
# Install e1071 (naive Bayes implementation) into libDir, then attach it.
installe1071Bayes <- function()
{
  install.packages( "e1071", lib = libDir )
  library( e1071, lib.loc = libDir )
}
# Attach an already-installed e1071 from libDir.
loadBayesLibs <-function()
{
  library( e1071, lib.loc = libDir )
}
#buildNaiveBayes <- function( classes, attributes )
#{
# model <- naiveBayes(Class ~ ., data = HouseVotes84, laplace = 3)
# return (model)
#}
#predictWithNaiveBayes <- function( model, data )
#{
# prediction <- predict( model$finalModel, data )
# return( prediction$class )
#}
#makeTestDataset <- function( csvDataset, numColumn )
#{
# x = csvDataset[,-numColumn]
# y = csvDataset[,numColumn]
# return( list( classes = y, attributes = x ) )
#}
# Print paired elements of two vectors, interleaved, when printLog is TRUE;
# otherwise do nothing. Returns NULL invisibly.
logAll <- function(value1, value2, printLog = FALSE) {
  if (!printLog) {
    return(invisible(NULL))
  }
  for (idx in 1:length(value1)) {
    print(value1[idx])
    print(value2[idx])
  }
}
# Compute the prediction error of a fitted model on a labelled dataset.
#
# The last column of `dataset` holds the true labels/values; all other
# columns are predictors. `method` selects the error type:
#   "class" -> misclassification rate (categorical response)
#   "anova" -> mean squared error (continuous response)
#
# Returns a named numeric scalar ("bayes error").
#
# Rewritten from an apply()/`<<-` accumulator: the original built
# cbind(predictions, realClasses), which coerces factors to their integer
# level codes, so classes were compared by code rather than by label --
# wrong whenever the two factors had different level sets. An unknown
# `method` also silently returned 0; it now raises an error.
bayesError <- function( model, dataset, method )
{
  realClasses <- dataset[, ncol(dataset)]
  toClassify <- dataset[, 1:(ncol(dataset) - 1)]
  predictions <- predict(object = model, newdata = toClassify, type = "class")

  n <- length(predictions)
  if (method == "class") {
    # Fraction of misclassified samples (0 when nothing was classified).
    error <- if (n > 0) mean(as.character(predictions) != as.character(realClasses)) else 0
  } else if (method == "anova") {
    # Mean squared error for a continuous response.
    error <- if (n > 0) mean((as.numeric(predictions) - as.numeric(realClasses))^2) else 0
  } else {
    stop("Unknown method: ", method)
  }
  names(error) <- c("bayes error")
  return (error)
}
|
015f267e6158c275cfb200f8fc8bedcb0bc068a5
|
7bf45c63e90b8e781e1b1ac1cd5f0504ffd2cfd0
|
/Rcode.R
|
788c669e9232829bde81973a8272237c014ef2f5
|
[
"MIT"
] |
permissive
|
krduncan/mass_spec
|
90f76fc6f15f7e3cb170c7588363f3029d39bc9e
|
0ed039abf19b5abab25cace98d7172ef4e7fe6a4
|
refs/heads/master
| 2020-12-02T21:23:50.667579
| 2017-07-05T14:16:16
| 2017-07-05T14:16:16
| 96,309,450
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,224
|
r
|
Rcode.R
|
#mass_spec
# Read two mzXML runs and plot mass vs. (log) intensity, then collect all
# scans of the second run into one tidy data frame.
# Requires the readMzXmlData and ggplot2 packages to be attached:
#> install.packages("readMzXmlData")
#> library(readMzXmlData); library(ggplot2)
getwd()
list.files()
sample1 <- readMzXmlFile("SBT3014.mzXML")
sample2 <- readMzXmlFile("SBT3015.mzXML")
# First scan of sample 1.
# BUG FIX: the original never defined s1 (the assignment was commented out)
# and misspelled the slot as `specturm`.
s1 <- sample1[[1]]
str(s1)
s1$spectrum
s1$metaData
s1spec <- s1$spectrum
mass <- s1spec$mass
str(mass)
int <- s1spec$int
str(int)
plot(x=mass, y=int)
plot(x=mass, y=log(int))
d <- data.frame(mass=mass, int=int)
head(d)
ggplot(data=d, aes(x=mass, y=int))
ggplot(data=d, aes(x=mass, y=int)) + geom_point()
# BUG FIX: the original call was missing a closing parenthesis.
ggplot(data=d, aes(x=mass, y=log(int))) + geom_point()
# Second scan of sample 2.
s2 <- sample2[[2]]
str(s2)
spec2 <- s2$spectrum
str(spec2)
mass2 <- spec2$mass
str(mass2)
int2 <- spec2$int
str(int2)
plot(x=mass2, y=int2)
plot(x=mass2, y=log(int2))
# BUG FIX: `(mass, mass2)` is a syntax error; concatenate with c().
d <- data.frame(mass=c(mass, mass2), int=c(int, int2))
d2 <- data.frame(mass=mass2, int=int2)
str(d2)
# Collect scan i of sample2 into a data frame with log intensities.
getData <- function(i){
  message(sprintf('processing %d...', i))
  mass <- sample2[[i]]$spectrum$mass
  int <- log(sample2[[i]]$spectrum$int)
  output <- data.frame(sample=i, mass=mass, int=int)
  return(output)
}
sample.i <- 1:length(sample2)
res <- lapply(sample.i, getData)
res <- do.call(rbind.data.frame, res)
|
b70f63c931980de35c1396374d51de3d667f1aef
|
43682363e7294f29b636667ae0f6c3134174bc4a
|
/man/interaction.Rd
|
6edbea10b17db43b016c09547fec5bd415d26dca
|
[] |
no_license
|
zsemnani/urinaryDBP
|
115b7e64e044cc0f8b3560e64726dc3461c3b005
|
6281f635f954d4204e84280eb54a61bcc6d6b4c8
|
refs/heads/master
| 2022-03-16T06:51:19.663113
| 2018-08-01T05:32:29
| 2018-08-01T05:32:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 237
|
rd
|
interaction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interaction.R
\name{interaction}
\alias{interaction}
\title{Interaction with time}
\usage{
interaction()
}
\description{
Interaction with time
}
\examples{
}
|
e0890889e45c4e6663da772cff28b6748e7feb71
|
b0b61cfd9fec47fc94b5da595fd81372cd5ec369
|
/Number Patterns/problem8.R
|
03d54d26809f65828f84078ca9b0981342c92ec2
|
[] |
no_license
|
ArjunAranetaCodes/MoreCodes-Rlang
|
4c6246e67cec99ab3961260308a02b333b39dbf3
|
555b37e8ee316a48c586327cfc61069e0ce1e198
|
refs/heads/master
| 2021-01-01T19:22:15.672176
| 2018-11-25T04:00:00
| 2018-11-11T23:01:07
| 98,572,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 301
|
r
|
problem8.R
|
# Problem 8: print a 5x5 grid of ones with a single zero at the centre.
# 11111
# 11111
# 11011
# 11111
# 11111
row <- 4
col <- 4
for (y in 0:row) {
  # Build one line of the pattern, then emit it followed by a newline.
  line <- ""
  for (x in 0:col) {
    cell <- if (x == (row / 2) && y == (col / 2)) "0" else "1"
    line <- paste0(line, cell)
  }
  cat(line)
  cat(paste("\n"))
}
|
5dfb63f7af19d75a8e406ccecd9099105bc4b2af
|
5b722119d1b1ca9df17a2914a4db2d35f73b5490
|
/Projects/Taxes vs. Deficits/(2)_analyze_US_fiscal_data.r
|
59b4d61b71651a12ad5f42d3607ed397bb25f8ac
|
[
"CC-BY-4.0"
] |
permissive
|
vishalbelsare/Public_Policy
|
1d459eba9009e7183fa266d3bb9d4dd0d6dacddc
|
4f57140f85855859ff2e49992f4b7673f1b72857
|
refs/heads/master
| 2023-03-29T05:01:10.846030
| 2021-01-13T21:52:45
| 2021-01-13T21:52:45
| 311,356,474
| 0
| 0
|
NOASSERTION
| 2021-04-04T20:12:17
| 2020-11-09T14:00:21
| null |
UTF-8
|
R
| false
| false
| 19,768
|
r
|
(2)_analyze_US_fiscal_data.r
|
# fredr_set_key('d0b9e64aba30b479343a06037a5a10c1')
library(rvest)
library(httr)
library(data.table)
library(tidyverse)
library(WDI)
library(countrycode)
library(lmtest)
library(tseries)
library(plm)
library(rvest)
library(httr)
library(quantmod)
# library(fredr)
library(scales)
library(quantreg)
library(xtable)
library(stargazer)
setwd('~\\Public_Policy\\Projects\\Taxes vs. Deficits\\data')
caption_text = 'Chart: Taylor G. White\nData: OECD, FRED, WDI'
##### Data import and cleanup #####
stacked_oecd_wdi_data_lags_diffs = read.csv('stacked_oecd_wdi_data_lags_diffs.csv')
wide_oecd_wdi_data = read.csv('wide_oecd_wdi_data.csv')
concurrence_with_president_clean = read_csv('concurrence_with_president_clean.csv')
brookings_house_senate_representation = read_csv('brookings congressional stats/1-20.csv',
na=c('', 'NA', '.')
) %>%
separate(Years, sep = '[ ]*[-]{1}[ ]*', into = c('start', 'end'), convert = T) %>%
mutate(
congress_start = (Congress - min(Congress)) * 2 + min(start),
congress_end = congress_start + 1
) %>%
data.table()
# Per-Congress, per-Chamber seat summary: expand each two-year Congress into
# one row per calendar year, with seat counts by party and the
# Democratic-minus-Republican seat margin (dem_rep_diff).
brookings_house_senate_representation_stats_by_congress = brookings_house_senate_representation[, {
  # Pull each party's seat count out of the long PartyStatus column.
  dem_seats = Seats[PartyStatus == 'Democrat']
  rep_seats = Seats[PartyStatus == 'Republican']
  other_seats = Seats[PartyStatus == 'Other']
  vacant_seats = Seats[PartyStatus == 'Vacant']
  total_seats = Seats[PartyStatus == 'All']
  dem_rep_diff = dem_seats - rep_seats
  # One row per year of the Congress (congress_start:congress_end).
  out_tab = tibble(
    Year = congress_start:congress_end,
    congress_start = congress_start[1],
    dem_seats = dem_seats, rep_seats = rep_seats,
    other_seats = other_seats,
    vacant_seats = vacant_seats,
    total_seats = total_seats,
    dem_rep_diff = dem_rep_diff) %>%
    as.data.frame()
  out_tab$obs = nrow(out_tab)
  out_tab
}, by = list(Congress, Chamber)]
wide_brookings_house_senate_representation_stats_by_congress = pivot_wider(
brookings_house_senate_representation_stats_by_congress,
id_cols = 'Year', names_from = 'Chamber', values_from = 'dem_rep_diff'
)
# check the difference in representation over time
ggplot(brookings_house_senate_representation_stats_by_congress, aes(congress_start, dem_rep_diff)) +
geom_bar(stat = 'identity') +
facet_wrap(~Chamber, ncol = 1, scales = 'free_y')
# get additional data from FRED
# us_real_gdp_per_capita = fredr('A939RX0Q048SBEA', aggregation_method = 'eop', frequency = 'a', units = 'pch') %>%
# rename(value_real_per_capita_gdp_growth = value) %>%
# mutate(
# Year = year(date),
# lag_value_real_per_capita_gdp_growth = dplyr::lag(value_real_per_capita_gdp_growth, 1)
# ) %>%
# select(-date, -series_id)
# recession_years = fredr('JHDUSRGDPBR', aggregation_method = 'sum', frequency = 'a') %>%
# rename(
# n_recession_quarters = value
# ) %>%
# mutate(
# Year = year(date),
# pct_of_year_in_recession = n_recession_quarters / 4,
# recession_year = n_recession_quarters > 0
# ) %>%
# select(-date, -series_id)
# join everything together
US_wide = filter(wide_oecd_wdi_data, Country == 'United States') %>%
left_join(concurrence_with_president_clean) %>%
# left_join(us_real_gdp_per_capita) %>%
# left_join(recession_years) %>%
# inner join because there are way more years here
inner_join(wide_brookings_house_senate_representation_stats_by_congress) %>%
mutate(
# real_gdp_per_capita_z = (value_real_per_capita_gdp_growth - mean(value_real_per_capita_gdp_growth)) / sd(value_real_per_capita_gdp_growth),
# lag_real_gdp_per_capita_z = dplyr::lag(real_gdp_per_capita_z, 1),
z_value_NY.GDP.PCAP.KD.ZG = (value_NY.GDP.PCAP.KD.ZG - mean(value_NY.GDP.PCAP.KD.ZG, na.rm=T)) / sd(value_NY.GDP.PCAP.KD.ZG, na.rm =T),
lag_z_value_NY.GDP.PCAP.KD.ZG = dplyr::lag(z_value_NY.GDP.PCAP.KD.ZG, 1),
dem_congress = House > 0 & Senate > 0,
unified_congress = sign(House * Senate) == 1,
unified_government = (dem_congress & unified_congress & president_party == 'DEM') |
(!dem_congress & unified_congress & president_party == 'REP'),
tax_cut = diff_value_top_tax_rate < 0,
tax_increase = diff_value_top_tax_rate > 0
)
filter(US_wide, diff_value_top_tax_rate != 0) %>% select(Year, President, diff_value_top_tax_rate, value_top_tax_rate) %>% View()
US_long = filter(stacked_oecd_wdi_data_lags_diffs, Country == 'United States')
save(US_wide, US_long, file = 'US_political_economic_data.rdata')
##### get clean dataset to model YOY differences in net lending (budget deficits) #####
options(na.action = na.exclude)
reg_dat = select(US_wide, Year,
president_party,
dem_congress,
tax_cut, tax_increase,
unified_congress,
unified_government,
# house_majority, senate_majority,
pct_of_year_in_recession,
recession_year,
contains("GGNLEND"),
value_real_per_capita_gdp_growth, # real gdp per capita growth
lag_value_real_per_capita_gdp_growth,
contains('NY.GDP'),
# value_NY.GDP.PCAP.KD.ZG, # gdp per capita growth
# value_NY.GDP.MKTP.KD.ZG, # gdp growth
contains('gdp_per_capita'),
contains('top_tax_rate'),
contains('GGEXP'), contains('GGREV')
) %>% na.omit()
#### get correlation matrix #####
reg_dat_numeric = mutate_if(reg_dat, is.logical, as.numeric) %>%
mutate(
president_dem = (president_party == 'DEM') %>% as.numeric()
) %>%
select(-president_party)
correlation_mat = cor(reg_dat_numeric)
write.csv(correlation_mat, 'output/regression_cor_matrix.csv')
percents = correlation_mat[,'diff_value_GGNLEND'] %>% sort(decreasing = T) %>% percent(accuracy = 0.01)
names(percents) = names(correlation_mat[,'diff_value_GGNLEND'] %>% sort(decreasing = T))
correlation_mat[,'lag_value_real_per_capita_gdp_growth']
variable_mappings = c(
'dem_congress' = 'Democratic Congress',
'unified_congress' = 'Unified Congress',
'unified_government' = 'United Government',
'president_dem' = 'Democratic President',
'recession_year' = 'Recession Year',
'last_value_GGNLEND' = "Last Year's Deficit",
'value_real_per_capita_gdp_growth' = 'Real Per Capita GDP Growth',
'lag_value_real_per_capita_gdp_growth' = 'Real Per Capita GDP Growth, Last Year',
'value_NY.GDP.PCAP.KD.ZG' = 'GDP Per Capita Growth',
'last_value_NY.GDP.PCAP.KD.ZG' = 'GDP Per Capita Growth, Last Year',
'diff_value_GGREV' = 'Change in Revenues/GDP',
'diff_value_GGEXP' = 'Change in Expenditures/GDP',
'lag_diff_value_GGEXP' = 'Change in Expenditures/GDP, Last Year',
'lag_diff_value_GGREV' = 'Change in Revenues/GDP, Last Year',
'diff_value_top_tax_rate' = 'Change in Top Marginal Tax Rate',
'tax_cut' = 'Tax Cut',
'tax_increase' = 'Tax Increase'
)
correlation_df =
tibble(
correlation_to_net_lending = correlation_mat[names(variable_mappings),'diff_value_GGNLEND'],
positive_correlation = correlation_to_net_lending > 0,
pretty_variable = variable_mappings
) %>%
arrange(-correlation_to_net_lending) %>%
mutate(
pretty_variable = factor(str_wrap(pretty_variable, 24), levels = str_wrap(pretty_variable, 24))
)
range(reg_dat$Year)
ggplot(correlation_df, aes(pretty_variable, correlation_to_net_lending*-1, fill = positive_correlation)) +
theme_bw() +
geom_bar(stat = 'identity') +
geom_text(aes(label = percent(correlation_to_net_lending*-1)), hjust = 0) +
scale_fill_manual(guide = F, values = c('TRUE' = 'steelblue', 'FALSE' = 'firebrick')) +
coord_flip() +
labs(
y = 'Correlation', x = '',
title = 'Correlation to Changes to Annual Deficits',
subtitle = 'U.S. 1977-2018',
caption = caption_text
) +
scale_y_continuous(labels = percent) +
theme(axis.text = element_text(size = 14), title = element_text(size = 16), axis.title = element_text(size = 16))
ggsave('output/correlations_to_the_deficit.png', height = 8.5, width = 8.75, units = 'in', dpi = 600)
##### Fit models for difference in net lending #####
# confirm that net lending is really an accounting identity between general expenditures and revenues
deficit_change_model = lm(diff_value_GGNLEND ~ diff_value_GGEXP + diff_value_GGREV, data = reg_dat)
# fit several models
base_model = lm(diff_value_GGNLEND ~ last_value_GGNLEND +
z_value_NY.GDP.PCAP.KD.ZG + lag_z_value_NY.GDP.PCAP.KD.ZG, data = reg_dat)
base_model_president = lm(diff_value_GGNLEND ~ last_value_GGNLEND +
president_party +
z_value_NY.GDP.PCAP.KD.ZG +
lag_z_value_NY.GDP.PCAP.KD.ZG, data = reg_dat)
interaction_model = lm(diff_value_GGNLEND ~
last_value_GGNLEND +
z_value_NY.GDP.PCAP.KD.ZG*lag_z_value_NY.GDP.PCAP.KD.ZG,
data = reg_dat)
interaction_model_president = lm(diff_value_GGNLEND ~
last_value_GGNLEND +
president_party*z_value_NY.GDP.PCAP.KD.ZG*lag_z_value_NY.GDP.PCAP.KD.ZG,
data = reg_dat)
# there are outliers in the data -- a median regression is less sensitive to those
interaction_model_president_median = rq(diff_value_GGNLEND ~
last_value_GGNLEND +
president_party*z_value_NY.GDP.PCAP.KD.ZG*lag_z_value_NY.GDP.PCAP.KD.ZG,
data = reg_dat, tau = 0.5)
summary(interaction_model_president)
summary(interaction_model_president_median, se = 'boot')
# the most important coefficients are very similar between the models
coef_comparison_table = data.frame(
ols = interaction_model_president$coefficients,
median = interaction_model_president_median$coefficients
)
##### check and compare the model results #####
# adding president to the base model is an improvement
# the interaction model w/o president is only a slight improvement over the base + president
# interaction with president is the superior model
full_anova = anova(base_model, base_model_president,
interaction_model, interaction_model_president)
# get the variance explained by each term
interaction_model_president_anova = anova(interaction_model_president)
interaction_model_president_anova$`R Squared` = interaction_model_president_anova$`Sum Sq` / sum(interaction_model_president_anova$`Sum Sq`)
# check for heteroskedacity - p value is greater than 0.05 so we're good
bptest(diff_value_GGNLEND ~
last_value_GGNLEND +
president_party*real_gdp_per_capita_z*lag_real_gdp_per_capita_z,
data = reg_dat, studentize=F)
# regression analysis plots
par(mfrow=c(2,2))
plot(interaction_model_president, which = 2)
plot(interaction_model_president, which = 1)
plot(interaction_model_president, which = 5)
hist(residuals(interaction_model_president), main = 'Histogram of Residuals', xlab = 'Residuals')
# residuals aren't perfect but they're pretty good
# there are influential points but hard to argue getting rid of them
# president party and related terms explain significant variation
#### plot the components of deficits ####
long_budget_components = pivot_longer(reg_dat,
cols = c('diff_value_GGREV', 'diff_value_GGEXP')) %>%
mutate(
# adjust expenditures to have same directionality as net lending and revenues
value = ifelse(name == 'diff_value_GGEXP', value * -1, value)
)
president_starts_stops = group_by(US_wide, President, president_party) %>%
summarize(
start_year = min(Year), end_year = max(Year)
) %>%
filter(start_year >= min(long_budget_components$Year)) %>%
mutate(
midpoint = (start_year + end_year)/2,
pres_last_name = str_extract(President, '([ ]{1}[A-Za-z]+)$') %>% str_trim()
)
ggplot(long_budget_components, aes(Year, value)) +
theme_bw() +
geom_rect(data = president_starts_stops, aes(xmin = start_year, xmax = end_year,
x = NULL, y = NULL, ymin = -6, ymax = 4,
colour = president_party),
stat = 'identity', alpha = 0.3, show.legend = F, fill = NA) +
scale_fill_manual(
name = '',
values = c('diff_value_GGREV' = 'steelblue', 'diff_value_GGEXP' = 'orange', 'DEM' = '#00aef3', 'REP' = '#d8171e'),
labels = c('diff_value_GGREV' = 'Revenue/GDP', 'diff_value_GGEXP' = 'Expenditure/GDP')
) +
geom_bar(aes(fill= name), stat = 'identity', colour = 'black') +
geom_point(data = reg_dat, aes(Year, diff_value_GGNLEND), size = 2.5, shape = 18) +
labs(
y = 'Change from Prior Year (% of GDP)\n',
x = '',
title = 'Contributions to Changes in Budget Deficits\nU.S. 1977-2018',
caption = caption_text,
subtitle = 'Points Show the Deficit Change for the Year'
) +
scale_colour_manual(guide = F, values = c('DEM'='#00aef3', 'REP' = '#d8171e')) +
scale_x_continuous(breaks = seq(1977, 2018, by = 4)) +
geom_text(data = president_starts_stops,
aes(y = 4.5, x = midpoint, label = pres_last_name, colour = president_party), hjust = 0.5, size = 4.5) +
# geom_segment(data = president_starts_stops, aes(y = 4, yend = 4, x = start_year, xend = end_year)) +
theme(
legend.position = 'bottom',
axis.text.x = element_text(angle = 0),
title = element_text(size = 20),
axis.text = element_text(size = 16),
axis.title = element_text(size = 18),
legend.text = element_text(size = 14)
) +
geom_segment(
aes(x = 1976, xend = 1976, y = 1.5, yend = 3),
lineend = 'butt', linejoin = 'mitre',
size = 1, arrow = arrow(length = unit(0.1, "inches"))
) +
geom_segment(
aes(x = 1976, xend = 1976, y = -1.5, yend = -3),
lineend = 'butt', linejoin = 'mitre',
size = 1, arrow = arrow(length = unit(0.1, "inches"))
) +
geom_text(
aes(x = 1975, y = 2.5, label = 'Decreases Deficit'), angle = 90, hjust = 0.5, size = 4.5
) +
geom_text(
aes(x = 1975, y = -2.5, label = 'Increases Deficit'), angle = 90, hjust = 0.5, size = 4.5
)
ggsave('output/contributions_to_deficits.png', height = 8, width = 10, units = 'in', dpi = 600)
##### find contributions to deficits after controlling for economic conditions #####
# imagine if all presidents were democratic
reg_dat_dems = mutate(reg_dat, president_party = 'DEM')
# if all presidents were republican
reg_dat_reps = mutate(reg_dat, president_party = 'REP')
# find the difference between the democratic and republican predictions
reg_dat = mutate(
reg_dat,
predicted_dem_diff_GGNLEND = predict(interaction_model_president, newdata = reg_dat_dems),
predicted_rep_diff_GGNLEND = predict(interaction_model_president, newdata = reg_dat_reps),
rep_dem_diff_GGNLEND = predicted_rep_diff_GGNLEND - predicted_dem_diff_GGNLEND,
predicted_diff_GGNLEND = predict(interaction_model_president)
)
dem_rep_diff = pivot_longer(reg_dat, cols = c('predicted_dem_diff_GGNLEND', 'predicted_rep_diff_GGNLEND'))
ggplot(reg_dat, aes(Year, -rep_dem_diff_GGNLEND)) +
theme_bw() +
geom_bar(aes(fill = recession_year), stat = 'identity', colour = 'black') +
scale_fill_manual(name = 'Recession Year', values = c('TRUE' = 'firebrick', 'FALSE' = 'steelblue')) +
geom_segment(
aes(x = 1976, xend = 1976, y = 0.5, yend = 2),
lineend = 'butt', linejoin = 'mitre',
size = 1, arrow = arrow(length = unit(0.1, "inches"))
) +
geom_segment(
aes(x = 1976, xend = 1976, y = -0.5, yend = -2),
lineend = 'butt', linejoin = 'mitre',
size = 1, arrow = arrow(length = unit(0.1, "inches"))
) +
geom_text(
aes(x = 1975, y = 0.5, label = 'Rep Increase'), angle = 90, hjust = 0, size = 4.5
) +
geom_text(
aes(x = 1975, y = -0.5, label = 'Dem Increase'), angle = 90, hjust = 1, size = 4.5
) +
theme(
axis.text.x = element_text(angle = 0),
legend.position = 'bottom',
title = element_text(size = 16),
axis.text = element_text(size = 16),
axis.title = element_text(size = 18),
legend.text = element_text(size = 14)
) +
labs(
title = 'Predicted Difference, Democratic and Republican Deficit Changes',
subtitle = 'U.S. 1977-2018',
x = '',
y = 'Predicted Difference in Deficit Changes (% of GDP)',
caption = caption_text
) +
scale_x_continuous(breaks = seq(1977, 2018, by = 4))
ggsave('output/dem_rep_difference_deficits.png', height = 8, width = 10, units = 'in', dpi = 600)
#### plot model predictions ####
ggplot(reg_dat, aes(-predicted_diff_GGNLEND, -diff_value_GGNLEND)) +
theme_bw() +
geom_point(aes(colour = president_party)) +
stat_smooth(method = 'lm', colour = 'black') +
labs(
x = 'Predicted Deficit Change (% of GDP)',
y = 'Actual Deficit Change (% of GDP)',
title = 'Predicted vs. Actual Deficit Changes',
caption = caption_text
) +
scale_colour_manual(name = 'President Party', values = c('DEM'='#00aef3', 'REP' = '#d8171e')) +
geom_text(aes(x = -2, y = 5.5,
label = paste0('R Squared: ', summary(interaction_model_president)$r.squared %>% round(2))),
hjust = 0) +
geom_text(aes(x = -2, y = 4.75,
label = paste0('R Squared Adj.: ', summary(interaction_model_president)$adj.r.squared %>% round(2))),
hjust = 0) +
geom_text(aes(x = -2, y = 4,
label = paste0('DF: ', summary(interaction_model_president)$df[2] %>% round(2))),
hjust = 0) +
theme(legend.position = 'bottom')
ggsave('output/predicted_vs_actual_deficit_changes.png', height = 6, width = 6, units = 'in', dpi = 600)
#
# ggplot(US_wide, aes(real_gdp_per_capita_z, -diff_value_GGNLEND, colour = president_party)) +
# geom_hline(aes(yintercept = 0), linetype = 'dashed', size = 1) +
# geom_vline(aes(xintercept = 0), linetype = 'dashed', size = 1) +
# geom_point(aes(shape = recession_year, size = pct_of_year_in_recession)) +
# stat_smooth(method = 'lm', se = F) +
# scale_colour_manual(
# name = "President's Party",
# values = c('DEM' = 'blue', 'REP' = 'red')
# ) +
# labs(
# title = 'Economic Growth and Budget Deficits by Presidential Party\n1971-2018',
# subtitle = sprintf('Republicans add %s more debt each year than Democrats',
# percent(deficit_model$coefficients['president_partyREP']/100, accuracy = 0.01)),
# y = 'Annual Deficit (% of GDP)\n',
# x = '\nReal GDP Per Capita Growth\nStandard Deviations (Z Value)',
# caption = caption_text
# ) +
# scale_size(
# guide = F,
# range = c(3, 8)
# ) +
# scale_shape(name = 'Recession Year', na.translate = FALSE) +
# geom_text(aes(x = 1, y = -3, label = '(4) Strong Growth\nBudget Surplus'), colour = 'black', hjust=0) +
# geom_text(aes(x = -2, y = -3, label = '(3) Poor Growth\nBudget Surplus'), colour = 'black', hjust=0) +
# geom_text(aes(x = 1, y = 7, label = '(1) Strong Growth\nBudget Deficit'), colour = 'black', hjust=0) +
# geom_text(aes(x = -2, y = 7, label = '(2) Poor Growth\nBudget Deficit'), colour = 'black', hjust=0)
#
# ggsave('output/deficits_vs_economic_growth_by_party.png', height = 7, width = 7.5, units = 'in', dpi = 600)
#
save(reg_dat,
base_model, base_model_president, interaction_model,
interaction_model_president, interaction_model_president_median,
file = 'output/reg_dat_diff_GNLEND_models.rdata')
summary(interaction_model_president_median, se = 'boot')
reg_dat$rep_dem_diff_GGNLEND %>% mean()
reg_dat$rep_dem_diff_GGNLEND %>% summary()
|
6666ecdbe9ba3a91eb0c13731c953442a24f0cd3
|
c9ae35fbb115dd9553f5740d798bbecd803a9f27
|
/man/sst.Rd
|
639f3c541659bdd75f87ebb7740f70e479097b62
|
[] |
no_license
|
abhinavwidak/ggplottimeseries
|
471fc90f176d00d5cda2bf0c244559ea70450c1f
|
1f26965cddacabf747be2b1425b4347676359852
|
refs/heads/master
| 2023-03-20T11:42:57.261707
| 2019-02-15T15:34:56
| 2019-02-15T15:34:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 495
|
rd
|
sst.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sst.R
\docType{data}
\name{sst}
\alias{sst}
\title{Sea surface temperature data}
\format{A data frame with 2142 observations of the following variables.
\describe{
\item{\code{date}}{a date vector of the time of the observation}
\item{\code{sst}}{a numeric vector}}}
\usage{
sst
}
\description{
Daily sea surface temperature data downloaded from Giovanni
}
\author{
Brisneve Edullantes
}
\keyword{datasets}
|
f72a13660a1b7cc8594a98a836b0646b98c527c2
|
acca4ebb9fec1728a5a9004193b98b830c0c74ac
|
/r28_statics.R
|
e019ab9e667f44e70f88dfd55bed9b1ace51a19c
|
[] |
no_license
|
Minki96/lab-r
|
8e43bcff537319511e6a2694bd0afb885370333b
|
c274088237e99057f8c9fa6b2e6b6bb98b686948
|
refs/heads/master
| 2022-06-17T06:10:15.525771
| 2020-05-06T01:48:18
| 2020-05-06T01:48:18
| 261,624,227
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,739
|
r
|
r28_statics.R
|
# 조건부 확률, Mosacit Plot, Decison Tree
str(Titanic) # 4-dimensional array
# 4-d array를 data.frame으로 변환
Titanic_data <-as.data.frame(Titanic)
Titanic_data
# 4-d array를 이용한 mosaic plot
mosaicplot( ~ Class,data = Titanic)
mosaicplot( ~ Class + Sex, data = Titanic )
mosaicplot( ~ Class + Sex + Age, data = Titanic )
mosaicplot( ~ Class + Sex + Age + Survived, data = Titanic, color = TRUE )
mosaicplot( ~ Age + Sex + Class + Survived, data = Titanic, color = TRUE)
# 전체 탑승객 숫자
n_total <- sum(Titanic_data$Freq)
# 생존자 숫자
n_survived <- Titanic_data %>% filter(Survived == "Yes") %>% select(Freq)%>% summarise(sum(Freq))
n_survived
n_survived / n_total
Titanic_data %>%
filter(Sex == "Male") %>%
summarise(sum(Freq))
Titanic_data %>%
filter(Age == "Adult") %>%
summarise(sum(Freq))
# 1) 성별(Sex)로 분할한 경우
# 남자인 경우 생존 비율
male_n <- Titanic_data %>%
filter(Sex == "Male") %>%
summarise(sum(Freq))
Titanic_data %>%
filter( Sex == "Male" & Survived == "Yes") %>%
summarise(sum(Freq)) / male_n
# 여자인 경우 생존 비율
female_n <- Titanic_data %>%
filter(Sex == "Female") %>%
summarise(sum(Freq))
Titanic_data %>%
filter( Sex == "Female" & Survived == "Yes") %>%
summarise(sum(Freq)) / female_n
# 2) 나이(Age)로 분할한 경우
Adult_n <- Titanic_data %>%
filter(Age == "Adult") %>%
summarise(sum(Freq))
Titanic_data %>%
filter(Survived == "Yes" & Age == "Adult" ) %>%
summarise(sum(Freq)) / Adult_n
Child_n <- Titanic_data %>%
filter(Age == "Child") %>%
summarise(sum(Freq))
Titanic_data %>%
filter(Survived == "Yes" & Age == "Child" ) %>%
summarise(sum(Freq)) / Child_n
# 3등급인 경우 생존 비율
n_3rd <- Titanic_data %>%
filter(Class == "3rd") %>%
summarise(sum(Freq))
Titanic_data %>%
filter(Survived == "Yes" & Class == "3rd" ) %>%
summarise(sum(Freq)) / n_3rd
# 3등급이 아닌 경우 생존 비율
not_3rd <- Titanic_data %>%
filter(Class != "3rd") %>%
summarise(sum(Freq))
Titanic_data %>%
filter(Survived == "Yes" & Class != "3rd" ) %>%
summarise(sum(Freq)) / not_3rd
2512921+398067+1366342
git_titanic<- read.csv(file = "data/titanic3.csv", na.strings = "")
# read.csv() 함수의 na.string = "" argument는
# csv 파일에 있는 빈 문자열("")을 NA로 처리함.
str(git_titanic)
#git_titanic$home.dest <- ifelse(git_titanic$home.dest == "", NA, git_titanic$home.dest)
head(git_titanic)
summary(git_titanic)
# pclass 변수를 categorical 변수로 변환 (factor)
# survived 변수를 categorical 변수로 변환(factor)
# levels를 "no"(0), "yes"(1) 지정.
git_titanic$pclass <- factor(git_titanic$pclass)
git_titanic$survived <- factor(git_titanic$survived)
levels(git_titanic$survived) <- c("no","yes")
levels(git_titanic$survived)
table(git_titanic$survived)
# mosaic plot
mosaicplot(~ sex + pclass + survived, data =git_titanic,
color = TRUE)
library(tidyverse)
# git titanic 데이터 프레임에 adult 변수를 추가
# age <= 10 이하면 "no", 그렇지 않으면 "yes"
# adult 변수를 포함한 : mosaic plot
git_titanic <- git_titanic %>%
mutate(adult = ifelse(age <= 10, "no","yes"))
table(git_titanic$adult)
mosaicplot(~ sex + pclass + adult +survived, data =git_titanic,
color = TRUE)
# rpart 패키지 : recursive partitioning & regression tree
# R을 설치하면 포함되어 있음.
# rpart.plot 패키지 : rpart의 내용을 tree로 시각화
install.packages("rpart.plot")
library(rpart.plot)
rp_titanic <- rpart()
|
3a7f780a6e18ada08be498bfd89018dd0811b8a3
|
17886959ef58846d110f1bfc300e60f750d3ed90
|
/NLP.R
|
0f5f648eb7ad28728a57a6a25ca0ec253c375e10
|
[] |
no_license
|
siavrluk/Coursera-Capstone
|
f94e3947fbec0c016be80f7033fbd0e830bb00d7
|
74213b5d603e5e324d9ad626a3a090bad45d97ef
|
refs/heads/main
| 2023-07-29T03:28:48.172965
| 2021-09-15T16:34:26
| 2021-09-15T16:34:26
| 366,791,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,658
|
r
|
NLP.R
|
library(dplyr)
library(ggplot2)
library(stringr)
library(ngram)
library(tm)
library(RWeka)
library(wordcloud)
blogs_file <- "final/en_US/en_US.blogs.txt"
news_file <- "final/en_US/en_US.news.txt"
twitter_file <- "final/en_US/en_US.twitter.txt"
# File size
blogs_file_size <- file.size(blogs_file)/(2^20)
news_file_size <- file.size(news_file)/(2^20)
twitter_file_size <- file.size(blogs_file)/(2^20)
# Read in the data files and check their length
blogs <- readLines(blogs_file, skipNul = TRUE)
blogs_lines_ct <- length(blogs)
news <- readLines(news_file, skipNul = TRUE)
news_lines_ct <- length(news)
twitter <- readLines(twitter_file, skipNul = TRUE)
twitter_lines_ct <- length(twitter)
# Check number of words per file
blogs_words_ct <- wordcount(blogs, sep = " ")
news_words_ct <- wordcount(news, sep = " ")
twitter_words_ct <- wordcount(twitter, sep = " ")
# Put in a data frame
summary_df <- data.frame(Dataset = c("blogs", "news", "twitter"),
FileSizeMB = c(blogs_file_size, news_file_size, twitter_file_size),
LinesCt = c(blogs_lines_ct, news_lines_ct, twitter_lines_ct),
WordsCt = c(blogs_words_ct, news_words_ct, twitter_words_ct))
names(summary_df) <- c("Dataset", "File size (MB)", "Lines Count", "Words Count")
saveRDS(summary_df, file = "summary.rds")
# Files are too big, will sample 5% of each
set.seed(3213)
sample_size <- 0.05
blogs_small <- sample(blogs, sample_size*length(blogs), replace = FALSE)
news_small <- sample(news, sample_size*length(news), replace = FALSE)
twitter_small <- sample(twitter, sample_size*length(twitter), replace = FALSE)
# Combine into one dataset
data_small <- c(blogs_small, news_small, twitter_small)
length(data_small)
small_words_ct <- wordcount(data_small, sep = " ")
saveRDS(data_small, file = "sampledData.rds")
# Free up memory
rm(blogs, news, twitter, blogs_small, news_small, twitter_small)
data_small_clean <- data_small %>%
gsub("(s?)(f|ht)tp(s?)://\\S+\\b", "", .) %>% # remove urls
gsub("\\S+@\\S+", " ", .) %>% # remove email addresses
gsub("@\\S+", " ", .) %>% # remove twitter handles
gsub("#\\S+", " ", .) %>% # remove hashtags
tolower() %>%
str_squish()
# Corpus
small_corpus <- data_small_clean %>%
VectorSource() %>%
VCorpus()
# Remove redundant information such as urls, twitter handles, email addresses, special characters, punctuations, numbers
toSpace <- content_transformer(function(x, pattern) gsub(pattern, " ", x))
clean_corpus <- small_corpus %>%
# tm_map(content_transformer(function(x) gsub("(s?)(f|ht)tp(s?)://\\S+\\b", ""))) %>%
# tm_map(toSpace, ., "(s?)(f|ht)tp(s?)://\\S+\\b") %>% # remove urls
# tm_map(toSpace, ., "\\S+@\\S+") %>% # remove email addresses
# tm_map(toSpace, ., "@[^\\s]+") %>% # remove twitter handles
tm_map(removeNumbers) %>%
tm_map(removePunctuation)
# Create tdm
Tokenizer1 <- function (x) NGramTokenizer(x, Weka_control(min = 1, max = 1))
tdm1 <- TermDocumentMatrix(clean_corpus, control = list(tokenize = Tokenizer1))
tdm1 <- removeSparseTerms(tdm1, 0.9999)
words <- sort(rowSums(as.matrix(tdm1)),decreasing=TRUE)
uni_df <- data.frame(word = names(words),freq=words)
saveRDS(uni_df, file = "unigrams.rds")
# Number of unique words
length(uni_df$word)
uni_df_coverage <- uni_df %>%
mutate(coverage = 100*cumsum(freq)/sum(freq))
word_coverage_plot <- ggplot(uni_df_coverage, aes(coverage)) +
stat_bin(aes(y = cumsum(..count..)/sum(..count..)*100), geom = "step", bins = 50) +
scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10)) +
xlab("Percent Covered") +
ylab("Percent of Words") +
ggtitle("Word coverage") +
coord_flip()
ggsave(word_coverage_plot,file="wordCoverage.png")
# Word cloud
# wordcloud() draws with base graphics and returns NULL, so ggsave() cannot
# save it (ggsave would error or silently save the last ggplot instead).
# Open a png device explicitly, draw, and close it.
png("wordCloud.png", width = 800, height = 800)
wordcloud(words = uni_df$word, freq = uni_df$freq, min.freq = 1, max.words=200,
          random.order=FALSE, rot.per=0.35, colors=brewer.pal(8, "RdBu"))
dev.off()
# Unigram
uni_words90_df <- uni_df_coverage[uni_df_coverage$coverage < 90, ]
uni_plot <- ggplot(uni_words90_df[1:10, ], aes(x = reorder(word, freq), y = freq / sum(uni_words90_df$freq), fill = freq, alpha = 0.1)) +
geom_bar(stat = "identity", color = "black") +
xlab("Unigram") +
ylab("Proportion") +
ggtitle("Top 10 Unigrams by Proportion") +
coord_flip() +
guides(fill = FALSE, alpha = FALSE)
ggsave(uni_plot,file="unigrams.png")
# Bigram
Tokenizer2 <- function (x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
tdm2 <- TermDocumentMatrix(clean_corpus, control = list(tokenize = Tokenizer2))
tdm2 <- removeSparseTerms(tdm2, 0.9999)
words <- sort(rowSums(as.matrix(tdm2)),decreasing=TRUE)
bi_df <- data.frame(word = names(words),freq=words)
bi_df_coverage <- bi_df %>%
mutate(coverage = 100*cumsum(freq)/sum(freq))
bi_words90_df <- bi_df_coverage[bi_df_coverage$coverage < 90, ]
saveRDS(bi_df, file = "bigrams.rds")
bi_plot <- ggplot(bi_words90_df[1:10, ], aes(x = reorder(word, freq), y = freq / sum(bi_words90_df$freq), fill = freq, alpha = 0.1)) +
geom_bar(stat = "identity", color = "black") +
xlab("Bigram") +
ylab("Proportion") +
ggtitle("Top 10 Bigrams by Proportion") +
coord_flip() +
guides(fill = FALSE, alpha = FALSE)
ggsave(bi_plot,file="bigrams.png")
# Trigram
Tokenizer3 <- function (x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
tdm3 <- TermDocumentMatrix(clean_corpus, control = list(tokenize = Tokenizer3))
tdm3 <- removeSparseTerms(tdm3, 0.9999)
words <- sort(rowSums(as.matrix(tdm3)),decreasing=TRUE)
tri_df <- data.frame(word = names(words),freq=words)
tri_df_coverage <- tri_df %>%
mutate(coverage = 100*cumsum(freq)/sum(freq))
tri_words90_df <- tri_df_coverage[tri_df_coverage$coverage < 90, ]
saveRDS(tri_df, file = "trigrams.rds")
tri_plot <- ggplot(tri_words90_df[1:10, ], aes(x = reorder(word, freq), y = freq / sum(tri_words90_df$freq), fill = freq, alpha = 0.1)) +
geom_bar(stat = "identity", color = "black") +
xlab("Trigram") +
ylab("Proportion") +
ggtitle("Top 10 Trigrams by Proportion") +
coord_flip() +
guides(fill = FALSE, alpha = FALSE)
ggsave(tri_plot,file="trigrams.png")
# Quadrigram
Tokenizer4 <- function (x) NGramTokenizer(x, Weka_control(min = 4, max = 4))
tdm4 <- TermDocumentMatrix(clean_corpus, control = list(tokenize = Tokenizer4))
tdm4 <- removeSparseTerms(tdm4, 0.99999)
words <- sort(rowSums(as.matrix(tdm4)),decreasing=TRUE)
quad_df <- data.frame(word = names(words),freq=words)
quad_df_coverage <- quad_df %>%
mutate(coverage = 100*cumsum(freq)/sum(freq))
quad_words90_df <- quad_df_coverage[quad_df_coverage$coverage < 90, ]
saveRDS(quad_df, file = "quadrigrams.rds")
quad_plot <- ggplot(quad_words90_df[1:10, ], aes(x = reorder(word, freq), y = freq / sum(quad_words90_df$freq), fill = freq, alpha = 0.1)) +
geom_bar(stat = "identity", color = "black") +
xlab("Quadrigram") +
ylab("Proportion") +
ggtitle("Top 10 Quadrigrams by Proportion") +
coord_flip() +
guides(fill = FALSE, alpha = FALSE)
ggsave(quad_plot,file="quadrigrams.png")
# Split n-grams into beginning + last word
bi_df_split <- bi_df %>%
mutate(beg = word(word , 1 , -2),
last_word = word(word, -1))
tri_df_split <- tri_df %>%
mutate(beg = word(word , 1 , -2),
last_word = word(word, -1))
quad_df_split <- quad_df %>%
mutate(beg = word(word , 1 , -2),
last_word = word(word, -1))
saveRDS(bi_df_split, file = "bigrams_split.rds")
saveRDS(tri_df_split, file = "trigrams_split.rds")
saveRDS(quad_df_split, file = "quadrigrams_split.rds")
|
6b140bac36efc2ca40b632136c96e9bfa366e8ec
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RRTCS/examples/ChaudhuriChristofidesDatapij.Rd.R
|
043b66caa5e542555018c3d2c0d1ae7c5fb9d8a3
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 336
|
r
|
ChaudhuriChristofidesDatapij.Rd.R
|
# Example script for the ChaudhuriChristofidesDatapij dataset (RRTCS package):
# a square matrix of second-order inclusion probabilities pi_ij.
library(RRTCS)
### Name: ChaudhuriChristofidesDatapij
### Title: Matrix of the second-order inclusion probabilities
### Aliases: ChaudhuriChristofidesDatapij
### Keywords: datasets
### ** Examples
data(ChaudhuriChristofidesDatapij)
# The diagonal pi_ii of the second-order matrix contains the
# first-order inclusion probabilities pi_i.
diag(ChaudhuriChristofidesDatapij)
|
c7684cae8b7d4d736d6ad35378e1771cbb310744
|
afa7488f8e3e98817ac5e4ebbc789daa8e833288
|
/Assignment 2.R
|
7a2841c52d2fdac842236963d69fc00517e2cce5
|
[] |
no_license
|
daniellehatt/Ecologyworkshop
|
6a08e277d48ffbd4ca9a2d98fb2c4e26e12d589c
|
617fefe48719a8a25a094d404018151d76e607ba
|
refs/heads/master
| 2020-12-08T22:53:33.333979
| 2020-04-01T06:17:02
| 2020-04-01T06:17:02
| 233,117,660
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,526
|
r
|
Assignment 2.R
|
file.choose()
load("/Users/daniellehatt/Desktop/Ecologyworkshop/NLM_Workshop.RData")
install.packages("nlstools")
library(nlstools)
#visualizing data
par(mai=c(1,1,0.1,0.1))
plot(harv$TIMESTAMP,harv$NEE,
ylab=expression(paste("NEE(",mu,"mol m"^{-2} ~ s^{-1} ~ ")" )),
xlab="")
#fitting light response
plot( NEE ~ PAR, data= day)
y = nls(NEE ~ (a1 * PAR * ax)/(a1 * PAR + ax) + r, data=day[which(day$MONTH == 07),], start=list(a1= -1 , ax= -1, r= 1), na.action=na.exclude, trace=F, control=nls.control(warnOnly=T))
summary(y)
#START VALUES
# 1. Create a function of the model:
# Rectangular-hyperbola light response curve: predicts NEE from
# photosynthetically active radiation (PAR) with apparent quantum yield a1,
# asymptotic flux ax, and respiration offset r. Vectorized over PAR.
lrcModel <- function(PAR, a1, ax, r) {
  (a1 * PAR * ax) / (a1 * PAR + ax) + r
}
# 2. Initial: create a function that calculates the initial values from the data.
# Initial-value function for the selfStart light response curve (SS.lrc).
# Derives rough starting parameters from the observed NEE column, then clamps
# them into plausible ranges so nls() starts from a feasible point.
# NOTE(review): the comments below label r as "Maximum NEE" and ax as
# "Minimum NEE"; that matches the code, but verify it matches the intended
# physical meaning of r (respiration) and ax (asymptote) for this sign
# convention.
lrc.int <- function (mCall, LHS, data){ x <- data$PAR
y <- data$NEE
r <- max(na.omit(y), na.rm=T) # Maximum NEE
ax <- min(na.omit(y), na.rm=T) # Minimum NEE
a1 <- (r + ax)/2 # Midway between r and ax
# Clamp the starting values into feasible ranges:
a1[a1 > 0]<- -0.1
r[r > 50] <- ax*-1
r[r < 0] <- 1
value = list(a1, ax, r) # Must include this for the selfStart function
names(value) <- mCall[c("a1", "ax", "r")] # Must include this for the selfStart function
return(value)
}
# Selfstart function
SS.lrc <- selfStart(model=lrcModel,initial= lrc.int)
# 3. Find initial values:
iv <- getInitial(NEE ~ SS.lrc('PAR', "a1", "ax", "r"), data = day[which(day$MONTH == 07),])
iv
y = nls( NEE ~ (a1 * PAR * ax)/(a1 * PAR + ax) + r, day[which(day$MONTH == 07),], start=list(a1= iv$a1 , ax= iv$ax, r= iv$r),
na.action=na.exclude, trace=F, control=nls.control(warnOnly=T))
summary(y)
#checking assumptions
res.lrc <- nlsResiduals(y)
par(mfrow=c(2,2))
plot(res.lrc, which=1)# Residulas vs fitted values (Constant Variance)
plot(res.lrc, which=3) # Standardized residuals
plot(res.lrc, which=4) # Autocorrelation
plot(res.lrc, which=5) # Histogram (Normality)
test.nlsResiduals(res.lrc)
#Bootstrap
results <- nlsBoot(y, niter=100 )
summary(results)
plot(results, type = "boxplot")
#EXERCISE2
parms.Month <- data.frame(
MONTH=numeric(),
a1=numeric(),
ax=numeric(),
r=numeric(),
a1.pvalue=numeric(),
ax.pvalue=numeric(),
r.pvalue=numeric(), stringsAsFactors=FALSE, row.names=NULL)
parms.Month[1:12, 1] <- seq(1,12,1) # Adds months to the file
# Fit the light response curve to one subset of daytime data and return the
# parameter estimates with their p-values as a one-row data frame.
# NOTE(review): starting values come from the global `iv` (set by
# getInitial() in the calling loop), not from `dataframe` — this function is
# only correct when `iv` was computed for the same subset.
nee.day <- function(dataframe){ y = nls( NEE ~ (a1 * PAR * ax)/(a1 * PAR + ax) + r, dataframe,
                                         start=list(a1= iv$a1 , ax= iv$ax, r= iv$r),
                                         na.action=na.exclude, trace=F,
                                         control=nls.control(warnOnly=T))
# Column 1 of coef(summary(y)) holds estimates, column 4 the p-values.
y.df <- as.data.frame(cbind(t(coef(summary(y)) [1:3, 1]), t(coef(summary(y)) [1:3, 4])))
names(y.df) <-c("a1","ax", "r", "a1.pvalue", "ax.pvalue", "r.pvalue")
return (y.df )}
try(for(j in unique(day$MONTH)){
# Determines starting values:
iv <- getInitial(NEE ~ SS.lrc('PAR', "a1", "ax", "r"), data = day[which(day$MONTH == j),])
# Fits light response curve:
y3 <- try(nee.day(day[which(day$MONTH == j),]), silent=T)
# Extracts data and saves it in the dataframe
try(parms.Month[c(parms.Month$MONTH == j ), 2:7 ] <- cbind(y3), silent=T)
rm(y3)
}, silent=T)
parms.Month
#Bootstrapping
# Create file to store parms and se
boot.NEE <- data.frame(parms.Month[, c("MONTH")]);names (boot.NEE) <- "MONTH"
boot.NEE$a1.est <- 0
boot.NEE$ax.est<- 0
boot.NEE$r.est<- 0
boot.NEE$a1.se<- 0
boot.NEE$ax.se<- 0
boot.NEE$r.se<- 0
# Bootstrap parameter estimates and standard errors per month.
# BUG FIX: the original iterated over unique(boot.NEE$Month); the column is
# named "MONTH" and $ matching is case-sensitive, so $Month returned NULL and
# the loop body never executed (boot.NEE stayed all zeros).
for ( j in unique(boot.NEE$MONTH)){
  y1 <- day[which(day$MONTH == j),] # Subsets data
  # Determines the starting values:
  iv <- getInitial(NEE ~ SS.lrc('PAR', "a1", "ax", "r"), data = y1)
  # Fit curve:
  day.fit <- nls( NEE ~ (a1 * PAR * ax)/(a1 * PAR + ax) + r, data=y1,
                  start=list(a1= iv$a1 , ax= iv$ax, r= iv$r),
                  na.action=na.exclude, trace=F, control=nls.control(warnOnly=T))
  # Bootstrap; row 1 of estiboot = estimates, row 2 = standard errors.
  # try(..., silent=T) keeps one failing month from aborting the whole loop.
  try(results <- nlsBoot(day.fit, niter=100 ), silent=T)
  try(a <- t(results$estiboot)[1, 1:3], silent=T)
  try(names(a) <- c('a1.est', 'ax.est', 'r.est'), silent=T)
  try( b <- t(results$estiboot)[2, 1:3], silent=T)
  try(names(b) <- c('a1.se', 'ax.se', 'r.se'), silent=T)
  try(c <- t(data.frame(c(a,b))), silent=T)
  # Add bootstrap data to dataframe:
  try(boot.NEE[c(boot.NEE$MONTH == j), 2:7] <- c[1, 1:6], silent=T)
  try(rm(day.fit, a, b, c, results, y1), silent=T)
}
lrc <- merge( parms.Month, boot.NEE, by.x="MONTH", by.y="MONTH") # Merge dataframes
lrc
|
181eec708fc81a7097245798f95594c14bc1d480
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/clue/examples/cl_boot.Rd.R
|
0c4de658166de9eec7654ef4db00c5f2723ece66
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 333
|
r
|
cl_boot.Rd.R
|
# Example script for clue::cl_boot — bootstrap resampling of clusterings.
library(clue)
### Name: cl_boot
### Title: Bootstrap Resampling of Clustering Algorithms
### Aliases: cl_boot
### Keywords: cluster
### ** Examples
## Study e.g. the effect of random kmeans() initializations.
data("Cassini")
# 15 replicate k-means partitions (k = 3) of the Cassini data.
pens <- cl_boot(Cassini$x, 15, 3)
# Pairwise dissimilarities between the replicate partitions.
diss <- cl_dissimilarity(pens)
summary(c(diss))
# Hierarchical clustering of the partitions themselves.
plot(hclust(diss))
|
65fd4c77b59f8af9990910a26977855cd20d3335
|
ad4a0111b56b82ce3c145e7dfd3710019727ed6d
|
/R/7_Table.R
|
c17636e64e59e8ce5ae61cd64716bc5cc80adf0e
|
[] |
no_license
|
georgegui/MarketingRegression
|
6c159ce7467bab281748bfb0ddd32a52c5e204a2
|
fd3757d3f3e935cd366b8dc579008d3717a0369e
|
refs/heads/master
| 2021-04-09T11:52:08.058129
| 2018-03-16T22:35:28
| 2018-03-16T22:35:28
| 109,545,171
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,681
|
r
|
7_Table.R
|
#' Build per-column cell styles for writing a table to an xlsx sheet
#'
#' Numeric columns whose values are all (within 1e-6) whole numbers get an
#' integer display format ("#,##0"); other numeric columns get three
#' decimals ("#,##0.000"); non-numeric columns get the workbook default.
#'
#' @param dt A data.table whose columns will be written.
#' @param wb An xlsx workbook object, used to construct each CellStyle.
#' @return A list of CellStyle objects named "1".."ncol(dt)" (column
#'   positions), as expected by xlsx's colStyle argument.
#' @export
GenerateColumnFormat <- function(dt, wb){
  cell_style_list <- list()
  for(cur_col in names(dt)){
    if(dt[, is.numeric(get(cur_col))]){
      # Integer-valued check: the 1e-8 offset guards against floating-point
      # noise just below a whole number.
      is_integer <- max(dt[, (get(cur_col) + 1e-8)%%1]) < 1e-6
      # (Removed leftover debug print of the significant_percentage column.)
      if(is_integer){
        cell_style_list[[cur_col]] <- CellStyle(wb) + DataFormat("#,##0")
      } else {
        cell_style_list[[cur_col]] <- CellStyle(wb) + DataFormat("#,##0.000")
      }
    } else {
      cell_style_list[[cur_col]] <- CellStyle(wb)
    }
  }
  # Styles are matched by column index, so rename the list by position.
  names(cell_style_list) <- seq_along(cell_style_list)
  return(cell_style_list)
}
#' Summarize coefficient estimates across models
#'
#' Produces one row per model with revenue-weighted summary statistics of
#' the coefficients: trimmed mean, median, sign percentages, significance
#' rate, and a grid of weighted quantiles (1%–99%).
#'
#' @param results A data.table with (at least) columns model_name,
#'   coefficient, brand_revenue (used as weights), and significant (0/1).
#' @return A data.table keyed by model_id, one row per model, with summary
#'   columns ordered for presentation.
#'
#' NOTE(review): reads the global `ordered_models` (must have model_name,
#' model_id, model_abbrev) and uses Hmisc's wtd.quantile/wtd.mean plus the
#' sibling wtd.mean.trim. Also note less_than_1_percentage actually measures
#' coefficient < -1 — confirm the name matches the intent.
#' @export
TableSummary <- function(results){
  quantile_list <- c(0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 0.99)
  quantile_name <- paste0(quantile_list * 100, '%')
  # Long table of weighted quantiles per model, then cast wide so each
  # quantile becomes its own column ("1%", "2.5%", ...).
  results_q <- results[, list(quantile_value =
                                wtd.quantile(coefficient, brand_revenue,
                                             quantile_list, normwt = TRUE)),
                       by = model_name]
  results_q[, var_name := quantile_name]
  results_q <- dcast(results_q, model_name ~ var_name, value.var = 'quantile_value')
  # Revenue-weighted summary statistics per model.
  results_summary <- results[, list(
    mean = wtd.mean.trim(coefficient, brand_revenue, 0.01),
    median = wtd.quantile(coefficient, brand_revenue, 0.5, normwt = TRUE),
    min = min(coefficient),
    max = max(coefficient),
    n_observation = .N,
    positive_percentage = wtd.mean((coefficient > 0), brand_revenue),
    negative_percentage = wtd.mean((coefficient < 0), brand_revenue),
    less_than_1_percentage = wtd.mean((coefficient < -1), brand_revenue),
    significant_percentage = wtd.mean((significant == 1), brand_revenue)),
    by = model_name]
  results_summary <- merge(results_summary, results_q, by = 'model_name')
  # Attach display metadata (id, abbreviation) from the global ordered_models.
  results_summary <- merge(ordered_models, results_summary, by = 'model_name')
  summary_cols <- c('model_id','model_name', 'model_abbrev', 'n_observation',
                    'mean', 'median',
                    'positive_percentage', 'negative_percentage',
                    'less_than_1_percentage', 'significant_percentage',
                    'min', quantile_name, 'max')
  setcolorder(results_summary, summary_cols)
  setkey(results_summary, model_id)
  return(results_summary)
}
# Weighted mean of x after symmetrically trimming the weighted tails:
# observations at or beyond the `trim` and `1 - trim` weighted quantiles
# are dropped before averaging. Extra arguments are passed to wtd.mean.
wtd.mean.trim <- function(x, w, trim, ...){
  bounds <- wtd.quantile(x, w, c(trim, 1- trim), normwt = TRUE)
  keep <- (x > bounds[[1]]) & (x < bounds[[2]])
  wtd.mean(x[keep], w[keep], ...)
}
|
7deb6be63ab21719ca62df4b3cc3089e40d0c8c1
|
677145112960e3ae894785aa338b9b37871f076e
|
/Project1.R
|
d7e2d2c96bd0a1c640d5c8fb1045e627687c8a33
|
[] |
no_license
|
Nicolas-Andreas/stat-452-regression-machine-learning
|
512daa50303a25191ac49de85d974b9d2b22f17d
|
f953d41416f1bc62fbd68b2ac8691585bec8bbb3
|
refs/heads/master
| 2023-04-11T21:36:20.695778
| 2021-01-15T23:42:19
| 2021-01-15T23:42:19
| 367,553,796
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,726
|
r
|
Project1.R
|
#Project 1
# Randomly partition n observations into k cross-validation folds of
# near-equal size.
#
# Returns an integer vector of length n with fold labels in 1..k, in a
# random order (consumes the current RNG state via sample.int).
get.folds <- function(n, k) {
  fold.labels <- rep(1:k, times = ceiling(n / k))[1:n]
  fold.labels[sample.int(n)]
}
# Mean squared prediction error of predictions y.hat against observed y.
#
# y      numeric vector of observed responses
# y.hat  numeric vector of predictions (same length as y)
getMSPE <- function(y, y.hat) {
  sq.errors <- (y - y.hat)^2
  sum(sq.errors) / length(y)
}
# Column-wise min-max rescaling of x1 using the per-column range of x2.
# Passing the training predictors as x2 maps new data onto the training
# [0, 1] scale; columns of x1 and x2 must correspond positionally.
rescale <- function(x1, x2) {
  for (j in seq_len(ncol(x1))) {
    lo <- min(x2[, j])
    hi <- max(x2[, j])
    x1[, j] <- (x1[, j] - lo) / (hi - lo)
  }
  x1
}
# --- Data and train/validation split -------------------------------------
# Response column is Y; predictors are X1..X15 (see model formulas below).
data = read.csv("Data2020.csv")
pairs(data)
# Random 75% / 25% split: label rows 1 (train) or 2 (validation), then
# shuffle the labels with a random permutation.
n = nrow(data)
group1 = rep(1, times = round(n * 0.75))
group2 = rep(2, times = n - round(n * 0.75))
group.raw = c(group1, group2)
group = group.raw[sample.int(n)]
data.train = data[group == 1,]
data.valid = data[group == 2,]
y.valid = data.valid$Y
# --- Candidate models, scored by validation-set MSPE ---------------------
# Linear regression on all predictors.
# NOTE(review): getMSPE is defined as getMSPE(y, y.hat); arguments here
# are swapped, but squared-error loss is symmetric so the value is the same.
fit.lm = lm(Y ~ ., data = data.train)
pred.lm = predict(fit.lm, data.valid)
MSPE.lm = getMSPE(pred.lm, y.valid)
MSPE.lm
# Reduced linear model with three hand-picked predictors.
fit.lm = lm(Y ~ X12 + X4 + X2, data = data.train)
pred.lm = predict(fit.lm, data.valid)
MSPE.lm = getMSPE(pred.lm, y.valid)
MSPE.lm
# Linear model with all pairwise interactions.
fit.lm2 = lm(Y ~ .^2, data = data.train)
pred.lm2 = predict(fit.lm2, data.valid)
MSPE.lm2 = getMSPE(pred.lm2, y.valid)
MSPE.lm2
# Stepwise selection by BIC (k = log n), from intercept-only up to the
# full additive model.
# NOTE(review): fit.start / fit.end are fit on the FULL data set, so the
# validation rows leak into the stepwise search — confirm this is intended.
fit.start = lm(Y ~ 1, data = data)
fit.end = lm(Y ~ ., data = data)
step.BIC = step(fit.start, list(upper = fit.end), k = log(nrow(data.train)))
pred.BIC = predict(step.BIC, data.valid)
MSPE.BIC = getMSPE(y.valid, pred.BIC)
# --- Generalized additive model (mgcv) -----------------------------------
# Smooth terms s() on most predictors; X4, X10 and X12 enter linearly.
# NOTE(review): fit on the full data set rather than data.train — any later
# validation-set comparison against this fit is optimistic.
library(mgcv)
fit.gam = gam(Y ~ s(X1) + s(X2) + s(X3) + X4 + s(X5) + s(X6) + s(X7) + s(X8) + s(X9) + X10
              + s(X11) + X12 + s(X13) + s(X14) + s(X15), data = data)
summary(fit.gam)
# --- All-subsets regression scored by BIC (leaps) ------------------------
library(leaps)
# NOTE(review): the name `matrix` shadows base::matrix from here on.
matrix = model.matrix(Y ~ ., data = data)
y = data$Y
all.subsets = regsubsets(x = matrix, y = y, nvmax = 20, intercept = FALSE)
# Logical inclusion matrix: one row per candidate subset, one column per term.
info.subsets = summary(all.subsets)$which
n.models = nrow(info.subsets)
all.BIC = rep(0, times = n.models)
# Refit each subset with lm (no extra intercept — model.matrix already
# carries one) and record its BIC (extractAIC with k = log n).
for(i in 1:n.models) {
  this.data.matrix = matrix[,info.subsets[i,]]
  fit = lm(y ~ this.data.matrix - 1)
  this.BIC = extractAIC(fit, k = log(nrow(data)))[2]
  all.BIC[i] = this.BIC
}
# Inclusion indicators of the BIC-best subset.
bestBIC = info.subsets[which.min(all.BIC),]
# --- Models with tuning --------------------------------------------------
# --- Neural nets: K-fold CV over a (size, decay) tuning grid -------------
library(nnet)
# Number of random restarts per fit: nnet's optimizer depends on its random
# starting weights, so we keep the best of several fits (lowest training SSE).
nnetRep = 10
# Tuning grid: hidden-layer sizes crossed with weight-decay (shrink) values.
all.n.hidden = c(1, 3, 5, 7)
all.shrink = c(0.1, 0.5, 1, 2)
all.pars = expand.grid(n.hidden = all.n.hidden, shrink = all.shrink)
n.pars = nrow(all.pars)
# CV.MSPEs[i, j] holds the MSPE of parameter combination j on fold i.
K = 10
folds = get.folds(nrow(data), K)
CV.MSPEs = array(0, dim = c(K, n.pars))
for (i in 1:K) {
  print(paste0(i, " of ", K))
  data.train = data[folds != i, ]
  x.train.raw = data.train[, -1]
  x.train = rescale(x.train.raw, x.train.raw)
  y.train = data.train[, 1]
  data.valid = data[folds == i, ]
  x.valid.raw = data.valid[, -1]
  # Validation predictors are rescaled with the TRAINING ranges so no
  # information leaks from the validation fold.
  x.valid = rescale(x.valid.raw, x.train.raw)
  y.valid = data.valid[, 1]
  for (j in 1:n.pars) {
    this.n.hidden = all.pars[j, 1]
    # BUG FIX: the shrink value must be indexed by the tuning-grid index j,
    # not the fold index i (the original read all.pars[i, 2], so the decay
    # grid was never actually varied within a fold).
    this.shrink = all.pars[j, 2]
    # Fit nnetRep nets with random restarts; keep the one with lowest SSE.
    all.nnets = list(1:nnetRep)
    all.SSEs = rep(0, times = nnetRep)
    for (l in 1:nnetRep) {
      fit.nnet = nnet(x.train, y.train, linout = TRUE, size = this.n.hidden, decay = this.shrink, maxit = 500, trace = FALSE)
      SSE.nnet = fit.nnet$value
      all.nnets[[l]] = fit.nnet
      all.SSEs[l] = SSE.nnet
    }
    ind.best = which.min(all.SSEs)
    fit.nnet.best = all.nnets[[ind.best]]
    pred.nnet = predict(fit.nnet.best, x.valid)
    MSPE.nnet = getMSPE(y.valid, pred.nnet)
    CV.MSPEs[i, j] = MSPE.nnet
  }
}
# --- Random forest: baseline fit and tuning ------------------------------
library(randomForest)
# Baseline forest on the training split, with variable importance measures.
fit.rf = randomForest(Y ~ ., data = data.train, importance = TRUE)
importance(fit.rf)
varImpPlot(fit.rf)
# BUG FIX: predict() with no newdata returns out-of-bag predictions for the
# TRAINING rows, so they must be compared against data.train$Y — the
# original compared against data$Y, whose length differs (silent recycling).
oob.pred = predict(fit.rf)
oob.MSPE = getMSPE(data.train$Y, oob.pred)
# Validation-set performance of the baseline forest.
sample.pred = predict(fit.rf, data.valid)
sample.MSPE = getMSPE(y.valid, sample.pred)
# Tuning grid over mtry and nodesize; each combination is refit M times on
# the FULL data and scored by its OOB MSPE (here fit.rf is trained on
# `data`, so data$Y is the correct comparison).
all.mtry = 3:9
all.nodesize = c(2, 3, 5)
all.pars = expand.grid(mtry = all.mtry, nodesize = all.nodesize)
n.pars = nrow(all.pars)
M = 5
OOB.MSPEs = array(0, dim = c(M, n.pars))
for (i in 1:n.pars) {
  print(paste0(i, " of ", n.pars))
  this.mtry = all.pars[i, "mtry"]
  this.nodesize = all.pars[i, "nodesize"]
  for (j in 1:M) {
    fit.rf = randomForest(Y ~ ., data = data, importance = FALSE, mtry = this.mtry, nodesize = this.nodesize)
    OOB.pred = predict(fit.rf)
    OOB.MSPE = getMSPE(data$Y, OOB.pred)
    OOB.MSPEs[j, i] = OOB.MSPE
  }
}
names.pars = paste0(all.pars$mtry, "-", all.pars$nodesize)
colnames(OOB.MSPEs) = names.pars
boxplot(OOB.MSPEs, las = 2)
# Relative MSPEs (each run divided by its best combination) put all
# combinations on a common scale.
OOB.RMSPEs = apply(OOB.MSPEs, 1, function(w) w/min(w))
OOB.RMSPEs = t(OOB.RMSPEs)
boxplot(OOB.RMSPEs, las = 2)
# Refit one chosen combination and check its validation-set MSPE.
fit.rf.2 = randomForest(Y ~ ., data = data.train, importance = TRUE, mtry = 3, nodesize = 2)
plot(fit.rf.2)
varImpPlot(fit.rf.2)
sample.pred.2 = predict(fit.rf.2, data.valid)
sample.MSPE.2 = getMSPE(y.valid, sample.pred.2)
# --- Final model comparison: 20-fold cross-validation --------------------
library(mgcv)
library(randomForest)
library(nnet)
data = read.csv("Data2020.csv")
set.seed(6232493)
n = nrow(data)
k = 20
folds = get.folds(n, k)
# Competitors: full least squares, reduced LS, stepwise-BIC, GAM, tuned
# random forest, and a neural net; all.MSPEs[i, m] is model m's MSPE on fold i.
all.models = c("LS", "LSpart", "Step", "GAM", "RF", "NNET")
all.MSPEs = array(0, dim = c(k, length(all.models)))
colnames(all.MSPEs) = all.models
max.terms = 15
# NOTE(review): this loop reads nnetRep, which is defined in the earlier
# neural-net tuning section — that section must run first.
for(i in 1:k) {
  print(paste0(i, " of ", k))
  # Fold i is the validation set; everything else is training data.
  data.train = data[folds != i,]
  x.train.raw = data.train[, -1]
  x.train = rescale(x.train.raw, x.train.raw)
  y.train = data.train[, 1]
  data.valid = data[folds == i,]
  x.valid.raw = data.valid[, -1]
  # Rescale validation predictors with the training ranges (no leakage).
  x.valid = rescale(x.valid.raw, x.train.raw)
  n.train = nrow(data.train)
  y.train = data.train$Y
  y.valid = data.valid$Y
  # Full least squares.
  fit.ls = lm(Y ~ ., data = data.train)
  pred.ls = predict(fit.ls, newdata = data.valid)
  MSPE.ls = getMSPE(y.valid, pred.ls)
  all.MSPEs[i, "LS"] = MSPE.ls
  # Reduced least squares with three hand-picked predictors.
  fit.ls.part = lm(Y ~ X12 + X4 + X2, data = data.train)
  pred.ls.part = predict(fit.ls.part, newdata = data.valid)
  MSPE.ls.part = getMSPE(y.valid, pred.ls.part)
  all.MSPEs[i, "LSpart"] = MSPE.ls.part
  # GAM with smooth terms on the continuous predictors.
  fit.gam = gam(Y ~ s(X1) + s(X2) + s(X3) + X4 + s(X5) + s(X6) + s(X7) + s(X8) + s(X9) + X10
                + s(X11) + X12 + s(X13) + s(X14) + s(X15), data = data.train)
  pred.gam = predict(fit.gam, data.valid)
  MSPE.gam = getMSPE(y.valid, pred.gam)
  all.MSPEs[i, "GAM"] = MSPE.gam
  # Stepwise selection by BIC.
  # NOTE(review): fit.start / fit.end are fit on the FULL data set, so each
  # fold's validation rows leak into the stepwise search — confirm intended.
  fit.start = lm(Y ~ 1, data = data)
  fit.end = lm(Y ~ ., data = data)
  step.BIC = step(fit.start, list(upper = fit.end), k = log(nrow(data.train)), trace = FALSE)
  pred.BIC = predict(step.BIC, data.valid)
  MSPE.BIC = getMSPE(y.valid, pred.BIC)
  all.MSPEs[i, "Step"] = MSPE.BIC
  # Random forest with the tuned settings mtry = 7, nodesize = 3.
  fit.rf.7.3 = randomForest(Y ~ ., data = data.train, importance = TRUE, mtry = 7, nodesize = 3)
  sample.pred.7.3 = predict(fit.rf.7.3, data.valid)
  sample.MSPE.7.3 = getMSPE(y.valid, sample.pred.7.3)
  all.MSPEs[i, "RF"] = sample.MSPE.7.3
  # Neural net: keep the best of nnetRep random restarts (by training SSE).
  all.nnets = list(1:nnetRep)
  all.SSEs = rep(0, times = nnetRep)
  for(l in 1:nnetRep) {
    fit.nnet = nnet(x.train, y.train, linout = TRUE, size = 1, decay = 0.1, maxit = 500, trace = FALSE)
    SSE.nnet = fit.nnet$value
    all.nnets[[l]] = fit.nnet
    all.SSEs[l] = SSE.nnet
  }
  ind.best = which.min(all.SSEs)
  fit.nnet.best = all.nnets[[ind.best]]
  pred.nnet = predict(fit.nnet.best, x.valid)
  MSPE.nnet = getMSPE(y.valid, pred.nnet)
  all.MSPEs[i, "NNET"] = MSPE.nnet
}
# Absolute MSPEs, then MSPEs relative to the per-fold best model.
boxplot(all.MSPEs)
all.RMSPE = apply(all.MSPEs, 1, function(w) {
  best = min(w)
  return(w / best)
})
# apply() returns folds in columns; transpose back to folds-in-rows.
all.RMSPE = t(all.RMSPE)
boxplot(all.RMSPE)
# --- Final prediction on the held-out test predictors --------------------
# Refit the chosen GAM on the full training data and write one prediction
# per line for the test set (no header, no row names).
library(mgcv)
testData2020 = read.csv("Data2020testX.csv")
set.seed(4828347)
fit.gam = gam(Y ~ s(X1) + s(X2) + s(X3) + X4 + s(X5) + s(X6) + s(X7) + s(X8) + s(X9) + X10
              + s(X11) + X12 + s(X13) + s(X14) + s(X15), data = data)
pred.gam = predict(fit.gam, testData2020)
# F -> FALSE: T/F are ordinary, reassignable variables in R; always spell
# out the logical constants.
write.table(pred.gam, "Project1Prediction.txt", sep = ",",
            row.names = FALSE, col.names = FALSE)
|
9dae96a7ecc69c604161bd1a60c107ad45141ca2
|
c5fac476b276f2d1c65547ec4f89292d3abf8ba8
|
/man/print.summary.complmrob.Rd
|
95d22b81c4c6881367dbd60abe17e5b814852b67
|
[] |
no_license
|
dakep/complmrob
|
1f3090343de2cb6319b87fae289047ff60049b92
|
c904ac453cb501417acc1bb7ec41a3c80ecc4015
|
refs/heads/master
| 2020-05-09T12:11:05.675243
| 2019-09-17T18:25:06
| 2019-09-17T18:25:06
| 181,104,338
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 634
|
rd
|
print.summary.complmrob.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary-methods.R
\name{print.summary.complmrob}
\alias{print.summary.complmrob}
\title{Print the summary information}
\usage{
\method{print}{summary.complmrob}(x, digits = max(3, getOption("digits")
- 3), signif.stars = getOption("show.signif.stars"), ...)
}
\arguments{
\item{x}{the summary object.}
\item{digits}{the number of digits for the reported figures}
\item{signif.stars}{should stars be displayed to show the significance of certain figures}
\item{...}{further arguments currently not used}
}
\description{
Print the summary information
}
|
2dfe90bfcb3ab421f38340413a5c31aebf20fafe
|
b2e2db0e13cad433a29dd4f0f46e29be62133190
|
/R/search_coefs_server.R
|
60dd067fa20abf46f4493fc7974ff849b8d1e0fc
|
[] |
no_license
|
JARS3N/LLP
|
e6cf41438255c88ac6fc02363f8a52e3e95e88e9
|
aca875abeede2a11642082c5a2eff60dd99f4c22
|
refs/heads/master
| 2023-02-09T20:19:58.910563
| 2023-01-21T20:35:31
| 2023-01-21T20:35:31
| 105,937,563
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,079
|
r
|
search_coefs_server.R
|
# Shiny server for the coefficient-lookup app: the user selects a lot and
# the app displays that lot's sensor coefficient tables.
# Depends on the LLP package (coef_lots, get_coefs) plus shiny and dplyr.
search_coefs_server <- function() {
  require(shiny)
  require(dplyr)
  # Lookup table of lots; presumably has columns Lot/LotNumber and BMID.
  # NOTE(review): the dropdown choices come from lotstuff$Lot but the filter
  # below matches lotstuff$LotNumber — confirm both columns exist and agree.
  lotstuff <- LLP::coef_lots()
  shinyServer(function(input, output, session) {
    # Populate the lot dropdown once at startup.
    updateSelectInput(session, 'Lot', choices = lotstuff$Lot)
    observeEvent(input$Lot, {
      if (input$Lot != 'SELECT'){
        # Resolve the selected lot to its BMID key.
        BMID <- lotstuff$BMID[lotstuff$LotNumber == input$Lot]
        print(BMID)  # console trace for debugging
        info <- LLP::get_coefs(BMID)
        output$Lottable <- renderTable(data.frame(Lot = input$Lot))
        # NOTE(review): oxtable renders the PH-prefixed columns and pHtable
        # renders the O2-prefixed columns — this looks swapped; confirm
        # against the UI output ids before changing.
        output$oxtable <- renderTable(select(info, contains('PH')) %>%
                                        mutate(PH_A = as.character(round(PH_A, 0))),
                                      digits = 6)
        output$pHtable <- renderTable(select(info, contains('O2')) %>%
                                        mutate(O2_A = as.character(round(O2_A, 0))),
                                      digits = 6)
        # Buffer factor: show NA when the lot has none recorded (BF == 0).
        if(info$BF==0){
          output$bftbl <- renderTable(data.frame( Cartridge_BufferFactor = NA),digits=0)
        }else{
          output$bftbl <-
            renderTable(select(info, Cartridge_BufferFactor = BF), digits = 6)
        }
      }
    })
  })
}
|
4b8f94053c67d36e467bb9621e07d1082809cdc4
|
5f8da4d4d01c6947759af8db517cf295980bfc11
|
/stppResid/R/print.stgrid.R
|
47b1046bf3404c8c1fd75c360481af192025f092
|
[] |
no_license
|
r-clements/stppResid
|
8f3042a12c0189ccd28764f9ad6fba29a00c79ea
|
471286070dc4a4866860a4e82da656d39ce8ce01
|
refs/heads/master
| 2021-01-23T05:44:32.818058
| 2018-06-06T03:36:09
| 2018-06-06T03:36:09
| 5,900,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 79
|
r
|
print.stgrid.R
|
# S3 print method for "stgrid" objects: a one-line header followed by the
# full spatial grid stored in the object's grid.full component.
print.stgrid <- function(x, ...) {
  cat("Spatial grid\n")
  print(x$grid.full)
}
|
ef7378fc58a466b14345ba34967cd8a4083f1483
|
10c97b033b7d93d500a4dd563234eef128dc43ab
|
/tests/testthat/www.fleaflicker.com/api/FetchLeagueTransactions-0618ef.R
|
e805c967c8255b55c9b9d6ae465292116f1f0c4d
|
[
"MIT"
] |
permissive
|
tonyelhabr/ffscrapr
|
f38e7c87bb65ddbf6e1c9736c16e56944760af46
|
4e0944da56d8890c441c4abe9c25bc2477a1e388
|
refs/heads/main
| 2023-03-10T08:48:01.840281
| 2020-12-16T06:19:07
| 2020-12-16T06:19:07
| 328,791,006
| 0
| 0
|
NOASSERTION
| 2021-01-11T23:59:24
| 2021-01-11T21:03:44
| null |
UTF-8
|
R
| false
| false
| 113,258
|
r
|
FetchLeagueTransactions-0618ef.R
|
structure(list(
url = "https://www.fleaflicker.com/api/FetchLeagueTransactions?sport=NFL&league_id=206154&team_id=1373475&result_offset=120",
status_code = 200L, headers = structure(list(
date = "Tue, 24 Nov 2020 01:19:57 GMT",
`content-type` = "application/json;charset=utf-8", vary = "accept-encoding",
`content-encoding` = "gzip"
), class = c(
"insensitive",
"list"
)), all_headers = list(list(
status = 200L, version = "HTTP/2",
headers = structure(list(
date = "Tue, 24 Nov 2020 01:19:57 GMT",
`content-type` = "application/json;charset=utf-8",
vary = "accept-encoding", `content-encoding` = "gzip"
), class = c(
"insensitive",
"list"
))
)), cookies = structure(list(
domain = logical(0),
flag = logical(0), path = logical(0), secure = logical(0),
expiration = structure(numeric(0), class = c(
"POSIXct",
"POSIXt"
)), name = logical(0), value = logical(0)
), row.names = integer(0), class = "data.frame"),
content = charToRaw("{\"items\":[{\"timeEpochMilli\":\"1588029157000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":12244,\"nameFull\":\"Joe Walker\",\"nameShort\":\"J. Walker\",\"proTeamAbbreviation\":\"SF\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/12244.png\",\"nflByeWeek\":11,\"injury\":{\"typeAbbreviaition\":\"CVD\",\"description\":\"Undisclosed\",\"severity\":\"OUT\",\"typeFull\":\"COVID-19\"},\"nameFirst\":\"Joe\",\"nameLast\":\"Walker\",\"proTeam\":{\"abbreviation\":\"SF\",\"location\":\"San Francisco\",\"name\":\"49ers\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"isBye\":true,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":754,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":136,\"formatted\":\"136\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":3.75,\"formatted\":\"3.75\"},\"duration\":3},{\"value\":{\"value\":3.75,\"formatted\":\"3.75\"},\"duration\":5}],\"seasonTotal\":{\"value\":7.5,\"formatted\":\"7.5\"},\"seasonAverage\":{\"value\":3.75,\"formatted\":\"3.75\"},\"seasonsStandartDeviation\":{\"value\":2.25,\"formatted\":\"2.25\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1588028878000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":9445,\"nameFull\":\"Theo Riddick\",\"nameShort\":\"T. 
Riddick\",\"proTeamAbbreviation\":\"LV\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/9445.png\",\"nflByeWeek\":6,\"injury\":{\"typeAbbreviaition\":\"CVD\",\"description\":\"Undisclosed\",\"severity\":\"OUT\",\"typeFull\":\"COVID-19\"},\"news\":[{\"timeEpochMilli\":\"1606015474000\",\"contents\":\"Riddick (undisclosed) was placed on the reserve/COVID-19 list Saturday, Paul Gutierrez of ESPN.com reports.\",\"analysis\":\"The move means Riddick either tested positive for the virus or was in close contact with an infected individual. The 29-year-old will be unavailable until he clears the league's COVID-19 protocols.\",\"title\":\"Shifts to COVID list\"}],\"nameFirst\":\"Theo\",\"nameLast\":\"Riddick\",\"proTeam\":{\"abbreviation\":\"LV\",\"location\":\"Las Vegas\",\"name\":\"Raiders\"},\"positionEligibility\":[\"RB\"]},\"requestedGames\":[{\"game\":{\"id\":6310,\"away\":{\"abbreviation\":\"KC\",\"location\":\"Kansas City\",\"name\":\"Chiefs\"},\"home\":{\"abbreviation\":\"LV\",\"location\":\"Las Vegas\",\"name\":\"Raiders\"},\"startTimeEpochMilli\":\"1606094400000\",\"status\":\"FINAL_SCORE\",\"awayScore\":35,\"homeScore\":31,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"rankFantasy\":{\"ordinal\":884,\"positions\":[{\"position\":{\"label\":\"RB\",\"group\":\"START\",\"eligibility\":[\"RB\"],\"colors\":[\"DRAFT_BOARD_GREEN\"]},\"ordinal\":150,\"formatted\":\"150\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.8,\"formatted\":\"1.8\"},\"duration\":1},{\"value\":{\"value\":1.8,\"formatted\":\"1.8\"},\"duration\":3},{\"value\":{\"value\":1.8,\"formatted\":\"1.8\"},\"duration\":5}],\"seasonTotal\":{\"value\":1.8,\"formatted\":\"1.8\"},\"seasonAverage\":{\"value\":1.8,\"formatted\":\"1.8\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1588028448000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":8532,\"nameFull\":\"Tavon Wilson\",\"nameShort\":\"T. 
Wilson\",\"proTeamAbbreviation\":\"IND\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/8532.png\",\"nflByeWeek\":7,\"nameFirst\":\"Tavon\",\"nameLast\":\"Wilson\",\"proTeam\":{\"abbreviation\":\"IND\",\"location\":\"Indianapolis\",\"name\":\"Colts\"},\"positionEligibility\":[\"S\"]},\"requestedGames\":[{\"game\":{\"id\":6305,\"away\":{\"abbreviation\":\"GB\",\"location\":\"Green Bay\",\"name\":\"Packers\"},\"home\":{\"abbreviation\":\"IND\",\"location\":\"Indianapolis\",\"name\":\"Colts\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.49,\"formatted\":\"0.5\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":1.26,\"formatted\":\"1.3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":0.49,\"formatted\":\"0.5\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":1.26,\"formatted\":\"1.3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":571,\"positions\":[{\"position\":{\"label\":\"S\",\"group\":\"START\",\"eligibility\":[\"S\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":91,\"formatted\":\"91\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":3,\"underPerforming\":true},{\"value\":{\"value\":4.8,\"formatted\":\"4.8\"},\"duration\":5}],\"seasonTotal\":{\"value\":24.0,\"formatted\":\"24\"},\"seasonAverage\":{\"value\":4.8,\"formatted\":\"4.8\"},\"seasonsStandartDeviation\":{\"value\":6.6,\"formatted\":\"6.6\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1588028306000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":13956,\"nameFull\":\"Dylan Cantrell\",\"nameShort\":\"D. 
Cantrell\",\"proTeamAbbreviation\":\"FA\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13956.png\",\"nameFirst\":\"Dylan\",\"nameLast\":\"Cantrell\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"WR\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1588028140000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":6595,\"nameFull\":\"Morgan Burnett\",\"nameShort\":\"M. 
Burnett\",\"proTeamAbbreviation\":\"FA\",\"position\":\"S\",\"nameFirst\":\"Morgan\",\"nameLast\":\"Burnett\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"S\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1588027883000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":13869,\"nameFull\":\"Antonio Callaway\",\"nameShort\":\"A. 
Callaway\",\"proTeamAbbreviation\":\"MIA\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13869.png\",\"nflByeWeek\":7,\"nameFirst\":\"Antonio\",\"nameLast\":\"Callaway\",\"proTeam\":{\"abbreviation\":\"MIA\",\"location\":\"Miami\",\"name\":\"Dolphins\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6413,\"away\":{\"abbreviation\":\"MIA\",\"location\":\"Miami\",\"name\":\"Dolphins\"},\"home\":{\"abbreviation\":\"DEN\",\"location\":\"Denver\",\"name\":\"Broncos\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":13,\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":13.0,\"formatted\":\"13\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"},\"value\":{\"value\":15.48,\"formatted\":\"15.5\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.02,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.05,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":15.48,\"formatted\":\"15.5\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.02,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.05,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1374271,\"name\":\"Clutch City Ballers\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1374271_0_150x150.jpg\",\"initials\":\"CC\"},\"displayGroup\":\"RECEIVER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1588027868000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":3986,\"nameFull\":\"Stephen Gostkowski\",\"nameShort\":\"S. Gostkowski\",\"proTeamAbbreviation\":\"TEN\",\"position\":\"K\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/3986.png\",\"news\":[{\"timeEpochMilli\":\"1606149119000\",\"contents\":\"Gostkowski made all three of his field-goal attempts and his only extra-point attempt in Week 11 against the Ravens.\",\"analysis\":\"Gostkowski has been inconsistent throughout the season, but delivered three field goals to help the Titans to a win. After connecting on two 40-yard attempts, Gostkowski has now hit 10 of his 16 attempts from 40 yards or more on the campaign. 
Despite his struggles, the Titans have shown little motivation to bring in competition at the kicker position meaning that Gostkowski will look to build on this perfect effort in Week 12 against the Colts.\",\"title\":\"Has perfect day\"}],\"nameFirst\":\"Stephen\",\"nameLast\":\"Gostkowski\",\"proTeam\":{\"abbreviation\":\"TEN\",\"location\":\"Tennessee\",\"name\":\"Titans\"},\"positionEligibility\":[\"K\"]},\"requestedGames\":[{\"game\":{\"id\":6301,\"away\":{\"abbreviation\":\"TEN\",\"location\":\"Tennessee\",\"name\":\"Titans\"},\"home\":{\"abbreviation\":\"BAL\",\"location\":\"Baltimore\",\"name\":\"Ravens\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":30,\"homeScore\":24,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":101,\"abbreviation\":\"FG\",\"nameSingular\":\"Field Goal Made\",\"namePlural\":\"Field Goals Made\"},\"value\":{\"value\":3.0,\"formatted\":\"3\"}},{\"category\":{\"id\":107,\"abbreviation\":\"Att\",\"nameSingular\":\"Field Goal Attempt\",\"namePlural\":\"Field Goal Attempts\"},\"value\":{\"value\":3.0,\"formatted\":\"3\"}},{\"category\":{\"id\":104,\"abbreviation\":\"XP\",\"nameSingular\":\"XP\",\"namePlural\":\"XPs\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":108,\"abbreviation\":\"Att\",\"nameSingular\":\"Extra Point Attempt\",\"namePlural\":\"Extra Point Attempts\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}}],\"statsProjected\":[{\"category\":{\"id\":101,\"abbreviation\":\"FG\",\"nameSingular\":\"Field Goal Made\",\"namePlural\":\"Field Goals Made\"},\"value\":{\"value\":2.0,\"formatted\":\"2\"}},{\"category\":{\"id\":107,\"abbreviation\":\"Att\",\"nameSingular\":\"Field Goal Attempt\",\"namePlural\":\"Field Goal 
Attempts\"},\"value\":{\"value\":1.86,\"formatted\":\"1.9\"}},{\"category\":{\"id\":104,\"abbreviation\":\"XP\",\"nameSingular\":\"XP\",\"namePlural\":\"XPs\"},\"value\":{\"value\":1.78,\"formatted\":\"1.8\"}},{\"category\":{\"id\":108,\"abbreviation\":\"Att\",\"nameSingular\":\"Extra Point Attempt\",\"namePlural\":\"Extra Point Attempts\"},\"value\":{\"value\":1.78,\"formatted\":\"1.8\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":101,\"abbreviation\":\"FG\",\"nameSingular\":\"Field Goal Made\",\"namePlural\":\"Field Goals Made\"}},{\"category\":{\"id\":107,\"abbreviation\":\"Att\",\"nameSingular\":\"Field Goal Attempt\",\"namePlural\":\"Field Goal Attempts\"}},{\"category\":{\"id\":104,\"abbreviation\":\"XP\",\"nameSingular\":\"XP\",\"namePlural\":\"XPs\"}},{\"category\":{\"id\":108,\"abbreviation\":\"Att\",\"nameSingular\":\"Extra Point Attempt\",\"namePlural\":\"Extra Point Attempts\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":101,\"abbreviation\":\"FG\",\"nameSingular\":\"Field Goal Made\",\"namePlural\":\"Field Goals Made\"},\"value\":{\"value\":2.0,\"formatted\":\"2\"}},{\"category\":{\"id\":107,\"abbreviation\":\"Att\",\"nameSingular\":\"Field Goal Attempt\",\"namePlural\":\"Field Goal Attempts\"},\"value\":{\"value\":1.86,\"formatted\":\"1.9\"}},{\"category\":{\"id\":104,\"abbreviation\":\"XP\",\"nameSingular\":\"XP\",\"namePlural\":\"XPs\"},\"value\":{\"value\":1.78,\"formatted\":\"1.8\"}},{\"category\":{\"id\":108,\"abbreviation\":\"Att\",\"nameSingular\":\"Extra Point Attempt\",\"namePlural\":\"Extra Point 
Attempts\"},\"value\":{\"value\":1.78,\"formatted\":\"1.8\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373991,\"name\":\"Top City Terrors\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373991_0_150x150.jpg\",\"initials\":\"TC\"},\"displayGroup\":\"KICKER\",\"rankFantasy\":{\"ordinal\":296,\"positions\":[{\"position\":{\"label\":\"K\",\"group\":\"START\",\"eligibility\":[\"K\"],\"colors\":[\"DRAFT_BOARD_GRAY\"]},\"ordinal\":23,\"formatted\":\"23\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":7.0,\"formatted\":\"7\"},\"duration\":1},{\"value\":{\"value\":5.33,\"formatted\":\"5.33\"},\"duration\":3,\"underPerforming\":true},{\"value\":{\"value\":4.8,\"formatted\":\"4.8\"},\"duration\":5}],\"seasonTotal\":{\"value\":69.0,\"formatted\":\"69\"},\"seasonAverage\":{\"value\":7.6666665,\"formatted\":\"7.67\"},\"seasonsStandartDeviation\":{\"value\":6.896054,\"formatted\":\"6.9\"},\"seasonConsistency\":\"RATING_VERY_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1576666800000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":13114,\"nameFull\":\"Chuck Clark\",\"nameShort\":\"C. 
Clark\",\"proTeamAbbreviation\":\"BAL\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13114.png\",\"nflByeWeek\":7,\"nameFirst\":\"Chuck\",\"nameLast\":\"Clark\",\"proTeam\":{\"abbreviation\":\"BAL\",\"location\":\"Baltimore\",\"name\":\"Ravens\"},\"positionEligibility\":[\"S\"]},\"requestedGames\":[{\"game\":{\"id\":6301,\"away\":{\"abbreviation\":\"TEN\",\"location\":\"Tennessee\",\"name\":\"Titans\"},\"home\":{\"abbreviation\":\"BAL\",\"location\":\"Baltimore\",\"name\":\"Ravens\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":30,\"homeScore\":24,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":4.0,\"formatted\":\"4\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.82,\"formatted\":\"1.8\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":2.3,\"formatted\":\"2.3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.16,\"formatted\":\"0.2\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":1.82,\"formatted\":\"1.8\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":2.3,\"formatted\":\"2.3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.16,\"formatted\":\"0.2\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1374255,\"name\":\"Mushroom City Karts\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1374255_0_150x150.jpg\",\"initials\":\"MC\"},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":118,\"positions\":[{\"position\":{\"label\":\"S\",\"group\":\"START\",\"eligibility\":[\"S\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":13,\"formatted\":\"13\",\"rating\":\"RATING_GOOD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":7.5,\"formatted\":\"7.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":13.33,\"formatted\":\"13.33\"},\"duration\":3},{\"value\":{\"value\":13.02,\"formatted\":\"13.02\"},\"duration\":5}],\"seasonTotal\":{\"value\":106.1,\"formatted\":\"106.1\"},\"seasonAverage\":{\"value\":11.788889,\"formatted\":\"11.79\"},\"seasonsStandartDeviation\":{\"value\":6.3360424,\"formatted\":\"6.34\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"waiverResolutionTeams\":[{\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}]}},{\"timeEpochMilli\":\"1576666800000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":10249,\"nameFull\":\"Jimmie Ward\",\"nameShort\":\"J. 
Ward\",\"proTeamAbbreviation\":\"SF\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/10249.png\",\"nflByeWeek\":11,\"nameFirst\":\"Jimmie\",\"nameLast\":\"Ward\",\"proTeam\":{\"abbreviation\":\"SF\",\"location\":\"San Francisco\",\"name\":\"49ers\"},\"positionEligibility\":[\"S\"]},\"requestedGames\":[{\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"isBye\":true,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":334,\"positions\":[{\"position\":{\"label\":\"S\",\"group\":\"START\",\"eligibility\":[\"S\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":58,\"formatted\":\"58\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":12.0,\"formatted\":\"12\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":8.0,\"formatted\":\"8\"},\"duration\":3},{\"value\":{\"value\":7.5,\"formatted\":\"7.5\"},\"duration\":5}],\"seasonTotal\":{\"value\":60.5,\"formatted\":\"60.5\"},\"seasonAverage\":{\"value\":6.7222223,\"formatted\":\"6.72\"},\"seasonsStandartDeviation\":{\"value\":2.4845192,\"formatted\":\"2.48\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1576666800000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":13616,\"nameFull\":\"Greg Ward\",\"nameShort\":\"G. Ward\",\"proTeamAbbreviation\":\"PHI\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13616.png\",\"nflByeWeek\":9,\"news\":[{\"timeEpochMilli\":\"1606082531000\",\"contents\":\"Philadelphia Eagles wide receiver Greg Ward caught three passes, gaining just nine yards in a Week 11 loss to the Cleveland Browns. Ward was targeted four times and basically caught the ball and fell down each time he made a catch. 
He's been useful on the NFL field for short completions and check downs, but hasn't made much noise in fantasy leagues this season. That role will likely continue throughout the rest of the season, but Week 12 against the struggling secondary of the Seahawks ...\",\"url\":\"https://www.rotoballer.com/player-news/greg-ward-catches-three-passes-in-a-week-11-loss/806804\",\"title\":\"Greg Ward Catches Three Passes In A Week 11 Loss\"}],\"nameFirst\":\"Greg\",\"nameLast\":\"Ward\",\"proTeam\":{\"abbreviation\":\"PHI\",\"location\":\"Philadelphia\",\"name\":\"Eagles\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6303,\"away\":{\"abbreviation\":\"PHI\",\"location\":\"Philadelphia\",\"name\":\"Eagles\"},\"home\":{\"abbreviation\":\"CLE\",\"location\":\"Cleveland\",\"name\":\"Browns\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":17,\"homeScore\":22,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":75.0,\"formatted\":\"3/4\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":9.0,\"formatted\":\"9\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % 
Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"3/3\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":34.23,\"formatted\":\"34.2\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.15,\"formatted\":\"0.1\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"3/3\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"},\"value\":{\"value\":34.23,\"formatted\":\"34.2\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"value\":0.01,\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.15,\"formatted\":\"0.1\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1374255,\"name\":\"Mushroom City Karts\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1374255_0_150x150.jpg\",\"initials\":\"MC\"},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":248,\"positions\":[{\"position\":{\"label\":\"WR\",\"group\":\"START\",\"eligibility\":[\"WR\"],\"colors\":[\"DRAFT_BOARD_BLUE\"]},\"ordinal\":60,\"formatted\":\"60\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":7.4,\"formatted\":\"7.4\"},\"duration\":1},{\"value\":{\"value\":9.32,\"formatted\":\"9.32\"},\"duration\":3},{\"value\":{\"value\":8.7,\"formatted\":\"8.7\"},\"duration\":5}],\"seasonTotal\":{\"value\":77.40001,\"formatted\":\"77.4\"},\"seasonAverage\":{\"value\":8.600001,\"formatted\":\"8.6\"},\"seasonsStandartDeviation\":{\"value\":5.115715,\"formatted\":\"5.12\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"waiverResolutionTeams\":[{\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}]}},{\"timeEpochMilli\":\"1576666800000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":7561,\"nameFull\":\"Bilal 
Powell\",\"nameShort\":\"B. Powell\",\"proTeamAbbreviation\":\"FA\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/7561.png\",\"nameFirst\":\"Bilal\",\"nameLast\":\"Powell\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"RB\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1576666800000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":13324,\"nameFull\":\"Younghoe Koo\",\"nameShort\":\"Y. 
Koo\",\"proTeamAbbreviation\":\"ATL\",\"position\":\"K\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13324.png\",\"nflByeWeek\":10,\"news\":[{\"timeEpochMilli\":\"1606111513000\",\"contents\":\"Koo went 3-for-3 on field-goal attempts during Sunday's 24-9 loss to the Saints.\",\"analysis\":\"During a game in which Atlanta made just one trip to New Orleans' red zone, Koo provided all nine of the Falcons' points, succeeding on attempts from 28, 51 and 52 yards out. With his incredible 96 percent hit rate on field-goal tries, the 26-year-old leads the NFL with 24 conversions despite missing one game this season. Koo is a perfect 5-for-5 from 50-plus yards as Atlanta prepares for a Week 12 matchup against the Raiders.\",\"title\":\"Sinks all three of his kicks\"},{\"timeEpochMilli\":\"1606094844000\",\"contents\":\"Atlanta Falcons kicker Younghoe Koo made all three of his field-goal attempts in a 24-9 loss to the New Orleans Saints. Koo has now made multiple field goals in eight of nine contests this year and remains a top-five option at the position ahead of a Week 12 matchup with the Los Angeles Chargers. 
Los Angeles has surrendered the sixth-most fantasy points per game to opposing kickers.\",\"url\":\"https://www.rotoballer.com/player-news/younghoe-koo-nails-three-field-goals-in-loss/806930\",\"title\":\"Younghoe Koo Nails Three Field Goals In Loss\"}],\"nameFirst\":\"Younghoe\",\"nameLast\":\"Koo\",\"proTeam\":{\"abbreviation\":\"ATL\",\"location\":\"Atlanta\",\"name\":\"Falcons\"},\"positionEligibility\":[\"K\"]},\"requestedGames\":[{\"game\":{\"id\":6304,\"away\":{\"abbreviation\":\"ATL\",\"location\":\"Atlanta\",\"name\":\"Falcons\"},\"home\":{\"abbreviation\":\"NO\",\"location\":\"New Orleans\",\"name\":\"Saints\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":9,\"homeScore\":24,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":101,\"abbreviation\":\"FG\",\"nameSingular\":\"Field Goal Made\",\"namePlural\":\"Field Goals Made\"},\"value\":{\"value\":3.0,\"formatted\":\"3\"}},{\"category\":{\"id\":107,\"abbreviation\":\"Att\",\"nameSingular\":\"Field Goal Attempt\",\"namePlural\":\"Field Goal Attempts\"},\"value\":{\"value\":3.0,\"formatted\":\"3\"}},{\"category\":{\"id\":104,\"abbreviation\":\"XP\",\"nameSingular\":\"XP\",\"namePlural\":\"XPs\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":108,\"abbreviation\":\"Att\",\"nameSingular\":\"Extra Point Attempt\",\"namePlural\":\"Extra Point Attempts\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":101,\"abbreviation\":\"FG\",\"nameSingular\":\"Field Goal Made\",\"namePlural\":\"Field Goals Made\"},\"value\":{\"value\":2.0,\"formatted\":\"2\"}},{\"category\":{\"id\":107,\"abbreviation\":\"Att\",\"nameSingular\":\"Field Goal Attempt\",\"namePlural\":\"Field Goal 
Attempts\"},\"value\":{\"value\":2.23,\"formatted\":\"2.2\"}},{\"category\":{\"id\":104,\"abbreviation\":\"XP\",\"nameSingular\":\"XP\",\"namePlural\":\"XPs\"},\"value\":{\"value\":2.3,\"formatted\":\"2.3\"}},{\"category\":{\"id\":108,\"abbreviation\":\"Att\",\"nameSingular\":\"Extra Point Attempt\",\"namePlural\":\"Extra Point Attempts\"},\"value\":{\"value\":2.3,\"formatted\":\"2.3\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":101,\"abbreviation\":\"FG\",\"nameSingular\":\"Field Goal Made\",\"namePlural\":\"Field Goals Made\"}},{\"category\":{\"id\":107,\"abbreviation\":\"Att\",\"nameSingular\":\"Field Goal Attempt\",\"namePlural\":\"Field Goal Attempts\"}},{\"category\":{\"id\":104,\"abbreviation\":\"XP\",\"nameSingular\":\"XP\",\"namePlural\":\"XPs\"}},{\"category\":{\"id\":108,\"abbreviation\":\"Att\",\"nameSingular\":\"Extra Point Attempt\",\"namePlural\":\"Extra Point Attempts\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":101,\"abbreviation\":\"FG\",\"nameSingular\":\"Field Goal Made\",\"namePlural\":\"Field Goals Made\"},\"value\":{\"value\":2.0,\"formatted\":\"2\"}},{\"category\":{\"id\":107,\"abbreviation\":\"Att\",\"nameSingular\":\"Field Goal Attempt\",\"namePlural\":\"Field Goal Attempts\"},\"value\":{\"value\":2.23,\"formatted\":\"2.2\"}},{\"category\":{\"id\":104,\"abbreviation\":\"XP\",\"nameSingular\":\"XP\",\"namePlural\":\"XPs\"},\"value\":{\"value\":2.3,\"formatted\":\"2.3\"}},{\"category\":{\"id\":108,\"abbreviation\":\"Att\",\"nameSingular\":\"Extra Point Attempt\",\"namePlural\":\"Extra Point 
Attempts\"},\"value\":{\"value\":2.3,\"formatted\":\"2.3\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"displayGroup\":\"KICKER\",\"rankFantasy\":{\"ordinal\":177,\"positions\":[{\"position\":{\"label\":\"K\",\"group\":\"START\",\"eligibility\":[\"K\"],\"colors\":[\"DRAFT_BOARD_GRAY\"]},\"ordinal\":6,\"formatted\":\"6\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":12.5,\"formatted\":\"12.5\"},\"duration\":1},{\"value\":{\"value\":8.83,\"formatted\":\"8.83\"},\"duration\":3},{\"value\":{\"value\":11.9,\"formatted\":\"11.9\"},\"duration\":5}],\"isKeeper\":true,\"seasonTotal\":{\"value\":91.5,\"formatted\":\"91.5\"},\"seasonAverage\":{\"value\":11.4375,\"formatted\":\"11.44\"},\"seasonsStandartDeviation\":{\"value\":5.346947,\"formatted\":\"5.35\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"waiverResolutionTeams\":[{\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}]}},{\"timeEpochMilli\":\"1576062000000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":12244,\"nameFull\":\"Joe Walker\",\"nameShort\":\"J. 
Walker\",\"proTeamAbbreviation\":\"SF\",\"position\":\"LB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/12244.png\",\"nflByeWeek\":11,\"injury\":{\"typeAbbreviaition\":\"CVD\",\"description\":\"Undisclosed\",\"severity\":\"OUT\",\"typeFull\":\"COVID-19\"},\"nameFirst\":\"Joe\",\"nameLast\":\"Walker\",\"proTeam\":{\"abbreviation\":\"SF\",\"location\":\"San Francisco\",\"name\":\"49ers\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"isBye\":true,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":754,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":136,\"formatted\":\"136\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":3.75,\"formatted\":\"3.75\"},\"duration\":3},{\"value\":{\"value\":3.75,\"formatted\":\"3.75\"},\"duration\":5}],\"seasonTotal\":{\"value\":7.5,\"formatted\":\"7.5\"},\"seasonAverage\":{\"value\":3.75,\"formatted\":\"3.75\"},\"seasonsStandartDeviation\":{\"value\":2.25,\"formatted\":\"2.25\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"waiverResolutionTeams\":[{\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}]}},{\"timeEpochMilli\":\"1576062000000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":11557,\"nameFull\":\"Neville Hewitt\",\"nameShort\":\"N. 
Hewitt\",\"proTeamAbbreviation\":\"NYJ\",\"position\":\"LB\",\"nflByeWeek\":10,\"news\":[{\"timeEpochMilli\":\"1606101993000\",\"contents\":\"Hewitt racked up 11 tackles (nine solo) and a tackle for loss in Sunday's 34-28 loss to the Chargers.\",\"analysis\":\"Hewitt was one of three Jets defenders to record double-digit tackles as the Chargers dominated time of possession, joining Harvey Langi and Ashtyn Davis. The 27-year-old linebacker has already set a new career high with 85 tackles and still has six games left to build on that total, starting with a Week 12 tilt against Miami.\",\"title\":\"Climbs to career-best 85 tackles\"}],\"nameFirst\":\"Neville\",\"nameLast\":\"Hewitt\",\"proTeam\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"positionEligibility\":[\"LB\",\"LB\"]},\"requestedGames\":[{\"game\":{\"id\":6414,\"away\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"home\":{\"abbreviation\":\"LAC\",\"location\":\"Los Angeles\",\"name\":\"Chargers\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":28,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":2.0,\"formatted\":\"2\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":9.0,\"formatted\":\"9\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"statsProjected\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted 
Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":3.21,\"formatted\":\"3.2\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"},\"value\":{\"value\":3.02,\"formatted\":\"3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"value\":3.21,\"formatted\":\"3.2\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":3.02,\"formatted\":\"3\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"value\":0.04,\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373393,\"name\":\"Philadelphia Fire\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373393_0_150x150.jpg\",\"initials\":\"PF\"},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":167,\"positions\":[{\"position\":{\"label\":\"LB\",\"group\":\"START\",\"eligibility\":[\"LB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":36,\"formatted\":\"36\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":8.5,\"formatted\":\"8.5\"},\"duration\":1},{\"value\":{\"value\":11.0,\"formatted\":\"11\"},\"duration\":3},{\"value\":{\"value\":10.9,\"formatted\":\"10.9\"},\"duration\":5}],\"isKeeper\":true,\"seasonTotal\":{\"value\":94.0,\"formatted\":\"94\"},\"seasonAverage\":{\"value\":10.444445,\"formatted\":\"10.44\"},\"seasonsStandartDeviation\":{\"value\":3.130888,\"formatted\":\"3.13\"},\"seasonConsistency\":\"RATING_VERY_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1576062000000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":10249,\"nameFull\":\"Jimmie Ward\",\"nameShort\":\"J. 
Ward\",\"proTeamAbbreviation\":\"SF\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/10249.png\",\"nflByeWeek\":11,\"nameFirst\":\"Jimmie\",\"nameLast\":\"Ward\",\"proTeam\":{\"abbreviation\":\"SF\",\"location\":\"San Francisco\",\"name\":\"49ers\"},\"positionEligibility\":[\"S\"]},\"requestedGames\":[{\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"isBye\":true,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":334,\"positions\":[{\"position\":{\"label\":\"S\",\"group\":\"START\",\"eligibility\":[\"S\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":58,\"formatted\":\"58\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":12.0,\"formatted\":\"12\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":8.0,\"formatted\":\"8\"},\"duration\":3},{\"value\":{\"value\":7.5,\"formatted\":\"7.5\"},\"duration\":5}],\"seasonTotal\":{\"value\":60.5,\"formatted\":\"60.5\"},\"seasonAverage\":{\"value\":6.7222223,\"formatted\":\"6.72\"},\"seasonsStandartDeviation\":{\"value\":2.4845192,\"formatted\":\"2.48\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"waiverResolutionTeams\":[{\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}]}},{\"timeEpochMilli\":\"1576062000000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":4945,\"nameFull\":\"Eric Weddle\",\"nameShort\":\"E. 
Weddle\",\"proTeamAbbreviation\":\"FA\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/4945.png\",\"nameFirst\":\"Eric\",\"nameLast\":\"Weddle\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"S\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1576062000000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":13851,\"nameFull\":\"Justin Watson\",\"nameShort\":\"J. 
Watson\",\"proTeamAbbreviation\":\"TB\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13851.png\",\"nflByeWeek\":13,\"nameFirst\":\"Justin\",\"nameLast\":\"Watson\",\"proTeam\":{\"abbreviation\":\"TB\",\"location\":\"Tampa Bay\",\"name\":\"Buccaneers\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6311,\"away\":{\"abbreviation\":\"LAR\",\"location\":\"Los Angeles\",\"name\":\"Rams\"},\"home\":{\"abbreviation\":\"TB\",\"location\":\"Tampa Bay\",\"name\":\"Buccaneers\"},\"startTimeEpochMilli\":\"1606180500000\",\"status\":\"IN_PROGRESS\",\"segment\":1,\"segmentSecondsRemaining\":790,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"stateFootball\":{\"down\":2,\"distance\":8,\"fieldLine\":40,\"fieldLineAbsolute\":40,\"description\":\"2nd & 8 at TB 40\"}},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"statsProjected\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":2.86,\"formatted\":\"2.9\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special 
Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}}],\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"hasPossession\":true}],\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"formatted\":\"0/0\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":2.86,\"formatted\":\"2.9\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.02,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":515,\"positions\":[{\"position\":{\"label\":\"WR\",\"group\":\"START\",\"eligibility\":[\"WR\"],\"colors\":[\"DRAFT_BOARD_BLUE\"]},\"ordinal\":123,\"formatted\":\"123\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":13.5,\"formatted\":\"13.5\"},\"duration\":1,\"overPerforming\":true},{\"value\":{\"value\":7.38,\"formatted\":\"7.38\"},\"duration\":3},{\"value\":{\"value\":6.41,\"formatted\":\"6.41\"},\"duration\":5}],\"seasonTotal\":{\"value\":32.05,\"formatted\":\"32.05\"},\"seasonAverage\":{\"value\":6.41,\"formatted\":\"6.41\"},\"seasonsStandartDeviation\":{\"value\":4.248106,\"formatted\":\"4.25\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"waiverResolutionTeams\":[{\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}]}},{\"timeEpochMilli\":\"1576062000000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":14823,\"nameFull\":\"KeeSean Johnson\",\"nameShort\":\"K. 
Johnson\",\"proTeamAbbreviation\":\"ARI\",\"position\":\"WR\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/14823.png\",\"nflByeWeek\":8,\"injury\":{\"typeAbbreviaition\":\"OUT\",\"description\":\"Coach's Decision\",\"severity\":\"OUT\",\"typeFull\":\"Out\"},\"nameFirst\":\"KeeSean\",\"nameLast\":\"Johnson\",\"proTeam\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"positionEligibility\":[\"WR\"]},\"requestedGames\":[{\"game\":{\"id\":6299,\"away\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"home\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"startTimeEpochMilli\":\"1605835200000\",\"status\":\"FINAL_SCORE\",\"awayScore\":21,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving 
Yards\"}},{\"category\":{\"id\":26,\"abbreviation\":\"Fum\",\"nameSingular\":\"Fumble\",\"namePlural\":\"Fumbles\",\"lowerIsBetter\":true}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RECEIVER\",\"rankFantasy\":{\"ordinal\":792,\"positions\":[{\"position\":{\"label\":\"WR\",\"group\":\"START\",\"eligibility\":[\"WR\"],\"colors\":[\"DRAFT_BOARD_BLUE\"]},\"ordinal\":179,\"formatted\":\"179\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":2.7,\"formatted\":\"2.7\"},\"duration\":3},{\"value\":{\"value\":2.7,\"formatted\":\"2.7\"},\"duration\":5}],\"seasonTotal\":{\"value\":5.4,\"formatted\":\"5.4\"},\"seasonAverage\":{\"value\":2.7,\"formatted\":\"2.7\"},\"seasonsStandartDeviation\":{\"value\":1.2,\"formatted\":\"1.2\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1576027382000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":7592,\"nameFull\":\"Tyrod Taylor\",\"nameShort\":\"T. Taylor\",\"proTeamAbbreviation\":\"LAC\",\"position\":\"QB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/7592.png\",\"nflByeWeek\":6,\"news\":[{\"timeEpochMilli\":\"1606074142000\",\"contents\":\"Taylor (ribs) is active for Sunday's game against the Jets.\",\"analysis\":\"The veteran was considered questionable with the injury to his ribs, but he'll be suiting up for Sunday's contest. 
Taylor will continue to serve as the backup to starter Justin Herbert.\",\"title\":\"Active Week 11\"}],\"nameFirst\":\"Tyrod\",\"nameLast\":\"Taylor\",\"proTeam\":{\"abbreviation\":\"LAC\",\"location\":\"Los Angeles\",\"name\":\"Chargers\"},\"positionEligibility\":[\"QB\"]},\"requestedGames\":[{\"game\":{\"id\":6414,\"away\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"home\":{\"abbreviation\":\"LAC\",\"location\":\"Los Angeles\",\"name\":\"Chargers\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":28,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing TDs\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true}}],\"statsProjected\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"},\"value\":{\"value\":12.0,\"formatted\":\"12\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing 
TDs\"},\"value\":{\"value\":0.07,\"formatted\":\"0.1\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing TDs\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true}}],\"viewingProjectedStats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"},\"value\":{\"value\":100.0,\"formatted\":\"100.0\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"},\"value\":{\"value\":12.0,\"formatted\":\"12\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing 
TDs\"},\"value\":{\"value\":0.07,\"formatted\":\"0.1\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"PASSER\",\"rankFantasy\":{\"ordinal\":753,\"positions\":[{\"position\":{\"label\":\"QB\",\"group\":\"START\",\"eligibility\":[\"QB\"],\"colors\":[\"DRAFT_BOARD_RED\"]},\"ordinal\":44,\"formatted\":\"44\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":7.52,\"formatted\":\"7.52\"},\"duration\":1},{\"value\":{\"value\":7.52,\"formatted\":\"7.52\"},\"duration\":3},{\"value\":{\"value\":7.52,\"formatted\":\"7.52\"},\"duration\":5}],\"seasonTotal\":{\"value\":7.5199995,\"formatted\":\"7.52\"},\"seasonAverage\":{\"value\":7.5199995,\"formatted\":\"7.52\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1575594091000\",\"transaction\":{\"player\":{\"proPlayer\":{\"id\":4945,\"nameFull\":\"Eric Weddle\",\"nameShort\":\"E. 
Weddle\",\"proTeamAbbreviation\":\"FA\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/4945.png\",\"nameFirst\":\"Eric\",\"nameLast\":\"Weddle\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"S\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1575594091000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":11861,\"nameFull\":\"Quinton Dunbar\",\"nameShort\":\"Q. 
Dunbar\",\"proTeamAbbreviation\":\"SEA\",\"position\":\"CB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11861.png\",\"nflByeWeek\":6,\"injury\":{\"typeAbbreviaition\":\"IR\",\"description\":\"Knee\",\"severity\":\"OUT\",\"typeFull\":\"Injured Reserve\"},\"nameFirst\":\"Quinton\",\"nameLast\":\"Dunbar\",\"proTeam\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"positionEligibility\":[\"CB\"]},\"requestedGames\":[{\"game\":{\"id\":6299,\"away\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"home\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"startTimeEpochMilli\":\"1605835200000\",\"status\":\"FINAL_SCORE\",\"awayScore\":21,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":240,\"positions\":[{\"position\":{\"label\":\"CB\",\"group\":\"START\",\"eligibility\":[\"CB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":43,\"formatted\":\"43\",\"rating\":\"RATING_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":9.0,\"formatted\":\"9\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":10.67,\"formatted\":\"10.67\"},\"duration\":3},{\"value\":{\"value\":13.3,\"formatted\":\"13.3\"},\"duration\":5}],\"seasonTotal\":{\"value\":78.5,\"formatted\":\"78.5\"},\"seasonAverage\":{\"value\":13.083333,\"formatted\":\"13.08\"},\"seasonsStandartDeviation\":{\"value\":5.747585,\"formatted\":\"5.75\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1575457200000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":11861,\"nameFull\":\"Quinton Dunbar\",\"nameShort\":\"Q. 
Dunbar\",\"proTeamAbbreviation\":\"SEA\",\"position\":\"CB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11861.png\",\"nflByeWeek\":6,\"injury\":{\"typeAbbreviaition\":\"IR\",\"description\":\"Knee\",\"severity\":\"OUT\",\"typeFull\":\"Injured Reserve\"},\"nameFirst\":\"Quinton\",\"nameLast\":\"Dunbar\",\"proTeam\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"positionEligibility\":[\"CB\"]},\"requestedGames\":[{\"game\":{\"id\":6299,\"away\":{\"abbreviation\":\"ARI\",\"location\":\"Arizona\",\"name\":\"Cardinals\"},\"home\":{\"abbreviation\":\"SEA\",\"location\":\"Seattle\",\"name\":\"Seahawks\"},\"startTimeEpochMilli\":\"1605835200000\",\"status\":\"FINAL_SCORE\",\"awayScore\":21,\"homeScore\":28,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":240,\"positions\":[{\"position\":{\"label\":\"CB\",\"group\":\"START\",\"eligibility\":[\"CB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":43,\"formatted\":\"43\",\"rating\":\"RATING_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":9.0,\"formatted\":\"9\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":10.67,\"formatted\":\"10.67\"},\"duration\":3},{\"value\":{\"value\":13.3,\"formatted\":\"13.3\"},\"duration\":5}],\"seasonTotal\":{\"value\":78.5,\"formatted\":\"78.5\"},\"seasonAverage\":{\"value\":13.083333,\"formatted\":\"13.08\"},\"seasonsStandartDeviation\":{\"value\":5.747585,\"formatted\":\"5.75\"},\"seasonConsistency\":\"RATING_GOOD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"waiverResolutionTeams\":[{\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}]}},{\"timeEpochMilli\":\"1575457200000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":4945,\"nameFull\":\"Eric Weddle\",\"nameShort\":\"E. 
Weddle\",\"proTeamAbbreviation\":\"FA\",\"position\":\"S\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/4945.png\",\"nameFirst\":\"Eric\",\"nameLast\":\"Weddle\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"S\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"DEFENDER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1575211718000\",\"transaction\":{\"player\":{\"proPlayer\":{\"id\":7592,\"nameFull\":\"Tyrod Taylor\",\"nameShort\":\"T. Taylor\",\"proTeamAbbreviation\":\"LAC\",\"position\":\"QB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/7592.png\",\"nflByeWeek\":6,\"news\":[{\"timeEpochMilli\":\"1606074142000\",\"contents\":\"Taylor (ribs) is active for Sunday's game against the Jets.\",\"analysis\":\"The veteran was considered questionable with the injury to his ribs, but he'll be suiting up for Sunday's contest. 
Taylor will continue to serve as the backup to starter Justin Herbert.\",\"title\":\"Active Week 11\"}],\"nameFirst\":\"Tyrod\",\"nameLast\":\"Taylor\",\"proTeam\":{\"abbreviation\":\"LAC\",\"location\":\"Los Angeles\",\"name\":\"Chargers\"},\"positionEligibility\":[\"QB\"]},\"requestedGames\":[{\"game\":{\"id\":6414,\"away\":{\"abbreviation\":\"NYJ\",\"location\":\"New York\",\"name\":\"Jets\"},\"home\":{\"abbreviation\":\"LAC\",\"location\":\"Los Angeles\",\"name\":\"Chargers\"},\"startTimeEpochMilli\":\"1606079100000\",\"status\":\"FINAL_SCORE\",\"awayScore\":28,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing TDs\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true}}],\"statsProjected\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"},\"value\":{\"value\":100.0,\"formatted\":\"1/1\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"},\"value\":{\"value\":12.0,\"formatted\":\"12\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing 
TDs\"},\"value\":{\"value\":0.07,\"formatted\":\"0.1\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing TDs\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true}}],\"viewingProjectedStats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"},\"value\":{\"value\":100.0,\"formatted\":\"100.0\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"},\"value\":{\"value\":12.0,\"formatted\":\"12\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing 
TDs\"},\"value\":{\"value\":0.07,\"formatted\":\"0.1\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true},\"value\":{\"value\":0.03,\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"PASSER\",\"rankFantasy\":{\"ordinal\":753,\"positions\":[{\"position\":{\"label\":\"QB\",\"group\":\"START\",\"eligibility\":[\"QB\"],\"colors\":[\"DRAFT_BOARD_RED\"]},\"ordinal\":44,\"formatted\":\"44\",\"rating\":\"RATING_VERY_BAD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":7.52,\"formatted\":\"7.52\"},\"duration\":1},{\"value\":{\"value\":7.52,\"formatted\":\"7.52\"},\"duration\":3},{\"value\":{\"value\":7.52,\"formatted\":\"7.52\"},\"duration\":5}],\"seasonTotal\":{\"value\":7.5199995,\"formatted\":\"7.52\"},\"seasonAverage\":{\"value\":7.5199995,\"formatted\":\"7.52\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1575211718000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":11184,\"nameFull\":\"Marcus Mariota\",\"nameShort\":\"M. 
Mariota\",\"proTeamAbbreviation\":\"LV\",\"position\":\"QB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11184.png\",\"nflByeWeek\":6,\"injury\":{\"typeAbbreviaition\":\"OUT\",\"description\":\"Coach's Decision\",\"severity\":\"OUT\",\"typeFull\":\"Out\"},\"nameFirst\":\"Marcus\",\"nameLast\":\"Mariota\",\"proTeam\":{\"abbreviation\":\"LV\",\"location\":\"Las Vegas\",\"name\":\"Raiders\"},\"positionEligibility\":[\"QB\"]},\"requestedGames\":[{\"game\":{\"id\":6310,\"away\":{\"abbreviation\":\"KC\",\"location\":\"Kansas City\",\"name\":\"Chiefs\"},\"home\":{\"abbreviation\":\"LV\",\"location\":\"Las Vegas\",\"name\":\"Raiders\"},\"startTimeEpochMilli\":\"1606094400000\",\"status\":\"FINAL_SCORE\",\"awayScore\":35,\"homeScore\":31,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing TDs\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true}}],\"statsProjected\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"},\"value\":{\"formatted\":\"0/1\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"},\"value\":{\"value\":10.4,\"formatted\":\"10.4\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing 
TDs\"},\"value\":{\"value\":0.1,\"formatted\":\"0.1\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing TDs\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true}}],\"viewingProjectedStats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"},\"value\":{\"formatted\":\"0.0\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"},\"value\":{\"value\":10.4,\"formatted\":\"10.4\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing TDs\"},\"value\":{\"value\":0.1,\"formatted\":\"0.1\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1374255,\"name\":\"Mushroom City 
Karts\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1374255_0_150x150.jpg\",\"initials\":\"MC\"},\"displayGroup\":\"PASSER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1574852400000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":7561,\"nameFull\":\"Bilal Powell\",\"nameShort\":\"B. Powell\",\"proTeamAbbreviation\":\"FA\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/7561.png\",\"nameFirst\":\"Bilal\",\"nameLast\":\"Powell\",\"proTeam\":{\"abbreviation\":\"FA\",\"location\":\"Free\",\"name\":\"Agent\",\"isFreeAgent\":true},\"positionEligibility\":[\"RB\"]},\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"RUSHER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire 
Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"bidAmount\":32,\"waiverResolutionTeams\":[{\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"bid\":32}]}},{\"timeEpochMilli\":\"1574852400000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":11373,\"nameFull\":\"Mike Davis\",\"nameShort\":\"M. Davis\",\"proTeamAbbreviation\":\"CAR\",\"position\":\"RB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11373.png\",\"nflByeWeek\":13,\"news\":[{\"timeEpochMilli\":\"1606081315000\",\"contents\":\"Davis rushed 19 times for 64 yards and a touchdown and brought in both his targets for 15 yards in the Panthers' 20-0 win over the Lions on Sunday.\",\"analysis\":\"Making another start for Christian McCaffrey (shoulder), Davis looked appreciably better than he had in weeks while facing some tough matchups. The veteran opened the scoring on the day with a one-yard walk-in touchdown to cap off a 10-play, 95-yard march late in the first quarter, a play that helped ensure he wouldn't have another pedestrian fantasy performance. Davis will be called upon again Week 12 if McCaffrey remains sidelined for a road matchup against the Vikings.\",\"title\":\"Gets back into end zone Sunday\"},{\"timeEpochMilli\":\"1606085862000\",\"contents\":\"Carolina Panthers running back Mike Davis compiled 19 rushes for 64 yards and a score along with two receptions for 15 yards on two targets in Week 11. Davis continued to fill in as the RB1 while Christian McCaffrey recovers from a shoulder injury. If McCaffrey returns, then Davis is no longer fantasy relevant. If the star RB cannot play, Davis should remain in the starting tier due to volume. 
Nevertheless, the backup RB has not rushed for more than 66 yards since Week 5.\",\"url\":\"https://www.rotoballer.com/player-news/mike-davis-has-rushing-touchdown/806838\",\"title\":\"Mike Davis Has Rushing Touchdown\"}],\"nameFirst\":\"Mike\",\"nameLast\":\"Davis\",\"proTeam\":{\"abbreviation\":\"CAR\",\"location\":\"Carolina\",\"name\":\"Panthers\"},\"positionEligibility\":[\"RB\"]},\"requestedGames\":[{\"game\":{\"id\":6302,\"away\":{\"abbreviation\":\"DET\",\"location\":\"Detroit\",\"name\":\"Lions\"},\"home\":{\"abbreviation\":\"CAR\",\"location\":\"Carolina\",\"name\":\"Panthers\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":64.0,\"formatted\":\"64\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"2/2\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":15.0,\"formatted\":\"15\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":1.0,\"formatted\":\"1\"}}],\"statsProjected\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":58.36,\"formatted\":\"58.4\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"3/3\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving 
Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":27.03,\"formatted\":\"27\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"},\"value\":{\"value\":0.72,\"formatted\":\"0.7\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams TDs\"}}],\"viewingProjectedStats\":[{\"category\":{\"id\":22,\"abbreviation\":\"Yd\",\"nameSingular\":\"Rushing Yard\",\"namePlural\":\"Rushing Yards\"},\"value\":{\"value\":58.36,\"formatted\":\"58.4\"}},{\"category\":{\"id\":175,\"abbreviation\":\"Rec\",\"nameSingular\":\"Target % Caught\",\"namePlural\":\"Target % Caught\"},\"value\":{\"value\":100.0,\"formatted\":\"3/3\"}},{\"category\":{\"id\":42,\"abbreviation\":\"Yd\",\"nameSingular\":\"Receiving Yard\",\"namePlural\":\"Receiving Yards\"},\"value\":{\"value\":27.03,\"formatted\":\"27\"}},{\"category\":{\"id\":29,\"abbreviation\":\"TD\",\"nameSingular\":\"Offensive + Special Teams TD\",\"namePlural\":\"Offensive + Special Teams 
TDs\"},\"value\":{\"value\":0.72,\"formatted\":\"0.7\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373393,\"name\":\"Philadelphia Fire\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373393_0_150x150.jpg\",\"initials\":\"PF\"},\"displayGroup\":\"RUSHER\",\"rankFantasy\":{\"ordinal\":64,\"positions\":[{\"position\":{\"label\":\"RB\",\"group\":\"START\",\"eligibility\":[\"RB\"],\"colors\":[\"DRAFT_BOARD_GREEN\"]},\"ordinal\":13,\"formatted\":\"13\",\"rating\":\"RATING_GOOD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":7.4,\"formatted\":\"7.4\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":8.1,\"formatted\":\"8.1\"},\"duration\":3,\"underPerforming\":true},{\"value\":{\"value\":9.08,\"formatted\":\"9.08\"},\"duration\":5}],\"seasonTotal\":{\"value\":131.99998,\"formatted\":\"132\"},\"seasonAverage\":{\"value\":13.199999,\"formatted\":\"13.2\"},\"seasonsStandartDeviation\":{\"value\":8.4833975,\"formatted\":\"8.48\"}},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1574247600000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":11184,\"nameFull\":\"Marcus Mariota\",\"nameShort\":\"M. 
Mariota\",\"proTeamAbbreviation\":\"LV\",\"position\":\"QB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/11184.png\",\"nflByeWeek\":6,\"injury\":{\"typeAbbreviaition\":\"OUT\",\"description\":\"Coach's Decision\",\"severity\":\"OUT\",\"typeFull\":\"Out\"},\"nameFirst\":\"Marcus\",\"nameLast\":\"Mariota\",\"proTeam\":{\"abbreviation\":\"LV\",\"location\":\"Las Vegas\",\"name\":\"Raiders\"},\"positionEligibility\":[\"QB\"]},\"requestedGames\":[{\"game\":{\"id\":6310,\"away\":{\"abbreviation\":\"KC\",\"location\":\"Kansas City\",\"name\":\"Chiefs\"},\"home\":{\"abbreviation\":\"LV\",\"location\":\"Las Vegas\",\"name\":\"Raiders\"},\"startTimeEpochMilli\":\"1606094400000\",\"status\":\"FINAL_SCORE\",\"awayScore\":35,\"homeScore\":31,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"LOSE\",\"awayResult\":\"WIN\"},\"stats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing TDs\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true}}],\"statsProjected\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"},\"value\":{\"formatted\":\"0/1\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"},\"value\":{\"value\":10.4,\"formatted\":\"10.4\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing 
TDs\"},\"value\":{\"value\":0.1,\"formatted\":\"0.1\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingProjectedPoints\":{\"formatted\":\"0\"},\"viewingActualStats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing TDs\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true}}],\"viewingProjectedStats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"},\"value\":{\"formatted\":\"0.0\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"},\"value\":{\"value\":10.4,\"formatted\":\"10.4\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing TDs\"},\"value\":{\"value\":0.1,\"formatted\":\"0.1\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true},\"value\":{\"formatted\":\"0\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1374255,\"name\":\"Mushroom City 
Karts\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1374255_0_150x150.jpg\",\"initials\":\"MC\"},\"displayGroup\":\"PASSER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"waiverResolutionTeams\":[{\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}]}},{\"timeEpochMilli\":\"1574247600000\",\"transaction\":{\"type\":\"TRANSACTION_DROP\",\"player\":{\"proPlayer\":{\"id\":12232,\"nameFull\":\"Brandon Allen\",\"nameShort\":\"B. Allen\",\"proTeamAbbreviation\":\"CIN\",\"position\":\"QB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/12232.png\",\"nflByeWeek\":9,\"news\":[{\"timeEpochMilli\":\"1606160818000\",\"contents\":\"The Bengals will sign Allen to the active roster Monday, Tom Pelissero of NFL Network reports.\",\"analysis\":\"Allen was on the Bengals' practice squad but will slot into the backup role behind Ryan Finley following news that Joe Burrow suffered a torn ACL and MCL during Sunday's loss to Washington. 
Over three games with the Broncos last season, Allen completed 39 of 84 passes (46 percent) for 515 yards, three touchdowns and two interceptions.\",\"title\":\"Signs with Bengals' active roster\"}],\"nameFirst\":\"Brandon\",\"nameLast\":\"Allen\",\"proTeam\":{\"abbreviation\":\"CIN\",\"location\":\"Cincinnati\",\"name\":\"Bengals\"},\"positionEligibility\":[\"QB\"]},\"requestedGames\":[{\"game\":{\"id\":6306,\"away\":{\"abbreviation\":\"CIN\",\"location\":\"Cincinnati\",\"name\":\"Bengals\"},\"home\":{\"abbreviation\":\"WAS\",\"location\":\"Washington\",\"name\":\"Football Team\"},\"startTimeEpochMilli\":\"1606068000000\",\"status\":\"FINAL_SCORE\",\"awayScore\":9,\"homeScore\":20,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing TDs\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true}}],\"pointsActual\":{\"formatted\":\"—\"},\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":12,\"abbreviation\":\"%\",\"nameSingular\":\"Completion Percentage\",\"namePlural\":\"Completion Percentage\"}},{\"category\":{\"id\":3,\"abbreviation\":\"Yd\",\"nameSingular\":\"Passing Yard\",\"namePlural\":\"Passing Yards\"}},{\"category\":{\"id\":5,\"abbreviation\":\"TD\",\"nameSingular\":\"Passing TD\",\"namePlural\":\"Passing 
TDs\"}},{\"category\":{\"id\":7,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\",\"lowerIsBetter\":true}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"displayGroup\":\"PASSER\",\"lastX\":[{\"duration\":1},{\"duration\":3},{\"duration\":5}]},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}},{\"timeEpochMilli\":\"1574247600000\",\"transaction\":{\"type\":\"TRANSACTION_CLAIM\",\"player\":{\"proPlayer\":{\"id\":13397,\"nameFull\":\"Kenny Moore\",\"nameShort\":\"K. Moore\",\"proTeamAbbreviation\":\"IND\",\"position\":\"CB\",\"headshotUrl\":\"https://d26bvpybnxg29h.cloudfront.net/nfl/13397.png\",\"nflByeWeek\":7,\"nameFirst\":\"Kenny\",\"nameLast\":\"Moore\",\"proTeam\":{\"abbreviation\":\"IND\",\"location\":\"Indianapolis\",\"name\":\"Colts\"},\"positionEligibility\":[\"CB\"]},\"requestedGames\":[{\"game\":{\"id\":6305,\"away\":{\"abbreviation\":\"GB\",\"location\":\"Green Bay\",\"name\":\"Packers\"},\"home\":{\"abbreviation\":\"IND\",\"location\":\"Indianapolis\",\"name\":\"Colts\"},\"startTimeEpochMilli\":\"1606080300000\",\"status\":\"FINAL_SCORE\",\"awayScore\":31,\"homeScore\":34,\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"homeResult\":\"WIN\",\"awayResult\":\"LOSE\"},\"stats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo 
Tackles\"},\"value\":{\"value\":10.0,\"formatted\":\"10\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"},\"value\":{\"formatted\":\"0\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"},\"value\":{\"formatted\":\"0\"}}],\"pointsActual\":{\"formatted\":\"—\"},\"participant\":\"HOME\",\"period\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true}}],\"viewingActualPoints\":{\"formatted\":\"—\"},\"viewingActualStats\":[{\"category\":{\"id\":82,\"abbreviation\":\"Ast\",\"nameSingular\":\"Assisted Tackle\",\"namePlural\":\"Assisted Tackles\"}},{\"category\":{\"id\":83,\"abbreviation\":\"Solo\",\"nameSingular\":\"Solo Tackle\",\"namePlural\":\"Solo Tackles\"}},{\"category\":{\"id\":84,\"abbreviation\":\"INT\",\"nameSingular\":\"Interception\",\"namePlural\":\"Interceptions\"}},{\"category\":{\"id\":85,\"abbreviation\":\"Sack\",\"nameSingular\":\"Sack\",\"namePlural\":\"Sacks\"}}],\"transactionStatus\":{\"locked\":{},\"isLineupStatusLocked\":true},\"requestedGamesPeriod\":{\"ordinal\":11,\"startEpochMilli\":\"1605610800000\",\"isNow\":true},\"viewingFormat\":\"TOTAL\",\"viewingRange\":{\"low\":-1,\"high\":-2},\"owner\":{\"id\":1373475,\"name\":\"Winterfell Dire 
Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"displayGroup\":\"DEFENDER\",\"rankFantasy\":{\"ordinal\":98,\"positions\":[{\"position\":{\"label\":\"CB\",\"group\":\"START\",\"eligibility\":[\"CB\"],\"colors\":[\"DRAFT_BOARD_PURPLE\"]},\"ordinal\":11,\"formatted\":\"11\",\"rating\":\"RATING_VERY_GOOD\"}],\"season\":2020},\"lastX\":[{\"value\":{\"value\":1.5,\"formatted\":\"1.5\"},\"duration\":1,\"underPerforming\":true},{\"value\":{\"value\":11.13,\"formatted\":\"11.13\"},\"duration\":3},{\"value\":{\"value\":12.08,\"formatted\":\"12.08\"},\"duration\":5}],\"isKeeper\":true,\"seasonTotal\":{\"value\":111.5,\"formatted\":\"111.5\"},\"seasonAverage\":{\"value\":12.388889,\"formatted\":\"12.39\"},\"seasonsStandartDeviation\":{\"value\":9.238259,\"formatted\":\"9.24\"},\"seasonConsistency\":\"RATING_BAD\"},\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"},\"waiverResolutionTeams\":[{\"team\":{\"id\":1373475,\"name\":\"Winterfell Dire Wolves\",\"logoUrl\":\"https://s3.amazonaws.com/fleaflicker/t1373475_0_150x150.jpg\",\"initials\":\"WD\"}}]}}],\"resultOffsetNext\":150}"),
date = structure(1606180797, class = c("POSIXct", "POSIXt"), tzone = "GMT"), times = c(
redirect = 0, namelookup = 3.8e-05,
connect = 4.1e-05, pretransfer = 0.000138, starttransfer = 0.038625,
total = 0.039008
)
), class = "response")
|
5306cdf9b0291a6ad88f0fdb8c3e1cb000cf3ccf
|
fbb23e88df629fc696b48844772f7db137d18460
|
/man/StageRefClass-class.Rd
|
eff40fa94bc894eb1b12101c036c63deeca39258
|
[] |
no_license
|
BigelowLab/genologicsr
|
4dc9941bdc7ad531baabb1dc010081a20a5e35fe
|
df5ed969f7258bff2cc29fba82dc07cce980d8c1
|
refs/heads/master
| 2020-04-04T21:15:23.113184
| 2018-07-19T18:32:55
| 2018-07-19T18:32:55
| 38,256,262
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 318
|
rd
|
StageRefClass-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Stage.R
\docType{class}
\name{StageRefClass-class}
\alias{StageRefClass-class}
\alias{StageRefClass}
\title{A Stage representation that subclasses from NodeRefClass}
\description{
A Stage representation that subclasses from NodeRefClass
}
|
90876b27dd70655024a857b5353ab2d11bbd9c98
|
273280b690b0af2b20941d0218c3191ff84bf3bd
|
/cmap4r/R/table_info.R
|
a3089dffd2b7e80a851d223facf3373871508067
|
[] |
no_license
|
simonscmap/cmap4r
|
ecbd9072f25ae22a793e0f81d39cb18af6921c1c
|
d594215666e0a6617281e1268fc1165b956e87b3
|
refs/heads/master
| 2022-09-03T00:30:29.503796
| 2022-08-17T22:07:20
| 2022-08-17T22:07:20
| 193,917,501
| 8
| 4
| null | 2022-08-17T22:07:21
| 2019-06-26T14:10:00
|
HTML
|
UTF-8
|
R
| false
| false
| 16,219
|
r
|
table_info.R
|
#########################################################################
### All functions here are provides table informations functions ###
#########################################################################
#' Returns a boolean outcome checking if a field (varName) exists in a table (data set).
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @param varName specify short name of a variable in the table.
#' @export
#' @return boolean outcome: \code{TRUE} if the column exists in the table, \code{FALSE} otherwise.
#' @examples
#' \donttest{
#' #
#' ## Input: Table name; variable name
#' tableName <- "tblArgoMerge_REP" # table name
#' varName <- "argo_merge_chl_adj" # variable name
#' #
#' ## Variable attribute:
#' var_exist <- has_field(tableName, varName)
#' var_exist
#' #
#' }
has_field <- function(tableName, varName){
  apiKey <- get_api_key()
  myquery <- sprintf("SELECT COL_LENGTH('%s', '%s') AS RESULT ",
                     tableName, varName)
  result <- query(myquery, apiKey)[1, "RESULT"]
  # COL_LENGTH() returns NULL (surfacing here as NA) when the column does
  # not exist. The previous check, length(...) > 0, was always TRUE because
  # a single NA still has length 1; test the value itself instead.
  length(result) == 1 && !is.na(result)
}
#' Returns top n records from a table on the Simons CMAP.
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @param nrows number of rows to retrieve (default 5).
#' @return the first \code{nrows} rows of the table as a dataframe
#' @export
#' @examples
#' \donttest{
#'
#' ## Input: Table name;
#' tableName <- "tblArgoMerge_REP" # table name
#' #
#' ## Top n rows:
#' tbl.subset <- get_head(tableName, nrows=10)
#' tbl.subset
#' }
get_head <- function(tableName, nrows = 5){
  # Build a T-SQL TOP(n) query and run it against the CMAP database.
  sql <- sprintf("select TOP(%d) * FROM %s", nrows, tableName)
  query(sql, get_api_key())
}
#' Returns the list of column variables in a table.
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @return column variable names of the table as a dataframe
#' @export
#' @examples
#' \donttest{
#' #
#' ## Input: Table name;
#' tableName <- "tblAMT13_Chisholm" # table name
#' #
#' ## Subset selection:
#' tbl.columns <- get_columns(tableName)
#' tbl.columns
#' #
#' }
get_columns <- function(tableName){
  # Query the INFORMATION_SCHEMA metadata view for the table's column names.
  sql <- sprintf("SELECT COLUMN_NAME [Columns] FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = N'%s'", tableName)
  query(sql, get_api_key())
}
#' Returns a catalog of the Simons CMAP database.
#'
#'
#' @return Simons CMAP catalog as a dataframe object
#' @export
#' @examples
#' \donttest{
#' #
#' ## Variable attribute:
#' cmap.catalog <- get_catalog()
#' cmap.catalog
#' #
#' }
get_catalog <- function(){
  # The catalog is exposed server-side as a stored procedure.
  query("EXEC uspCatalog", get_api_key())
}
#' Returns a single-row dataframe containing the attribute of a variable associated with a table on the Simons CMAP database.
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @param varName specify short name of a variable in the table.
#' @export
#' @return attributes of the variable as a dataframe
#' @examples
#' \donttest{
#' #
#' ## Input: Table name; variable name
#' tableName <- "tblArgoMerge_REP" # table name
#' varName <- "argo_merge_chl_adj" # variable name
#' #
#' ## Variable attribute:
#' tbl.var <- get_var(tableName, varName)
#' tbl.var
#' }
get_var <- function(tableName, varName){
  # Look up the variable's row in the server-side tblVariables metadata table.
  sql <- sprintf("SELECT * FROM tblVariables WHERE Table_Name='%s' AND Short_Name='%s'", tableName, varName)
  query(sql, get_api_key())
}
#' Return a single-row dataframe about a table variable from the catalog of the Simons CMAP database.
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @param varName specify short name of a variable in the table.
#' @return attributes of the variable on the catalog as a dataframe.
#' @export
#' @examples
#' \donttest{
#'
#' #
#' ## Input: Table name; variable name
#' tableName <- "tblArgoMerge_REP" # table name
#' varName <- "argo_merge_chl_adj" # variable name
#' #
#' ## Variable attribute:
#' tbl.catlog.var <- get_var_catalog(tableName, varName)
#' tbl.catlog.var
#' #
#' }
get_var_catalog <- function(tableName, varName){
  # Filter the server-side catalog table function down to this one variable.
  sql <- sprintf("SELECT * FROM [dbo].udfCatalog() WHERE Table_Name='%s' AND Variable='%s'" ,tableName, varName)
  query(sql, get_api_key())
}
#' Returns the long name of a given variable.
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @param varName specify short name of a variable in the table.
#' @return long name of the table variable
#' @export
#' @examples
#' \donttest{
#' tableName <- "tblArgoMerge_REP" # table name
#' varName <- "argo_merge_chl_adj" # variable name
#' #
#' ## Variable attribute:
#' varLonName <- get_var_long_name(tableName, varName)
#' varLonName
#' #
#' }
get_var_long_name <- function(tableName, varName){
  # Pull only the Long_Name field from the variable's metadata row.
  get_var(tableName, varName)[1, "Long_Name"]
}
#' Returns the unit of a table variable on the Simons CMAP database.
#'
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @param varName specify short name of a variable in the table.
#' @return measuring unit of the table variable
#' @export
#' @examples
#' \donttest{
#' #
#' ## Input: Table name; variable name
#' tableName <- "tblArgoMerge_REP" # table name
#' varName <- "argo_merge_chl_adj" # variable name
#' #
#' ## Variable attribute:
#' unitName <- get_var_unit(tableName, varName)
#' unitName
#' #
#' }
# Defined with `<-` for consistency: every other function in this file uses
# `<-`, while this one originally used `=` at the top level.
get_var_unit <- function(tableName, varName){
  get_var_catalog(tableName, varName)[1, "Unit"]
}
#' Returns a single-row dataframe from the database catalog containing the
#' variable's spatial and temporal resolutions.
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @param varName specify short name of a variable in the table.
#' @return resolution of the table variable as a dataframe
#' @export
#' @examples
#' \donttest{
#' #
#' ## Input: Table name; variable name
#' tableName <- "tblArgoMerge_REP" # table name
#' varName <- "argo_merge_chl_adj" # variable name
#' #
#' ## Variable attribute:
#' varResolution <- get_var_resolution(tableName, varName)
#' varResolution
#' #
#' }
get_var_resolution <- function(tableName, varName){
  # Keep only the resolution columns of the variable's catalog row.
  res_cols <- c("Temporal_Resolution", "Spatial_Resolution")
  get_var_catalog(tableName, varName)[, res_cols]
}
#' Returns a single-row dataframe from the database catalog containing the
#' variable's spatial and temporal coverage.
#'
#'
#' @param tableName table name.
#' @param varName variable name.
#' @export
#' @return spatio-temporal range information of a table variable as dataframe
#' @examples
#' \donttest{
#' #
#' ## Input: Table name; variable name
#' tableName <- "tblArgoMerge_REP" # table name
#' varName <- "argo_merge_chl_adj" # variable name
#' #
#' ## Variable attribute:
#' varCoverage <- get_var_coverage(tableName, varName)
#' varCoverage
#' #
#' }
get_var_coverage <- function(tableName, varName){
  # Min/max bounds of the variable along each physical dimension.
  coverage_cols <- c(
    'Time_Min', 'Time_Max',
    'Lat_Min', 'Lat_Max',
    'Lon_Min', 'Lon_Max',
    'Depth_Min', 'Depth_Max'
  )
  get_var_catalog(tableName, varName)[, coverage_cols]
}
#' Returns a single-row dataframe from the database catalog containing the variable's summary statistics.
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @param varName specify short name of a variable in the table.
#' @return numerical attribute of a table variable as dataframe
#' @export
#' @examples
#' \donttest{
#' #
#' ## Input: Table name; variable name
#' tableName <- "tblArgoMerge_REP" # table name
#' varName <- "argo_merge_chl_adj" # variable name
#' #
#' ## Variable attribute:
#' varStats <- get_var_stat(tableName, varName)
#' varStats
#' #
#' }
get_var_stat <- function(tableName, varName){
  # Pre-computed summary statistics stored alongside the variable in the
  # catalog (min/max/mean/sd/count and quartiles).
  stat_cols <- c(
    'Variable_Min', 'Variable_Max', 'Variable_Mean', 'Variable_Std',
    'Variable_Count', 'Variable_25th', 'Variable_50th', 'Variable_75th'
  )
  get_var_catalog(tableName, varName)[, stat_cols]
}
#' Returns a boolean indicating whether the variable is a gridded product or has irregular spatial resolution.
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @param varName specify short name of a variable in the table.
#' @return boolean (TRUE if gridded, FALSE if irregular), or NULL when no
#'   catalog entry exists for the table/variable pair.
#' @export
#' @examples
#' \donttest{
#'
#' #
#' ## Input:
#' table <- c("tblArgoMerge_REP") # table name
#' variable <- c("argo_merge_chl_adj") # variable name
#' #
#' is_grid(table, variable)
#'
#' #
#' }
is_grid <- function(tableName, varName){
  apiKey <- get_api_key()
  # Build the SQL in one paste0() instead of chained paste(sep = "").
  # NOTE(review): the query interpolates tableName/varName directly via
  # sprintf — inputs are expected to be trusted catalog names, not
  # arbitrary user text (SQL-injection risk otherwise).
  myquery <- paste0(
    "SELECT Spatial_Res_ID, RTRIM(LTRIM(Spatial_Resolution)) AS Spatial_Resolution FROM tblVariables ",
    "JOIN tblSpatial_Resolutions ON [tblVariables].Spatial_Res_ID=[tblSpatial_Resolutions].ID ",
    sprintf("WHERE Table_Name='%s' AND Short_Name='%s' ", tableName, varName)
  )
  df <- query(myquery, apiKey)
  # No catalog entry found for this table/variable pair.
  if (nrow(df) < 1) return(NULL)
  # Anything other than the literal "irregular" resolution is gridded.
  return(tolower(df$Spatial_Resolution[1]) != 'irregular')
}
#' Returns True if the table represents a climatological data set.
#' Currently, the logic is based on the table name.
#' Ultimately, it should query the DB to determine if it's a climatological data set.
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @export
#' @return boolean
#' @examples
#' \donttest{
#' #
#' ## Input:
#' table <- "tblDarwin_Plankton_Climatology" # table name
#' #
#' is_climatology(table)
#'
#' #
#' }
is_climatology <- function(tableName){
  # grepl() yields a logical directly; any() preserves the original
  # "does any element match?" semantics of length(grep(...)) != 0 for
  # vector input. fixed = TRUE because "_Climatology" is a literal, not
  # a regular expression.
  return(any(grepl('_Climatology', tableName, fixed = TRUE)))
}
#' Returns a dataframe containing the associated metadata. The inputs can be strings (if only one table, and variable is passed) or a list of string literals.
#'
#' @param tables vector of table names from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @param variables specify short name of the corresponding table variables.
#' @return metadata associated with all the table variables as dataframe.
#' @export
#' @examples
#' \donttest{
#'
#' #
#' ## Input:
#' tables <- c('tblsst_AVHRR_OI_NRT', 'tblArgoMerge_REP') # table name
#' variables <- c('sst', 'argo_merge_salinity_adj') # variable name
#'
#' metadata <- get_metadata(tables, variables)
#' metadata
#' }
get_metadata <- function(tables, variables){
  # Expand a one-row metadata frame to `a` rows: the first row keeps the
  # metadata values, the remaining rows are NaN placeholders that only
  # carry the per-reference values assigned by the caller afterwards.
  append_df <- function(df, a){
    out <- data.frame(matrix(NaN, a, ncol(df)))
    names(out) <- names(df)
    out[1,] <- data.frame(df)[1,]
    out
  }
  # Collect one expanded frame per table/variable pair and bind once at
  # the end instead of growing a data.frame with rbind inside the loop.
  pieces <- vector("list", length(tables))
  for(i in seq_along(tables)){
    df <- get_metadata_noref(tables[i], variables[i])
    datasetID <- df$Dataset_ID[1]
    refs <- get_references(datasetID)
    # One output row per reference attached to the dataset.
    df <- append_df(df, length(refs$Reference))
    df$Reference <- refs$Reference
    pieces[[i]] <- df
  }
  # Match the original behavior of returning an empty data.frame for
  # zero-length input.
  if (length(pieces) == 0) return(data.frame())
  metadata <- do.call(rbind, pieces)
  return(metadata)
}
# fromCatalog boolean variable to obtain number of observations in a table from the Simons CMAP catalog
#' Retrieve the number of observations in the subset of a table from the Simons CMAP database using the space-time range inputs (dt1, dt2, lat1, lat2, lon1, lon2, depth1, depth2).
#'
#' @param tableName table name from the Simons CMAP database. Use "get_catalog()" to retrieve list of tables on the database.
#' @param dt1 start date or datetime (lower bound of temporal cut). Example values: '2016-05-25' or '2017-12-10 17:25:00'
#' @param dt2 end date or datetime (upper bound of temporal cut). Example values: '2016-04-30' or '2016-04-30 17:25:00'
#' @param lat1 start latitude [degree N] of the meridional cut; ranges from -90° to 90°.
#' @param lat2 end latitude [degree N] of the meridional cut; ranges from -90° to 90°.
#' @param lon1 start longitude [degree E] of the zonal cut; ranges from -180° to 180°.
#' @param lon2 end longitude [degree E] of the zonal cut; ranges from -180° to 180°.
#' @param depth1 positive value specifying the start depth [m] of the vertical cut. Note that depth is 0 at surface and grows towards ocean floor. Defaults to 0 if not provided.
#' @param depth2 positive value specifying the end depth [m] of the vertical cut. Note that depth is 0 at surface and grows towards ocean floor. Defaults to 0 if not provided.
#' @param fromCatalog boolean variable to obtain number of observations in a table from the Simons CMAP catalog
#' @return number of observations in the requested space-time subset of the table.
#' @export
#' @examples
#' \donttest{
#' ## Input: Table name; variable name, space time range information
#' tableName <- "tblsst_AVHRR_OI_NRT" # table name
#' # Range variable [lat,lon,time]
#' lat1 = 10; lat2 = 70
#' lon1 = -180; lon2 = -80
#' dt1 = "2016-04-30"; dt2 = "2016-04-30"
#' #
#' ## Subset selection:
#' ncount <- get_count(tableName, lat1, lat2, lon1, lon2, dt1, dt2)
#' ncount
#' #
#' }
get_count <- function(tableName, lat1 = NULL, lat2 = NULL,
                      lon1 = NULL, lon2 = NULL,
                      dt1 = NULL, dt2 = NULL,
                      depth1 = NULL, depth2 = NULL,
                      fromCatalog = FALSE){
  # Assigning c(NULL, NULL) to a list element drops it, so only the range
  # dimensions the caller actually supplied survive in this list.
  range_var <- list()
  range_var$time <- c(dt1, dt2)
  range_var$lat <- c(lat1, lat2)
  range_var$lon <- c(lon1, lon2)
  range_var$depth <- c(depth1, depth2)
  if (!fromCatalog) {
    if (length(range_var) == 0) {
      # No constraints supplied: count the whole table.
      full_query <- sprintf("select count(*) from %s", tableName)
    } else {
      tout <- NULL
      for (tmp in names(range_var)) {
        # A single supplied bound is treated as a degenerate [x, x] range.
        if (length(range_var[[tmp]]) == 1)
          range_var[[tmp]] <- rep(range_var[[tmp]], 2)
        # Temporarily mark datetime bounds with "\n"; the gsub below turns
        # the markers into the single quotes SQL needs around literals.
        if (tmp == 'time')
          range_var[[tmp]] <- paste("\n", range_var[[tmp]], "\n", sep = '')
        tout <- c(tout, paste(tmp, 'between', range_var[[tmp]][1], 'and', range_var[[tmp]][2]))
      }
      filt_query <- paste0(tout, collapse = ' and ')
      sub_query <- sprintf("select count(*) from %s where", tableName)
      full_query <- paste(sub_query, filt_query)
      full_query <- gsub('\n', "'", full_query)
    }
    # The count comes back as the name of the returned value.
    tmp <- exec_manualquery(full_query)
    ncount <- as.numeric(names(tmp))
  } else {
    # Fast path: read the pre-computed observation count from the catalog.
    ab <- get_catalog()
    index <- tolower(ab$Table_Name) == tolower(tableName)
    ncount <- max(ab$Variable_Count[index], na.rm = TRUE)
  }
  return(ncount)
}
# # in case if only table names are provided
# if (length(range_var) == 0) {
# ab <- get_catalog()
# index <- tolower(ab$Table_Name) == tolower(tableName)
# range_var$time <- c(ab$Time_Min[index], ab$Time_Max[index])
# range_var$lat <- c(ab$Lat_Min[index], ab$Lat_Max[index])
# range_var$lon <- c(ab$Lon_Min[index], ab$Lon_Max[index])
# range_var$depth <- c(ab$Depth_Min[index], ab$Depth_Max[index])
# range_var <- lapply(range_var, function(x){
# if (any(is.na(x))) x = NULL
# x
# })
# range_var[sapply(range_var, is.null)] <- NULL
# }
# # if (!fromCatalog) {
# tout <- NULL
# for (tmp in names(range_var)) {
# if (length(range_var[[tmp]]) == 1)
# range_var[[tmp]] <- rep(range_var[[tmp]],2)
# if (tmp == 'time')
# range_var[[tmp]] <- paste("\n",range_var[[tmp]],"\n", sep = '')
# tout <- c( tout, paste(tmp, 'between',range_var[[tmp]][1],'and',range_var[[tmp]][2]))
# }
# filt_query <- paste0(tout, collapse = ' and ')
# sub_query <- sprintf("select count(*) from %s where",tableName )
# full_query <- paste(sub_query, filt_query)
# full_query <- gsub('\n',"'",full_query)
# tmp <- exec_manualquery(full_query)
# ncount <- as.numeric(names(tmp))
# }
# else {
# ab <- get_catalog()
# index <- tolower(ab$Table_Name) == tolower(tableName)
# ncount <- max(ab$Variable_Count[index],na.rm = T)
# }
|
623ecdc7ac8724a3b2e9a3cb7f2bc7342c866d2c
|
7a1fc7bd0f79ea344c6aae208ad058e1346c4678
|
/EDA/functions/DiscreteBar.R
|
d408dede72448329a1f9a5690b0274c7187808e3
|
[] |
no_license
|
ShuqiYao/myeda
|
d03e27d4516b76a6ed6b3268f95776d7583dfc11
|
cfb5c9a02667976ec4ee15d3aea6d30a9968ab57
|
refs/heads/master
| 2021-05-09T14:04:37.077992
| 2018-02-25T07:41:11
| 2018-02-25T07:41:11
| 119,053,696
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 390
|
r
|
DiscreteBar.R
|
## Bar chart
# Draw a ggplot2 bar chart of a discrete variable; x is coerced to a
# data.frame when a bare vector is passed. Extra arguments (...) are
# forwarded to geom_bar().
# NOTE(review): the guard compares the number of non-NA observations
# (sum of the frequency table) against 500 and only plots when it is
# LARGER, yet the else-branch message talks about "too many categories".
# The intended condition may be the number of distinct levels — confirm
# with the author before changing the logic; behavior kept as-is here.
# NOTE(review): aes_string() is deprecated in recent ggplot2; kept for
# compatibility with the version this project uses.
DiscreteBar <- function(x,...) {
  is_data_table <- is.data.frame(x)
  if (!is_data_table) {x <- data.frame(x)}
  if (sum(table(na.omit(x)))>500){
    ggplot(x, aes_string(x = names(x))) +
      geom_bar(color = "black",na.rm = TRUE,alpha=0.4,...)+
      labs(title= paste(names(x),'barplot'))} else {
        print("There are too many categories to plot barplot")
      }
}
#test
# DiscreteBar(x)
|
b54c001fd69e1431015c9ef5fcb937e46846cf75
|
bfa3ab3a8584fb0bc48a4bafec31076d1fec5382
|
/data/beta.R
|
acee3be2d90cb688d49fd86ea282c6aff76365d5
|
[] |
no_license
|
vittoriomaggio/RBusinessProject
|
6ec1081fd9ad10534f973e00bc59bcd200ab20b6
|
ce58f23ea56e5474b7163c10640d6e9b23221d5a
|
refs/heads/master
| 2020-04-21T02:54:55.084261
| 2019-02-06T10:13:15
| 2019-02-06T10:13:15
| 169,268,753
| 0
| 1
| null | 2019-02-05T17:14:49
| 2019-02-05T16:02:48
|
R
|
UTF-8
|
R
| false
| false
| 5,276
|
r
|
beta.R
|
library(quantmod)
library(PerformanceAnalytics)
#Get returns
# Download daily prices for each ticker over the analysis window, collapse
# them to monthly bars, and convert adjusted close prices to monthly log
# returns (diff of log prices). Each resulting xts series carries a single
# column named after the ticker.
start_stream <- '2016-01-01'
end_stream = '2019-01-01'
PSX.xts <- getSymbols("PSX", from=start_stream, to=end_stream, src='yahoo', auto.assign = FALSE )
PSX.xts <- to.monthly(PSX.xts)
PSX <- na.omit(diff(log(PSX.xts$PSX.xts.Adjusted)))
colnames(PSX) <- c("PSX")
AXP.xts <- getSymbols("AXP", from=start_stream, to=end_stream, src='yahoo', auto.assign = FALSE )
AXP.xts <- to.monthly(AXP.xts)
AXP <- na.omit(diff(log(AXP.xts$AXP.xts.Adjusted)))
colnames(AXP) <- c("AXP")
KO.xts <- getSymbols("KO", from=start_stream, to=end_stream, src='yahoo', auto.assign = FALSE )
KO.xts <- to.monthly(KO.xts)
KO <- na.omit(diff(log(KO.xts$KO.xts.Adjusted)))
colnames(KO) <- c("KO")
KHC.xts <- getSymbols("KHC", from=start_stream, to=end_stream, src='yahoo', auto.assign = FALSE )
KHC.xts <- to.monthly(KHC.xts)
KHC <- na.omit(diff(log(KHC.xts$KHC.xts.Adjusted)))
colnames(KHC) <- c("KHC")
WFC.xts <- getSymbols("WFC", from=start_stream, to=end_stream, src='yahoo', auto.assign = FALSE )
WFC.xts <- to.monthly(WFC.xts)
WFC <- na.omit(diff(log(WFC.xts$WFC.xts.Adjusted)))
colnames(WFC) <- c("WFC")
USB.xts <- getSymbols("USB", from=start_stream, to=end_stream, src='yahoo', auto.assign = FALSE )
USB.xts <- to.monthly(USB.xts)
USB <- na.omit(diff(log(USB.xts$USB.xts.Adjusted)))
colnames(USB) <- c("USB")
IBM.xts <- getSymbols("IBM", from=start_stream, to=end_stream, src='yahoo', auto.assign = FALSE )
IBM.xts <- to.monthly(IBM.xts)
IBM <- na.omit(diff(log(IBM.xts$IBM.xts.Adjusted)))
colnames(IBM) <- c("IBM")
# Market
# S&P 500 index is used as the market proxy for the beta regression.
SP500.xts <- getSymbols("^GSPC", from=start_stream, to=end_stream, src='yahoo', auto.assign = FALSE )
SP500.xts <- to.monthly(SP500.xts)
SP500 <- na.omit(diff(log(SP500.xts$SP500.xts.Adjusted)))
colnames(SP500) <- c("SP500")
# Time series to save beta's values
# Initialized to NULL; the calculation loop below seeds them on its first
# pass and rbind-appends afterwards.
PSX_betas.xts <- NULL
AXP_betas.xts <- NULL
KO_betas.xts <- NULL
KHC_betas.xts <- NULL
WFC_betas.xts <- NULL
USB_betas.xts <- NULL
IBM_betas.xts <- NULL
delta_t <- 12 # rolling window length, in months, used for each beta value
length_period = dim(PSX)[1] # length period for the time series
start <- delta_t+1 # first month after the initial delta_t months (NOTE(review): old comment said "20 months" but delta_t is 12)
# CAPM beta of a stock over a window: covariance of the stock's returns
# with the market's, scaled by the market's variance.
beta_function <- function(stock, market_index){
  cov(stock, market_index) / var(market_index)
}
#Betas calculation
# Rolling-window beta: for each month i from `start` on, the beta is
# computed over the delta_t months ending at i-1, and the resulting value
# is timestamped at month i-1. Results are accumulated into the
# *_betas.xts globals initialized above.
# NOTE(review): the loop runs to length_period + 1, so on the last pass
# index(PSX)[i] printed below is out of range (NA) — timestamping itself
# uses index(PSX[(i-1)]) and stays in range.
for (i in start:(length_period + 1)){
  beta_val_PSX <- beta_function(PSX[(i-delta_t):(i-1)], SP500[(i-delta_t):(i-1)])
  beta_val_AXP <- beta_function(AXP[(i-delta_t):(i-1)], SP500[(i-delta_t):(i-1)])
  beta_val_KO <- beta_function(KO[(i-delta_t):(i-1)], SP500[(i-delta_t):(i-1)])
  beta_val_KHC <- beta_function(KHC[(i-delta_t):(i-1)], SP500[(i-delta_t):(i-1)])
  beta_val_WFC <- beta_function(WFC[(i-delta_t):(i-1)], SP500[(i-delta_t):(i-1)])
  beta_val_USB <- beta_function(USB[(i-delta_t):(i-1)], SP500[(i-delta_t):(i-1)])
  beta_val_IBM <- beta_function(IBM[(i-delta_t):(i-1)], SP500[(i-delta_t):(i-1)])
  # Wrap each scalar beta in an xts observation stamped at the window's
  # last month.
  beta_xts_PSX <- as.xts(beta_val_PSX, order.by = index(PSX[(i-1)]))
  beta_xts_AXP <- as.xts(beta_val_AXP, order.by = index(AXP[(i-1)]))
  beta_xts_KO <- as.xts(beta_val_KO, order.by = index(KO[(i-1)]))
  beta_xts_KHC <- as.xts(beta_val_KHC, order.by = index(KHC[(i-1)]))
  beta_xts_WFC <- as.xts(beta_val_WFC, order.by = index(WFC[(i-1)]))
  beta_xts_USB <- as.xts(beta_val_USB, order.by = index(USB[(i-1)]))
  beta_xts_IBM <- as.xts(beta_val_IBM, order.by = index(IBM[(i-1)]))
  # Create a time series of beta for each stock
  if(is.null(PSX_betas.xts)){
    PSX_betas.xts <- beta_xts_PSX
    AXP_betas.xts <- beta_xts_AXP
    KO_betas.xts <- beta_xts_KO
    KHC_betas.xts <- beta_xts_KHC
    WFC_betas.xts <- beta_xts_WFC
    USB_betas.xts <- beta_xts_USB
    IBM_betas.xts <- beta_xts_IBM
  }else{
    PSX_betas.xts <- rbind(PSX_betas.xts,beta_xts_PSX)
    AXP_betas.xts <- rbind(AXP_betas.xts,beta_xts_AXP)
    KO_betas.xts <- rbind(KO_betas.xts,beta_xts_KO)
    KHC_betas.xts <- rbind(KHC_betas.xts,beta_xts_KHC)
    WFC_betas.xts <- rbind(WFC_betas.xts,beta_xts_WFC)
    USB_betas.xts <- rbind(USB_betas.xts,beta_xts_USB)
    IBM_betas.xts <- rbind(IBM_betas.xts,beta_xts_IBM)
  }
  # Print the time window considered for calculation of betas values
  print('------time windows-------')
  print(paste("Start time window:", index(PSX)[i-delta_t]))
  print(paste("End time window: ", index(PSX)[i-1]))
  print('------date for beta------')
  print(paste("Time index beta: ", index(PSX)[i]))
  print(paste("PSX beta:", beta_val_PSX))
  print(paste("AXP beta:", beta_val_AXP))
  print(paste("KO beta:", beta_val_KO))
  print(paste("KHC beta:", beta_val_KHC))
  print(paste("WFC beta:", beta_val_WFC))
  print(paste("USB beta:", beta_val_USB))
  print(paste("IBM beta:", beta_val_IBM))
}
# Quick static plots of two of the beta series.
plot(PSX_betas.xts)
plot(AXP_betas.xts)
# Rename the single column of each beta series for nicer chart legends.
colnames(PSX_betas.xts) = "PSX_Beta"
colnames(AXP_betas.xts) = "AXP_Beta"
colnames(KO_betas.xts) = "KO_Beta"
colnames(KHC_betas.xts) = "KHC_Beta"
colnames(WFC_betas.xts) = "WFC_Beta"
colnames(USB_betas.xts) = "USB_Beta"
colnames(IBM_betas.xts) = "IBM_Beta"
library(dygraphs)
#plot of Betas
# Interactive dygraph charts, one per stock.
dygraph(PSX_betas.xts)
dygraph(AXP_betas.xts)
dygraph(KO_betas.xts)
dygraph(KHC_betas.xts)
dygraph(WFC_betas.xts)
dygraph(USB_betas.xts)
dygraph(IBM_betas.xts)
|
02556cd098e86cc72393335e2f8f526bd9e64b69
|
b92b0e9ba2338ab311312dcbbeefcbb7c912fc2e
|
/build/shogun_lib/examples/documented/r_static/kernel_linear.R
|
c3902755d9c010d6a3f4fc6c406c8ed8da39d872
|
[] |
no_license
|
behollis/muViewBranch
|
384f8f97f67723b2a4019294854969d6fc1f53e8
|
1d80914f57e47b3ad565c4696861f7b3213675e0
|
refs/heads/master
| 2021-01-10T13:22:28.580069
| 2015-10-27T21:43:20
| 2015-10-27T21:43:20
| 45,059,082
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 693
|
r
|
kernel_linear.R
|
# This is an example for the initialization of a linear kernel on real valued
# data using scaling factor 1.2.
# NOTE(review): the header mentions a scaling factor of 1.2, but no such
# factor appears in the calls below — confirm against the shogun example
# this file was derived from.
library("sg")
size_cache <- 10
# Training/test feature matrices: one column per example (hence the
# transpose after read.table).
fm_train_real <- t(as.matrix(read.table('../data/fm_train_real.dat')))
fm_test_real <- t(as.matrix(read.table('../data/fm_test_real.dat')))
# Linear
print('Linear')
dump <- sg('set_kernel', 'LINEAR', 'REAL', size_cache)
dump <- sg('set_features', 'TRAIN', fm_train_real)
# Compute the train kernel matrix under two different normalizations.
dump <- sg('set_kernel_normalization', 'SQRTDIAG')
km1 <- sg('get_kernel_matrix', 'TRAIN')
dump <- sg('set_kernel_normalization', 'AVGDIAG')
km2 <- sg('get_kernel_matrix', 'TRAIN')
#dump <- sg('set_features', 'TEST', fm_test_real)
#km <- sg('get_kernel_matrix', 'TEST')
|
685d021eeb7821d0ce1a2e0247c44e3fc3b74f7f
|
01d3ca8e2d6f10fb9ec98f15673ef9ef4adfed46
|
/man/subset.mcmc.Rd
|
f614c9ca2108e870e71cf075329051c5599c9393
|
[
"MIT"
] |
permissive
|
poissonconsulting/nlist
|
3626376778579afdf1a3edf95fc40a9e0e733b00
|
33d0fbe3f5a4988260cd36d979260b958955dd9b
|
refs/heads/main
| 2023-06-09T05:04:45.643117
| 2023-05-28T22:55:03
| 2023-05-28T22:55:03
| 194,123,871
| 4
| 1
|
NOASSERTION
| 2023-05-28T22:55:04
| 2019-06-27T15:51:53
|
R
|
UTF-8
|
R
| false
| true
| 880
|
rd
|
subset.mcmc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subset.R
\name{subset.mcmc}
\alias{subset.mcmc}
\title{Subset mcmc Object}
\usage{
\method{subset}{mcmc}(x, iters = NULL, pars = NULL, iterations = NULL, parameters = NULL, ...)
}
\arguments{
\item{x}{An mcmc object.}
\item{iters}{An integer vector of iterations.}
\item{pars}{A character vector of parameter names.}
\item{iterations}{An integer vector (or NULL) of the iterations to subset by.}
\item{parameters}{A character vector (or NULL) of the parameters to subset by.}
\item{...}{Unused.}
}
\value{
An mcmc object.
}
\description{
Subsets an mcmc object by its parameters and/or iterations.
}
\details{
Future versions should allow it to be reordered by its parameters.
}
\examples{
mcmc <- as_mcmc(nlist(beta = 1:2, theta = 1))
subset(mcmc, pars = "beta")
subset(mcmc, iters = c(1L,1L))
}
|
b87f9631d43a5775779427f4773ddb670e17e306
|
66332bb30c8d14f824af71a9d418c5d6345f58d1
|
/server.R
|
6e8a40f9de1d58f1337f948a611dbf535bfe1979
|
[] |
no_license
|
moggces/ActivityProfilingGUI
|
f2c2f073733e6c3feba6679e8620e81cf4d78c1e
|
f2bb6533cceba979e31aa91a3a42cfe538c9052e
|
refs/heads/master
| 2020-04-15T23:48:14.361354
| 2017-10-06T15:50:04
| 2017-10-06T15:50:04
| 17,751,883
| 1
| 0
| null | 2017-03-27T15:21:38
| 2014-03-14T16:08:17
|
R
|
UTF-8
|
R
| false
| false
| 28,564
|
r
|
server.R
|
# shiny note: 1) it can't discriminate R vs. r in the r script file
# 2) the renderTable has trouble with encoding issue (cannot recognize ppp file iconv -t UTF-8 -f ISO-8859-1)
# looks like the new read.table can automatically select best encoding
# 3) in shiny server, once you delete a file and replace it with a file of the same name, somehow it does not refresh;
# but if you update the .R file, you can refresh to get the new functions
# chemical_loader() out: list(id, ?(nwauc.logit or npod or nec50 or unknown))
# matrix_subsetter() out: list(activities or ?(nwauc.logit or npod or nec50 or unknown), struct)
# activity_filter() out: same as above
# matrix_editor() out: list(nwauc.logit, npod, nec50,wauc.logit, struct, cv_mark, label)
# heatmap_para_generator() out: list(dcols, drows, annotation, annt_colors, act=act, struct, cv, label)
# tox21_data_generator()
# cas_data_generator()
# select_plot()
# todo:
# 1. download potency plot
# 2. broaden the "unknown" color scheme
# 6. filter by call meta
library(shiny)
library(plyr)
library(reshape2)
library(pheatmap)
library(RColorBrewer)
library(ggplot2)
library(scales)
library(tibble)
library(tidyr)
library(dplyr)
library(stringr)
library(Cairo)
options(stringsAsFactors = FALSE)
#Sys.setlocale(locale="C")
#setwd("~/ShinyApps/profiling/")
# Helper functions are sourced into the local environment.
source(paste(getwd(), "/source/customized.R", sep=""), local=TRUE)
#source(paste(getwd(), "/source/pheatmap_display_number.R", sep=""), local=TRUE)
source(paste(getwd(), "/source/get.R", sep=""), local=TRUE)
source(paste(getwd(), "/source/load.R", sep=""), local=TRUE)
source(paste(getwd(), "/source/mis.R", sep=""), local=TRUE)
#environment(pheatmap_new_label) <- environment(pheatmap) pheatmap v. < 1.0
# load assay related parameters
logit_para_file <- './data/tox21_call_descriptions_v2.txt' #tox21_assay_collection.txt
assay_names <- load_profile(logit_para_file) # global, dataframe output
# load chemical information (will include purity later)
profile_file <- './data/tox21_compound_id_v5a7.txt' #colunm name has to be GSID # v5a3
master <- load_profile(profile_file) # global, dataframe output
# load the activities (all data) and the structure fp matrix
struct_mat_rdata <- './data/struct_mat.RData'
load(struct_mat_rdata, verbose=TRUE) # global, matrix output, struct_mat
activities <- readRDS('./data/activities_combined_170306.rds')
# remove the structures with low purity
#struct_mat <- struct_mat[rownames(struct_mat) %in% rownames(activities[[1]]),]
# very weird!! this line causes no error frozen on shiny public server
# heatmap settings
# Shared color scales for the profile heatmaps: breaks/colors for the
# wAUC view and for the potency (-log10 M) view, plus legend breaks and
# labels for each.
# the negative direction breaks won't capture wauc with very small values
wauc_breaks <- c( -1, -0.75, -0.5, -0.25, -0.1, -0.02, 0, 0.0001, 0.1, 0.25, 0.5, 0.75, 1) # upper is filled , lower is empty
wauc_colors <- c("#053061" ,"#2166AC" ,"#4393C3" ,"#92C5DE", "#D1E5F0", "#F7F7F7", "gray", "#FDDBC7" ,"#F4A582" ,"#D6604D" ,"#B2182B", "#67001F" ) #RdBu
wauc_leg_breaks <- c(-1, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1 )
wauc_leg_labels <- c("-1", "-0.75", "-0.5", "-0.25", "0", "0.25", "0.5", "0.75", "1")
potency_breaks <- c(-0.02, 0, 0.0001, 4, 4.5, 5, 7.5, 9, 10)
potency_colors <- c("#F5F5F5", "gray", "#C7EAE5", "#80CDC1", "#35978F", "#01665E", "#003C30", "chartreuse") #BrBG
potency_leg_breaks <- c( 0, 4, 4.5, 5, 7.5, 9,10 )
potency_leg_labels <- c( "inactive", "100uM", "30uM", "10uM", "0.3uM", "1nM", "0.1nM")
# potency_breaks <- c(-10, -9, -7.5, -5, -4.5, -4, -0.02, 0, 0.0001, 4, 4.5, 5, 7.5, 9, 10)
# potency_colors <- c("darkorange","#543005", "#8C510A", "#BF812D", "#DFC27D", "#F6E8C3", "#F5F5F5", "gray", "#C7EAE5", "#80CDC1", "#35978F", "#01665E", "#003C30", "chartreuse") #BrBG
# potency_leg_breaks <- c(-10, -9, -7.5, -5, -4.5, -4, 0, 4, 4.5, 5, 7.5, 9,10 )
# potency_leg_labels <- c("-10", "-9", "-7.5", "-5", "-4.5", "-4", "0", "4", "4.5", "5", "7.5", "9", "10")
shinyServer(function(input, output) {
  # chemical_loader()
  # Reactive: parses the user's chemical input and returns a list with at
  # least an 'id' element (see the header notes at the top of this file).
  # A non-empty textarea is parsed first; an uploaded file, when present,
  # overrides the textarea result. Returns NULL when neither is supplied.
  chemical_loader <- reactive({
    result <- NULL
    path <- NULL
    # input file
    inFile <- input$file1
    # input textarea
    textdata <- input$cmpds
    # filen is only defined when a file was uploaded; the guarded call
    # below only uses it in that case.
    if (! is.null(inFile)) { path <- inFile$datapath; filen <- inFile$name }
    if (textdata != '' ) result <- load_text_2_df(textdata)
    if (! is.null(path)) result <- load_data_matrix(path, filen) # as long as path or file has something it will override
    return(result)
  })
  # matrix_subsetter()
  # Reactive: builds the per-request data matrices. Starting from the
  # global activity matrices (or, for a data-matrix upload, the uploaded
  # matrices themselves), it subsets rows to the user's chemicals, renames
  # rows/columns, and subsets columns to the selected assays. Returns a
  # list of matrices including a 'struct' fingerprint matrix.
  matrix_subsetter <- reactive({
    partial <- NULL
    reg_sel <- input$reg_sel # select the assays
    inv_sel <- input$inv_sel # inverse the selection
    nolowQC <- input$nolowQC # remove low QC
    rename_assay <- FALSE # use the assay_names df
    # get all chemical information
    id_info <- chemical_loader()
    chem_id_df <- get_lookup_list(id_info[['id']], master)
    #ip <- subset(chem_id_df, ! is.na(StructureID), select=c(CAS, Cluster))
    # the basic identifiers, GSID + Cluster
    ip <- subset(chem_id_df, GSID != '' & CAS != '', select=c(GSID, Cluster))
    # collect all the matrices and store in full (list)
    full <- list()
    full <- activities$cas_qc
    if(! nolowQC) full <- activities$cas
    # if it is a data matrix input, only CAS ID is allowed
    input_chemical_name <- NULL
    if (length(id_info) > 1) # for loading the data matrix function
    {
      # Replace the global matrices with the uploaded ones and remap the
      # uploaded CAS row names to GSIDs, dropping rows with no match.
      full <- id_info[! grepl('id', names(id_info))]
      chemical_name_ref <- conversion(master, inp='CAS', out='GSID')
      #rownames(full[[1]]) <- chemical_name_ref[as.character(rownames(full[[1]]))]
      avail_name <- chemical_name_ref[as.character(rownames(full[[1]]))]
      full[[1]] <- full[[1]][! is.na(avail_name), ]
      rownames(full[[1]]) <- avail_name[!is.na(avail_name)]
      if (! is.null(id_info[['id']]$input_Chemical.Name)) {
        input_chemical_name <- conversion(join(id_info[['id']], master), inp='GSID', out='input_Chemical.Name')
      }
      rename_assay <- FALSE
    }
    # the structure fingerprint matrix
    full[['struct']] <- struct_mat
    # subset the matrices by chemicals
    partial <- get_input_chemical_mat(ip, full)
    # rename the assays & chemicals
    partial <- rename_mat_col_row(partial, master, assay_names, input_chemical_name, rename_chemical=TRUE, rename_assay=rename_assay)
    # subset the matrices by assay names
    partial <- get_assay_mat(partial, reg_sel, invSel=inv_sel)
    #print(partial[['npod']])
    # sort the matrix
    #partial <- sort_matrix(partial)
    return(partial)
  })
  # activity_filter()
  # Reactive: applies the user's activity-filter settings to the subset
  # matrices from matrix_subsetter(). A data-matrix upload (list of
  # length 2) is passed through unchanged. Each filter_activity_by_type()
  # call masks the three activity matrices (npod/nec50/nwauc.logit)
  # according to one criterion.
  activity_filter <- reactive({
    # load all the activity filter parameters
    profile_type <- input$proftype
    activity_type <- input$acttype
    nwauc_thres <- input$nwauc_thres
    ncmax_thres <- input$ncmax_thres
    # Potency thresholds are entered in uM and converted to -log10(M).
    npod_thres <- ifelse(is.na(input$npod_thres), 3, log10(input$npod_thres/1000000)*-1)
    nec50_thres <- ifelse(is.na(input$nec50_thres), 3, log10(input$nec50_thres/1000000)*-1)
    #pod_diff_thres <- input$pod_diff_thres
    wauc_fold_thres <- input$wauc_fold_thres
    #isstrong <- input$isstrong
    nocyto <- input$nocyto
    isgoodcc2 <- input$isgoodcc2
    nohighcv <- input$nohighcv
    cytofilter <- input$cytofilter
    noauto <- input$noauto
    noch2issue <- input$noch2issue
    partial <- matrix_subsetter()
    # if it is data matrix input, don't change
    if (length(partial) == 2) return(partial)
    act_mat_names <- c('npod', 'nec50', 'nwauc.logit')
    # reverse direction of mitotox could be meaningful
    #partial <- fix_mitotox_reverse(partial,act_mat_names=act_mat_names )
    # filtering
    partial <- filter_activity_by_type(partial, 'nwauc.logit', nwauc_thres, act_mat_names=act_mat_names)
    partial <- filter_activity_by_type(partial, 'ncmax', ncmax_thres,act_mat_names=act_mat_names)
    partial <- filter_activity_by_type(partial, 'npod', npod_thres,act_mat_names=act_mat_names)
    partial <- filter_activity_by_type(partial, 'nec50', nec50_thres,act_mat_names=act_mat_names)
    #partial <- filter_activity_by_type(partial, 'pod_med_diff', pod_diff_thres,act_mat_names=act_mat_names)
    partial <- filter_activity_by_type(partial, 'label_cyto', thres=NULL, decision=cytofilter,act_mat_names=act_mat_names)
    partial <- filter_activity_by_type(partial, 'wauc_fold_change', wauc_fold_thres,act_mat_names=act_mat_names)
    #partial <- filter_activity_by_type(partial, 'hitcall', thres=NULL, decision=isstrong,act_mat_names=act_mat_names)
    partial <- filter_activity_by_type(partial, 'wauc_fold_change', thres=1, decision=nocyto,act_mat_names=act_mat_names)
    partial <- filter_activity_by_type(partial, 'cc2', thres=NULL, decision=isgoodcc2,act_mat_names=act_mat_names)
    partial <- filter_activity_by_type(partial, 'label_autof', thres=NULL, decision=noauto,act_mat_names=act_mat_names)
    partial <- filter_activity_by_type(partial, 'label_ch2', thres=NULL, decision=noch2issue,act_mat_names=act_mat_names)
    # the CV filter has to be applied last
    partial <- filter_activity_by_type(partial, 'cv.wauc', thres=NULL, decision=nohighcv,act_mat_names=act_mat_names)
    #print(partial[['npod']])
    return(partial)
  })
  # matrix_editor()
  # Reactive: post-processes the filtered matrices for display — adds a
  # CV mark matrix, recodes negative/NA activities to 0.0001, optionally
  # clears inconclusive labels, and returns only the matrices the heatmap
  # needs. Data-matrix uploads (length 2) pass through unchanged.
  matrix_editor <- reactive({
    noincon_label <- input$noinconlab #inconclusive label
    act_mat_names <- c('npod', 'nec50', 'nwauc.logit')
    partial <- activity_filter()
    #print(partial[['npod']])
    # if it is data matrix input, skip
    if (length(partial) == 2) return(partial)
    # create CV marks
    cv_mark <- get_cv_mark_mat(partial[['cv.wauc']], partial[['nwauc.logit']])
    partial[['cv_mark']] <- cv_mark
    # make activities matrix (< 0 and NA) as 0.0001
    partial <- assign_reverse_na_number(partial, act_mat_names=act_mat_names)
    #print(partial[['npod']])
    # remove inconclusive label (0.0001 as 0 ) (but keep the untested ones = 0.0001)
    if (noincon_label) partial <- remove_inconclusive_label(partial, act_mat_names=act_mat_names)
    acts <- partial[c( act_mat_names, 'wauc.logit', 'struct', 'cv_mark', 'label')]
    #print(partial[['npod']])
    return(acts)
  })
  #heatmap_para_generator()
  # Reactive: assembles everything pheatmap needs — distance objects for
  # rows (assays) and columns (chemicals), annotation data and colors,
  # plus the activity, structure, CV-mark and label matrices. Chemical
  # ordering follows the selected sort method (structure clustering by
  # default, activity clustering, or toxScore ranking).
  heatmap_para_generator <- reactive({
    sort_meth <- input$sort_method
    profile_type <- input$proftype
    activity_type <- ''
    # get all chemical information
    input_chemical_name <- NULL
    chem_id_df <- get_lookup_list(chemical_loader()[['id']], master)
    if (! is.null(chem_id_df$input_Chemical.Name)) {
      input_chemical_name <- conversion(chem_id_df, inp='Chemical.Name', out='input_Chemical.Name')
    }
    # the basic identifiers, GSID + Cluster
    # can add the Chemical.Name here
    ip <- subset(chem_id_df, GSID != '' & CAS != '', select=c(GSID, Cluster,Chemical.Name))
    # the cleaned matrices
    dt <- matrix_editor()
    if (is.null(dt)) return(NULL)
    # if the input is data matrix, create a blank CV matrix
    if (length(dt) == 2 )
    {
      # Uploaded data matrix: use its single activity matrix and empty
      # CV/label placeholder matrices of the same shape.
      activity_type <- names(dt)[1]
      act <- dt[[1]]
      cv <- matrix("", nrow(act), ncol(act), dimnames=dimnames(act))
      label <- matrix("", nrow(act), ncol(act), dimnames=dimnames(act))
    } else
    {
      # it has to be here to add more lines for the duplicates
      dt <- duplicate_chemical_row(dt, ip)
      if (profile_type == 'activity')
      {
        activity_type <- input$acttype
        act <- dt[[activity_type]]
      } else
      {
        act <- dt[['wauc.logit']]
      }
      cv <- dt[['cv_mark']]
      label <- dt[['label']]
    }
    # struct matrix
    struct <- dt[['struct']]
    # first, cluster the chemicals
    #print(str_c("line271", rownames(struct)))
    dcols <- dist(struct, method = "binary") ## chemicals
    # very, very cumbersome functions. better to split, merge dt + activity_type
    annotation <- get_heatmap_annotation(dcols, ip, master, input_chemical_name=input_chemical_name, dmat=dt, actType=activity_type) #data.frame output
    annt_colors <- get_heatmap_annotation_color(annotation, actType=activity_type)
    # cluster compounds by various methods
    if (sort_meth == 'actclust')
    {
      dcols <- dist(act, method = "euclidean") ## chemicals by assays
    } else if (sort_meth == 'toxscore' )
    {
      # Fixed ordering by toxScore: rows of all display matrices are
      # reordered together so they stay aligned.
      tox_order <- rownames(annotation)[order(annotation$toxScore)]
      act <- act[tox_order, ]
      cv <- cv[tox_order, ]
      label <- label[tox_order, ]
    }
    # cluster assays by similarity
    drows <- dist(t(act) , method = "euclidean") ## assays
    return(list(dcols=dcols, drows=drows, annotation=annotation, annt_colors=annt_colors, act=act, struct=struct, cv=cv, label=label))
  })
# Reactive: per-assay enrichment of the loaded chemical set.  Applies the
# same activity-filter chain to the full chemical library ("full") that
# activity_filter() applied to the user's chemicals ("partial"), then
# compares the two matrices via get_clust_assay_enrichment().
# Returns NULL unless the 'activity' profile is selected and the input is
# a chemical ID list (a raw data-matrix upload yields length-2 `partial`).
chemical_enricher <- reactive({
paras <- heatmap_para_generator()
if (is.null(paras)) return(NULL)
# chemical information
chem_id_df <- get_lookup_list(chemical_loader()[['id']], master)
ip <- subset(chem_id_df, GSID != '' & CAS != '', select=c(GSID, Cluster,Chemical.Name))
# parameters
reg_sel <- input$reg_sel # select the assays
inv_sel <- input$inv_sel # inverse the selection
nolowQC <- input$nolowQC # remove the low QC
rename_assay <- FALSE # use the assay_names df
profile_type <- input$proftype
activity_type <- input$acttype
act_mat_names <- activity_type
if (profile_type != 'activity') return(NULL)
# get the partial matrix
partial <- activity_filter()
# if it is data matrix input, skip
if (length(partial) == 2) return(NULL)
#filtered activies < 0, active >0, inactive =0 or inconclusive in the beginning, NA non tested
partial[[act_mat_names]][ (is.na(partial[[act_mat_names]]) | partial[[act_mat_names]] == 0.0001) & ! is.na(partial[['cc2']]) ] <- 0
# add duplicate rows due to duplicate cluster information
partial <- duplicate_chemical_row(partial, ip)
#print(str_c("line324", rownames(partial[[act_mat_names]])))
# load all the activity filter parameters
nwauc_thres <- input$nwauc_thres
ncmax_thres <- input$ncmax_thres
# potency thresholds arrive in uM; convert to -log10(M), defaulting to 3 when blank
npod_thres <- ifelse(is.na(input$npod_thres), 3, log10(input$npod_thres/1000000)*-1)
nec50_thres <- ifelse(is.na(input$nec50_thres), 3, log10(input$nec50_thres/1000000)*-1)
#pod_diff_thres <- input$pod_diff_thres
#isstrong <- input$isstrong
nocyto <- input$nocyto
isgoodcc2 <- input$isgoodcc2
nohighcv <- input$nohighcv
cytofilter <- input$cytofilter
wauc_fold_thres <- input$wauc_fold_thres
noauto <- input$noauto
noch2issue <- input$noch2issue
# full library matrices; QC-filtered set unless the user opted out
full <- activities$cas_qc
if (! nolowQC) full <- activities$cas
# subset the matrices by assay names
# rename the assays & chemicals
full <- rename_mat_col_row(full, master, assay_names, input_chemical_name=NULL, rename_chemical=FALSE, rename_assay=rename_assay)
# subset the matrices by assay names
full <- get_assay_mat(full, reg_sel, invSel=inv_sel)
# filtering (mirrors the filters applied to the user's set so the
# enrichment comparison is like-for-like)
full <- filter_activity_by_type(full, 'nwauc.logit', nwauc_thres, act_mat_names=act_mat_names)
full <- filter_activity_by_type(full, 'ncmax', ncmax_thres,act_mat_names=act_mat_names)
full <- filter_activity_by_type(full, 'npod', npod_thres,act_mat_names=act_mat_names)
full <- filter_activity_by_type(full, 'nec50', nec50_thres,act_mat_names=act_mat_names)
#full <- filter_activity_by_type(full, 'pod_med_diff', pod_diff_thres,act_mat_names=act_mat_names)
full <- filter_activity_by_type(full, 'label_cyto', thres=NULL, decision=cytofilter,act_mat_names=act_mat_names)
full <- filter_activity_by_type(full, 'wauc_fold_change', wauc_fold_thres, act_mat_names=act_mat_names)
#full <- filter_activity_by_type(full, 'hitcall', thres=NULL, decision=isstrong,act_mat_names=act_mat_names)
full <- filter_activity_by_type(full, 'wauc_fold_change', thres=1, decision=nocyto,act_mat_names=act_mat_names)
full <- filter_activity_by_type(full, 'cc2', thres=NULL, decision=isgoodcc2,act_mat_names=act_mat_names)
full <- filter_activity_by_type(full, 'label_autof', thres=NULL, decision=noauto,act_mat_names=act_mat_names)
full <- filter_activity_by_type(full, 'label_ch2', thres=NULL, decision=noch2issue,act_mat_names=act_mat_names)
# it has to be the end
full <- filter_activity_by_type(full, 'cv.wauc', thres=NULL, decision=nohighcv,act_mat_names=act_mat_names)
#filtered activies < 0, active >0, inactive =0 or inconclusive in the beginning, NA non tested
full[[act_mat_names]][ (is.na(full[[act_mat_names]]) | full[[act_mat_names]] == 0.0001) & ! is.na(full[['cc2']]) ] <- 0
#print(paras[['annotation']])
#print(rownames(paras[['annotation']]))
result <- get_clust_assay_enrichment(partial[[act_mat_names]], full[[act_mat_names]], paras[['annotation']], calZscore=FALSE)
return(result)
})
# Reactive: build the main profile heatmap, or a dendrogram of the chemical
# clustering, for the current chemical set and display options.
# Returns the plot object, or NULL when nothing can be drawn yet.
select_plot <- reactive({
  showDendrogram <- input$showdendro
  keepsize <- input$keepsize
  profile_type <- input$proftype
  sort_meth <- input$sort_method
  fsize <- input$fontsize
  # default palette: wAUC scale
  color <- wauc_colors
  breaks <- wauc_breaks
  leg_labels <- wauc_leg_labels
  leg_breaks <- wauc_leg_breaks
  if (profile_type == 'activity')
  {
    activity_type <- input$acttype
    if (activity_type != 'nwauc.logit')
    {
      # potency-type readouts use the potency color scale instead
      color <- potency_colors
      breaks <- potency_breaks
      leg_labels <- potency_leg_labels
      leg_breaks <- potency_leg_breaks
    }
  }
  # fix: p was only assigned inside the branches below, so return(p) raised
  # "object 'p' not found" when no chemicals were loaded, or when the
  # dendrogram was requested together with sort_method == 'toxscore'
  p <- NULL
  if (! is.null(chemical_loader()) )
  {
    # note pheatmap input has to have the same order!!!
    paras <- heatmap_para_generator()
    act <- paras[['act']]
    cv <- paras[['cv']]
    dcols <- paras[['dcols']]
    drows <- paras[['drows']]
    annotation <- paras[['annotation']]
    annt_colors <- paras[['annt_colors']]
    if (! showDendrogram)
    {
      if (profile_type == 'signal')
      {
        p <- pheatmap(t(act), fontsize=fsize,annotation=annotation,annotation_colors=annt_colors,legend_labels=leg_labels,legend_breaks=leg_breaks, breaks=breaks, color=color, clustering_distance_rows = drows, clustering_distance_cols = dcols, clustering_method = "average")
      } else if (sort_meth != 'toxscore')
      {
        # activity profiles overlay the CV flags as cell labels
        p <- pheatmap(t(act), fontsize=fsize,annotation=annotation,annotation_colors=annt_colors,legend_labels=leg_labels,legend_breaks=leg_breaks,breaks=breaks, color=color, display_numbers=t(cv), clustering_distance_rows = drows, clustering_distance_cols = dcols, clustering_method = "average")
      } else
      {
        # toxscore ordering: keep the precomputed column order (no clustering)
        p <- pheatmap(t(act), fontsize=fsize,annotation=annotation,annotation_colors=annt_colors,legend_labels=leg_labels,legend_breaks=leg_breaks, breaks=breaks, color=color, display_numbers=t(cv), clustering_distance_rows = drows, cluster_cols = FALSE, clustering_method = "average")
      }
    } else if (sort_meth != 'toxscore' )
    {
      p <- plot(hclust(dcols, method="average"), hang=-1)
    }
  }
  return(p)
})
# Reactive: long-format source data keyed by Tox21 agency IDs for the
# chemicals currently shown in the heatmap.  Only available for the
# built-in chemical set; returns NULL when the user uploaded an ID file.
tox21id_data_generator <- reactive({
  paras <- heatmap_para_generator()
  actf <- paras[['act']]
  # an uploaded ID file makes chemical_loader() return more than one element
  isUpload <- length(chemical_loader()) > 1
  # fix: the original also built an id_data table here that was never used
  if (isUpload) return(NULL)
  get_source_data_long(source_acts = activities$tox21agencyid,
                       chem_id_master = master,
                       filtered_act = actf)
})
# Reactive: build the downloadable per-CAS data frame from the current
# heatmap state.  Uses the uploaded ID table when one is present,
# otherwise the built-in master table.
cas_data_generator <- reactive({
  paras <- heatmap_para_generator() #heatmap_para_generator
  id_info <- chemical_loader()
  uploaded <- length(id_info) > 1
  id_table <- if (uploaded) id_info[['id']] else master
  get_output_df(paras, id_table, isUpload = uploaded,
                actwithflag = input$actwithflag)
})
# Show the ID lookup table once chemicals have been loaded.
output$contents <- renderDataTable({
  loaded <- chemical_loader()
  if (is.null(loaded)) return(NULL)
  get_lookup_list(loaded[['id']], master)
})
# Render the per-CAS output table built by cas_data_generator().
output$casdata <- renderDataTable({
  cas_data_generator()
})
# Render the long-format Tox21 agency ID data table.
output$tox21iddata <- renderDataTable({
  tox21id_data_generator()
})
# Render the per-assay enrichment table for the loaded chemical set.
output$enrich <- renderDataTable({
  chemical_enricher()
})
# Assay metadata table: show annotation columns for the assays present in
# the currently displayed matrices, dropping verbose bookkeeping columns
# and grouping the remainder into a readable order.
output$assay_info <- renderDataTable({
#col_n <- c('common_name','technology','cell_type','species','abbreviation', 'PubChem AID')
#result <- assay_names[, colnames(assay_names) %in% col_n]
partial <- matrix_subsetter()
# columns of assay_names that are too verbose/internal for display
not_want <- c('_for_FDA_A_name', '_target_type_gene_go.biological.process',
'_target_type_gene_ctd.disease', '_technology_long.description',
'_technology_short.description','protocol_call_db.name_parent',
'protocol_call_db.name_readout_primary','protocol_CEBS.batch',
'protocol_call_db.name_readout_secondary',
'protocol_db.name','protocol_time_release',
'protocol_slp','protocol_description')
result <- assay_names[, ! colnames(assay_names) %in% not_want]
result <- result %>%
filter(protocol_call_db.name != '') %>% #the ones with call definition
# keep only the assays present in the current (subsetted) npod matrix
filter(protocol_call_db.name %in% colnames(partial[['npod']])) %>%
#select(noquote(order(colnames(.)))) #reorder the columns alphabetically
select(protocol_call_db.name, protocol_call_db.name_display.name,
starts_with("target"), starts_with("technology"), starts_with("format"),
starts_with("provider"), starts_with("protocol"))
return(result)
})
# Reactive: pixel width for the profile plots.  Fixed 1200 px for small
# chemical sets (fewer than 40 compounds) or when the user pinned the plot
# size; otherwise 30 px per compound.
getVarWidth <- reactive({
  ncmpd <- 0
  keepsize <- input$keepsize
  # fix: scalar if() condition should use && (short-circuit), not the
  # elementwise & the original used
  if (!is.null(chemical_loader()) && !keepsize)
  {
    chem_id_df <- get_lookup_list(chemical_loader()[['id']], master)
    ip <- subset(chem_id_df, GSID != '' & CAS != '', select = c(GSID, Cluster))
    ncmpd <- nrow(ip)
  }
  if (ncmpd < 40)
  {
    return(1200)
  } else
  {
    return(ncmpd * 30)
  }
})
# Main heatmap/dendrogram plot; width tracks the number of compounds.
output$profiling <- renderPlot({
select_plot()
}, width=getVarWidth)
# Boxplot of potency values per assay; only drawn when the activity
# profile is a potency readout (npod or nec50).
output$box <- renderPlot({
profile_type <- input$proftype
fsize <- input$fontsize
sort_meth <- input$sort_method
p <- NULL
if (profile_type == 'activity')
{
activity_type <- input$acttype
if (activity_type == 'npod' | activity_type == 'nec50')
{
paras <- heatmap_para_generator()
# NOTE(review): act and annotation are extracted but appear unused below;
# only dcols is passed on — confirm before removing
act <- paras[['act']]
annotation <- paras[['annotation']]
dcols <- paras[['dcols']]
id_info <- chemical_loader()
id_data <- master
isUpload <- FALSE
if(length(id_info) > 1) {
id_data <- id_info[['id']]
isUpload <- TRUE
}
result <- get_output_df(paras, id_data, isUpload,actwithflag=FALSE)
result <- select(result, -Chemical.Name_original) # remove the new added column after get_output_df
p <- get_pod_boxplot(result, fontsize=fsize, sortby=sort_meth, dcols=dcols, global_para=assay_names)
}
}
if (! is.null(p)) print(p)
}, width=getVarWidth)
# Download handler for the per-CAS table as a tab-separated text file.
output$downloadCASData <- downloadHandler(
  filename = function() {
    # file name encodes the profile type plus the signal/activity subtype
    subtype <- if (input$proftype == 'signal') input$sigtype else input$acttype
    paste(input$proftype, '_', subtype, '.txt', sep='')
  },
  content = function(file) {
    result <- cas_data_generator()
    #result <- get_published_data_only_commonname(result, assay_names) # to remove unpublished data
    write.table(result, file, row.names = FALSE, col.names = TRUE,
                sep="\t", quote=FALSE, append=FALSE)
  }
)
# Download handler for the Tox21 agency ID data; the file name is a
# numeric timestamp to keep successive downloads distinct.
output$downloadTox21IDData <- downloadHandler(
  filename = function() {
    paste(as.numeric(as.POSIXct(Sys.time())), ".txt", sep="")
  },
  content = function(file) {
    write.table(tox21id_data_generator(), file, row.names = FALSE,
                col.names = TRUE, sep="\t", quote=FALSE, append=FALSE)
  }
)
# Download handler for the enrichment table, e.g.
# "activity_nwauc.logit_enrichment.txt".
output$downloadEnrich <- downloadHandler(
  filename = function() {
    paste(input$proftype, '_', input$acttype, '_enrichment.txt', sep='')
  },
  content = function(file) {
    write.table(chemical_enricher(), file, row.names = FALSE,
                col.names = TRUE, sep="\t", quote=FALSE, append=FALSE)
  }
)
# Download handler that writes the current heatmap/dendrogram to a PDF.
output$downloadPlot <- downloadHandler(
  filename = function() {
    # fix: this previously tested input$proftype == 'profile', a value the
    # other handlers never use (they branch on 'signal'), so signal plots
    # were misnamed with the activity type; branch on 'signal' for
    # consistency with downloadCASData
    if (input$proftype == 'signal')
    {
      paste(input$proftype, '_', input$sigtype, '.pdf', sep='')
    } else
    {
      paste(input$proftype, '_', input$acttype, '.pdf', sep='')
    }
  },
  content = function(file) {
    #png(file, width=9, height=6.5, units="in", res=600)
    pdf(file, width=9, height=6.5)
    select_plot2()
    dev.off()
  }
)
# Plain-function twin of select_plot(), used by the PDF download handler so
# the plot is drawn on the active pdf() device rather than cached by Shiny.
# NOTE(review): this duplicates select_plot() almost line-for-line; consider
# sharing one implementation.
select_plot2 <- function () {
  showDendrogram <- input$showdendro
  keepsize <- input$keepsize
  profile_type <- input$proftype
  sort_meth <- input$sort_method
  fsize <- input$fontsize
  # default palette: wAUC scale
  color <- wauc_colors
  breaks <- wauc_breaks
  leg_labels <- wauc_leg_labels
  leg_breaks <- wauc_leg_breaks
  if (profile_type == 'activity')
  {
    activity_type <- input$acttype
    if (activity_type != 'nwauc.logit')
    {
      # potency-type readouts use the potency color scale instead
      color <- potency_colors
      breaks <- potency_breaks
      leg_labels <- potency_leg_labels
      leg_breaks <- potency_leg_breaks
    }
  }
  # fix: p was only assigned inside the branches below, so return(p) raised
  # "object 'p' not found" when no chemicals were loaded, or when the
  # dendrogram was requested together with sort_method == 'toxscore'
  p <- NULL
  if (! is.null(chemical_loader()) )
  {
    # note pheatmap input has to have the same order!!!
    paras <- heatmap_para_generator()
    act <- paras[['act']]
    cv <- paras[['cv']]
    dcols <- paras[['dcols']]
    drows <- paras[['drows']]
    annotation <- paras[['annotation']]
    annt_colors <- paras[['annt_colors']]
    if (! showDendrogram)
    {
      if (profile_type == 'signal')
      {
        p <- pheatmap(t(act), fontsize=fsize,annotation=annotation,annotation_colors=annt_colors,legend_labels=leg_labels,legend_breaks=leg_breaks, breaks=breaks, color=color, clustering_distance_rows = drows, clustering_distance_cols = dcols, clustering_method = "average")
      } else if (sort_meth != 'toxscore')
      {
        # activity profiles overlay the CV flags as cell labels
        p <- pheatmap(t(act), fontsize=fsize,annotation=annotation,annotation_colors=annt_colors,legend_labels=leg_labels,legend_breaks=leg_breaks,breaks=breaks, color=color, display_numbers=t(cv), clustering_distance_rows = drows, clustering_distance_cols = dcols, clustering_method = "average")
      } else
      {
        # toxscore ordering: keep the precomputed column order (no clustering)
        p <- pheatmap(t(act), fontsize=fsize,annotation=annotation,annotation_colors=annt_colors,legend_labels=leg_labels,legend_breaks=leg_breaks, breaks=breaks, color=color, display_numbers=t(cv), clustering_distance_rows = drows, cluster_cols = FALSE, clustering_method = "average")
      }
    } else if (sort_meth != 'toxscore' )
    {
      p <- plot(hclust(dcols, method="average"), hang=-1)
    }
  }
  return(p)
}
})
|
973820fe6b92602fcf0a22362c80ae0e0d558279
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/apsrtable/examples/apsrtable.Rd.R
|
ad17551df1b032fb5984918cc341d0fe21cf20ab
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,254
|
r
|
apsrtable.Rd.R
|
# Extracted example script for apsrtable::apsrtable(): builds three small
# regression models (from the lm() help page) and renders them as an
# APSR-style LaTeX table.
library(apsrtable)
### Name: apsrtable
### Title: APSR-style latex tables with multiple models
### Aliases: apsrtable
### ** Examples
## Use the example from lm() to show both models:
## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
## Page 9: Plant Weight Data.
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2,10,20, labels=c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
glm.D9 <- glm(weight~group)
lm.D90 <- lm(weight ~ group - 1) # omitting intercept
# Render all three models side by side with significance stars
apsrtable(lm.D90, lm.D9, glm.D9, digits=1, align="center",
stars="default", model.counter=0, order="rl")
## Not run:
##D apsrtable(lm.D90, lm.D9, glm.D9, digits=1, align="l",
##D stars=1, model.counter=0, order="rl",
##D coef.rows=1, col.hspace="3em", float="sidewaystable")
##D
##D ## Omit rows by regular expressions
##D apsrtable(lm.D9, omitcoef=expression(grep("\\(",coefnames)))
##D apsrtable(lm.D90,lm.D9,
##D omitcoef=list("groupCtl",
##D expression(grep("\\(",coefnames,value=TRUE))
##D )
##D )
## End(Not run)
|
b525a8c15290075a4fa0dac66de44fb252bf2d17
|
184940aa0323a4f2a84fbd49e919aedb7e1fcaea
|
/Complete R/MMM.R
|
373a0c8054984e76dc5724b085f1fdb85b2972f0
|
[] |
no_license
|
Dipzmaster/Complete_R
|
7e700b1ae8f21dd07538d8f8e0ace2c374298b82
|
face68fdac71be6f2bf4f744884c401cebbadffd
|
refs/heads/main
| 2023-08-23T02:08:24.794579
| 2021-11-03T18:36:13
| 2021-11-03T18:36:13
| 415,090,983
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,123
|
r
|
MMM.R
|
# Tutorial script: computing mean and median in base R, including trimmed
# means and NA handling.  The "#[1] ..." comments record expected output.
# Create a vector.
x <- c(12,7,3,4.2,18,2,54,-21,8,-5)
# Find Mean.
result.mean <- mean(x)
print(result.mean)
#[1] 8.22
# Create a vector.
x <- c(12,7,3,4.2,18,2,54,-21,8,-5)
# Find Mean.
# trim = 0.3 drops 30% of the sorted values from each end before averaging.
result.mean <- mean(x,trim = 0.3)
print(result.mean)
# Create a vector.
x <- c(12,7,3,4.2,18,2,54,-21,8,-5,NA)
# Find mean.
# mean() propagates NA by default, so this prints NA.
result.mean <- mean(x)
print(result.mean)
# Find mean dropping NA values.
result.mean <- mean(x,na.rm = TRUE)
print(result.mean)
#[1] NA
#[1] 8.22
# Create the vector.
x <- c(12,7,3,4.2,18,2,54,-21,8,-5)
# Find the median.
median.result <- median(x)
print(median.result)
#[1] 5.6
# Return the most frequent value in `v` (the statistical mode).
# Ties are broken in favour of the value that appears first in `v`;
# works for numeric and character vectors alike.
getmode <- function(v) {
  distinct_vals <- unique(v)                 # first-appearance order
  freq <- tabulate(match(v, distinct_vals))  # occurrence count per value
  distinct_vals[which.max(freq)]             # which.max keeps the first tie
}
# Demonstrate getmode() on numeric and character vectors; the "#[1] ..."
# comments record expected output.
# Create the vector with numbers.
v <- c(2,1,2,3,1,2,3,4,1,5,5,3,2,3)
# Calculate the mode using the user function.
result <- getmode(v)
print(result)
# Create the vector with characters.
charv <- c("o","it","the","it","it")
# Calculate the mode using the user function.
result <- getmode(charv)
print(result)
#[1] 2
#[1] "it"
|
6ac9e83db261574a037295c79a1b48e19cac5c9a
|
abc5e45525d18734b7dd5cf5280c643a054365b8
|
/tests/testthat/test-ultimate.R
|
a50786e3389ba19e3d917889a2685108a935203f
|
[
"MIT"
] |
permissive
|
egnha/valaddin
|
ea03c4dca322a555364c47b9d97c54d050619d71
|
5579d98e8ac13518d991052f3e0cee38a5993b83
|
refs/heads/master
| 2021-01-11T05:23:28.645002
| 2017-10-03T08:27:18
| 2017-10-03T08:27:18
| 79,849,353
| 38
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,685
|
r
|
test-ultimate.R
|
# testthat suite for valaddin's check syntax.  Helpers errmsg_false(),
# only(), both() and expect_error_perl() come from the package's test
# helper files (not visible here).
context("Ultimate validation syntax")
# Dummy function to wrap with firmly(); its return value is irrelevant.
f <- function(x, y) NULL
# Expected error-message patterns for x-only, y-only and both-fail cases.
false_x <- only(errmsg_false("isTRUE(x)"), not = errmsg_false("isTRUE(y)"))
false_y <- only(errmsg_false("isTRUE(y)"), not = errmsg_false("isTRUE(x)"))
false_xy <- both(errmsg_false("isTRUE(x)"), errmsg_false("isTRUE(y)"))
# A bare predicate applies the check to every argument; the failure
# message names each failing argument expression.
test_that("global check is implemented by bare predicate", {
foo <- firmly(f, isTRUE)
expect_error(foo(TRUE, TRUE), NA)
expect_error_perl(foo(FALSE, TRUE), false_x)
expect_error_perl(foo(TRUE, FALSE), false_y)
expect_error_perl(foo(FALSE, FALSE), false_xy)
# A namespace-qualified predicate keeps its qualified name in the message.
bar <- firmly(f, base::isTRUE)
expect_error(bar(TRUE, TRUE), NA)
expect_error_perl(
bar(FALSE, TRUE),
only(errmsg_false("base::isTRUE(x)"), not = errmsg_false("base::isTRUE(y)"))
)
expect_error_perl(
bar(TRUE, FALSE),
only(errmsg_false("base::isTRUE(y)"), not = errmsg_false("base::isTRUE(x)"))
)
expect_error_perl(
bar(FALSE, FALSE),
both(errmsg_false("base::isTRUE(x)"), errmsg_false("base::isTRUE(y)"))
)
})
# Anonymous-function predicates (brace shorthand or function(.)) are
# deparsed verbatim into the failure message.
test_that("global check is implemented by anonymous function", {
foo <- firmly(f, {isTRUE(.)})
expect_error(foo(TRUE, TRUE), NA)
expect_error_perl(
foo(FALSE, TRUE),
only(errmsg_false("(function (.) {isTRUE(.)})(x)"), not = "isTRUE(y)")
)
expect_error_perl(
foo(TRUE, FALSE),
only(errmsg_false("(function (.) {isTRUE(.)})(y)"), not = "isTRUE(x)")
)
expect_error_perl(
foo(FALSE, FALSE),
both(
errmsg_false("(function (.) {isTRUE(.)})(x)"),
errmsg_false("(function (.) {isTRUE(.)})(y)")
)
)
bar <- firmly(f, function(.) isTRUE(.))
expect_error(bar(TRUE, TRUE), NA)
expect_error_perl(
bar(FALSE, TRUE),
only(errmsg_false("(function(.) isTRUE(.))(x)"), not = "isTRUE(y)")
)
expect_error_perl(
bar(TRUE, FALSE),
only(errmsg_false("(function(.) isTRUE(.))(y)"), not = "isTRUE(x)")
)
expect_error_perl(
bar(FALSE, FALSE),
both(
errmsg_false("(function(.) isTRUE(.))(x)"),
errmsg_false("(function(.) isTRUE(.))(y)")
)
)
})
# An empty predicate call (isTRUE()) behaves like a bare predicate:
# it checks every argument.
test_that("global check is implemented by empty predicate call", {
foo <- firmly(f, isTRUE())
expect_error(foo(TRUE, TRUE), NA)
expect_error_perl(foo(FALSE, TRUE), false_x)
expect_error_perl(foo(TRUE, FALSE), false_y)
expect_error_perl(foo(FALSE, FALSE), false_xy)
})
# Naming specific arguments in the predicate call restricts the check to
# those arguments only.
test_that("local checks are implemented as predicate arguments", {
foo <- firmly(f, isTRUE(x))
expect_error(foo(TRUE), NA)
expect_error_perl(foo(FALSE), false_x)
bar <- firmly(f, isTRUE(x, y))
expect_error(bar(TRUE, TRUE), NA)
expect_error_perl(bar(FALSE, TRUE), false_x)
expect_error_perl(bar(TRUE, FALSE), false_y)
expect_error_perl(bar(FALSE, FALSE), false_xy)
})
# A `:=` name on the whole check replaces the auto-generated failure
# message for any failing argument.
test_that("name of global check is error message", {
msg <- "error message"
foo <- firmly(f, "error message" := isTRUE())
expect_error(foo(TRUE, TRUE), NA)
expect_error(foo(FALSE, TRUE), msg)
expect_error(foo(TRUE, FALSE), msg)
expect_error(foo(FALSE, FALSE), msg)
})
# A `:=` name on an individual argument replaces the message only for
# that argument; other arguments keep the default message.
test_that("name of local check is error message", {
msg <- "error message"
foo <- firmly(f, isTRUE("error message" := x, y))
expect_error(foo(TRUE, TRUE), NA)
expect_error(foo(FALSE, TRUE), msg)
expect_error_perl(foo(TRUE, FALSE),
only(errmsg_false("isTRUE(y)"), not = msg))
expect_error_perl(foo(FALSE, FALSE),
both(msg, errmsg_false("isTRUE(y)")))
bar <- firmly(f, "global" := isTRUE("local" := x, y))
expect_error(bar(TRUE, TRUE), NA)
expect_error_perl(bar(FALSE, TRUE), only("local", not = "global"))
expect_error_perl(bar(TRUE, FALSE), only("global", not = "local"))
expect_error_perl(bar(FALSE, FALSE), both("local", "global"))
})
|
3286c10485e9be05d2323986ba5f61babf9d98d7
|
212e49d0b5df150e4d0681451925689b9e152eba
|
/MESSAR_WEBSERVER/ui.r
|
a336d1c8670ac16fcd8dc2a3d06894312ab71e31
|
[] |
no_license
|
daniellyz/MESSAR
|
7e3fa9a6fbfba378a37ded366bad6db89d6a226b
|
ebbe4d0b849d074d36d447d5488464ac4773b991
|
refs/heads/master
| 2023-04-14T02:19:20.990578
| 2023-03-28T17:06:07
| 2023-03-28T17:06:07
| 153,620,833
| 1
| 0
| null | 2019-07-03T14:57:32
| 2018-10-18T12:33:50
|
R
|
UTF-8
|
R
| false
| false
| 6,217
|
r
|
ui.r
|
# fix: this file contained unresolved git merge-conflict markers
# (<<<<<<< HEAD / ======= / >>>>>>>), which make it unparseable.  The
# conflict is resolved here by keeping the HEAD side throughout (newer
# rule database, two example buttons, F1/L1/M1 scoring options).
# NOTE(review): the discarded branch added download buttons, an FDR plot
# and an FDR-filter checkbox — confirm those features are not wanted.
options(repos = BiocManager::repositories())
library(shiny)
library("V8")
library(shinyjs)
#library(MSnbase)
library(formattable)
library(stringr)
require(DT, quietly = TRUE)
library(prozor)
#library(ChemmineOB)
load("rules_db.RData")
source('helper.r')

# Small helper: an inline-styled text input (label and box on one line).
textInputRow <- function (inputId, label, value = "")
{
  div(style="display:inline-block",
      tags$label(label, `for` = inputId),
      tags$input(id = inputId, type = "text", value = value, class="input-small"))
}

shinyUI(navbarPage("MESSAR 0.1 (MEtabolite SubStructure Auto-Recommender)",
  # Tab A: spectrum input and run parameters
  tabPanel("A) Start a run",
    shinyjs::useShinyjs(),
    shinyjs::extendShinyjs(text = "shinyjs.refresh = function() { location.reload(); }"),
    column(5,
      br(),
      h4("Please paste your MS/MS spectrum into the field below:"),
      textAreaInput("blank_file1", label = '', width=500, height=200),
      br(),
      h4("[Optional] Please paste the mass differences into the field below:"),
      textAreaInput("blank_file2", label = '', width=500, height=150),
      textInput("prec_mz", h4("[Recommended] Precursor mass:"), value = "")),
    column(7,
      br(),
      numericInput("Relative", h4("Relative intensity threshold (base peak %)"),
                   min = 0, max = 99, value = 0.1, width = '500px'),
      br(),
      numericInput("max_peaks", h4("Consider only top n intense peaks (0 for all peaks) "),
                   min = 0, max = 100, value = 50, width = '500px'),
      br(),
      numericInput("ppm_search", h4("Tolerance [ppm] for masses and mass differences"),
                   min = 0, max = 50, value = 20, width = '500px'),
      br(),
      tags$head(
        tags$style(HTML('#exampleButton1{background-color:lightblue}'))
      ),
      tags$head(
        tags$style(HTML('#exampleButton2{background-color:lightblue}'))
      ),
      actionButton("exampleButton1", "Load example: Cinnarizine", style='padding:6px; font-size:120%'),
      br(),
      br(),
      actionButton("exampleButton2", "Load example: Glutathion", style='padding:6px; font-size:120%'),
      br(),
      br(),
      tags$head(
        tags$style(HTML('#goButton{background-color:lightgreen}'))
      ),
      actionButton("goButton", "Submit", style='padding:6px; font-size:150%'),
      br(),
      br(),
      tags$head(
        tags$style(HTML('#killButton{background-color:orange}'))
      ),
      actionButton("killButton", "Clear", style='padding:6px; font-size:150%'),
      br(),
      br(),
      br(),
      em('Messages from the server:'),
      br(),
      br(),
      textOutput("blank_message1")
    )),
  # Tab B: matched masses / mass differences
  tabPanel("B) Annotated features",
    tags$style("#blank_message2 {font-size:20px; color:red; display:block; }"),
    br(),
    div(style="display: inline-block;vertical-align:top; width: 550px;", uiOutput("blank_message2")),
    br(),
    h4("Here is the list of annotated features (masses and mass differences):"),
    br(),
    dataTableOutput("table1")
  ),
  # Tab C: ranked substructure suggestions with structure plot
  tabPanel("C) Substructure suggestions",
    column(5,
      br(),
      selectInput("score_type", label= h4("Please select a scoring method:"),
                  c("Sum of F-scores [Recommended]"="F1",
                    "Sum of Lift [Rare but Interesting]"="L1",
                    "Sum of Mcc [Informative]"="M1"), width = '500px'),
      br(),
      h3("Here is the list of suggested substructures:"),
      br(),
      dataTableOutput("table2")),
    column(5,
      br(),
      br(),
      br(),
      br(),
      plotOutput("plot_selected", width = '750px', height = "900px"),
      br(), offset=1)),
  tabPanel("Help", includeMarkdown("Help.Rmd")),
  tabPanel("About", includeMarkdown("About.Rmd"))
))
|
07244c881786abc2c47ef53e28c475afc3951c25
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/issuestests/QuantTools/R/to_ticks.R
|
3e9891e0f5131758c6806ad1f1dd8f12e61ee108
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,014
|
r
|
to_ticks.R
|
# Copyright (C) 2016 Stanislav Kovalevsky
#
# This file is part of QuantTools.
#
# QuantTools is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# QuantTools is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QuantTools. If not, see <http://www.gnu.org/licenses/>.
#' Convert candles to ticks
#'
#' @param x candles, read 'Candles' in \link{Processor}
#' @name to_ticks
#' @details Convert OHLCV candles to ticks using the following model. One candle is equivalent to four ticks \code{( time, price, volume )}: \code{( time - period, open, volume / 4 ); ( time - period / 2, high, volume / 4 ); ( time - period / 2, low, volume / 4 ); ( time - period / 100, close, volume / 4 )}. Assuming provided candles have frequent period ( less than a minute ) it is a good approximation for tick data which can be used to speed up back testing or if no raw tick data available.
#' @examples
#' \donttest{
#'
#' data( ticks )
#' candles = to_candles( ticks, timeframe = 60 )
#' to_ticks( candles )
#'
#' }
#' @export
# Convert OHLCV candles (a data.table with time/open/high/low/close/volume
# columns) into an approximate tick stream: each candle yields four ticks
# (open, high, low, close), each carrying a quarter of the candle volume.
to_ticks = function( x ){
# infer the candle period from the smallest time step among the first rows
period = x[ 1:min( 100, .N ), min( diff( time )[ -1 ] ) ]
# NULL bindings to silence R CMD check notes about data.table NSE columns
time = open = high = low = volume = NULL
# four synthetic ticks per candle, placed within the candle's interval,
# then sorted chronologically across all candles
ticks = x[, list(
time = c( time - period, time - period / 2, time - period / 2, time - period / 100 ),
price = c( open , high , low , close ),
volume = c( volume / 4 , volume / 4 , volume / 4 , volume / 4 )
) ][ order( time ) ]
# ensure every tick carries at least unit volume
ticks[, volume := pmax( volume, 1 ) ]
# copy the time attributes (e.g. class/timezone) from the input column
attributes( ticks$time ) = attributes( x$time )
return( ticks )
}
|
59069c681994bcd15f4d7d534829dbcb289fc465
|
e2592693961bcf364ca99fc360ae4184955139f8
|
/src/Data_preparation.R
|
2d9ff1b0b9f0c4271a67b537e760c97ba9caef6f
|
[] |
no_license
|
Illustratien/Wang_2023_TAAG
|
b30b5fc3858baf84a4a8c98549f1e73f97856fe6
|
c5efc3b4546d00c5aa2b4264fd527197269e148e
|
refs/heads/main
| 2023-04-07T16:14:03.859018
| 2023-01-23T18:57:37
| 2023-01-23T18:57:37
| 570,268,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,526
|
r
|
Data_preparation.R
|
# Data preparation: drop genotypes with missing trait values, then save the
# cleaned combined data and the matching physiological parameters.
# To run this script, first download Wang_et_al_TAAG_2023_output_trait.rds
# from https://doi.org/10.5281/zenodo.4729637 and place it in the "data"
# sub-folder (Wang_2022_TAAG-main/data).
# clean the environment (kept from the original script's convention)
rm(list = ls())
# fix: the original called library(purrr, dplyr) — library()'s second
# argument is `help`, not another package, so dplyr was never attached and
# the %>% pipe and bare mutate() below would fail.  Attach each explicitly.
library(purrr)
library(dplyr)

src_dir <- dirname(rstudioapi::getSourceEditorContext()$path)
dat_dir <- sub('src', 'data', src_dir)
dir.create(dat_dir, showWarnings = FALSE)

# read the raw data (large file; ~2 minutes)
system.time(df <- readr::read_rds(paste0(dat_dir, '/Wang_et_al_TAAG_2023_output_trait.rds'))) # 124s
# physiological parameters
para <- readr::read_rds(paste0(dat_dir, '/Wang_et_al_TAAG_2023_physiological_parameter.rds'))

# split data to a list by genotype
list.dat <- split(df, df$Genotype)
# for each genotype, flag (Na = 1) if any trait value is missing
na.check.df <- purrr::map_dfr(list.dat, ~{
  data.frame(Na = ifelse(dim(.x)[1] != dim(na.omit(.x))[1], 1, 0),
             Genotype = .x$Genotype[1])
})
# genotypes containing at least one NA
geno.na.id <- dplyr::filter(na.check.df, Na == 1)$Genotype
# exclude genotypes with NA in any trait
new.comb <- dplyr::filter(df, !Genotype %in% geno.na.id) %>%
  # paste the environment columns into one key for further use
  mutate(Environment = paste(sites, sowing, nitrogen, co2, sep = '_'))
new.para <- dplyr::filter(para, !genotype %in% geno.na.id)
# save results
saveRDS(new.comb, paste0(dat_dir, '/nona_combine.rds'), compress = TRUE)
saveRDS(new.para, paste0(dat_dir, '/nona_para.rds'), compress = TRUE)
|
a6038747ee2cad2eb1242060070472240bea1e8e
|
7e52c79f19a82f8a32dd57901f353fa6c23c0f86
|
/R/timesToKeep.R
|
30cebd0b49ec7a6f48000fd139b5767781b29b05
|
[] |
no_license
|
cran/ipcwswitch
|
86f92d584bd28b5b25aacf8919e7fe4ed9c9b936
|
d52676594d91be8f3454a70568524be8e711d367
|
refs/heads/master
| 2021-06-18T23:42:28.023759
| 2021-02-17T07:30:02
| 2021-02-17T07:30:02
| 157,900,036
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,351
|
r
|
timesToKeep.R
|
#' Function to keep all event times
# and times of changes in measurements of time-dpt covariates
#'
#' @param data dataframe containing the following variables
#' @param id patient's id
#' @param tstart date of the beginning of the follow-up (in Date format)
#' @param tstop date of the end of the follow-up (in Date format)
#' @param mes.cov list of vectors, each of them must contain the names (in character format)
#' of the repeated measurements related to one time-dependent covariate
#' @param time.cov list of vectors, each of them must contain the times (in Date format)
#' of the date when the abovementioned measurements were done
#'
#' @return list of two lists, one in Date format the other in numeric format.
#' Each of them contains, for each patient, the event time and
#' the times of changes in time-varying covariates
#' @export
#'
#' @references Graffeo, N., Latouche, A., Le Tourneau C., Chevret, S. (2019) "ipcwswitch: an R package for inverse probability of censoring weighting with an application to switches in clinical trials". Computers in biology and medicine, 111, 103339. doi : "10.1016/j.compbiomed.2019.103339"
#'
#' @examples kept.t <- timesTokeep(toydata, id = "id",
#' tstart = "randt", tstop = "lastdt",
#' mes.cov = list(c("ps1", "ps2", "ps3")),
#' time.cov = list(c("randt", "dt2", "dt3")))
#' # For example, for patient id=3, to obtain the kept times in Date format:
#' kept.t[[1]][[3]]
#' # To obtain the kept times in numeric format:
#' kept.t[[2]][[3]]
#' @seealso \code{\link{SHIdat}}
timesTokeep <- function(data, id, tstart, tstop,
mes.cov, time.cov){
# For each patient, collect the calendar dates at which the follow-up must
# later be split: the start of follow-up, each date at which a time-varying
# covariate changes value, and the end of follow-up. Returns these times
# both in Date format (Keep[[1]]) and, per patient, as days elapsed since
# tstart (Keep[[2]]).
# number of time-dpt confounders ####
L.cov <- length(mes.cov)
L.cov.bis <- length(time.cov)
# Each time-dependent covariate needs one vector of measurement column
# names and one matching vector of measurement-date column names.
if(L.cov != L.cov.bis)
stop("Same numbers of measures and times of measurement are required!")
# Maximum follow-up ####
Tend <- data[, tstop]
# browser()
# Retain date when changes occur for all time-dpt cov ####
# Split by patient --> loop on id
tabi <- split(data, data[,id])
L.tabi <- length(tabi)
times <- vector()
Keep <- list()
keep.times <- list()
keep.times.num <- list()
for (i in 1:L.tabi) {
# The start of follow-up is always kept for patient i.
keep.times[[i]] <- tabi[[i]][, tstart]
for(m in seq(L.cov)){
#if(!all(is.na(tabi[[i]][, mes.cov[[m]]])) & (tabi[[i]][, time.cov[[m]][1]] <= Tend[i])){
# Process covariate m only if this patient has at least one non-missing
# measurement, at least one non-missing measurement date, and the first
# non-missing date falls within the follow-up period.
if(!all(is.na(tabi[[i]][, mes.cov[[m]]])) & !all(is.na(tabi[[i]][, time.cov[[m]]])) & (tabi[[i]][, time.cov[[m]]][!is.na(tabi[[i]][, time.cov[[m]]])][1] <= Tend[i])){
mytimes <- vector()
vect.dat <- vector()
# only keep not missing dates happening before Tend
for (dat in time.cov[[m]]) {
d <- tabi[[i]][, dat]
class.d <- class(d)
if (!is.na(d) & (d <= Tend[i])) {
mytimes <- c(mytimes, d) # value of the visit date
class(mytimes) <- class.d # in format Date (c() strips the class)
vect.dat <- c(vect.dat, dat) # name of the visit date
}
}
# ordered dates -- corresponding time-dpt measures
ord <- order(mytimes)
mytimes <- mytimes[ord]
vect.dat <- vect.dat[ord]
# corresponding cov
vect.cov <- mes.cov[[m]][time.cov[[m]] %in% vect.dat]
vect.cov <- vect.cov[ord]
# keep 1st time of measurement if not measured at tstart
# and if value at 1st measurement different from that imputed at tstart
# Note: in our case, these values were set to 0
if(!is.na(mytimes[1]) & (mytimes[1]!=tabi[[i]][, tstart]) &
!(mytimes[1]%in%keep.times[[i]]) &
(!is.na(tabi[[i]][, vect.cov[1]])) &
(tabi[[i]][, vect.cov[1]] != 0) ){ # to change if the imputed value at tstart is not 0
keep.times[[i]] <- c(keep.times[[i]], mytimes[1])
}
# keep times when there is a change
if(length(vect.cov) != 1){
# Walk through consecutive measurements and keep each date at which the
# covariate value actually changes (skipping missing measurements and
# dates already retained).
tempo1 <- tabi[[i]][, vect.cov[1]] # value of 1st measurement of the time-dpt cov
temp.vect1 <- vect.cov[1] # corresponding name
tempo.time1 <- mytimes[1] # correspond. date of measurement
for (k in 2:length(vect.dat)) {
# vect.cov : retain date when change occurs between (k-1) and k
if (!is.na(tabi[[i]][, vect.cov[k]])) {
tempo2 <- tabi[[i]][, vect.cov[k]]
temp.vect2 <- vect.cov[k]
tempo.time2 <- mytimes[k]
if ((tempo.time1 != tempo.time2) &
(tempo1 != tempo2) &
!(tempo.time2 %in% keep.times[[i]])) {
keep.times[[i]] <- c(keep.times[[i]], tempo.time2)
}
tempo1 <- tempo2
temp.vect1 <- temp.vect2
tempo.time1 <- tempo.time2
}
}
}
}
}
# add Tend
# Appending with c() can drop the Date class, so save and restore it.
class.keep <- class(keep.times[[i]])
if(!(Tend[i] %in% keep.times[[i]])){
keep.times[[i]] <- c(keep.times[[i]], Tend[i])
}
class(keep.times[[i]]) <- class.keep
}
Keep[[1]] <- keep.times
# Numeric version: days elapsed since each patient's own start of follow-up.
for (i in 1:L.tabi) {
ref.start <- tabi[[i]][, tstart]
keep.times.num[[i]] <- keep.times[[i]]-ref.start
}
Keep[[2]] <- keep.times.num
return(Keep)
}
|
09e7f85b14e719bde7a253d67727d3aecb49f5dc
|
a8a9b1b586d63b1583c3cabdf84592cbbbc3af9c
|
/mat_to_omx_script.R
|
18a59178b6805b7c4113f077b480175ed51ad4df
|
[] |
no_license
|
BFroebRPG/TDM_Scripts
|
e87950eb3f8e04f9f4ba6dcd1fa7b35f45644fe3
|
f792758b088e9f79dd1ffb8894392cb6168030e2
|
refs/heads/main
| 2023-06-27T10:28:44.828853
| 2021-07-23T19:52:17
| 2021-07-23T19:52:17
| 373,624,664
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,282
|
r
|
mat_to_omx_script.R
|
# Functions ----------------------------------------------------------------
### Add functionality for multiple directories
## Required Package
library(stringr)
#' Create a Line for Cube Script for OMX Export
#'
#' @description
#' Creates a single line script for exporting matrix files in Cube to OMX files.
#' A helper function for mat_to_omx_script
#'
#' @param mat_file The matrix file to be converted to OMX.
#'
#' @param script_name The Cube script
#'
#' @param script_dir The directory where the Cube Script should be saved.
#'
#' @param input_dir The directory where Cube keeps the .mat files.
#'
#' @param output_dir The directory where the OMX files should be saved.
#'
#' @param append Should the line be appended to an existing script.
#'
#' @noRd
mat_to_omx_line <- function(mat_file,
                            script_name,
                            script_dir,
                            input_dir,
                            output_dir,
                            append = FALSE){
  # Writes one CONVERTMAT command line to the Cube script at
  # script_dir/script_name, converting input_dir/mat_file to an OMX file of
  # the same base name in output_dir. Overwrites the script unless
  # append = TRUE.
  script_path <- paste0(script_dir, script_name)
  # Derive the OMX file name from the matrix file name.
  # BUG FIX: the original str_replace(mat_file, ".mat", ".omx") treated "."
  # as a regex wildcard, was unanchored, and silently skipped uppercase
  # extensions such as ".MAT". An anchored, case-insensitive base-R sub()
  # fixes all three and removes the runtime dependency on stringr.
  omx_file <- sub("\\.mat$", ".omx", mat_file, ignore.case = TRUE)
  line <- paste0('CONVERTMAT FROM="',
                 input_dir,
                 mat_file,
                 '" TO="',
                 output_dir,
                 omx_file,
                 '" FORMAT=OMX COMPRESSION=0')
  write(line, file = script_path, sep = "\n", append = append)
}
#' Create a Cube Script for OMX Export
#'
#' @description
#' Creates a script for exporting matrix files in Cube to OMX files.
#'
#' @param mat_file The matrix file to be converted to OMX.
#'
#' @param script_name The Cube script
#'
#' @param script_dir The directory where the Cube Script should be saved.
#'
#' @param input_dir The directory where Cube keeps the .mat files.
#'
#' @param output_dir The directory where the OMX files should be saved.
#'
#' @param overwrite Should the lines be appended to an existing script or should
#' the existing script be overwritten with the new lines.
#'
mat_to_omx_script <- function(mat_files,
                              script_name,
                              script_dir,
                              input_dir,
                              output_dir,
                              overwrite = TRUE){
  # Builds a Cube script (one CONVERTMAT line per matrix file) at
  # script_dir/script_name. When overwrite = TRUE any existing script is
  # deleted first; every line is then appended, so with overwrite = FALSE
  # repeated calls accumulate lines in the same script.
  script_path <- paste0(script_dir, script_name)
  # Idiom fix: isTRUE() instead of `== TRUE`, and the two nested ifs
  # collapsed into a single short-circuit condition.
  if (isTRUE(overwrite) && file.exists(script_path)) {
    file.remove(script_path)
  }
  for (file in mat_files) {
    mat_to_omx_line(mat_file = file,
                    script_name,
                    script_dir,
                    input_dir,
                    output_dir,
                    append = TRUE)
  }
}
# Testing -----------------------------------------------------------------
# Example usage: configure the five inputs, then generate the Cube script.

# File name of the Cube script to generate
script_name <- "TDM_SetUp.s"
# Folder the generated script is written to
script_dir <- "C:\\Users\\NH2user\\Documents\\"
# Folder holding the Cube .mat files
input_dir <- "C:\\FSUTMS\\FLSWM_V7.2_Clean\\Base\\SIS2018\\Output\\"
# Folder the converted OMX files should be written to
output_dir <- "C:\\Users\\NH2user\\Documents\\TDM_Scripts\\"
# Matrix files to convert (file extensions must currently be lowercase)
mat_file <- c("CONGSKIM.mat")

mat_to_omx_script(mat_files = mat_file,
                  script_name = script_name,
                  script_dir = script_dir,
                  input_dir = input_dir,
                  output_dir = output_dir)

# To convert MATs from several directories (e.g. base and future years),
# call mat_to_omx_script() once per directory: redefine input_dir and
# mat_file (and, if desired, output_dir) before each call, and pass
# overwrite = FALSE so later calls append to the same script instead of
# replacing it. This matters especially when MAT files share names across
# base- and future-year scenarios.

input_dir <- "C:\\FSUTMS\\FLSWM_V7.2_Clean\\Base\\SIS2045\\Output\\"
output_dir <- "C:\\Users\\NH2user\\Documents\\TDM_Scripts\\SIS2045\\"
mat_file <- c("CONGSKIM.mat")

mat_to_omx_script(mat_files = mat_file,
                  script_name = script_name,
                  script_dir = script_dir,
                  input_dir = input_dir,
                  output_dir = output_dir,
                  overwrite = FALSE)
|
4eed234104767aa802164a7024849bd1c8a79f7a
|
d699d825f09dab1b546d14d4a4f464c74a1c5499
|
/1_functions/plot_func.R
|
b4f7491b3ed2db114c065069e2bcd43e10f1bb80
|
[] |
no_license
|
SpTB/observing_bandits
|
bb13f949f626bdc9ee27c6e8c910c58fb32942e0
|
99c068a0f20c50235c27fe1cf538927b1f0b212f
|
refs/heads/master
| 2023-05-07T18:52:35.323253
| 2021-05-26T11:25:14
| 2021-05-26T11:25:14
| 343,011,697
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,203
|
r
|
plot_func.R
|
plot_games <- function (df, type = 'kalman', multiple, pay1, pay2, games_in=list()) {
  # Plot per-trial expected rewards (and observed outcomes) of a 2-armed
  # bandit simulation.
  #
  # df:        output of a sim function (data frame); columns used here:
  #            trial, game, ev1, ev2, ev_var1, ev_var2, outcome, choice
  # type:      either 'kalman' or 'delta'; 'kalman' adds +/- variance ribbons
  # multiple:  whether to facet the plot by game (bool)
  # pay1, pay2: bandit mean payouts, drawn as dashed reference lines
  # games_in:  subset of games to plot (numeric list); empty list = all games

  # Restrict to the requested subset of games.
  # BUG FIX: the original filtered on `game %in% game`, comparing the column
  # with itself (always TRUE), so the games_in argument was silently ignored.
  if (length(games_in) > 0) df <- df %>% filter(game %in% games_in)

  p = ggplot(df) +
    aes(x = trial, y = ev1) +
    geom_line(size = 1L, colour = "#0c4c8a") +
    geom_line(aes(y = ev2), size = 1L, colour = "orange") +
    geom_hline(yintercept = pay1, color = "#0c4c8a", linetype = 'dashed', alpha = .5) +
    geom_hline(yintercept = pay2, color = "orange", linetype = 'dashed', alpha = .5) +
    geom_point(aes(y=outcome, color=as.factor(choice))) +
    scale_color_manual(values = c("#0c4c8a", 'orange')) +
    labs(y = 'Expected Reward', colour = 'Bandit') +
    theme_classic()

  # Kalman-filter sims track a posterior variance per bandit: show it as ribbons.
  if (type == 'kalman') {
    p = p +
      geom_ribbon(aes(ymin=ev1-ev_var1, ymax=ev1+ev_var1),fill='#0c4c8a', alpha=.3) +
      geom_ribbon(aes(ymin=ev2-ev_var2, ymax=ev2+ev_var2),fill='orange', alpha=.3)
  }

  # One panel per game when requested (idiom fix: isTRUE() instead of ==T).
  if (isTRUE(multiple)) {
    p = p +
      facet_wrap(~game)
  }
  p
}
|
5178fcf6bd0be0e81f538a2c64fd41a31e70e5b4
|
b8018a912000b89d38ca2002636bb5154dc67c64
|
/man/USREC.Rd
|
fb2857a81ec5ff6662a2efbd3914eee5f502edc2
|
[] |
no_license
|
JustinMShea/neverhpfilter
|
8dbc239d8a8b20435f459d958545f9a84c97eb83
|
49c2274328ab4751e92ef0bf6ae9e49b05481956
|
refs/heads/master
| 2022-12-29T16:16:35.207712
| 2022-12-10T21:28:28
| 2022-12-10T21:28:28
| 101,463,296
| 16
| 7
| null | 2020-02-09T00:56:33
| 2017-08-26T04:52:10
|
R
|
UTF-8
|
R
| false
| true
| 4,443
|
rd
|
USREC.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/USREC.R
\docType{data}
\name{USREC}
\alias{USREC}
\title{Recession Indicators Series}
\format{
An \code{\link{xts}} object containing monthly observations of NBER
based Recession Indicators
\itemize{
\item\strong{Release:} {Recession Indicators Series (Not a Press Release)}
\item\strong{Seasonal Adjustment:} {Not Seasonally Adjusted}
\item\strong{Frequency:} {Monthly}
\item\strong{Units:} {+1 or 0}
\item\strong{Date Range:} {1854-12-01 to 2021-11-01}
\item\strong{Last Updated} {2021-12-01 6:01 PM CST}
}
}
\source{
Federal Reserve Bank of St. Louis \url{https://fred.stlouisfed.org/data/USREC.txt}
}
\usage{
data(USREC)
}
\description{
\code{USREC} NBER based Recession Indicators for the United States from the
Period following the Peak through the Trough
}
\section{Notes}{
This time series is an interpretation of US Business Cycle Expansions
and Contractions data provided by The National Bureau of Economic
Research (NBER) at \url{http://www.nber.org/cycles/cyclesmain.html}. Our
time series is composed of dummy variables that represent periods of
expansion and recession. The NBER identifies months and quarters of
turning points without designating a date within the period that
turning points occurred. The dummy variable adopts an arbitrary
convention that the turning point occurred at a specific date within
the period. The arbitrary convention does not reflect any judgment on
this issue by the NBER's Business Cycle Dating Committee. A value of 1
is a recessionary period, while a value of 0 is an expansionary
period. For this time series, the recession begins the first day of
the period following a peak and ends on the last day of the period of
the trough. For more options on recession shading, see the notes and
links below.
The recession shading data that we provide initially comes from the
source as a list of dates that are either an economic peak or trough.
We interpret dates into recession shading data using one of three
arbitrary methods. All of our recession shading data is available
using all three interpretations. The period between a peak and trough
is always shaded as a recession. The peak and trough are collectively
extrema. Depending on the application, the extrema, both individually
and collectively, may be included in the recession period in whole or
in part. In situations where a portion of a period is included in the
recession, the whole period is deemed to be included in the recession
period.
The first interpretation, known as the midpoint method, is to show a
recession from the midpoint of the peak through the midpoint of the
trough for monthly and quarterly data. For daily data, the recession
begins on the 15th of the month of the peak and ends on the 15th of
the month of the trough. Daily data is a disaggregation of monthly
data. For monthly and quarterly data, the entire peak and trough
periods are included in the recession shading. This method shows the
maximum number of periods as a recession for monthly and quarterly
data. The Federal Reserve Bank of St. Louis uses this method in its
own publications. A version of this time series represented using the
midpoint method can be found at:
\url{https://fred.stlouisfed.org/series/USRECM}
The second interpretation, known as the trough method, is to show a
recession from the period following the peak through the trough (i.e.
the peak is not included in the recession shading, but the trough is).
For daily data, the recession begins on the first day of the first
month following the peak and ends on the last day of the month of the
trough. Daily data is a disaggregation of monthly data. The trough
method is used when displaying data on FRED graphs. The trough method
is used for this series.
The third interpretation, known as the peak method, is to show a
recession from the period of the peak to the trough (i.e. the peak is
included in the recession shading, but the trough is not). For daily
data, the recession begins on the first day of the month of the peak
and ends on the last day of the month preceding the trough. Daily data
is a disaggregation of monthly data. A version of this time series
represented using the peak method can be found at:
\url{https://fred.stlouisfed.org/series/USRECP}
}
\examples{
data(USREC)
USREC["2007/2009"]
plot(USREC["1947/"], grid.col = "white", col="red")
}
\keyword{datasets}
|
fa9239566338660f5a0e69d1fc52306a95b8d0b7
|
c34aeb4a6e0ea62408d1985a759c71f6aac98ec1
|
/.Rproj.user/D07D4D2/sources/per/t/84DAA457-contents
|
522e3c729c209c8816595a0c0a80378e12971023
|
[] |
no_license
|
KCY0409/data-retreatment2
|
94d870aab371924a226accd4b3fffd0287d030ff
|
d5b798c365face029cae5ebf50e6501b0f2dadd5
|
refs/heads/master
| 2020-12-15T10:11:05.687411
| 2020-01-29T14:38:03
| 2020-01-29T14:38:03
| 235,071,221
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,061
|
84DAA457-contents
|
# Regular expressions
# A formal language used to describe sets of strings that follow a given rule
# -> hard to read and use at first because the syntax must be memorized;
#    once familiar, writing text-handling code becomes easy
# Regular expressions in R
# Two major flavors exist: the POSIX standard and Perl-style PCRE
# Of POSIX "basic" and "extended", R supports the extended syntax
# PCRE can be used with the perl = T option
# Simple matching
# grep looks for a given string in its input
# grep(pattern to look for, target vector)
data <-c("apple", "banana", "banano")
grep("banana", data)
grepl("banana", data)
# Start of a string
# In a simple match, putting ^ at the very front of the pattern matches
# only the entries that START with the characters that follow it.
data <-c("apple", "banana", "banano", "a banana")
grep("banana", data)
grep("^banana", data)
# End of a string
# In a simple match, putting $ at the very end of the pattern matches
# only the entries that END with the characters before it.
data <-c("apple", "banana", "banano", "a banana", "a banana a")
grep("banana", data)
grep("banana$", data)
# Exact matches only
data <-c("apple", "banana", "banano", "a banana", "a banana a")
grep("banana", data)
grep("^banana$", data)
# Try it out
# In the airports data of the nycflights13 package, how many entries contain "New"?
# 529
if(!requireNamespace("nycflights13")) install.packages("nycflights13")
library("nycflights13")
library(dplyr)
head(airports)
View(airports)
airportD <- c(airports$name, airports$tzone)
grep("New",airportD)
# In the airports data of the nycflights13 package, how many entries start with "New"?
# 13
grep("^New", airportD)
# Any single character
# In a regular expression, . stands for exactly one character of any kind
x <- c("apple", "banana", "pear")
grep(".a.", x)
# Metacharacters taken literally
# Putting \ before a metacharacter makes it match literally.
# But \ is itself special inside R strings, so \\ must be written.
x <- c("apple", "banana", "pear",".apple")
grep("\\.a.", x)
grep("\.a.", x)
# Character classes
# [] is a character class: any single one of the characters listed inside
# the brackets is a possible match
x <- c("123", "1357", "999990", "1133")
grep("[02468]", x)
# Inside a character class, ^ means "any character EXCEPT the listed ones"
x <- c("123", "1357","999990","0200","02468")
grep("[^02468]", x)
# [[:ascii:]] ASCII characters (all 128)
# [[:alpha:]] alphabetic characters (letters)
# [[:digit:]] digits
# [[:alnum:]] letters and digits
# [[:blank:]] blank characters (space, tab, etc.)
# [[:space:]] whitespace characters
# [[:lower:]] lowercase letters
# [[:upper:]] uppercase letters
# Try it out
# In the airports data of the nycflights13 package, how many entries end with a digit?
# 5926
airportD2 <- c(airports$faa, airports$name, airports$lat , airports$lon, airports$alt, airports$tz, airports$dst, airports$tzone)
View(airportD2)
grep("[[:digit:]]$",airportD2)
997 + 4929
# Preceding character: optional (zero or one)
# ? after a character matches both "one occurrence" and "no occurrence"
x <- c("apple", "banana", "pear", "aple")
grep("app?", x)
# Preceding character: one or more
# + after a character matches one or more consecutive occurrences
x <- c("apple", "banana", "pear", "aple")
grep("p+", x)
grep("ap+", x)
# Preceding character: zero or more
# * after a character matches anything from no occurrence up to many
# consecutive occurrences
x <- c("apple", "banana", "pear", "aple", "abble", "appppppppppple")
grep("app*", x)
# Controlling how many repetitions
# {n}   : exactly n occurrences
# {n, } : n or more occurrences
# { ,m} : m or fewer occurrences
# {n,m} : between n and m occurrences
# Does it really behave that way?
x <- c("a","aa","aaa","aaaa","aaaaa")
grep("a{3}", x)
grep("^a{3}$", x)
grep("a{3,}", x)
grep("a{,3}", x)
grep("a{2,3}", x)
# Lazy (non-greedy) control with ?
# ??    : zero or one, preferring zero
# +?    : one or more, preferring as few as possible
# *?    : zero or more, preferring as few as possible
# {n,}? : n or more, preferring as few as possible
# {n,m}?: between n and m, preferring as few as possible
# Example of lazy quantifiers
# Any character (.) repeated any number of times (*) sits between the tags.
# Compare how .* (greedy) and .*? (lazy) behave differently.
stri<-"<p> <em>안녕</em>하세요 </p><p>테스트입니다.</p>"
sub("<p>.*</p>","tar",stri)
sub("<p>.*?</p>","tar",stri)
# Groups
# A regular expression normally treats each character as one unit;
# () groups several characters into a single unit
x <- c("abc","abcabc", "abcabcadc", "abcabcabc", "adcabcabcabc")
grep("(abc){3}", x)
# Capturing and reusing groups
# Groups really shine with replacement functions such as sub.
# Content grouped in the search pattern can be referenced in the
# replacement pattern, in order, as \\1, \\2, ...
x <- c("^ab", "ab", "abc", "ab 12")
gsub("(ab) 12", "\\1 34", x)
# Using "or"
# | means "or"
# First in a simple match; it can also be combined with ()
x <- c("^ab", "ab", "ac", "abc", "abd", "abe", "ab 12")
grep("abc|abd", x)
grep("a(c|bd)", x)
# Companion functions
# grep    : returns the positions in the vector where the pattern occurs
# grepl   : returns a TRUE/FALSE vector of pattern matches
# sub     : replaces the first match with the second argument
# gsub    : replaces every match with the second argument
# regexpr : returns the starting position of the pattern within each string
# gregexpr: returns every position of the pattern within each string
# dir     : returns the file names matching the pattern
# strsplit: splits character data on the pattern
# apropos : lists base objects that the Environment pane does not show
# find    : shows which package/environment an object belongs to
# Postal codes
# Korea uses two formats: the new "12345" and the old "123-456"
# ^[0-9]{3}([0-9]{2}|-[0-9]{3})&
# NOTE(review): the trailing & in the pattern above is likely a typo for $
# Resident registration numbers
# ^([0-9]{2}(0[1-9]|1[0-2])(0[1-9]|[12][0-9]|3[01]))-[1-4][0-9]{6}$
# Phone numbers
# ^\\(?[0-9]{2,3}\\)?[-. ]?[0-9]{3,4}[-. ]?[0-9]{4}$
# With groups, gsub can also normalise the format
# NOTE(review): `data` below still holds the fruit vector assigned earlier in
# this script; supply a vector of phone numbers to see the intended effect
gsub("^\\(?([0-9]{2,3})\\)?[-. ]?([0-9]{3,4})[-. ]?([0-9]{4})$",
"(\\1) \\2-\\3",data)
# Email addresses
# /^([a-z0-9_\\.-]+)@([0-9a-z\\.-]+)\.([a-z\\.]{2,6})$/
# Internet addresses (URLs)
# /^(https?:\\/\\/)?([\\da-z\\.-]+)\\.([a-z\\.]{2,6})([\\/[[:word:]]_\\.-]*)*\\/?$/
|
|
975e12a8740700d929618732598c1879c7ce8274
|
fb46465a5f7f72836c7eef3ccc2383cc79f929e5
|
/201214/fitted counting.R
|
ebdd681be31b5bfcfd5a100fb1bc275574daa6a5
|
[] |
no_license
|
BIBS-Summary-Based-Analysis/Which-national-factors-are-most-influential-in-the-spread-of-COVID-19-
|
35c33465def45a0498b6e3bb0fac407634439031
|
1058825d101e0dc2d4659b901a4f08c97f9ca456
|
refs/heads/main
| 2023-06-27T22:12:44.245591
| 2021-07-26T14:51:53
| 2021-07-26T14:51:53
| 348,031,475
| 1
| 0
| null | 2021-03-24T15:16:02
| 2021-03-15T15:50:45
|
R
|
UTF-8
|
R
| false
| false
| 2,671
|
r
|
fitted counting.R
|
# Count, for the logistic and the Gompertz growth models separately, how many
# countries ended up with a fitted segmented model that has fewer segments
# than expected from the detected breakpoint.
# coef_result.csv: one row per country; columns 1:6 hold logistic-model
# coefficients, 7:12 Gompertz coefficients, 14 the breakpoint.
coefficient_result = read.csv("coef_result.csv",row.names = 1)
coef_sep_Logi = coefficient_result[,c(1:6,14)]
coef_sep_Gom = coefficient_result[,c(7:12,14)]
# Logistic model counting
country = rownames(coef_sep_Logi)
# count1: expected 2 segments, none fitted
# count2: expected 2 segments, only 1 fitted
# count3: expected 1 segment, none fitted
count1 = 0
count2 = 0
count3 = 0
# A non-missing breakpoint means the country's curve has 2 segments.
segment2_len = length(which(!is.na(coef_sep_Logi$breakpoint)))
# NOTE(review): 165 is the hard-coded total number of countries -- confirm it
# matches nrow(coefficient_result).
segment1_len = 165 - segment2_len
for(i in 1:length(country)){
if(!is.na(coef_sep_Logi$breakpoint[i])){
M = 2 # expected number of parameter pairs
}else{
M = 1
}
# a2 present => both segments fitted; only a1 => one segment; neither => none.
if(!is.na(coef_sep_Logi$a2_Logi[i])){
m = 2 # number of fitted parameter pairs
}else{
if(!is.na(coef_sep_Logi$a1_Logi[i])){
m = 1
}else{
m = 0
}
}
# Case by Case
if(M==2&m==0){
count1 = count1 + 1
}
if(M==2&m==1){
count2 = count2 + 1
}
if(M==1&m==0){
count3 = count3 + 1
}
}
# for(i in 1) is used only to group the cat() calls into one runnable block.
for(i in 1){
cat("-------fitted model counting-------\n")
cat("\n")
cat("num of countries whose segment num is 2 :", segment2_len,"\n")
cat("segment num is 2, fitted segment num is 2 :", segment2_len-(count1+count2),"\n")
cat("segment num is 2, fitted segment num is 1 :", segment2_len-count1,"\n")
cat("\n")
cat("num of countries whose segment num is 1 :", segment1_len,"\n")
cat("segment num is 1, fitted segment num is 1 :", segment1_len-count3)
}
count1;count2;count3
# Gompertz model counting (same logic as above, applied to the Gompertz columns)
count1 = 0
count2 = 0
count3 = 0
country = rownames(coef_sep_Gom)
segment2_len = length(which(!is.na(coef_sep_Gom$breakpoint)))
segment1_len = 165 - segment2_len
for(i in 1:length(country)){
if(!is.na(coef_sep_Gom$breakpoint[i])){
M = 2 # expected number of parameter pairs
}else{
M = 1
}
if(!is.na(coef_sep_Gom$a2_Gom[i])){
m = 2 # number of fitted parameter pairs
}else{
if(!is.na(coef_sep_Gom$a1_Gom[i])){
m = 1
}else{
m = 0
}
}
# Case by Case
if(M==2&m==0){
count1 = count1 + 1
}
if(M==2&m==1){
count2 = count2 + 1
}
if(M==1&m==0){
count3 = count3 + 1
}
}
for(i in 1){
cat("-------fitted model counting-------\n")
cat("\n")
cat("num of countries whose segment num is 2 :", segment2_len,"\n")
cat("segment num is 2, fitted segment num is 2 :", segment2_len-(count1+count2),"\n")
cat("segment num is 2, fitted segment num is 1 :", segment2_len-count1,"\n")
cat("\n")
cat("num of countries whose segment num is 1 :", segment1_len,"\n")
cat("segment num is 1, fitted segment num is 1 :", segment1_len-count3)
}
count1;count2;count3
|
83a570970011773e4ec591971e739fffbc3e82dd
|
f9376bb4d345ec552ac295d4098f523f18eaacba
|
/R/Lecture3/Lecture3/Old/LargeNumbers.R
|
8a75662ac0cc46004240e37a3d0a768ea558fe41
|
[] |
no_license
|
StephenElston/DataScience410
|
1c201792c8c7084e699cf9397daaa658ea40ef73
|
21855687724240192592d0d4f72674f5f21f6895
|
refs/heads/master
| 2023-01-24T21:19:47.038382
| 2020-12-04T03:01:01
| 2020-12-04T03:01:01
| 115,932,652
| 10
| 15
| null | 2020-01-29T03:03:53
| 2018-01-01T16:56:19
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 1,939
|
r
|
LargeNumbers.R
|
##--------------------------------------------
##
## Law of large numbers examples
##
## Class: PCE 350 Data Science Methods Class
##
##
##--------------------------------------------
##-----Use rolls of dice-------
## set a probability
p_six = 1/6
# Expected counts of sixes (xs) for the matching numbers of rolls (sizes);
# each pair keeps the ratio xs/sizes = 1/6 constant.
xs = c(10, 100, 1000, 10000, 100000)
sizes = c(60, 600, 6000, 60000, 600000)
# roll the dice and find p(x)
# P(exactly x sixes in s rolls) shrinks as the sample grows, even though the
# proportion stays at 1/6.
Map(function(x,s) dbinom(x = x, size = s, prob=p_six), xs, sizes)
# Probability of within 5%?
# 1) p(7<x<13 | 60 trials)
pbinom(12, size=60, prob=p_six) - pbinom(7, size=60, prob=p_six)
# alternatively: sum the point probabilities over 8..12
sum(sapply(8:12, function(x) dbinom(x, size=60, prob=p_six)))
# 2) p(70<x<130 | 600 trials) -- same relative window, higher probability
pbinom(129, size=600, prob=p_six) - pbinom(70, size=600, prob=p_six)
# alternatively
sum(sapply(71:129, function(x) dbinom(x, size=600, prob=p_six)))
# View Distributions:
# The 600-roll distribution is relatively narrower around its mean.
x_60 = 1:60
y_60 = dbinom(x_60, size=60, prob=p_six)
x_600 = 1:150
y_600 = dbinom(x_600, size=600, prob=p_six)
plot(x_60, y_60, type='l', main='Roll a Die 60 or 600 Times', xlab="# of Successes",
ylab="Probability", lwd=2, col="green", xlim=c(1,150))
lines(x_600, y_600, lwd=2, col="blue")
legend("topright", c("Roll 60 Times", "Roll 600 Times"), col=c("green", "blue"),
lty=c(1,1), lwd=c(2,2))
##----Coin Flips-----
# Calculate a running average of N-trials of flipping a fair coin;
# the running mean converges toward 0.5 as n grows.
n = 10000
outcomes = round(runif(n))
running_average = sapply(1:n, function(x) mean(outcomes[1:x]))
plot(running_average, type='l')
grid()
# Sample standard deviation vs the theoretical sd of a Bernoulli(0.5) draw.
outcomes_sd = sd(outcomes)
outcomes_sd
outcomes_sd_theo = sqrt( 0.5 * (1 - 0.5) )
outcomes_sd_theo
##----St. Dev. vs. St. Error-----
# Sample means shrink toward 0 at rate 1/sqrt(n) (standard error), while
# sample standard deviations stay near 1 regardless of n.
n = seq(10,10000,len=1000)
sample_means = sapply(n, function(x) mean(rnorm(x)))
sample_sds = sapply(n, function(x) sd(rnorm(x)))
plot(n, sample_means) # Plot means
lines(n, 1/sqrt(n)) # Plot means +- st. error
lines(n, -1/sqrt(n))
plot(n, sample_sds) # Plot sd's
lines(n, 1/sqrt(n)+1) # plot sd's +- st. error
lines(n, -1/sqrt(n)+1)
|
68d5b69284baf83c3e2914723fb2a51c227912bf
|
c65c0bab4d633385efa249e38cf45818754afaff
|
/shinyapps/app14/app.R
|
ceb5c90a98ce7848f92c0b96de73b8ab9bd82a8d
|
[] |
no_license
|
imcullan/Shiny-Tutorial-Rgitbook
|
06762c2ea4cf9900401f68a0c3a9964615376b93
|
1db7814f45b5e10846876a493d4946c73d270aa3
|
refs/heads/master
| 2021-01-22T18:32:35.081445
| 2016-08-26T17:40:30
| 2016-08-26T17:40:30
| 66,667,107
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 752
|
r
|
app.R
|
# Shiny server: the observe() block re-runs whenever any reactive input read
# inside it changes, and pushes its result into the "myresults" text box.
server <- function(input, output, session) {
observe({
# even though the slider is not involved in a calculation, if
# you change the slider it will run all this code and update the text box
# changes to the mytext box also will trigger the code to run
input$myslider
# Append a random integer (1..10000) to whatever is typed in "mytext",
# so each trigger produces a visibly different result.
txt <- paste(input$mytext, sample(1:10000, 1))
updateTextInput(session, inputId = "myresults", value = txt)
})
}
# UI: one slider and two text boxes; "myresults" is only ever written to by
# the server via updateTextInput().
ui <- basicPage(
h3("The results text box gets updated if you change the other text box OR the slider."),
sliderInput("myslider", "A slider:", min=0, max=1000, value=500),
textInput("mytext", "Input goes here", value = "Initial value"),
textInput("myresults", "Results will be printed here")
)
# Launch the app.
shinyApp(ui = ui, server = server)
|
b4a5483b6ee1c35de3a0bd30e495f29b04ac09f9
|
8b61baaf434ac01887c7de451078d4d618db77e2
|
/R/sort-methods.R
|
562924d670b988e6acb1fc38d0b20687e0f184be
|
[] |
no_license
|
drmjc/mjcbase
|
d5c6100b6f2586f179ad3fc0acb07e2f26f5f517
|
96f707d07c0a473f97fd70ff1ff8053f34fa6488
|
refs/heads/master
| 2020-05-29T19:36:53.961692
| 2017-01-17T10:54:00
| 2017-01-17T10:54:00
| 12,447,080
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,346
|
r
|
sort-methods.R
|
#' sorting or ordering more complex objects
#'
#' S4 generic wrapping \code{base::sort}. For \code{matrix} and
#' \code{data.frame} inputs, rows are reordered according to a per-row
#' summary computed by \code{FUN}; any other input falls through to
#' \code{base::sort}.
#'
#' @inheritParams base::sort
#' @param FUN a sort function. if \code{x} is 2D or more, then this is applied to the 1st
#' dimension (ie the rows)
#'
#' @return a sorted object of same type as \code{x}
#'
#' @author Mark Cowley
#' @export
#' @rdname sort-methods
#' @docType methods
setGeneric("sort", function(x, decreasing=FALSE, na.last=NA, FUN, ...) standardGeneric("sort"))
#' @rdname sort-methods
#' @aliases sort,matrix-method
#' @export
setMethod(
"sort",
signature=signature("matrix"),
function(x, decreasing=FALSE, na.last=NA, FUN, ...) {
# Summarise each row with FUN (extra args forwarded via ...), then reorder
# the rows of x by that summary.
FUN <- match.fun(FUN)
val <- apply(x, 1, FUN, ...)
res <- x[order(val, decreasing=decreasing, na.last=na.last), ]
return(res)
}
)
#' @rdname sort-methods
#' @aliases sort,data.frame-method
#' @export
setMethod(
"sort",
signature=signature("data.frame"),
function(x, decreasing=FALSE, na.last=NA, FUN, ...) {
FUN <- match.fun(FUN)
# colclasses() is a package-local helper (not base R); it selects which
# columns are numeric.
cols <- colclasses(x) == "numeric"
res <- x
# NOTE(review): only the numeric columns are row-sorted (via the matrix
# method above); non-numeric columns keep their original row order, so
# rows are NOT kept intact across column types. Confirm this is intended.
res[,cols] <- sort(as.matrix(x[,cols]), decreasing=decreasing, na.last=na.last, FUN=FUN, ...)
return(res)
}
)
#' @rdname sort-methods
#' @aliases sort,ANY-method
#' @export
setMethod(
"sort",
signature=signature("ANY"),
function(x, decreasing=FALSE, na.last=NA, FUN, ...) {
# Fallback: delegate to base::sort. NOTE(review): na.last and FUN are not
# forwarded here -- verify no caller relies on them for non-matrix input.
base::sort(x, decreasing=decreasing, ...)
}
)
|
9680151509449700bd2b931128ea7e5d3a90b357
|
763189ea0c11e7f6b247ea3552400a5bddf72311
|
/man/additive.Rd
|
e79117611d9df6fbcacfa68c628ba970605233b8
|
[
"MIT"
] |
permissive
|
hsbadr/additive
|
75918e59509b21b27086eb96004da2b7cb278662
|
d7999a531076b513a7f2085898e890f22d8fd47f
|
refs/heads/main
| 2023-08-16T02:01:24.017736
| 2023-07-17T12:15:43
| 2023-07-17T12:15:43
| 369,646,227
| 7
| 1
|
NOASSERTION
| 2022-06-16T14:10:16
| 2021-05-21T20:23:42
|
R
|
UTF-8
|
R
| false
| true
| 15,338
|
rd
|
additive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/additive.R
\name{additive}
\alias{additive}
\alias{update.additive}
\alias{additive_fit}
\title{General Interface for Additive TidyModels}
\usage{
additive(
mode = "regression",
engine = "mgcv",
fitfunc = NULL,
formula.override = NULL,
family = NULL,
method = NULL,
optimizer = NULL,
control = NULL,
scale = NULL,
gamma = NULL,
knots = NULL,
sp = NULL,
min.sp = NULL,
paraPen = NULL,
chunk.size = NULL,
rho = NULL,
AR.start = NULL,
H = NULL,
G = NULL,
offset = NULL,
subset = NULL,
start = NULL,
etastart = NULL,
mustart = NULL,
drop.intercept = NULL,
drop.unused.levels = NULL,
cluster = NULL,
nthreads = NULL,
gc.level = NULL,
use.chol = NULL,
samfrac = NULL,
coef = NULL,
discrete = NULL,
select = NULL,
fit = NULL
)
\method{update}{additive}(
object,
parameters = NULL,
fitfunc = NULL,
formula.override = NULL,
family = NULL,
method = NULL,
optimizer = NULL,
control = NULL,
scale = NULL,
gamma = NULL,
knots = NULL,
sp = NULL,
min.sp = NULL,
paraPen = NULL,
chunk.size = NULL,
rho = NULL,
AR.start = NULL,
H = NULL,
G = NULL,
offset = NULL,
subset = NULL,
start = NULL,
etastart = NULL,
mustart = NULL,
drop.intercept = NULL,
drop.unused.levels = NULL,
cluster = NULL,
nthreads = NULL,
gc.level = NULL,
use.chol = NULL,
samfrac = NULL,
coef = NULL,
discrete = NULL,
select = NULL,
fit = NULL,
fresh = FALSE,
...
)
additive_fit(formula, data, ...)
}
\arguments{
\item{mode}{A single character string for the prediction outcome mode.
Possible values for this model are "unknown", "regression", or
"classification".}
\item{engine}{A single character string specifying what computational
engine to use for fitting. Possible engines are listed below.
The default for this model is \code{"mgcv"}.}
\item{fitfunc}{A named character vector that describes how to call
a function for fitting a generalized additive model. This defaults
to \code{c(pkg = "mgcv", fun = "gam")} (\code{\link[mgcv]{gam}}).
\code{fitfunc} should have elements \code{pkg} and \code{fun}.
The former is optional but is recommended and the latter is
required. For example, \code{c(pkg = "mgcv", fun = "bam")} would
be used to invoke \code{\link[mgcv]{bam}} for big data.
A user-specified function is also accepted provided that it is
fully compatible with \code{\link[mgcv]{gam}}.}
\item{formula.override}{Overrides the formula; for details see
\code{\link[mgcv]{formula.gam}}.}
\item{family}{
This is a family object specifying the distribution and link to use in
fitting etc (see \code{\link{glm}} and \code{\link{family}}). See
\code{\link[mgcv]{family.mgcv}} for a full list of what is available, which goes well beyond exponential family.
Note that \code{quasi} families actually result in the use of extended quasi-likelihood
if \code{method} is set to a RE/ML method (McCullagh and Nelder, 1989, 9.6).
}
\item{method}{The smoothing parameter estimation method. \code{"GCV.Cp"} to use GCV for unknown scale parameter and
Mallows' Cp/UBRE/AIC for known scale. \code{"GACV.Cp"} is equivalent, but using GACV in place of GCV. \code{"NCV"}
for neighbourhood cross-validation using the neighbourhood structure speficied by \code{nei} (\code{"QNCV"} for numerically more ribust version). \code{"REML"}
for REML estimation, including of unknown scale, \code{"P-REML"} for REML estimation, but using a Pearson estimate
of the scale. \code{"ML"} and \code{"P-ML"} are similar, but using maximum likelihood in place of REML. Beyond the
exponential family \code{"REML"} is the default, and the only other options are \code{"ML"}, \code{"NCV"} or \code{"QNCV"}.}
\item{optimizer}{An array specifying the numerical optimization method to use to optimize the smoothing
parameter estimation criterion (given by \code{method}). \code{"outer"}
for the direct nested optimization approach. \code{"outer"} can use several alternative optimizers, specified in the
second element of \code{optimizer}: \code{"newton"} (default), \code{"bfgs"}, \code{"optim"} or \code{"nlm"}. \code{"efs"}
for the extended Fellner Schall method of Wood and Fasiolo (2017).}
\item{control}{A list of fit control parameters to replace defaults returned by
\code{\link[mgcv]{gam.control}}. Values not set assume default values. }
\item{scale}{ If this is positive then it is taken as the known scale parameter. Negative signals that the
scale parameter is unknown. 0 signals that the scale parameter is 1 for Poisson and binomial and unknown otherwise.
Note that (RE)ML methods can only work with scale parameter 1 for the Poisson and binomial cases.
}
\item{gamma}{Increase this beyond 1 to produce smoother models. \code{gamma} multiplies the effective degrees of freedom in the GCV or UBRE/AIC. \code{n/gamma} can be viewed as an effective sample size in the GCV score, and this also enables it to be used with REML/ML. Ignored with P-RE/ML or the \code{efs} optimizer. }
\item{knots}{this is an optional list containing user specified knot values to be used for basis construction.
For most bases the user simply supplies the knots to be used, which must match up with the \code{k} value
supplied (note that the number of knots is not always just \code{k}).
See \code{\link[mgcv]{tprs}} for what happens in the \code{"tp"/"ts"} case.
Different terms can use different numbers of knots, unless they share a covariate.
}
\item{sp}{A vector of smoothing parameters can be provided here.
Smoothing parameters must be supplied in the order that the smooth terms appear in the model
formula. Negative elements indicate that the parameter should be estimated, and hence a mixture
of fixed and estimated parameters is possible. If smooths share smoothing parameters then \code{length(sp)}
must correspond to the number of underlying smoothing parameters.}
\item{min.sp}{Lower bounds can be supplied for the smoothing parameters. Note
that if this option is used then the smoothing parameters \code{full.sp}, in the
returned object, will need to be added to what is supplied here to get the
smoothing parameters actually multiplying the penalties. \code{length(min.sp)} should
always be the same as the total number of penalties (so it may be longer than \code{sp},
if smooths share smoothing parameters).}
\item{paraPen}{optional list specifying any penalties to be applied to parametric model terms.
\code{\link[mgcv]{gam.models}} explains more.}
\item{chunk.size}{The model matrix is created in chunks of this size, rather than ever being formed whole.
Reset to \code{4*p} if \code{chunk.size < 4*p} where \code{p} is the number of coefficients.}
\item{rho}{An AR1 error model can be used for the residuals (based on dataframe order), of Gaussian-identity
link models. This is the AR1 correlation parameter. Standardized residuals (approximately
uncorrelated under correct model) returned in
\code{std.rsd} if non zero. Also usable with other models when \code{discrete=TRUE}, in which case the AR model
is applied to the working residuals and corresponds to a GEE approximation.}
\item{AR.start}{logical variable of same length as data, \code{TRUE} at first observation of an independent
section of AR1 correlation. Very first observation in data frame does not need this. If \code{NULL} then
there are no breaks in AR1 correlaion.}
\item{H}{A user supplied fixed quadratic penalty on the parameters of the
GAM can be supplied, with this as its coefficient matrix. A common use of this term is
to add a ridge penalty to the parameters of the GAM in circumstances in which the model
is close to un-identifiable on the scale of the linear predictor, but perfectly well
defined on the response scale.}
\item{G}{Usually \code{NULL}, but may contain the object returned by a previous call to \code{gam} with
\code{fit=FALSE}, in which case all other arguments are ignored except for
\code{sp}, \code{gamma}, \code{in.out}, \code{scale}, \code{control}, \code{method} \code{optimizer} and \code{fit}.}
\item{offset}{Can be used to supply a model offset for use in fitting. Note
that this offset will always be completely ignored when predicting, unlike an offset
included in \code{formula} (this used to conform to the behaviour of
\code{lm} and \code{glm}).}
\item{subset}{ an optional vector specifying a subset of observations to be
used in the fitting process.}
\item{start}{Initial values for the model coefficients.}
\item{etastart}{Initial values for the linear predictor.}
\item{mustart}{Initial values for the expected response.}
\item{drop.intercept}{Set to \code{TRUE} to force the model to really not have a constant in the parametric model part,
even with factor variables present. Can be vector when \code{formula} is a list.}
\item{drop.unused.levels}{by default unused levels are dropped from factors before fitting. For some smooths
involving factor variables you might want to turn this off. Only do so if you know what you are doing.}
\item{cluster}{\code{bam} can compute the computationally dominant QR decomposition in parallel using \link[parallel:clusterApply]{parLapply}
from the \code{parallel} package, if it is supplied with a cluster on which to do this (a cluster here can be some cores of a
single machine). See details and example code.
}
\item{nthreads}{Number of threads to use for non-cluster computation (e.g. combining results from cluster nodes).
If \code{NA} set to \code{max(1,length(cluster))}. See details.}
\item{gc.level}{to keep the memory footprint down, it can help to call the garbage collector often, but this takes
a substantial amount of time. Setting this to zero means that garbage collection only happens when R decides it should. Setting to 2 gives frequent garbage collection. 1 is in between. Not as much of a problem as it used to be.
}
\item{use.chol}{By default \code{bam} uses a very stable QR update approach to obtaining the QR decomposition
of the model matrix. For well conditioned models an alternative accumulates the crossproduct of the model matrix
and then finds its Choleski decomposition, at the end. This is somewhat more efficient, computationally.}
\item{samfrac}{For very large sample size Generalized additive models the number of iterations needed for the model fit can
be reduced by first fitting a model to a random sample of the data, and using the results to supply starting values. This initial fit is run with sloppy convergence tolerances, so is typically very low cost. \code{samfrac} is the sampling fraction to use. 0.1 is often reasonable. }
\item{coef}{initial values for model coefficients}
\item{discrete}{experimental option for setting up models for use with discrete methods employed in \code{\link[mgcv]{bam}}. Do not modify.}
\item{select}{ If this is \code{TRUE} then \code{gam} can add an extra penalty to each term so
that it can be penalized to zero. This means that the smoothing parameter estimation that is
part of fitting can completely remove terms from the model. If the corresponding
smoothing parameter is estimated as zero then the extra penalty has no effect. Use \code{gamma} to increase level of penalization.
}
\item{fit}{If this argument is \code{TRUE} then \code{gam} sets up the model and fits it, but if it is
\code{FALSE} then the model is set up and an object \code{G} containing what
would be required to fit is returned. See argument \code{G}.}
\item{object}{A Generalized Additive Model (GAM) specification.}
\item{parameters}{A 1-row tibble or named list with \emph{main}
parameters to update. If the individual arguments are used,
these will supersede the values in \code{parameters}. Also, using
engine arguments in this object will result in an error.}
\item{fresh}{A logical for whether the arguments should be
modified in-place of or replaced wholesale.}
\item{...}{Other arguments passed to internal functions.}
\item{formula}{ A GAM formula, or a list of formulae (see \code{\link[mgcv]{formula.gam}} and also \code{\link[mgcv]{gam.models}}).
These are exactly like the formula for a GLM except that smooth terms, \code{\link[mgcv]{s}}, \code{\link[mgcv]{te}}, \code{\link[mgcv]{ti}}
and \code{\link[mgcv]{t2}}, can be added to the right hand side to specify that the linear predictor depends on smooth functions of predictors (or linear functionals of these).
}
\item{data}{ A data frame or list containing the model response variable and
covariates required by the formula. By default the variables are taken
from \code{environment(formula)}: typically the environment from
which \code{gam} is called.}
}
\value{
An updated model specification.
}
\description{
\code{additive()} is a way to generate a \emph{specification} of a model
before fitting and allows the model to be created using
\pkg{mgcv} package in \pkg{R}.
}
\details{
The arguments are converted to their specific names at the
time that the model is fit. Other options and argument can be
set using \code{set_engine()}. If left to their defaults
here (\code{NULL}), the values are taken from the underlying model
functions. If parameters need to be modified, \code{update()} can be
used in lieu of recreating the object from scratch.
The data given to the function are not saved and are only used
to determine the \emph{mode} of the model. For \code{additive()}, the
possible modes are "regression" and "classification".
The model can be created by the \code{fit()} function using the
following \emph{engines}:
\itemize{
\item \pkg{mgcv}: \code{"mgcv"}
}
}
\section{Engine Details}{
Engines may have pre-set default arguments when executing the model fit
call. For this type of model, the template of the fit calls are:
\if{html}{\out{<div class="sourceCode r">}}\preformatted{additive() |>
set_engine("mgcv") |>
translate()
}\if{html}{\out{</div>}}
\if{html}{\out{<div class="sourceCode">}}\preformatted{## Generalized Additive Model (GAM) Specification (regression)
##
## Computational engine: mgcv
##
## Model fit template:
## additive::additive_fit(formula = missing_arg(), data = missing_arg(),
## weights = missing_arg())
}\if{html}{\out{</div>}}
}
\examples{
additive()
show_model_info("additive")
additive(mode = "classification")
additive(mode = "regression")
set.seed(2020)
dat <- gamSim(1, n = 400, dist = "normal", scale = 2)
additive_mod <-
additive() |>
set_engine("mgcv") |>
fit(
y ~ s(x0) + s(x1) + s(x2) + s(x3),
data = dat
)
summary(additive_mod$fit)
model <- additive(select = FALSE)
model
update(model, select = TRUE)
update(model, select = TRUE, fresh = TRUE)
}
\seealso{
\code{\link[mgcv]{mgcv-package}},
\code{\link[mgcv]{gam}},
\code{\link[mgcv]{bam}},
\code{\link[mgcv]{gamObject}},
\code{\link[mgcv]{gam.models}},
\code{\link[mgcv]{smooth.terms}},
\code{\link[mgcv]{predict.gam}},
\code{\link[mgcv]{plot.gam}},
\code{\link[mgcv]{summary.gam}},
\code{\link[mgcv]{gam.side}},
\code{\link[mgcv]{gam.selection}},
\code{\link[mgcv]{gam.control}},
\code{\link[mgcv]{gam.check}},
\code{\link[mgcv]{vis.gam}},
\code{\link[mgcv]{family.mgcv}},
\code{\link[mgcv]{formula.gam}},
\code{\link[stats]{family}},
\code{\link[stats]{formula}},
\code{\link[stats]{update.formula}}.
}
|
2ddb638e0366f776c3de520801bc9908924decee
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/r/generated/R/ComDayCqDamCoreImplServletCompanionServletProperties.r
|
528c9b924d1c09210db20c92cb2fe8725d4a5f14
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
R
| false
| false
| 4,295
|
r
|
ComDayCqDamCoreImplServletCompanionServletProperties.r
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComDayCqDamCoreImplServletCompanionServletProperties Class
#'
#' @field More Info
#' @field /mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
# R6 model class for the AEM "Companion Servlet" OSGI configuration.
# Holds two ConfigNodePropertyString properties and converts them to/from JSON.
#
# Fix: the generated `fromJSON`/`fromJSONString` methods used syntactically
# invalid identifiers containing spaces and slashes ('More InfoObject',
# '/mnt/...${path}Object', and unbackticked `$More Info` accesses), so the
# file could not even be parsed. All such names are now valid R (renamed
# locals, backticked field accesses). Behavior is otherwise unchanged.
ComDayCqDamCoreImplServletCompanionServletProperties <- R6::R6Class(
  'ComDayCqDamCoreImplServletCompanionServletProperties',
  public = list(
    # Both fields hold ConfigNodePropertyString R6 objects (or NULL).
    `More Info` = NULL,
    `/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}` = NULL,
    # Construct the properties object; either argument may be omitted.
    # Each supplied value must be an R6 object (a ConfigNodePropertyString).
    initialize = function(`More Info`, `/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}`){
      if (!missing(`More Info`)) {
        stopifnot(R6::is.R6(`More Info`))
        self$`More Info` <- `More Info`
      }
      if (!missing(`/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}`)) {
        stopifnot(R6::is.R6(`/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}`))
        self$`/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}` <- `/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}`
      }
    },
    # Serialize the non-NULL properties into a named list of JSON fragments.
    toJSON = function() {
      ComDayCqDamCoreImplServletCompanionServletPropertiesObject <- list()
      if (!is.null(self$`More Info`)) {
        ComDayCqDamCoreImplServletCompanionServletPropertiesObject[['More Info']] <- self$`More Info`$toJSON()
      }
      if (!is.null(self$`/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}`)) {
        ComDayCqDamCoreImplServletCompanionServletPropertiesObject[['/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}']] <- self$`/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}`$toJSON()
      }
      ComDayCqDamCoreImplServletCompanionServletPropertiesObject
    },
    # Populate this object from a JSON string, building a nested
    # ConfigNodePropertyString object for each field present in the input.
    fromJSON = function(ComDayCqDamCoreImplServletCompanionServletPropertiesJson) {
      parsed <- jsonlite::fromJSON(ComDayCqDamCoreImplServletCompanionServletPropertiesJson)
      if (!is.null(parsed$`More Info`)) {
        moreInfoObject <- ConfigNodePropertyString$new()
        moreInfoObject$fromJSON(jsonlite::toJSON(parsed$`More Info`, auto_unbox = TRUE))
        self$`More Info` <- moreInfoObject
      }
      if (!is.null(parsed$`/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}`)) {
        pathPropertyObject <- ConfigNodePropertyString$new()
        pathPropertyObject$fromJSON(jsonlite::toJSON(parsed$`/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}`, auto_unbox = TRUE))
        self$`/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}` <- pathPropertyObject
      }
    },
    # Render the object as a JSON string via sprintf templating.
    # Assumes both properties are set (matches the generated original).
    toJSONString = function() {
      sprintf(
        '{
"More Info": %s,
"/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}": %s
}',
        self$`More Info`$toJSON(),
        self$`/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}`$toJSON()
      )
    },
    # Alternate deserializer: assigns the result of fromJSON on a scratch
    # ConfigNodePropertyString for each field (flat generated variant).
    fromJSONString = function(ComDayCqDamCoreImplServletCompanionServletPropertiesJson) {
      parsed <- jsonlite::fromJSON(ComDayCqDamCoreImplServletCompanionServletPropertiesJson)
      ConfigNodePropertyStringObject <- ConfigNodePropertyString$new()
      self$`More Info` <- ConfigNodePropertyStringObject$fromJSON(jsonlite::toJSON(parsed$`More Info`, auto_unbox = TRUE))
      ConfigNodePropertyStringObject <- ConfigNodePropertyString$new()
      self$`/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}` <- ConfigNodePropertyStringObject$fromJSON(jsonlite::toJSON(parsed$`/mnt/overlay/dam/gui/content/assets/moreinfo.html/${path}`, auto_unbox = TRUE))
    }
  )
)
|
6d6e0d2bbdd3cebc7a84a6fac711007c90bd8a0a
|
39bbe7901efe2b830eb1d4aa867ade4cd764364b
|
/testData/rename/renameLocalVariableClosureUsage.R
|
f83c2cbe570795f565bb7e3d7bc6321cf44ae483
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
JetBrains/Rplugin
|
86de0c5e38c191cf26b29ba0dc7b32a2f92ff0f5
|
ab5b0c146e11d441386dd0344f0761d5e69d1d5e
|
refs/heads/master
| 2023-09-03T23:33:54.945503
| 2023-09-01T14:23:29
| 2023-09-01T16:49:57
| 214,212,060
| 68
| 18
|
Apache-2.0
| 2023-04-07T08:36:18
| 2019-10-10T14:59:42
|
Kotlin
|
UTF-8
|
R
| false
| false
| 105
|
r
|
renameLocalVariableClosureUsage.R
|
function(x, y, z) {
ttt <- 331321123
print(ttt)
function(a, b, c) {
print(t<caret>tt + 1)
}
}
|
dbc7ab961b589b23ff4760d63a2e57753c5041b1
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Miller-Marin/fpu/fpu-10Xh-correct02-uniform-depth-22/fpu-10Xh-correct02-uniform-depth-22.R
|
d9db4fad810329a1143d89191c9e855b332848cd
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91
|
r
|
fpu-10Xh-correct02-uniform-depth-22.R
|
0e7529f386961c8f22ee17db8aed8daf fpu-10Xh-correct02-uniform-depth-22.qdimacs 593079 1584102
|
14e8d08aeedeab9caae20fb4c7f3ad790504c8a8
|
e53f8d45dac571308c04cbd2f06e04c6ce332696
|
/code/processPosteriors.R
|
4e447638b528dba2a08d1a9cbcf911449a64810d
|
[] |
no_license
|
jeanlucj/BO_Budgets
|
beb6963a409be387c4d11fc469ee2d6daf992dce
|
7396b22b55c4ccae5df4abffd8acd1eceda7978c
|
refs/heads/main
| 2023-08-06T06:40:15.752144
| 2021-10-07T18:44:40
| 2021-10-07T18:44:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 415
|
r
|
processPosteriors.R
|
# Process the outputs from getPosteriors.py
# Bundle the posterior summaries produced upstream into a single list.
posteriors <- list(
  bestBudget   = bestBudget,
  maxPredGain  = maxPredGain,
  postVarAtMax = postVarAtMax
)
# Prediction-curve objects only exist on some runs; append them when present.
if (exists("predGains")) {
  posteriors[["predBudgets"]] <- predBudgets
  posteriors[["predGains"]]   <- predGains
  posteriors[["predVars"]]    <- predVars
}
# Persist everything for this initialization, e.g. "posteriors3.rds".
saveRDS(posteriors, file=paste0("posteriors", init_num, ".rds"))
|
6d2ec69427314fc3d2dcbd4b75983a2b1be9d620
|
0149d1e78e37aa4d3cd54e5dffb5e0c0d04ac398
|
/man/log_pbernoulli.Rd
|
fcdd9d5fc78729a4f79886187a14acfe87e1e996
|
[] |
no_license
|
CreRecombinase/FGEM
|
084b603d2733f7473d41fe9a324450f85a8c11d5
|
da57a629f9a7483feb2106807a3df3d944aa3c68
|
refs/heads/master
| 2021-05-01T04:50:43.914489
| 2020-07-14T18:53:16
| 2020-07-14T18:53:16
| 37,341,934
| 1
| 0
| null | 2017-05-25T22:15:20
| 2015-06-12T20:01:55
|
HTML
|
UTF-8
|
R
| false
| true
| 417
|
rd
|
log_pbernoulli.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{log_pbernoulli}
\alias{log_pbernoulli}
\title{calculate log loss for log-valued prediction}
\usage{
log_pbernoulli(lp, x)
}
\arguments{
\item{lp}{(natural) log-scale probability values}
\item{x}{integer (or logical) of length equal to lp indicating the observed Bernoulli outcomes (1/TRUE = success, 0/FALSE = failure)}
}
\value{
}
\description{
calculate log loss for log-valued prediction
}
|
c27d7305806969783c9c20e5f9c4e53c898ed9c6
|
7dc24ce2d943197c2d8d20e9cb25d32f7e4399be
|
/man/control_fit.Rd
|
ec26ad5a6aa196a58a8b36b264333b213caae14a
|
[] |
no_license
|
biobakery/SparseDOSSA2
|
26f9ceb91a2965b119d783b07b3cd02ee75d6027
|
e013d9e3c0fd79e1c343340775f33f14f22b8c5e
|
refs/heads/master
| 2023-01-24T09:26:23.553053
| 2023-01-19T16:45:46
| 2023-01-19T16:45:46
| 219,829,612
| 9
| 2
| null | 2022-10-21T17:36:22
| 2019-11-05T19:05:37
|
R
|
UTF-8
|
R
| false
| true
| 1,140
|
rd
|
control_fit.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SparseDOSSA2_fit.R
\name{control_fit}
\alias{control_fit}
\title{Control options for fit_SparseDOSSA2 and fitCV_SparseDOSSA2}
\usage{
control_fit(
maxit = 100,
rel_tol = 0.01,
abs_tol = 0.01,
control_numint = list(),
verbose = FALSE,
debug_dir = NULL
)
}
\arguments{
\item{maxit}{maximum number of EM iterations}
\item{rel_tol}{relative change threshold in the log likelihood
for algorithm convergence}
\item{abs_tol}{absolute change threshold in the log likelihood
for algorithm convergence}
\item{control_numint}{a named list of control parameters for the
numerical integrations during the E step. See help page for
\code{control_numint}}
\item{verbose}{whether or not detailed running messages should be provided}
\item{debug_dir}{directory for intermediate output, such as the
EM expectations and parameter values and during each step of the
EM algorithm. Default to \code{NULL} in which case no such output
will be generated}
}
\value{
a list of the same names
}
\description{
Control options for fit_SparseDOSSA2 and fitCV_SparseDOSSA2
}
|
4c467ff325b0ba263128e94bf06d210bf9ca889e
|
dfd802d011848fa26ab0a5f6121de54e835c3a86
|
/scripts/data_processing/parse_align_PDBs.R
|
f04bb7aa3605fdff73ba4928acf252308e43e818
|
[
"MIT"
] |
permissive
|
cjmathy/Gsp1_DMS_Manuscript
|
c8ef44d150c574083f0ae9c4a35bebe3b38493b6
|
00fb4815d7f8e778c8fe46a62617f5a9c6a048ce
|
refs/heads/main
| 2023-04-12T05:55:28.139056
| 2022-11-07T21:09:56
| 2022-11-07T21:09:56
| 460,596,102
| 1
| 0
| null | 2022-11-04T23:07:51
| 2022-02-17T20:25:11
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 3,404
|
r
|
parse_align_PDBs.R
|
#!/usr/bin/env Rscript
# run from command line using 'Rscript parse_align_PDBs.R' from
# the scripts directory of the project
## Author: Christopher Mathy
## Date: 2020-02-04
## Email: cjmathy@gmail.com
## Email: chris.mathy@ucsf.edu

## Description:
## This script preprocesses structures of Ran GTPase downloaded
## using the script 'download_data.sh'. It parses and aligns
## PDBs using the packages bio3d

# load modules
library(tidyverse)
library(bio3d)

# read structural info file (one row per PDB: id, Ran chain, partner chain/name)
df <- read_delim('data/pdb_structures_info.txt', delim = '\t', col_types = cols())

### ---------------------------------------------------------------------------
### Clean raw PDBs to have complexes with Ran as chain A and partner as chain B
### Also write out PDBs of monomeric Ran

# list of raw PDBs downloaded from the web (using download_data.sh)
files <- list.files(path = 'data/pdbs_raw', full.names = T)

# set output directories (created if missing; existing dirs are left alone)
cmplxdir <- 'data/pdbs_complexes'
randir <- 'data/pdbs_ran'
dir.create(cmplxdir, showWarnings = FALSE)
dir.create(randir, showWarnings = FALSE)

# iterate through each PDB using the mapping function purrr::pwalk
# pmap and pwalk iterate through the rows of a dataframe and perform a function
# pwalk just avoids returning NULL, since we aren't keeping the returned values
# of the anonymous function
df %>%
  dplyr::select(pdb_id, ran_chain, partner_chain, partner_name) %>%
  purrr::pwalk(.f = function (pdb_id, ran_chain, partner_chain, partner_name) {

    # read in file
    # NOTE(review): grep() treats pdb_id as a regex; PDB ids are alphanumeric,
    # so this is safe, but a 4-char id could in principle match more than once.
    file <- grep(pdb_id, files, value=T)
    print(paste0('Processing ', file))
    raw_pdb <- read.pdb(file)

    # split pdb (using bio3d functions): pull out Ran and its binding partner,
    # then rebuild a two-chain complex with fresh chain ids (Ran = A, partner = B)
    ran <- trim.pdb(raw_pdb, chain=ran_chain)
    partner <- trim.pdb(raw_pdb, chain=partner_chain)
    complex <- cat.pdb(ran, partner, rechain=T) # warnings about chain breaks OK to ignore

    # write ran pdb (monomer only)
    write.pdb(pdb=ran, file = paste0(randir, '/', pdb_id, '.pdb'))

    # write complex pdb; include the partner name in the filename when known
    outfile <- ifelse(!is.na(partner_name),
                      paste0(cmplxdir, '/', pdb_id, '_', partner_name, '.pdb'),
                      paste0(cmplxdir, '/', pdb_id, '.pdb'))
    write.pdb(pdb=complex, file = outfile)
  }
  )
# Superpose a folder of PDB structures onto their conserved core.
#
# fdir: directory of PDB files to align. The folder is modified in place:
#       unaligned originals are deleted and the aligned copies take over
#       the original filenames.
#
# Steps: multiple sequence alignment (pdbaln / MUSCLE), identification of
# the rigid "core" residues (core.find), then least-squares superposition
# onto those core residues (pdbfit), which writes '*.pdb_flsq' files.
structure_align <- function(fdir) {
  files <- list.files(fdir, full.names = TRUE)         # read all the pdb files in one list object
  pdbs <- pdbaln(files, outfile = 'data/Ran_aln.fa')   # multiple sequence alignment
  core <- core.find(pdbs)                              # find the conserved core residues (which don't move much)
  core.inds <- print(core, vol = 0.5)                  # print() returns the indices of residues to be aligned
  pdbfit(pdbs, core.inds, outpath = fdir)              # structural superposition; writes '*.pdb_flsq' files
  # delete unaligned files and rename aligned files
  unaligned_files <- grep(list.files(path = fdir, full.names = TRUE), pattern = 'pdb_flsq', inv = TRUE, value = TRUE)
  unlink(unaligned_files)
  aligned_files <- list.files(path = fdir, full.names = TRUE)
  # Fix: gsub() is already vectorised over its input, so the original
  # sapply() wrapper was redundant; fixed = TRUE treats '.pdb_flsq' as a
  # literal suffix instead of letting the leading '.' match any character.
  new_filenames <- gsub('.pdb_flsq', '', aligned_files, fixed = TRUE)
  file.rename(from = aligned_files, to = new_filenames)
}
# align and write both the complexes and the monomeric Ran
# (each call rewrites the PDBs in its folder with superposed coordinates)
structure_align(cmplxdir)
structure_align(randir)
|
ca6e07b4d80e91e176d0010c61c6d97f6cb20440
|
eed03e381541bd6c2ede49db5a673129baf61c79
|
/plot1.R
|
fb346ef7d6d2533537a15b3af48c2df322ba6a31
|
[] |
no_license
|
efrainplaza/ExData_Plotting1
|
d2b87544ca7d6c0d0ebd5925054afef7a8f22dac
|
365228173e91dd0318211092418e32871ab3807e
|
refs/heads/master
| 2020-03-21T14:25:11.487089
| 2018-06-30T23:41:27
| 2018-06-30T23:41:27
| 138,656,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 903
|
r
|
plot1.R
|
## First plot for Exploratory Data Analysis Week1 Project
# Read text file
library(dplyr)
library(lubridate)

# Load data for test sets
# NOTE(review): hard-coded absolute path -- this script only runs as-is on
# the author's machine; the data file must live in this directory.
setwd("C:/Data/R/Exploratory Data Analysis Week1")
headplot <- read.csv("household_power_consumption.txt", sep = ";", dec = ".")
##headplot$Date <- dmy(as.character(headplot$Date))
headplot$Date <- as.Date(headplot$Date, format = "%d/%m/%Y") ## %H:%M:%S

# Keep only the two target days, then coerce the power reading to numeric
# (the raw column reads in as factor/character because of missing-value markers).
dateplot <- subset(headplot, Date == "2007-02-01" | Date == "2007-02-02")
dateplot$Global_active_power <- as.numeric(as.character(dateplot$Global_active_power))
##summary(dateplot$Global_active_power)

##Create final Histogram and save as PNG file with a width of 480 pixels and a height of 480 pixels
png(filename = "plot1.png", width = 480, height = 480, units = "px")
# Fix: x-axis label previously misspelled the unit as "kilowats".
hist(dateplot$Global_active_power, main = "Global Active Power", col = "red", xlab = "Global Active Power (kilowatts)", breaks = 13)
dev.off()
|
da7865d5841eccf7961a1ea703d41d1adefe1f44
|
74b3ee9d3b2ef1edf10c766bc922fbb7c0c3b76d
|
/LT_ND_Grahl_003.R
|
0dfc252e60f06ca10b574635e4b70f2b1c2aa7d1
|
[] |
no_license
|
vanderbi/fcelter
|
c635ea42c2c9886b54042f85ab5d37f54ceed316
|
0bebc6e48bfb04fd07c197e8da11fd0debabbe2f
|
refs/heads/master
| 2022-04-27T00:03:27.059108
| 2020-05-01T00:10:53
| 2020-05-01T00:10:53
| 260,312,052
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,858
|
r
|
LT_ND_Grahl_003.R
|
# LT_ND_Grahl_003: download the published FCE-LTER nutrient table, append the
# newly formatted observations, round columns to reporting precision, and
# write out the combined file.
# NOTE(review): hard-coded absolute path -- runs as-is only on this machine.
setwd("C://Users/kvand/OneDrive/Documents/github")
library(tidyverse)
getwd()

#install package tidyverse if not already installed
if(!require(tidyverse)){ install.packages("tidyverse") }
library("tidyverse")

# EDI/PASTA data URL (https downgraded to http as in the original template)
infile1 <- trimws("https://pasta.lternet.edu/package/data/eml/knb-lter-fce/1069/11/6376dd06d6548631ec826f570cce8d42")
infile1 <-sub("^https","http",infile1)

# This creates a tibble named: dt1
dt1 <-read_delim(infile1
                 ,delim=","
                 ,skip=1,
                 col_names=c(
                   "SITENAME",
                   "Plot",
                   "Date",
                   "Salinity",
                   "NandN",
                   "NO2",
                   "NH4",
                   "SRP",
                   "DOC",
                   "NO3" ),
                 col_types=list(
                   col_character(),
                   col_number(),
                   col_date("%Y-%m-%d"),
                   col_number() ,
                   col_number() ,
                   col_number() ,
                   col_number() ,
                   col_number() ,
                   col_number() ,
                   col_number() ),
                 na=c( " ",".","NA") )

# Read the newly formatted observations and coerce the date column to Date.
library(readxl)
dtnew <- read_xlsx("./fcelter/LT_ND_Grahl_003/data/LT_ND_Grahl_003_formatted.xlsx", col_names = TRUE, col_types = NULL)
getwd()
dtnew
dtnew$Date <- as.Date(dtnew$Date,format = "%Y-%m-%d")
dtnew

# Stack the published and new observations.
combined <-bind_rows(dt1, dtnew)
combined

# Format each column to its reporting precision. Note sprintf() returns
# character and renders missing values as the literal string "NA".
combined$Salinity <- sprintf("%.1f",combined$Salinity)
combined$NandN <- sprintf("%.2f", combined$NandN)
combined$NO2 <- sprintf("%.2f", combined$NO2)
combined$NH4 <- sprintf("%.2f", combined$NH4)
combined$SRP <- sprintf("%.2f", combined$SRP)
combined$DOC <- sprintf("%.3f", combined$DOC)
# Fix: the original line read `combined$SRP <- sprintf("%.2f", combined$NO3)`,
# which silently overwrote the SRP column with NO3 values and left NO3
# unformatted. Format NO3 into its own column instead.
combined$NO3 <- sprintf("%.2f", combined$NO3)

write.csv(combined, "./fcelter/LT_ND_Grahl_003/data/LT_ND_Grahl_003.txt", quote = FALSE, row.names = FALSE)

# Convert Missing Values to NA for individual vectors
# (note: applied to dt1 only, and after 'combined' has already been written)
dt1$Salinity <- ifelse((trimws(as.character(dt1$Salinity))==trimws("-9999")),NA,dt1$Salinity)
dt1$NandN <- ifelse((trimws(as.character(dt1$NandN))==trimws("-9999.00")),NA,dt1$NandN)
dt1$NO2 <- ifelse((trimws(as.character(dt1$NO2))==trimws("-9999.00")),NA,dt1$NO2)
dt1$NH4 <- ifelse((trimws(as.character(dt1$NH4))==trimws("-9999.00")),NA,dt1$NH4)
dt1$SRP <- ifelse((trimws(as.character(dt1$SRP))==trimws("-9999.00")),NA,dt1$SRP)
dt1$DOC <- ifelse((trimws(as.character(dt1$DOC))==trimws("-9999.000")),NA,dt1$DOC)
dt1$NO3 <- ifelse((trimws(as.character(dt1$NO3))==trimws("-9999.00")),NA,dt1$NO3)

# Observed issues when reading the data. An empty list is good!
problems(dt1)

# Here is the structure of the input data tibble:
glimpse(dt1)

# And some statistical summaries of the data
summary(dt1)

# Get more details on character variables
|
a3cd6e9a89b85933ea20cedf5db9e415a9939e3c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rbmn/examples/estimate8nbn.Rd.R
|
d5af7638cb666e2e9ed93a6256059daf20ddd0a4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 212
|
r
|
estimate8nbn.Rd.R
|
library(rbmn)
### Name: estimate8nbn
### Title: estimating the /nbn/ parameters
### Aliases: estimate8nbn

### ** Examples

# Load the example data set shipped with rbmn.
data(boco);
# Print the reference network's current parameters...
print8nbn(rbmn0nbn.05);
# ...then the same network with parameters re-estimated from 'boco'.
print8nbn(estimate8nbn(rbmn0nbn.05, boco));
|
99031140486f958f40fc69e3e5dd8a308be27d27
|
7f12f910638b9949a9d80bf8e307a6d1fafb4c22
|
/TidyTuesdays/W26/W26.R
|
881b796233a0d13bcc1edd8e7f8295ae221af6cf
|
[] |
no_license
|
MJaffee/R-Projects
|
80327f29f691b76b3c899e8abef45d29b341de64
|
8e8d5eba699d7f044805af551b76d4d5adb92fd3
|
refs/heads/main
| 2023-06-03T16:16:36.695966
| 2021-06-23T00:25:30
| 2021-06-23T00:25:30
| 369,615,777
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,910
|
r
|
W26.R
|
# TidyTuesday 2021 week 26: park access in the 10 most populous US cities.
# Builds a faceted point/bump/area chart of the share of residents living
# within a 10-minute walk of a park (2012-2020) and saves it as W26.png.
library(tidytuesdayR)
library(tidyverse)
library(patchwork)
library(ggbump)

#load data ----
tt_data <- tt_load(2021, week=26)
data <- tt_data$parks

#organize data & parse to convert percentages + dollars(script from @kierisi) ----
# parse_number() strips the '%' / '$' symbols the raw columns carry.
parks <- data %>%
  mutate(park_pct_city_data = parse_number(park_pct_city_data),
         pct_near_park_data = parse_number(pct_near_park_data),
         spend_per_resident_data = parse_number(spend_per_resident_data)) %>%
  mutate(across(where(is.character), factor)) %>%
  select(-city_dup)

glimpse(parks)

#create vector of Top 10 most populous cities ----
top_pop <- c("New York", "Los Angeles", "Chicago", "Houston",
             "Phoenix", "Philadelphia", "San Antonio", "San Diego",
             "Dallas", "San Jose")

#create object of Top 10 most populous cities ----
top_pop_parks <- parks %>%
  filter(city %in% top_pop)

#plot ----
# One facet per city; the same manual palette is used for both the point
# colour and the area fill so each city's panel is self-consistent.
p1 <- top_pop_parks %>%
  ggplot(aes(year, pct_near_park_data, color = city, fill = city)) +
  geom_point(size = 4, show.legend = FALSE) +
  geom_bump(size = 1, show.legend = FALSE) +
  scale_color_manual(values = c('#332288', '#88CCEE', '#44AA99',
                                '#117733', '#999933', '#DDCC77',
                                '#CC6677', '#882255', '#AA4499',
                                '#DDDDDD')) +
  geom_area(size = 1, alpha = 0.1, show.legend = FALSE, color = NA) +
  scale_fill_manual(values = c('#332288', '#88CCEE', '#44AA99',
                               '#117733', '#999933', '#DDCC77',
                               '#CC6677', '#882255', '#AA4499',
                               '#DDDDDD')) +
  scale_y_continuous(limits = c(0, 100)) +
  scale_x_continuous(breaks = c(2014, 2018)) +
  facet_grid(~city) +
  # Dark theme: near-black background, light grey dotted grid and text.
  theme(
    panel.background = element_rect(fill = "#232229", colour = "#232229"),
    plot.background = element_rect(fill = "#232229", colour = "#232229"),
    strip.background = element_rect(fill = "#232229", colour = "#232229"),
    panel.grid.minor = element_line(colour = "#e5e5e5",
                                    size = .10, linetype = "dotted"),
    panel.grid.major = element_line(colour = "#e5e5e5",
                                    size = .10, linetype = "dotted"),
    axis.title.x = element_text(family = "Segoe UI Light",
                                size = 15, colour="#e5e5e5"),
    axis.title.y = element_text(family = "Segoe UI Light",
                                size = 15, colour="#e5e5e5"),
    axis.text.x = element_text(family = "Segoe UI Light",
                               size = 12, colour="#e5e5e5"),
    axis.text.y = element_text(family = "Segoe UI Light",
                               size = 12, colour="#e5e5e5"),
    strip.text.x = element_text(face = "italic", family = "Segoe UI Light",
                                size = 15, colour="#e5e5e5"),
    axis.ticks = element_blank(),
  ) +
  labs(
    x = "Year",
    y = "% of Residents"
  ) +
  plot_annotation(
    title = "Percentage of Residents Within a 10 Minute Walk of a Park (2012-2020)",
    subtitle = "Top 10 Most Populous US Cities",
    caption = "#TidyTuesday Week26 | Data: The Trust for Public Land | Graphic: @marcjaffee_"
  ) &
  # `&` applies this theme to every patchwork panel, including the annotation.
  theme(
    panel.background = element_rect(fill = "#232229", colour="#232229"),
    plot.background = element_rect(fill = "#232229", colour="#232229"),
    strip.background = element_rect(fill = "#232229", colour="#232229"),
    plot.title = element_text(size=20, face="bold", hjust = 0,
                              color = "#e5e5e5", family="Segoe UI Light"),
    plot.subtitle = element_text(size=15, hjust = 0, face="italic",
                                 color = "#e5e5e5", family="Segoe UI Light"),
    plot.caption = element_text(size=10, face="italic", hjust = 0,
                                color = "#e5e5e5", family="Segoe UI Light"))

p1

#save plot ----
ggsave("W26.png", last_plot(), device = "png")
|
e17f7a0f60abcee2e6be21eb4915cd9dec8fd7d3
|
17b1d0220c3315b421554429ff672e927cdd4706
|
/plot3.R
|
dfeb77a4a215d2c8ae03dfe4e6e95765639ff91e
|
[] |
no_license
|
JLeonard20/John-Hopkins-Exploratory-Data-Analysis
|
5f5a69fb0b932a8d893defc6e955fd0f708cfd58
|
d4da48a1f6b0973e2386dc8f85ccd029e177d991
|
refs/heads/main
| 2023-01-12T07:46:17.992495
| 2020-11-16T15:45:00
| 2020-11-16T15:45:00
| 313,349,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 984
|
r
|
plot3.R
|
# John Hopkins Exploratory Data Analysis plot 3
# Of the four types of sources indicated by the type
# (point, nonpoint, onroad, nonroad) variable, which of these four sources have
# seen decreases in emissions from 1999–2008 for Baltimore City?
# Which have seen increases in emissions from 1999–2008? Use the ggplot2
# plotting system to make a plot answer this question.

NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
names(NEI)

# Baltimore City records only (fips 24510)
NEIbal <- NEI[NEI$fips == "24510", ]

library(ggplot2)
# Emissions per year, faceted by source type.
ggp <- ggplot(NEIbal,aes(factor(year),Emissions,fill=type)) +
  geom_bar(stat="identity") +
  theme_bw() + guides(fill=FALSE)+
  facet_grid(.~type,scales = "free",space="free") +
  labs(x="year", y=expression("Total PM"[2.5]*" Emission (Tons)")) +
  labs(title=expression("PM"[2.5]*" Emissions, Baltimore City 1999-2008 by Source Type"))

# Fix: the plot object was assigned but never drawn, so dev.copy() captured
# whatever happened to be on the active device (or failed with no device).
# Render the plot before copying it to the PNG. Also removed an unused
# `aggregate(Emissions ~ year, ...)` intermediate that nothing consumed.
print(ggp)

dev.copy(png, file = "plot3.png")
dev.off()
|
758637e6e1268f4991319add7e9749cf43a13780
|
2a4fd67af01c0370c1cefcd927dfc057791effa9
|
/R/rotate.matrix.2d.R
|
84c1a0a466e564ad54f734862e42e589a1cd9463
|
[] |
no_license
|
asgr/LAMBDAR
|
a630d6bde79f77f797f46a36c186d6c5a0035cbe
|
33a407e3c16e932512f6892c1d7cbf60ab29a3a2
|
refs/heads/master
| 2020-04-05T22:44:11.400466
| 2016-11-10T08:00:28
| 2016-11-10T08:00:28
| 73,358,952
| 1
| 0
| null | 2016-11-10T07:52:10
| 2016-11-10T07:52:09
| null |
UTF-8
|
R
| false
| false
| 341
|
r
|
rotate.matrix.2d.R
|
# Rotate 2-D points by `theta` degrees about the origin.
#
# x, y:  numeric vectors of point coordinates (equal length).
# theta: rotation angle in degrees.
# Returns an n x 2 matrix whose columns are the rotated x and y coordinates.
#
# Fix: the originals ended with `return = <value>`, which creates a local
# variable literally named `return` and returns its value invisibly; the
# result is now returned directly (and visibly), per standard R idiom.
rotate.data.2d <- function (x, y, theta) {
  rotated <- make.rotation.matrix.2d(theta) %*% rbind(x, y)
  cbind(rotated[1, ], rotated[2, ])
}

# Build the 2 x 2 rotation matrix for an angle given in degrees.
make.rotation.matrix.2d <- function (theta) {
  theta <- theta * pi/180          # degrees -> radians
  sintheta <- sin(theta)
  costheta <- cos(theta)
  matrix(c(costheta, -sintheta, sintheta, costheta), ncol = 2, byrow = TRUE)
}
|
39a8049a166e443c4273a89154859efa86d3112e
|
b1225443b98e4359ec5772248a9bae371804230d
|
/plot2.R
|
00fb81e58f453561493d725d4bf511954abb2ca2
|
[] |
no_license
|
pathto/ExploratoryDataProj2
|
c27bb8b71070a204841977820546f7148c91bd98
|
affad4f2cae276178f74a1afbab91c46a31dc0cd
|
refs/heads/master
| 2021-01-10T05:22:32.833062
| 2016-01-02T05:37:51
| 2016-01-02T05:37:51
| 48,899,891
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 547
|
r
|
plot2.R
|
## total PM2.5 emission from all sources in Baltimore
##for each of the years 1999, 2002, 2005, and 2008
library(dplyr)

## read data from files
NEI <- readRDS("summarySCC_PM25.rds")
## SCC <- readRDS("Source_Classification_Code.rds")

## filter the data of Baltimore
data_bal <- filter(NEI, fips == '24510')

# Fix: the column is named 'Emissions'; the original `x$Emission` only worked
# through R's partial matching on data-frame `$`, which is fragile and would
# silently return NULL if another 'Emission*' column were ever added.
emi_bal <- sapply(split(data_bal, data_bal$year), function(x) sum(x$Emissions))

png('plot2.png')
# Fix: names(emi_bal) is a character vector ("1999", ...); plot() needs a
# numeric x axis, so coerce the year labels explicitly.
plot(as.numeric(names(emi_bal)), emi_bal, xlab = 'Years', ylab = 'PM2.5 (tons)', type = 'o')
title(main = 'Total PM2.5 Emission in Baltimore')
dev.off()
|
0ed04072d7d363415ac20fabc34736d1fa42b4a2
|
fd570307c637f9101ab25a223356ec32dacbff0a
|
/src-local/specpr/src.radtran/SRC/abpeak.r
|
79cae808e0de0814abd47f291ad4cf9f29a58487
|
[] |
no_license
|
ns-bak/tetracorder-tutorial
|
3ab4dd14950eff0d63429291c648820fb14bb4cb
|
fd07c008100f6021c293ce3c1f69584cc35de98a
|
refs/heads/master
| 2022-07-30T06:04:07.138507
| 2021-01-03T22:19:09
| 2021-01-03T22:49:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,730
|
r
|
abpeak.r
|
# abpeak (Ratfor): scan spectrum xx for absorption-band extrema and record
# up to 10 peak channel positions for feature index iminr into
# ipeaka(iminr, ) / npeaka(iminr) (common-block arrays from the includes).
# A slope change from non-negative to negative marks a maximum; from
# non-positive to positive, a minimum. Extrema are forced to alternate,
# then the maxima are sorted in descending order of value.
subroutine abpeak (xx,iminr)
implicit integer*4 (i-n)
include "defs.h"
include "lmrefl.h"
include "../../src.specpr/common/lblg"
real*4 xx(MAXCHNS), slope1, slope2, half(MAXCHNS), maxval(MAXCHNS)
integer*4 max(256), min(256), maxtmp
real*4 temp(MAXCHNS), maxvtp
# NOTE(review): temp shares storage with the integer array max via this
# equivalence -- temp must not be written once max is in use.
equivalence (temp(1),max(1))
#
#  smooth input data
#
write (6,3) nchans
3	format(' nchans= ',i4)
#	do i = 2, nchans-1 {
#		temp(i) = (0.5*xx(i-1) + xx(i) + 0.5*xx(i+1))/2.0
#	}
#	temp(1)=(xx(1)+xx(2))/2.0
#	temp(nchans) = (xx(nchans)+xx(nchans-1))/2.0
#	do i = 1, nchans {
#		xx(i) = temp(i)
#		temp(i)=0.0
#	}
# walk the spectrum; record alternating maxima/minima from slope sign flips
# (the inner `if (max/min(k-1) != 0) next` guards enforce the alternation).
k = 1
do j=1,nchans-2 {
slope1 = (xx(j+1) - xx(j))
slope2 = (xx(j+2) - xx(j+1))
if ((slope1 >= 0.0) & (slope2 < 0.0)) {
if (k > 1) {
if (max(k-1) != 0) next
}
maxval(k) = xx(j+1)
max(k) = j+1
imask(j+1) = 1
write (6,1) k,max(k)
1 format (' max # ',i4,' at channel ',i4)
k = k + 1
} else if ((slope1 <= 0.0) & (slope2 > 0.0)) {
if (k > 1) {
if (min(k-1) != 0) next
}
min(k) = j+1
imask(j+1) = 1
write (6,2) k,min(k)
2 format (' min # ',i4,' at channel ',i4)
k = k + 1
}
}
# if the first recorded extremum was a minimum, shift the max arrays left
# so the list starts with a genuine maximum.
# NOTE(review): l increments inside this loop, so the shift widens as it
# goes -- confirm against callers that this compaction is intended.
if (max(1) == 0) {
l=1
} else {
l=0
}
do i=1,k {
max(i) = max(i+l)
maxval(i) = maxval(i+l)
l=l+1
}
# insertion-style sort (label 50) of the maxima into descending value order,
# carrying the channel numbers along with the values.
do j=2,l {
i=j
50 if (maxval(i-1) < maxval(i)) {
maxtmp = max(i)
max(i) = max(i-1)
max(i-1) = maxtmp
maxvtp = maxval(i)
maxval(i) = maxval(i-1)
maxval(i-1) = maxvtp
if (i != 2) {
i=i-1
go to 50
}
}
}
# keep at most the 10 strongest peaks for this feature index
if (l < 10) {
npeaka(iminr) = l
}else{
npeaka(iminr) = 10
}
n=npeaka(iminr)
do i=1,n {
ipeaka(iminr,i) = max(i)
write (6,"('peak(',i3,')=',i5)") i,ipeaka(iminr,i)
}
# half-separations between paired extrema; computed here but not used or
# returned from this routine as visible -- TODO confirm against callers.
do i=1,k {
half(i) = (max(i) - min(i)) / 2.0
}
end
|
5ba51543b75472cbc1f177bc705594216ff2841e
|
0bdfd1f7c6e62dad6266779e48b42ed40798807f
|
/Analysis/YearlyAccuracyAndAggMnACorrelation.R
|
641471637ff1488051dbe6acf802e083258e31fe
|
[] |
no_license
|
noamhabot/EliteLaw
|
bb5b2ee6ff1aa820995f2d59474e146604aaaf61
|
5e57dfb4bbf4638f25756a8283c6e76da5489461
|
refs/heads/master
| 2021-03-19T18:31:15.863992
| 2018-10-20T18:56:39
| 2018-10-20T18:56:39
| 118,191,626
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,720
|
r
|
YearlyAccuracyAndAggMnACorrelation.R
|
# Load the necessary libraries
library(ggplot2)
library(dplyr)
library(grid)

# clear the current workspace
#rm(list = ls())

# Set the working directory
setwd("~/Google Drive/Stanford Law Project")

# This script accepts the following file generated from MnACutoff.Rmd,
# appends aggregated MnA data to it, and tests and plots their correlations
load('Data/EliteLawDf.RData')
load('Data/YearlyCutoffAccuracies.RData')
yearlyStats <- read.csv('Data/YearlyStats2.csv')
chowCutoff <- read.csv('Data/ChowCutoff.csv')

# get all the indices of unique years (first occurrence of each year,
# in ascending year order)
indices <- order(df$Year)[!duplicated(sort(df$Year))]
AggMnADF <- data.frame(df$Year[indices], df$AggMnA[indices])
colnames(AggMnADF) <- c("Year", "AggMnA")

# add a column with the cutoff values to the AggMnADF corresponding to the correct Year
#AggMnADF <- left_join(AggMnADF, (wholeResults %>% select(YearTo, OptimalCutoff)), by = c("Year" = "YearTo"))
AggMnADF <- left_join(AggMnADF, (chowCutoff %>% select(YearTo, OptimalCutoff)), by = c("Year" = "YearTo"))
AggMnADF <- left_join(AggMnADF, (yearlyStats %>% select(Year, ZeroMnA, PositiveMnA, PercentPositive, Total)), by = c("Year"))

# remove the rows containing NA's (na.omit drops rows, not columns)
AggMnADF <- na.omit(AggMnADF)
# Draw two stacked time-series panels (y1 on top, y2 below) that share the
# Year axis, and annotate the lower panel with the pairwise correlation of
# the two series. Panels are aligned via gtable rbind and drawn on a fresh
# grid page.
#
# Args:
#   Year:      x-axis values common to both series.
#   y1, y2:    the two series to plot.
#   y1title,
#   y2title:   y-axis labels for the respective panels.
#   maintitle: title placed above the top panel.
plotXY <- function(Year, y1, y1title, y2, y2title, maintitle) {
  # Build one point-and-line panel for a single series.
  build_panel <- function(series, axis_label) {
    ggplot() +
      geom_point(aes(x = Year, y = series), color = "red", alpha = 0.75) +
      geom_line(aes(x = Year, y = series), size = 0.5, alpha = 0.75) +
      xlab("Year") +
      ylab(axis_label) +
      theme_minimal() +
      theme(axis.title.x = element_blank())
  }

  upper <- build_panel(y1, y1title) + ggtitle(maintitle)

  correlation <- cor(y1, y2, use = "pairwise.complete.obs")
  lower <- build_panel(y2, y2title) +
    labs(caption = paste("Correlation between the two variables:",
                         format(correlation, digits = 6)))

  # Stack the two grobs so their panel areas line up, then render.
  grid.newpage()
  grid.draw(rbind(ggplotGrob(upper), ggplotGrob(lower), size = "last"))
}
# Correlation plots: each call stacks two yearly series and reports their
# pairwise correlation in the caption (see plotXY).

# Chow optimal cutoff vs. aggregated M&A counts.
plotXY(AggMnADF$Year, AggMnADF$OptimalCutoff, "Chow Optimal Cutoff Value",
       AggMnADF$AggMnA, "Aggregated MnA's", "Chow: Optimal Cutoff Values and Aggregated MnA's by year")

# Chow optimal cutoff vs. positive M&A counts.
plotXY(AggMnADF$Year, AggMnADF$OptimalCutoff, "Chow Optimal Cutoff Value",
       AggMnADF$PositiveMnA, "Positive MnA's", "Chow: Optimal Cutoff Values and Positive MnA's by year")

# Positive vs. aggregated M&A counts.
plotXY(AggMnADF$Year, AggMnADF$PositiveMnA, "Positive MnA's",
       AggMnADF$AggMnA, "Aggregated MnA's", "Positive MnA's and Aggregated MnA's by year")

# Zero vs. aggregated M&A counts.
plotXY(AggMnADF$Year, AggMnADF$ZeroMnA, "Zero MnA's",
       AggMnADF$AggMnA, "Aggregated MnA's", "Zero MnA's and Aggregated MnA's by year")
|
7e77f6e8c5b6bf5304b68757bcf3c005479031e2
|
50aeef80232f631e48e244a8b4ca8c0c2753595e
|
/history/ui.R
|
f5bd07908f7706918c9a54e7bdc711042a612f73
|
[] |
no_license
|
npp97-field/EucPVE
|
d6627705f7083b79f9827dec0098cc21e15d62b0
|
5bc47d305ddd32335fbf832139932cdf60c4f55d
|
refs/heads/master
| 2021-01-19T21:36:15.351260
| 2015-02-25T02:01:31
| 2015-02-25T02:01:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,393
|
r
|
ui.R
|
# This script defines the Shiny user interface (sidebar controls and plot panel).
library(shiny)
# Define the UI: header, sidebar controls, and main plot output.
shinyUI(pageWithSidebar(
  # Application title
  headerPanel("Effects of belowground space limitation on performance of Eucalyptus seedlings:
barrier sensing or nutrient limitation?"),
  # Sidebar with controls to select the variable to plot, the year range,
  # and the loess smoothing fraction.
  # NOTE(review): the selectInput choices below (baseball statistics) and the
  # 1901-2012 year range look like leftovers from the Shiny tutorial template
  # and do not match the Eucalyptus title — replace with the intended
  # variables (see the note at the bottom: Asat/Amax, then N, tnc, lma).
  sidebarPanel(
    selectInput("variable", "Variable:",
                list("Runs" = "R",
                     "Hits" = "H",
                     "Home Runs" = "HR",
                     "Doubles" = "X2B",
                     "Triples" = "X3B",
                     "Walks" = "BB",
                     "Strikeouts" = "SO",
                     "Stolen Bases" = "SB",
                     "Errors" = "E")),
    sliderInput("range", "Range:",
                min = 1901, max = 2012, format="###",
                value = c(1901, 2012), step = 1),
    sliderInput("decimal", "Loess Smoothing Fraction:",
                min = 0.05, max = 0.95, value = 0.2, step= 0.05)
  ),
  # Main panel: caption plus the plot rendered by the server
  # (output ids "caption" and "mpgPlot" — the latter is another template
  # leftover name; rename in tandem with server.R if changed).
  mainPanel(
    h3(textOutput("caption")),
    plotOutput("mpgPlot")
  )
))
# start with Photosynthesis (Asat and Amax)
# then use the covariates as the list (N, tnc, lma)
|
931ff7b2878f502cda87e446022a630b8dc934b1
|
1a8b54238141f92403b9306e49c7c24964705247
|
/man/ghost.Rd
|
558731a32c8ef8f3b4350b68e929c33a613d8d3b
|
[
"MIT"
] |
permissive
|
EmanuelHark12/pkmnR
|
f790f0f3473859ea686b2d4d85ef4549bf5c6d68
|
7d3a8fc2fa55009b349741801314a1242e6932af
|
refs/heads/master
| 2023-05-04T08:19:08.288495
| 2021-05-29T01:32:12
| 2021-05-29T01:32:12
| 351,294,000
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 973
|
rd
|
ghost.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ghost}
\alias{ghost}
\title{Dados sobre os pokemon do tipo Fantasma}
\format{
Uma tabela com 13 colunas:
\describe{
\item{Nome}{Nome do Pokemon}
\item{Regiao}{Região do Pokemon localizado pela primeira vez.}
\item{Tipo Principal}{Tipo Principal do Pokemon}
\item{Tipo Secundario}{Tipo Secundario do Pokemon}
\item{hp}{O Hp base do Pokemon}
\item{attack}{O ataque base do Pokemon}
\item{defense}{A defesa base do Pokemon}
\item{special-attack}{O ataque especial base do Pokemon}
\item{special-defense}{A defesa especial base do Pokemon}
\item{speed}{A velocidade base do Pokemon}
\item{Peso}{O Peso base do Pokemon}
\item{Altura}{A Altura base do Pokemon}
\item{id}{O id do Pokemon dentro da PokeAPI}
}
}
\source{
https://pokeapi.co/.
}
\usage{
ghost
}
\description{
Tabela gerada a partir da função poke_type com o argumento 'ghost'.
}
\keyword{datasets}
|
9d06f4b5ee99daeef6eb9fed93e23c6dc8680fd6
|
d8a5e3b9eef3c76bb7ca64d29ef2746cebd4c542
|
/R/repetition0N.R
|
db0428dd23188da522eacd6e0c0c90547a8c43dc
|
[] |
no_license
|
cran/qmrparser
|
0539ad4bf5b97039e50b2bffa16c3012899c6134
|
bb1bb2b50b358d79f6400d521f995e1d2a55a784
|
refs/heads/master
| 2022-05-09T03:49:13.511049
| 2022-04-23T23:00:05
| 2022-04-23T23:00:05
| 17,698,845
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
repetition0N.R
|
# do not edit, edit noweb/qmrparser.nw
#
# repetition0N: parser combinator matching zero or more repetitions of the
# parser `rpa0`, built as option(repetition1N(rpa0)) — i.e. "one or more,
# made optional".
#
# Args:
#   rpa0:   the parser to repeat.
#   action: callback applied to the parsed value on success; by default it
#           tags the result with type "repetition0N".
#   error:  callback building the error object on failure (p = position,
#           h = inner error/history).
#
# Returns: a parser, as produced by `option` (defined elsewhere in qmrparser).
repetition0N <- function(rpa0,
                         action = function(s) list(type="repetition0N",value=s ),
                         error  = function(p,h) list(type="repetition0N",pos=p,h=h))
  option(repetition1N(rpa0),action=action,error=error)
|
afc8d4a5e1b28fc7276ae8b5ce460136bd5725a6
|
5f65d74beacc184ea35c7da50db407408914e21d
|
/02_R-Programming/rankall.R
|
9e330d88ea7197ec36b1eca748fea1037bcf0920
|
[] |
no_license
|
olistroemer/datasciencecoursera
|
71bb5300ff79eb4fd5ece5c822c68eb371f45712
|
d921e1a215dfbed960acd2e23d50eb81fb329d68
|
refs/heads/master
| 2020-05-30T14:11:26.721840
| 2020-04-08T13:19:05
| 2020-04-08T13:19:05
| 189,783,108
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 919
|
r
|
rankall.R
|
# Map each outcome name to the column of the raw data holding its rate.
columns <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)

# Read every field as character; the relevant outcome columns are converted
# to numeric below (non-numeric entries become NA with a coercion warning).
data <- read.csv("outcome-of-care-measures.csv", colClasses = c("character"))

# Convert relevant columns to numeric.
# The loop variable is named `column` (not `c`) to avoid shadowing base::c().
for (column in columns) {
  data[, column] <- as.numeric(data[, column])
}

# Split data into one data.frame per state.
sdata <- split(data, data$State)
# Rank hospitals in every state by 30-day mortality for a given outcome.
#
# Args:
#   outcome: one of "heart attack", "heart failure", "pneumonia"
#            (the names of the file-level `columns` lookup).
#   num:     "best", "worst", or a 1-based numeric rank.
#
# Returns: a data.frame with one row per state (rownames = state codes)
#   and columns `hospital` and `state`. When a state has fewer than `num`
#   ranked hospitals, `hospital` is NA but the state name is preserved
#   (the previous version returned NA for both columns in that case).
#
# Relies on the file-level globals `columns` and `sdata`.
rankall <- function(outcome, num = "best") {
  # Check argument
  if (!outcome %in% names(columns)) stop("invalid outcome")
  col <- columns[outcome]

  # Rank the hospitals of a single state's data.frame `x`.
  rank_state <- function(x) {
    # Order by outcome rate, ties broken alphabetically; drop NA rates.
    ordered <- x[order(x[, col], x$Hospital.Name, na.last = NA), ]
    idx <- if (num == "best") {
      1L
    } else if (num == "worst") {
      nrow(ordered)
    } else {
      num
    }
    hospital <- if (idx >= 1 && idx <= nrow(ordered)) {
      ordered[idx, "Hospital.Name"]
    } else {
      NA_character_  # rank out of range for this state
    }
    data.frame(hospital = hospital, state = x$State[1],
               stringsAsFactors = FALSE)
  }

  do.call(rbind, lapply(sdata, rank_state))
}
|
e675bac67fc705e6849fbb2e398e43cd6c4c2e6a
|
be47f48854fb51b37ba6aeabf1401a38e2f6b9ff
|
/man/itCall.Rd
|
efc26619d7e1992399ff836ffe8b2086ec7e8fa5
|
[] |
no_license
|
wconstan/ifultools
|
a0d51d9243b85ab4096bfda45f118b5829617699
|
6d887f97f6eb258354122955723c289f268c95a0
|
refs/heads/master
| 2021-01-17T18:31:05.191308
| 2020-04-30T23:42:42
| 2020-04-30T23:42:42
| 58,664,643
| 0
| 1
| null | 2016-05-18T17:36:10
| 2016-05-12T18:00:13
|
C
|
UTF-8
|
R
| false
| false
| 889
|
rd
|
itCall.Rd
|
\name{itCall}
\alias{itCall}
\title{
Thin itCall Wrapper to IFULTOOLS Symbols
}
\description{
Thin itCall Wrapper to IFULTOOLS Symbols
}
\usage{
itCall(symbol, ...)
}
\arguments{
\item{symbol}{character scalar defining symbol to call in DLL}
\item{\dots}{arguments to underlying C code}
}
\details{
Foreign function calls are no longer allowed on CRAN. This function serves as a thin wrapper to avoid
the associated R CMD check issues when building packages that depend on IFULTOOLS.
}
\value{output of the \code{itCall}}
\seealso{
\code{\link{itCall}}.
}
\examples{
\dontrun{
itCall("RS_fractal_filter_nonlinear_local_projection",
as.matrix(x),
as.integer(dimension),
as.integer(tlag),
as.integer(n.neighbor),
max.distance,
mutilsDistanceMetric(metric),
as.integer(noise.dimension),
corr.curve)
}
}
\keyword{utilities}
|
d2494939ce9c1c3d7cf7092c951d8ff0ba5f8732
|
4680f495ab20b619ddf824584939a1e0356a0ed3
|
/scripts/solution/slots_out_of_mountains_to_track.R
|
885b1c17ddc1ca1daa2d064d515ec761674cfa7b
|
[] |
no_license
|
Laurigit/flAImme
|
7ca1de5e4dd82177653872f50e90e58aed5968f7
|
9d4b0381d4eedc928d88d0774c0376ba9341774b
|
refs/heads/master
| 2023-05-24T17:06:58.416499
| 2023-04-28T08:10:30
| 2023-04-28T08:10:30
| 251,082,000
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 825
|
r
|
slots_out_of_mountains_to_track.R
|
# Annotate every game slot with the maximum movement allowed on it, derived
# from which rows of the track are mountain rows (data.table throughout).
#
# Args:
#   game_state: data.table with at least PIECE_ATTRIBUTE ("M" marks a
#               mountain row), GAME_SLOT_ID and CYCLER_ID columns.
#
# Returns: game_state with a MAXIMUM_MOVEMENT column attached by joining
#   the per-slot limits back on GAME_SLOT_ID.
slots_out_of_mountains_to_track <- function(game_state) {
  # game_state[, rleidi := rleid(CYCLER_ID)]
  # Flag each row as mountain (1) or flat (0), keeping slot and cycler ids.
  sscols <- game_state[, .(mountain_row = ifelse(PIECE_ATTRIBUTE == "M", 1, 0), GAME_SLOT_ID, CYCLER_ID)]
  # Collapse to one row per (mountain_row, slot), ordered from the highest
  # slot id downwards (i.e. from the end of the track backwards).
  sscols_aggr <- sscols[, .N, by = .(mountain_row, GAME_SLOT_ID)][order(-GAME_SLOT_ID)]
  # sscols_aggr[, start := ifelse(shift(mountain_row == 1, n = 6) | mountain_row == 1, 1, 0)]
  #sscols_aggr[, start_of_restricted_movement := ifelse(is.na(start_of_restricted_movement), 0, start_of_restricted_movement)]
  # 0-based position within each consecutive run of equal mountain_row values.
  sscols_aggr[, counter_cons_piece := rowid(rleid(mountain_row)) - 1]
  # Mountain rows cap movement at 5; elsewhere take pmax(counter, 5).
  # NOTE(review): pmax() means max_move is never below 5 even off-mountain,
  # making counter_cons_piece only matter beyond 5 squares — if the intent
  # was to restrict movement near mountains, pmin() may have been intended.
  # Confirm against the game rules before changing.
  sscols_aggr[, max_move := ifelse(mountain_row == 1, 5, pmax(counter_cons_piece, 5))]
  sscols_res <- sscols_aggr[, .(MAXIMUM_MOVEMENT = max_move, GAME_SLOT_ID)]
  # Right-join style: keep every row of game_state, adding MAXIMUM_MOVEMENT.
  joinaa <- sscols_res[game_state, on = .(GAME_SLOT_ID)]
  return(joinaa)
}
|
025b804ce7fe1f1f94306d0293638cbc8cde9b57
|
16f9082704bd55e4ad7efba5d4a196da61fb70e7
|
/plot2.R
|
e40078cd71a37aa54a90a259d14a83399420f5ff
|
[] |
no_license
|
JorgePajaron/ExData_Plotting1
|
1c276d2e5a39ae0d3a6a889899386c4384bbb85c
|
66eec50bd445980dd55baaa0941197dc453946a1
|
refs/heads/master
| 2021-01-18T06:32:53.420498
| 2015-03-04T14:42:48
| 2015-03-04T14:42:48
| 31,604,947
| 0
| 0
| null | 2015-03-03T15:18:17
| 2015-03-03T15:18:17
| null |
UTF-8
|
R
| false
| false
| 364
|
r
|
plot2.R
|
# Plot 2: Global Active Power over the two-day window 1-2 Feb 2007.
# Reads the full household power data set, keeps the two target dates,
# and writes the line chart to plot2.png.
data <- subset(read.table("household_power_consumption.txt", header = TRUE,
                          na.strings = "?", sep = ";"),
               Date == "1/2/2007" | Date == "2/2/2007")

# Combine Date and Time into a single timestamp column. Wrapped in
# as.POSIXct(): storing list-based POSIXlt objects in a data.frame column
# is fragile, while POSIXct plots identically on the time axis.
data$Fecha <- as.POSIXct(strptime(paste(data$Date, data$Time),
                                  "%d/%m/%Y %H:%M:%S"))

# English locale so the x-axis weekday abbreviations are not localized.
Sys.setlocale("LC_TIME", "English")

png(file = "plot2.png")
with(data, plot(Fecha, Global_active_power, type = "l",
                xlab = "", ylab = "Global active power (kilowatts)"))
dev.off()
|
fb3a6019291dcaf386b6de2dfd4c5996350ce853
|
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
|
/FDA_Pesticide_Glossary/Isopropamide.R
|
f156a4408d1d94904f4bdad42a1a3144fa2bd19a
|
[
"MIT"
] |
permissive
|
andrewdefries/andrewdefries.github.io
|
026aad7bd35d29d60d9746039dd7a516ad6c215f
|
d84f2c21f06c40b7ec49512a4fb13b4246f92209
|
refs/heads/master
| 2016-09-06T01:44:48.290950
| 2015-05-01T17:19:42
| 2015-05-01T17:19:42
| 17,783,203
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 228
|
r
|
Isopropamide.R
|
# Render the Isopropamide R Markdown report to HTML via knitr.
library("knitr")
library("rgl")  # presumably required by chunks inside the .Rmd — confirm
# Alternative multi-step pipeline, kept commented out for reference:
#knit("Isopropamide.Rmd")
#markdownToHTML('Isopropamide.md', 'Isopropamide.html', options=c("use_xhml"))
# NOTE(review): "use_xhml" in the line above looks like a typo for "use_xhtml".
#system("pandoc -s Isopropamide.html -o Isopropamide.pdf")
# One-step knit + HTML conversion.
knit2html('Isopropamide.Rmd')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.