blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8093e5e6e0b9f4261210bd8c6cf3aca3e398dec9
|
b8499353fc43469a1bf137d13a7302e95ea10847
|
/man/Gumbel-distribution.Rd
|
f0e0e55203c4e5f1a33f4bfae6591a51c8a69d7e
|
[] |
no_license
|
ilapros/ilaprosUtils
|
68f52bfd5ade384b72728bd8918e96a0ae3a00e7
|
9f0201f3ad82a1b0b38e48954917eabb59d32317
|
refs/heads/master
| 2023-04-09T12:48:26.958111
| 2023-03-27T15:59:47
| 2023-03-27T16:05:44
| 19,574,187
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,820
|
rd
|
Gumbel-distribution.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gevglo.R
\name{Gumbel distribution}
\alias{Gumbel distribution}
\alias{dgum}
\alias{pgum}
\alias{rgum}
\alias{qggum}
\alias{qgum}
\title{The Gumbel distribution}
\usage{
dgum(x, loc, scale, log = FALSE)
pgum(q, loc, scale, lower.tail = TRUE, log.p = FALSE)
qgum(p, loc, scale, lower.tail = TRUE, log.p = FALSE)
rgum(n, loc, scale)
}
\arguments{
\item{x, q}{vector of quantiles}
\item{loc}{location parameter}
\item{scale}{scale parameter}
\item{log, log.p}{logical; if TRUE, probabilities p are given as log(p)}
\item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \leq x]} otherwise, \eqn{P[X > x]}}
\item{p}{vector of probabilities}
\item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.}
}
\value{
dgum gives the density, pgum gives the distribution function,
qgum gives the quantile function, and rgum generates random deviates.
The length of the result is determined by n for rgum,
and is the maximum of the lengths of the numerical arguments for the other functions.
The numerical arguments are recycled to the length of the result.
Only the first elements of the logical arguments are used.
}
\description{
Density, distribution function, quantile function and random generation for
the Gumbel distribution with location parameter equal to \code{loc},
scale parameter equal to \code{scale}. This corresponds to a GEV
distribution with shape parameter equal to 0.
}
\examples{
curve(dgum(x,4,6), from=-15, to = 40,type="l")
plot(ecdf(rgum(100,4,6)))
lines(seq(-15,40,by=0.5),pgum(seq(-15,40,by=0.5),4,6),col=2)
qgum(c(0.5,0.99,0.995,0.995,0.999),4,6)
# notable quantiles
}
\concept{gumbel distribution}
\keyword{Gumbel}
\keyword{distribution}
|
8e90f2f2786abb6b2be36cd6d71cc7e2b167b955
|
a62d607da56af93749f27d7327d560204743a744
|
/man/summarySeg.Rd
|
5e5532de02fc7139f916a740dfb75d3cb91b6290
|
[] |
no_license
|
ubcxzhang/PICS
|
3c98704af3a7756c8f53b445fcbe638103472d00
|
87122d2a141908013034ed4c945c1e136ffbc2e6
|
refs/heads/master
| 2020-08-09T13:50:20.197693
| 2019-10-10T06:39:53
| 2019-10-10T06:39:53
| 214,100,291
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 625
|
rd
|
summarySeg.Rd
|
\name{summarySeg}
\Rdversion{1.1}
\alias{summarySeg}
\title{
Summarize a segReadsList object.
}
\description{
Returns info about a segReadsList object in a \code{data.frame}
containing the following information:
chr : chromosome id
NF : number of forward reads
NR : number of reverse reads
L : length of segment
min: start location of segments
max: end location of segments
}
\usage{
summarySeg(seg)
}
\arguments{
\item{seg}{ An object of class \code{segReadsList}}
}
\value{
A six-column \code{data.frame}.
}
\author{
Xuekui Zhang
}
\seealso{
\code{\linkS4class{segReadsList}}
}
\keyword{functions}
|
ab9b244c25aa84a33670579ad1e5cc078580f5f4
|
df487d3a3d9978ba4aff6ab6684468bbd1d59d1a
|
/rproject1/rproject1/script.R
|
6d106c5be6f5b8ff68976d426388aa887d475431
|
[] |
no_license
|
ene1801/demo-repo
|
d9b374645370a263053d55a48a91ef29b2096ffc
|
55da7585b084122d1e63880246d63e710134e6c7
|
refs/heads/master
| 2021-09-01T05:59:25.356507
| 2017-12-25T07:23:08
| 2017-12-25T07:23:08
| 115,313,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23
|
r
|
script.R
|
### Here is some text
|
55169c8fe67ccf9b41d17572f06b6f18f353537f
|
d5d1df70749f408d4e6f1635632587eccc2b725f
|
/run_analysis.R
|
bc7450f57b149da81ea0e28f30b7531f9f25782b
|
[] |
no_license
|
dellins/Getting-and-Cleaning-Data-Project
|
e8b7102f98bd67bd9abea5b1d851d4117f2c29d6
|
49ac429a4843dd32e41c819188ab4c1fe5515b2f
|
refs/heads/master
| 2021-01-19T12:37:34.021510
| 2015-01-24T18:39:31
| 2015-01-24T18:39:31
| 29,777,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,283
|
r
|
run_analysis.R
|
# run_analysis.R -- build a tidy summary of the UCI HAR dataset:
#   1. merge the test and training sets into one data set
#   2. keep only the mean/std measurements
#   3. use descriptive activity names
#   4. label the variables from features.txt
#   5. write per-activity/per-subject averages to tidyData.txt
library(data.table)
# Lookup tables shared by both splits (read as character to keep ids exact).
activities <- read.table("UCI HAR Dataset/activity_labels.txt", header = FALSE, colClasses = "character")
features <- read.table("UCI HAR Dataset/features.txt", header = FALSE, colClasses = "character")
# Read one split ("test" or "train") and return it fully labelled:
# measurement columns named after features.txt, plus Activity and Subject.
read_split <- function(split) {
  prefix <- paste0("UCI HAR Dataset/", split, "/")
  measures <- read.table(paste0(prefix, "X_", split, ".txt"), header = FALSE)
  activity <- read.table(paste0(prefix, "y_", split, ".txt"), header = FALSE)
  subjects <- read.table(paste0(prefix, "subject_", split, ".txt"), header = FALSE)
  # 3. Map numeric activity codes to their descriptive names.
  activity$V1 <- factor(activity$V1, levels = activities$V1, labels = activities$V2)
  # 4. Name the measurement columns and the two id columns.
  colnames(measures) <- features$V2
  colnames(activity) <- "Activity"
  colnames(subjects) <- "Subject"
  cbind(measures, activity, subjects)
}
# 1. Merge: test rows first, then train (same row order as before).
allData <- rbind(read_split("test"), read_split("train"))
# 2. Extract only the mean and standard deviation measurements,
#    keeping the Subject and Activity id columns.
meanSdData <- allData[, grepl("mean|std|Subject|Activity", names(allData))]
# 5. Average each remaining variable for every (Activity, Subject) pair.
tidyData <- data.table(meanSdData)[, lapply(.SD, mean), by = "Activity,Subject"]
# Clean up feature names (order matters: strip -mean/-std before punctuation).
setnames(tidyData, gsub('-mean', 'Mean', names(tidyData)))
setnames(tidyData, gsub('-std', 'Std', names(tidyData)))
setnames(tidyData, gsub('[-()]', '', names(tidyData)))
setnames(tidyData, gsub('BodyBody', 'Body', names(tidyData)))
write.table(tidyData, file = "tidyData.txt", row.names = FALSE)
|
3ec7d111907c602c9fd6c37e11f693313082a2e0
|
f6874dd8703801c5e925653f406f18bc758117e5
|
/r/tutorials/.archive/DataMining.R
|
a30fefdacbd1368c703b6472d2dd0f777783ceed
|
[] |
no_license
|
bu-rcs/examples
|
b532d4cb57dd02a1c377b0c24910f882e8b1da7a
|
d793d1a6d21459f142423ab06cb948e8a35ac0ac
|
refs/heads/master
| 2021-07-15T12:48:53.678296
| 2020-06-15T20:30:20
| 2020-06-15T20:30:20
| 175,437,550
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,437
|
r
|
DataMining.R
|
# ---------------------------------------#
# #
# R script for SCV tutorial #
# Katia Oleinik #
# ---------------------------------------#
# lm() - Linear regression
# glm() - generalised linear model
# coxph() - Cox model (survival package)
# clogit() - Conditional logistic regression (survival package)
# gee() - Generalised Estimating Equations (in gee and geepack packages)
# lme() Linear mixed models ( in nlme package)
# polr() Proportional odds model (in MASS package)
#------------------------------------------------------ #
# Examples of linear regression #
#------------------------------------------------------ #
## A few of the exploratory graphs we might draw
library(MASS)
data(birthwt)
pairs(birthwt[,c("age","lwt","ptl","bwt")])
boxplot(bwt~ftv,data=birthwt)
boxplot(bwt~race,data=birthwt)
## A linear model
model1<-lm(bwt~age+lwt+factor(race)+ptl+ftv+ht,data=birthwt)
anova(model1)
par(mfrow=c(2,2)) ##2x2 table of plots
plot(model1)
# There is no suggestion that age is important, or preterm visits.
# Two points (labelled "188" and "226") seem influential.
#Examining beta's computed by lm.influence() shows that removing
#one of these points would have a fairly important effect on the
#coefficient for ptl. This is a large infant born to a small woman
#who smoked and had had three previous preterm deliveries.
dbeta<-lm.influence(model1)$coef
strange<-which(row.names(birthwt) %in% c("188","226"))
round(dbeta[strange,],2)
birthwt[strange,]
#We may be more interested in identifying risk factors for low birth
#weight (defined as < 2500g) than in predicting actual weights.
#It may be more productive to fit a logistic regression model to
#the binary variable bwt< 2500g
model2<-glm(I(bwt<2500)~age+lwt+factor(race)+ptl+ftv+ht,
data=birthwt,family=binomial())
anova(model2)
plot(model2) ## pretty useless plots
#The same infant is still influential: normal birth weight despite
#being very high risk.
par(mfrow=c(1,1))
#------------------------------------------------------ #
# Decision Trees #
#------------------------------------------------------ #
library("party")
str(iris)
#Call function ctree to build a decision tree. The first parameter is a formula,
# which defines a target variable and a list of independent variables.
iris_ctree <- ctree(Species ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width, data=iris)
print(iris_ctree)
plot(iris_ctree)
plot(iris_ctree, type="simple")
#------------------------------------------------------ #
# Hierarchical Clustering #
#------------------------------------------------------ #
# Sample of 40 records from iris data and remove variable Species
idx <- sample(1:dim(iris)[1], 40)
irisSample <- iris[idx,]
irisSample$Species <- NULL
#Hierarchical clustering
hc <- hclust(dist(irisSample), method="ave")
plot(hc, hang = -1, labels=iris$Species[idx])
#------------------------------------------------------ #
# K means #
#------------------------------------------------------ #
newiris <- iris
newiris$Species <- NULL
#Apply kmeans to newiris, and store the clustering result in kc. The cluster number is set to 3.
(kc <- kmeans(newiris, 3))
#Compare the Species label with the clustering result
plot(newiris[c("Sepal.Length", "Sepal.Width")], col=kc$cluster)
points(kc$centers[,c("Sepal.Length", "Sepal.Width")], col=1:3, pch=8, cex=2)
table(iris$Species, kc$cluster)
#------------------------------------------------------ #
# Classical Tests #
#------------------------------------------------------ #
#--------------------------------------------------------------------------------------------------------------
# | Observations Independent or Correlated | |
# |-----------------------------------------------| Alternative tests |
# Outcome variable | | | ( non-normal distribution, |
# | independent | correlated | small sample size, etc. ) |
# | | | |
#------------------------------------------------------------------------------------------------------------
# | Ttest, ANOVA | Paired ttest | Wilcoxon rank-sum test(alt. ttest) |
# Continuous | linear correlation | Repeated-measures ANOVA| Wilcoxon sign-rank test (alt. paired)|
# | linear regression | Mixed models/GEE | Kruskal-Wallis test (alt. ANOVA) |
# | | | Spearman rank (alt. Pearson' corr.) |
#------------------------------------------------------------------------------------------------------------
# | Risk differ. (2x2) | McNemar's test (2x2) | Fisher's exact test |
# Binary or | Chi-square test (RxC)| Conditional logistic | McNemar's exact test |
# Categorical | Logistic regression | regression (mult.) | |
# | (multivariate regr)| GEE modelling (mult.) | |
#-------------------------------------------------------------------------------------------------------------
# | Rate Ratio | Frailty model | Time-varying effects |
# Time to event | Kaplan-Meier stat. | | |
# (time to get disease)| Cox regression | | |
#--------------------------------------------------------------------------------------------------------------
# Tests for normality
# if the variable is normally distributed, the graph should be close to the straight line
qqnorm(dt$cigar)
qqline(dt$cigar, lty=2)
shapiro.test(dt$cigar)
# p-value is large so we cannot reject the hypothesis that the data
# is normally distributed
qqnorm(dt$area)
qqline(dt$area, lty=2)
shapiro.test(dt$area)
# This time the variable is not normally distributed
# Fisher F test: compare if 2 sample variances are significantly different
var.test (dt$traffic[dt$ed ==0], dt$traffic[dt$ed==1] )
# p-value (large) and 95-confidence interval (includes 1) suggests
# that we cannot reject the hypothesis that variances are the same for 2 samples
# Comparing 2 means: Student's t test
t.test (dt$traffic[dt$ed ==0], dt$traffic[dt$ed==1] )
# we reject null-hypothesis based on the confidence interval and p value
# Paired t test
# Let's simulate another variable "number of traffic fatalities after some law was implemented"
dt$traffic.after <- dt$traffic - rnorm(length(dt$traffic), mean=0.2, sd=.2)
( t.result<- t.test (dt$traffic, dt$traffic.after, paired=TRUE ) )
# as we expected the difference in means is not equal to 0 based on 95 percent conf. interval and p value
str(t.result)
# the output of this function is a list, we can access each individual element of the list by name
t.result$p.value
t.result$conf.int
# we can also access list elements by the index:
t.result[[3]]
# Binomial test - compare 2 proportions
# let's split the dataset into 2 groups and check if the proportion of "ed"=1 is the same among 2 groups
dt.group1 <- subset( dt, location =="N" | location =="NE" | location =="NW", select=c(state, traffic, ed))
dt.group2 <- subset( dt, location =="S" | location =="C" | location =="E" | location =="S" | location =="SE" | location =="SW", select=c(state, traffic, ed))
prop.test(c(sum(dt.group1$ed==1),sum(dt.group2$ed==1)),c(length(dt.group1$ed),length(dt.group2$ed)))
#--------------------------------- #
# Statistical Models #
#--------------------------------- #
#
# y ~ x - regression
# y ~ x-1 - regression through the origin
# y ~ x + z - multiple regression
# y ~ x * z - multiple regression with interaction
# y ~ xc - one way ANOVA (xc is a categorical variable)
# y ~ xc1 + xc2 - two way ANOVA (xc1, xc2)
# ...
#
lm.fit <- lm(dt$ap ~ dt$pop)
summary(lm.fit )
anova(lm.fit )
qf(0.95,1,48)
par(mfrow=c(2,2)) # display 4 graphs at once
plot(lm.fit )
par(mfrow=c(1,1)) # return to the default mode - 1 graph at a time
# first plot should not show any structure or pattern
# second (upper right) -should be close to the straight line
# third (bottom left) - we should not see a triangular shape
# last one - highlight the points that have the largest effect on the parameter estimates
# It displays Cook's distance contour lines , which is another measure of the importance of each observation in the regression.
# Distances larger than 1 suggest the presence of a possible outlier or a poor model
# Multiple regression
# Suppose Y = dt$ap - number of AP tests in the state
# Predictors are X1 = dt$pop - population
# X2 = dt$area - land area
# X3 = dt$cigar - cigarettes sold
# Y = FUN(X1, X2) + err
lm.fit <- lm(dt$ap ~ dt$pop + dt$area + dt$cigar)
summary(lm.fit)
|
a034d7affb7903357885dcff5f1b796683dbeff0
|
ebd85fbe4e39d629ab67e87e937ddf9df5d7453d
|
/man/manage_dir.Rd
|
afdcf06671a00258f9e961542ade6a8ca95534ca
|
[] |
no_license
|
frank0434/MVItools
|
4275f2228bcfe877781ca459cd849e2696d7d9bf
|
52b03918353aa183b960e64cdd047562974e40e9
|
refs/heads/master
| 2020-05-23T12:06:08.381065
| 2019-05-22T02:13:53
| 2019-05-22T02:13:53
| 186,750,870
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 711
|
rd
|
manage_dir.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manage_dir.R
\name{manage_dir}
\alias{manage_dir}
\title{manage_dir}
\usage{
manage_dir(data_source = "./", output_loc = "markered")
}
\arguments{
\item{data_source}{source of data}
\item{output_loc}{output location}
}
\value{
Logical vector. TRUE if the directory(ies) were successfully created; FALSE otherwise.
}
\description{
House keeping work.
}
\examples{
\dontrun{
manage_dir(data_source = "/Analysis/IndividualSiteOutputs/",
output_loc = "marked_graphes")
}
}
\seealso{
Other water_markers: \code{\link{graph_info}},
\code{\link{graph_output}}, \code{\link{graph_read}},
\code{\link{graph_stamp}}
}
\concept{water_markers}
|
5e17e7bcf4f6bda4592e64eb48a9bdc5662d0c2f
|
94259f4e3fffd791110cd9d0353ff90fe868df32
|
/R/obis_helpers.R
|
2d763f262a6202fa7c997e2d2cb3f8b86d248989
|
[
"MIT"
] |
permissive
|
neiljun/spocc
|
23ba8d1ff717b90141df212020433984f3a42c38
|
bd97f73c3527d14e1ff26cb431f7b45523789f25
|
refs/heads/master
| 2020-04-08T11:32:55.846427
| 2018-11-16T17:59:45
| 2018-11-16T17:59:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,266
|
r
|
obis_helpers.R
|
# Base URL of the iOBIS occurrence web API.
obis_base <- function() {
  "http://api.iobis.org/occurrence"
}
# Search the iOBIS occurrence endpoint.
#
# Builds a query from the non-NULL arguments, issues a GET via crul and
# returns the parsed JSON response.
#
# @param scientificName Scientific name filter (sent as `scientificname`).
# @param limit Maximum number of records to return (default 500).
# @param offset Record offset for paging (default 0).
# @param obisid,aphiaid,resourceid,year,startdate,enddate,startdepth,enddepth,geometry,qc
#   Optional filters passed straight through as query parameters.
# @param ... Curl options forwarded to crul::HttpClient$new().
# @return The parsed JSON body (list) on success; stops with the server's
#   error message (or raises the HTTP error) otherwise.
obis_search <- function(scientificName = NULL, limit = 500, offset = 0,
    obisid = NULL, aphiaid = NULL, resourceid = NULL, year = NULL,
    startdate = NULL, enddate = NULL, startdepth = NULL,
    enddepth = NULL, geometry = NULL, qc = NULL, ...) {
  # sc() is a package-internal helper not visible in this file --
  # presumably it compacts the list by dropping NULL entries (TODO confirm).
  args <- sc(list(scientificname = scientificName, limit = limit,
    offset = offset, obisid = obisid, aphiaid = aphiaid,
    resourceid = resourceid, year = year, startdate = startdate,
    enddate = enddate, startdepth = startdepth,
    enddepth = enddepth, geometry = geometry, qc = qc))
  cli <- crul::HttpClient$new(
    url = obis_base(),
    opts = list(...)
  )
  out <- cli$get(query = args)
  # Any status above 201 is treated as an error. Try to surface the
  # server-supplied message (last ";"-separated fragment); if the body is
  # not JSON, fall back to the generic HTTP error.
  if (out$status_code > 201) {
    txt <- out$parse("UTF-8")
    tt <- tryCatch(jsonlite::fromJSON(txt, FALSE), error = function(e) e)
    if (inherits(tt, "error")) out$raise_for_status()
    mssg <- strsplit(tt$message, ";")[[1]]
    stop(mssg[length(mssg)], call. = FALSE)
  }
  jsonlite::fromJSON(out$parse("UTF-8"))
}
# Fetch a single OBIS occurrence record by id.
#
# @param id Occurrence identifier, appended to the base URL path.
# @param ... Curl options forwarded to crul::HttpClient$new().
# @return Parsed JSON (list) for the record; raises on an HTTP error status.
obis_occ_id <- function(id, ...) {
  client <- crul::HttpClient$new(
    url = file.path(obis_base(), id),
    opts = list(...)
  )
  response <- client$get()
  response$raise_for_status()
  jsonlite::fromJSON(response$parse("UTF-8"))
}
|
d5413bc4dbf0a3d8717ec3519e777fcead76a675
|
ea6805ab565962fadced65cadb3a57f88984caf2
|
/plotting-galaxies.R
|
a52381dd949ff90b86d9c9296c5a4e674c096eef
|
[] |
no_license
|
jacobbieker/Galaxy_Cluster_Project_Scripts
|
3d3a2f7059834354ae153def4ea3ab3790471bdb
|
7ce2f59f5ef7310d5339580e83be63915b36f370
|
refs/heads/master
| 2021-01-22T11:04:59.086427
| 2016-05-05T22:40:06
| 2016-05-05T22:40:06
| 45,498,526
| 0
| 0
| null | 2016-05-05T22:40:06
| 2015-11-03T22:01:56
|
R
|
UTF-8
|
R
| false
| false
| 41,637
|
r
|
plotting-galaxies.R
|
#########################################################
#
# Install the needed dependencies
#
#########################################################
# Load the plotting/analysis dependencies, installing any that are missing.
# require() returns FALSE instead of erroring, which is what makes this
# check-then-install pattern work.
if (require("XLConnect") && require("ggplot2") && require("gtable") &&
    require("quantreg") && require("scatterplot3d") && require("scales")) {
  print("All required packages are loaded correctly")
} else {
  print("Trying to install XLConnect")
  install.packages("XLConnect", dependencies = TRUE)
  print("Trying to install ggplot2")
  install.packages("ggplot2", dependencies = TRUE)
  print("Trying to install scatterplot3d")
  install.packages("scatterplot3d", dependencies = TRUE)
  print("installing gtable")
  install.packages("gtable", dependencies = TRUE)
  print("installing scales")
  install.packages("scales", dependencies = TRUE)
  print("Installing quantreg")
  install.packages("quantreg", dependencies = TRUE)
  # BUG FIX: the original re-check only tested XLConnect, ggplot2 and
  # scatterplot3d, so a failed gtable/quantreg/scales install went
  # unnoticed until much later. Re-check ALL six packages here.
  if (require("XLConnect") && require("ggplot2") && require("gtable") &&
      require("quantreg") && require("scatterplot3d") && require("scales")) {
    print("All required packages are installed and loaded")
  } else {
    stop("could not install the required packages")
  }
}
#########################################################
#
# Loading the data
#
#########################################################
# Load every worksheet of the spectrophotometric catalogue into a named list
# of data frames (one list element per sheet; readWorksheet vectorizes over
# the sheet names returned by getSheets). Requires GCP_spectrophot_data.xlsx
# in the working directory.
spectrophotometric_data_workbook <- loadWorkbook("GCP_spectrophot_data.xlsx")
spectrophotometric_data <- readWorksheet(spectrophotometric_data_workbook, sheet = getSheets(spectrophotometric_data_workbook))
#########################################################
#
# Functions to calculate the necessary values
#
#########################################################
# Combine two independent errors in quadrature: sqrt(e1^2 + e2^2).
# Vectorized over both arguments.
#
# @param error.1 First error term (numeric, possibly a vector).
# @param error.2 Second error term (numeric, same length or recyclable).
# @return The quadrature sum, same length as the longer input.
get.quad.error <- function(error.1, error.2) {
  sqrt(error.1^2 + error.2^2)
}
#########################################################
#
# Functions to graph the data
#
#########################################################
# Returns the scatter points of log(sigma) vs log(Mass) to plot with ggplot2
# NOTE(review): this function only copies columns into locals and returns
# the last assignment (the sigma errors) invisibly -- it never builds a
# ggplot layer, and the colorcode/shapecode arguments are unused. It looks
# like an unfinished stub; confirm intent before relying on it.
graph.sigma.mass <- function(sheet, colorcode, shapecode) {
  re.data <- sheet$LREJB_KPC_DEV
  re.error <- sheet$E_LRE_DEVAF814W
  sigma.data <- sheet$LSIGMA_COR # log10(sigma) in km/s
  sigma.error <- sheet$E_LSIGMA # Error in Sigma
}
# Returns the scatter points of log(re) vs log(Mass) to plot with ggplot2
# NOTE(review): returns geom_point() with no data or aes() mapping, and the
# extracted columns and colorcode/shapecode arguments are unused -- this
# looks like an unfinished stub; confirm intent before relying on it.
graph.re.mass <- function(sheet, colorcode, shapecode) {
  re.data <- sheet$LREJB_KPC_DEV
  re.error <- sheet$E_LRE_DEVAF814W
  mass.data <- sheet$LMASS_DEV
  mass.error <- sheet$E_LMASS_DEV
  return(geom_point())
}
# Returns the scatter points of 1.30log(sigma) - 0.82log(<I>e) vs log(re) to plot with ggplot2
# NOTE(review): like graph.sigma.mass above, this only copies columns into
# locals and returns the last assignment invisibly; res, colorcode and
# shapecode are unused. Appears to be an unfinished stub.
graph.sigmaI.re <- function(sheet, res, colorcode, shapecode) {
  re.data <- sheet$LREJB_KPC_DEV
  re.error <- sheet$E_LRE_DEVAF814W
  ie.data <- sheet$LIEJB_DEV
  ie.error <- sheet$e_lieJB_DEV #
  sigma.data <- sheet$LSIGMA_COR # log10(sigma) in km/s
  sigma.error <- sheet$E_LSIGMA # Error in Sigma
}
# Clone a ggplot's bottom/left axis tick-mark grobs into new gtable
# rows/columns next to the panel, then draw the result with grid.draw().
# NOTE(review): the tick offsets are unit(0, "cm"), so the cloned ticks sit
# exactly on the panel border rather than inside it, despite the inline
# comments -- confirm intended. unit()/grid.draw() come from the grid
# package and gtable_add_* from gtable; both must be attached by the caller.
add.tick.marks <- function(graph_to_add_ticks_to) {
  ################################
  # Adding tick marks on all sides
  ################################
  # Convert the plot to a grob
  gt <- ggplotGrob(graph_to_add_ticks_to)
  # Get the position of the panel in the layout
  # ('se' partially matches subset()'s 'select' argument)
  panel <-c(subset(gt$layout, name=="panel", se=t:r))
  ## For the bottom axis
  # Get the row number of the bottom axis in the layout
  rn <- which(gt$layout$name == "axis-b")
  # Extract the axis (tick marks only)
  axis.grob <- gt$grobs[[rn]]
  axisb <- axis.grob$children[[2]] # Two children - get the second
  axisb # Note: two grobs - tick marks and text
  # Get the tick marks
  xaxis = axisb$grobs[[1]] # NOTE: tick marks first
  xaxis$y = xaxis$y - unit(0, "cm") # Position them inside the panel
  # Add a new row to gt, and insert the revised xaxis grob into the new row.
  gt <- gtable_add_rows(gt, unit(0, "lines"), panel$t-1)
  gt <- gtable_add_grob(gt, xaxis, l = panel$l, t = panel$t, r = panel$r, name = "ticks")
  ## Repeat for the left axis
  # Get the row number of the left axis in the layout
  panel <-c(subset(gt$layout, name=="panel", se=t:r))
  rn <- which(gt$layout$name == "axis-l")
  # Extract the axis (tick marks and axis text)
  axis.grob <- gt$grobs[[rn]]
  axisl <- axis.grob$children[[2]] # Two children - get the second
  axisl # Note: two grobs - text and tick marks
  # Get the tick marks
  yaxis = axisl$grobs[[2]] # NOTE: tick marks second
  yaxis$x = yaxis$x - unit(0, "cm") # Position them inside the panel
  # Add a new column to gt, and insert the revised yaxis grob into the new column.
  gt <- gtable_add_cols(gt, unit(0, "lines"), panel$r)
  gt <- gtable_add_grob(gt, yaxis, t = panel$t, l = panel$r+1, name = "ticks")
  # Turn clipping off
  gt$layout[gt$layout$name == "ticks", ]$clip = "off"
  # Draw it
  grid.draw(gt)
  #######################
  # End adding tick marks
  ######################
}
########################################################
#
# General Graph creating script
#
#########################################################
# Build the shared galaxy scatter plot used throughout this script.
#
# Layers, in draw order: RXJ cluster members (gray), sample 1 (red; size 5
# for high redshift, size 2 for low), sample 2 (blue, same size coding) and
# Coma (yellow); every layer is re-drawn with shape 21 to give points a
# black outline. Mean error bars for each sample are placed near
# (starting.x, starting.y), offset by x.spacing/y.spacing.
#
# Args:
#   rxj.data, one.hi.data, one.lo.data, two.hi.data, two.lo.data, coma.data:
#     data frames of galaxy measurements
#   x.column, y.column / coma.x, coma.y: columns to plot (indexed via [,col])
#   error.x, error.y / error.coma.x, error.coma.y: matching error columns
#   starting.x, starting.y, x.spacing, y.spacing: placement of the mean
#     error bars
#   x.name, y.name: axis labels
#
# Returns the ggplot object (value of the last expression in the chain).
# NOTE(review): relies on a global `error.bar.end.length` that is not
# defined in this function -- confirm it is set elsewhere in the script.
# NOTE(review): the horizontal error bars for the low-redshift samples use
# color="black" while their vertical counterparts are red/blue -- possibly
# unintended; confirm.
create.graph <- function(rxj.data, one.hi.data, one.lo.data, two.hi.data, two.lo.data, coma.data, x.column, y.column, coma.x, coma.y, error.x, error.y, error.coma.x, error.coma.y, starting.x, starting.y, y.spacing, x.spacing, x.name, y.name) {
  # Starting values for plot
  x.vs.y <- ggplot() + theme_bw() +
    theme(
      panel.border = element_rect(fill = NA, colour = "black", size = 1),
      panel.grid = element_blank()
    ) +
    geom_point(aes(x = rxj.data[,x.column], y =rxj.data[,y.column]), color = "gray", size=2) +
    geom_point(data = rxj.data, aes(x = rxj.data[,x.column], y = rxj.data[,y.column]), color = "black", size=2, shape=21) +
    geom_point(aes(x = one.hi.data[,x.column], y = one.hi.data[,y.column]), color = "red", size=5) +
    geom_point(aes(x = one.hi.data[,x.column], y = one.hi.data[,y.column]), color = "black", size=5, shape=21) +
    geom_point(aes(x = one.lo.data[,x.column], y = one.lo.data[,y.column]), color = "red", size=2) +
    geom_point(aes(x = one.lo.data[,x.column], y = one.lo.data[,y.column]), color = "black", size=2, shape=21) +
    geom_point(aes(x = two.hi.data[,x.column], y = two.hi.data[,y.column]), color = "blue", size=5) +
    geom_point(aes(x = two.hi.data[,x.column], y = two.hi.data[,y.column]), color = "black", size=5, shape=21) +
    geom_point(aes(x = two.lo.data[,x.column], y = two.lo.data[,y.column]), color = "blue", size=2) +
    geom_point(aes(x = two.lo.data[,x.column], y = two.lo.data[,y.column]), color = "black", size=2, shape=21) +
    geom_point(aes(x = coma.data[,coma.x], y = coma.data[,coma.y]), color = "yellow", size=2) +
    geom_point(aes(x = coma.data[,coma.x], y = coma.data[,coma.y]), color = "black", size=2, shape=21) +
    xlab(x.name) +
    ylab(y.name) +
    # Change the tick marks
    scale_x_continuous(breaks = pretty_breaks(n=10), minor_breaks = waiver()) +
    scale_y_continuous(breaks = pretty_breaks(n=10), minor_breaks = waiver()) +
    # Sample 1 high redshift error
    geom_errorbar(aes(x=starting.x, ymin=starting.y + y.spacing - mean(one.hi.data[,error.y], na.rm = TRUE), ymax=starting.y + y.spacing + mean(one.hi.data[,error.y], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="red") +
    geom_errorbarh(aes(y=starting.y + y.spacing, x=starting.x, xmin=starting.x - mean(one.hi.data[,error.x], na.rm = TRUE), xmax=starting.x + mean(one.hi.data[,error.x], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="red") +
    # Sample 1 low redshift error0
    geom_errorbar(aes(x=starting.x + x.spacing, ymin=starting.y + y.spacing - mean(one.lo.data[,error.y], na.rm = TRUE), ymax=starting.y + y.spacing + mean(one.lo.data[,error.y], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="red") +
    geom_errorbarh(aes(y=starting.y + y.spacing, x=starting.x + x.spacing, xmin=starting.x + x.spacing - mean(one.lo.data[,error.x], na.rm = TRUE), xmax=starting.x + x.spacing + mean(one.lo.data[,error.x], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="black") +
    # Sample 2 high redshift error
    geom_errorbar(aes(x=starting.x, ymin=starting.y - mean(two.hi.data[,error.y], na.rm = TRUE), ymax=starting.y + mean(two.hi.data[,error.y], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="blue") +
    geom_errorbarh(aes(y=starting.y, x=starting.x, xmin=starting.x - mean(two.hi.data[,error.x], na.rm = TRUE), xmax=starting.x + mean(two.hi.data[,error.x], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="blue") +
    # Sample 2 low redshift error
    geom_errorbar(aes(x=starting.x+x.spacing, ymin=starting.y - mean(two.lo.data[,error.y], na.rm = TRUE), ymax=starting.y + mean(two.lo.data[,error.y], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="blue") +
    geom_errorbarh(aes(y=starting.y, x=starting.x+x.spacing, xmin=starting.x+x.spacing - mean(two.lo.data[,error.x], na.rm = TRUE), xmax=starting.x+x.spacing + mean(two.lo.data[,error.x], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="black") +
    # RXJ error
    geom_errorbar(aes(x=starting.x+x.spacing/2.0, ymin=starting.y + y.spacing/2.0 - mean(rxj.data[,error.y], na.rm = TRUE), ymax=starting.y + y.spacing/2.0 + mean(rxj.data[,error.y], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="gray") +
    geom_errorbarh(aes(y=starting.y + y.spacing/2.0, x=starting.x + x.spacing/2.0, xmin=starting.x+x.spacing/2.0 - mean(rxj.data[,error.x], na.rm = TRUE), xmax=starting.x+x.spacing/2.0 + mean(rxj.data[,error.x], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="gray")
  # Coma Error Bar
  #geom_errorbarh(aes(y=starting.y + y.spacing/4.0, x=starting.x + x.spacing/2.0, xmin=starting.x +x.spacing/2.0 - mean(coma.data[,error.coma.x], na.rm = TRUE), xmax=starting.x + x.spacing/2.0 + mean(coma.data[,error.coma.x], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="yellow") +
  #geom_errorbar(aes(x=starting.x + x.spacing/2.0, ymin=starting.y + y.spacing/4.0 - mean(coma.data[,error.coma.y], na.rm = TRUE), ymax=starting.y + y.spacing/4.0 + mean(coma.data[,error.coma.y], na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="yellow")
}
#########################################################
#
# Go through whichever sheets are needed and graph
#
##########################################################
#####################
# Yellow is Coma, small red is z < .8, big is z > .8, Sample 2 = blue x <.8,
#####################
# Split the field-galaxy sheet into the four sample/redshift bins used by all
# of the plots below.
field.sample.one.LORDSHFT.data <- subset(spectrophotometric_data$FieldGalaxies, REDSHIFT < 0.8 & SAMPLE == 1)
field.sample.one.HIRDSHFT.data <- subset(spectrophotometric_data$FieldGalaxies, REDSHIFT > 0.8 & SAMPLE == 1)
field.sample.two.LORDSHFT.data <- subset(spectrophotometric_data$FieldGalaxies, REDSHIFT < 0.8 & SAMPLE == 2)
field.sample.two.HIRDSHFT.data <- subset(spectrophotometric_data$FieldGalaxies, REDSHIFT > 0.8 & SAMPLE == 2)
# RXJ1226.9+3332 cluster members, selected by catalogue number.  Using %in%
# against an id vector instead of the original 28-term `== ... | == ...` chain
# (same rows selected; `%in%` also never yields NA).
rxj.member.numbers <- c(55, 56, 104, 229, 293, 310, 423, 441, 462, 491, 512,
                        529, 534, 557, 563, 641, 648, 650, 675, 703, 709, 760,
                        801, 899, 1047, 1164, 1170, 1199)
RXJ <- subset(spectrophotometric_data$RXJ1226p9p3332member, NUMBERI %in% rxj.member.numbers)
# Column shortcuts used by some of the plots below.
# Velocity dispersion (corrected, log) and its error:
RXJ.sigma <- RXJ$LSIGMA_COR
RXJ.sigma.error <- RXJ$E_LSIGMA
field.one.LO.sigma <- field.sample.one.LORDSHFT.data$LSIGMA_COR
field.one.HI.sigma <- field.sample.one.HIRDSHFT.data$LSIGMA_COR
field.two.LO.sigma <- field.sample.two.LORDSHFT.data$LSIGMA_COR
field.two.HI.sigma <- field.sample.two.HIRDSHFT.data$LSIGMA_COR
field.one.LO.sigma.error <- field.sample.one.LORDSHFT.data$E_LSIGMA
field.one.HI.sigma.error <- field.sample.one.HIRDSHFT.data$E_LSIGMA
field.two.LO.sigma.error <- field.sample.two.LORDSHFT.data$E_LSIGMA
field.two.HI.sigma.error <- field.sample.two.HIRDSHFT.data$E_LSIGMA
# LIEJB_DEV columns (presumably log effective surface brightness -- confirm):
RXJ.ie <- RXJ$LIEJB_DEV
field.one.LO.ie <- field.sample.one.LORDSHFT.data$LIEJB_DEV
field.one.HI.ie <- field.sample.one.HIRDSHFT.data$LIEJB_DEV
field.two.LO.ie <- field.sample.two.LORDSHFT.data$LIEJB_DEV
field.two.HI.ie <- field.sample.two.HIRDSHFT.data$LIEJB_DEV
# LREJB_KPC_DEV columns (presumably log effective radius in kpc -- confirm):
RXJ.re <- RXJ$LREJB_KPC_DEV
field.one.LO.re <- field.sample.one.LORDSHFT.data$LREJB_KPC_DEV
field.one.HI.re <- field.sample.one.HIRDSHFT.data$LREJB_KPC_DEV
field.two.LO.re <- field.sample.two.LORDSHFT.data$LREJB_KPC_DEV
field.two.HI.re <- field.sample.two.HIRDSHFT.data$LREJB_KPC_DEV
# Coma reference sample.
coma.data <- spectrophotometric_data$Coma
coma.re <- coma.data$lreJB_kpc_DEV
coma.ie <- coma.data$lIeJB_cor
coma.sigma <- coma.data$lsigma_cor
##########################################################################################
#
#
# Calculate the errors not given in the data sheet and add to the data.frame
#
#
###########################################################################################
# Propagated 1-sigma error of the edge-on FP combination
# 1.3*log(sigma) - 0.82*log<I>, stored as column lSIGMA_lLG_IE_E on each
# field/cluster table (simple quadrature of the two scaled errors).
field.sample.one.HIRDSHFT.data$lSIGMA_lLG_IE_E <- sqrt((1.3*field.sample.one.HIRDSHFT.data$E_LSIGMA)^2 + (0.82*field.sample.one.HIRDSHFT.data$e_lIeJB_DEV)^2)
field.sample.one.LORDSHFT.data$lSIGMA_lLG_IE_E <- sqrt((1.3*field.sample.one.LORDSHFT.data$E_LSIGMA)^2 + (0.82*field.sample.one.LORDSHFT.data$e_lIeJB_DEV)^2)
field.sample.two.HIRDSHFT.data$lSIGMA_lLG_IE_E <- sqrt((1.3*field.sample.two.HIRDSHFT.data$E_LSIGMA)^2 + (0.82*field.sample.two.HIRDSHFT.data$e_lIeJB_DEV)^2)
field.sample.two.LORDSHFT.data$lSIGMA_lLG_IE_E <- sqrt((1.3*field.sample.two.LORDSHFT.data$E_LSIGMA)^2 + (0.82*field.sample.two.LORDSHFT.data$e_lIeJB_DEV)^2)
RXJ$lSIGMA_lLG_IE_E <- sqrt((1.3*RXJ$E_LSIGMA)^2 + (0.82*RXJ$e_lIeJB_DEV)^2)
# Same combination scaled by 1.54 (column lSIGMA_lLG_IE_E_154); used for the
# face-on plot's y-axis error bars.
# NOTE(review): the Coma row keeps only the sigma term -- presumably because
# the Coma sheet carries no e_lIeJB_DEV column; confirm against the sheet.
coma.data$lSIGMA_lLG_IE_E_154 <- sqrt(((1.3/1.54)^2)*(coma.data$e_lsigma)^2)
field.sample.one.HIRDSHFT.data$lSIGMA_lLG_IE_E_154 <- sqrt(((1.3/1.54)^2)*(field.sample.one.HIRDSHFT.data$E_LSIGMA)^2 + ((0.82/1.54)^2)*(field.sample.one.HIRDSHFT.data$e_lIeJB_DEV)^2)
field.sample.one.LORDSHFT.data$lSIGMA_lLG_IE_E_154 <- sqrt(((1.3/1.54)^2)*(field.sample.one.LORDSHFT.data$E_LSIGMA)^2 + ((0.82/1.54)^2)*(field.sample.one.LORDSHFT.data$e_lIeJB_DEV)^2)
field.sample.two.HIRDSHFT.data$lSIGMA_lLG_IE_E_154 <- sqrt(((1.3/1.54)^2)*(field.sample.two.HIRDSHFT.data$E_LSIGMA)^2 + ((0.82/1.54)^2)*(field.sample.two.HIRDSHFT.data$e_lIeJB_DEV)^2)
field.sample.two.LORDSHFT.data$lSIGMA_lLG_IE_E_154 <- sqrt(((1.3/1.54)^2)*(field.sample.two.LORDSHFT.data$E_LSIGMA)^2 + ((0.82/1.54)^2)*(field.sample.two.LORDSHFT.data$e_lIeJB_DEV)^2)
RXJ$lSIGMA_lLG_IE_E_154 <- sqrt(((1.3/1.54)^2)*(RXJ$E_LSIGMA)^2 + ((0.82/1.54)^2)*(RXJ$e_lIeJB_DEV)^2)
# Propagated error of the face-on x coordinate
# (2.22*log re - 0.82*log<I> + 1.3*log(sigma))/2.70, stored as column
# lREJB_lIE_lSIGMA_270.  Includes an re/<I> cross term with an assumed
# correlation coefficient of -0.97.  The Coma row again keeps only the sigma
# term (same caveat as above).
coma.data$lREJB_lIE_lSIGMA_270 <- sqrt(((1.3/2.7)^2)*(coma.data$e_lsigma)^2)
field.sample.one.HIRDSHFT.data$lREJB_lIE_lSIGMA_270 <- sqrt(((2.22/2.7)^2)*(field.sample.one.HIRDSHFT.data$E_LRE_DEVAF814W)^2 + ((0.82/2.7)^2)*(field.sample.one.HIRDSHFT.data$e_lIeJB_DEV)^2+2*(2.22/2.7)*(0.82/2.7)*(-0.97)*(field.sample.one.HIRDSHFT.data$E_LRE_DEVAF814W)*(field.sample.one.HIRDSHFT.data$e_lIeJB_DEV) + ((1.3/2.7)^2)*(field.sample.one.HIRDSHFT.data$E_LSIGMA)^2)
field.sample.one.LORDSHFT.data$lREJB_lIE_lSIGMA_270 <- sqrt(((2.22/2.7)^2)*(field.sample.one.LORDSHFT.data$E_LRE_DEVAF814W)^2 + ((0.82/2.7)^2)*(field.sample.one.LORDSHFT.data$e_lIeJB_DEV)^2+2*(2.22/2.7)*(0.82/2.7)*(-0.97)*(field.sample.one.LORDSHFT.data$E_LRE_DEVAF814W)*(field.sample.one.LORDSHFT.data$e_lIeJB_DEV) + ((1.3/2.7)^2)*(field.sample.one.LORDSHFT.data$E_LSIGMA)^2)
field.sample.two.HIRDSHFT.data$lREJB_lIE_lSIGMA_270 <- sqrt(((2.22/2.7)^2)*(field.sample.two.HIRDSHFT.data$E_LRE_DEVAF814W)^2 + ((0.82/2.7)^2)*(field.sample.two.HIRDSHFT.data$e_lIeJB_DEV)^2+2*(2.22/2.7)*(0.82/2.7)*(-0.97)*(field.sample.two.HIRDSHFT.data$E_LRE_DEVAF814W)*(field.sample.two.HIRDSHFT.data$e_lIeJB_DEV) + ((1.3/2.7)^2)*(field.sample.two.HIRDSHFT.data$E_LSIGMA)^2)
field.sample.two.LORDSHFT.data$lREJB_lIE_lSIGMA_270 <- sqrt(((2.22/2.7)^2)*(field.sample.two.LORDSHFT.data$E_LRE_DEVAF814W)^2 + ((0.82/2.7)^2)*(field.sample.two.LORDSHFT.data$e_lIeJB_DEV)^2+2*(2.22/2.7)*(0.82/2.7)*(-0.97)*(field.sample.two.LORDSHFT.data$E_LRE_DEVAF814W)*(field.sample.two.LORDSHFT.data$e_lIeJB_DEV) + ((1.3/2.7)^2)*(field.sample.two.LORDSHFT.data$E_LSIGMA)^2)
RXJ$lREJB_lIE_lSIGMA_270 <- sqrt(((2.22/2.7)^2)*(RXJ$E_LRE_DEVAF814W)^2 + ((0.82/2.7)^2)*(RXJ$e_lIeJB_DEV)^2+2*(2.22/2.7)*(0.82/2.7)*(-0.97)*(RXJ$E_LRE_DEVAF814W)*(RXJ$e_lIeJB_DEV) + ((1.3/2.7)^2)*(RXJ$E_LSIGMA)^2)
###########################################################################################
#
#
# Creating the stacked plots for publication
#
#
###########################################################################################
# Length of the caps drawn on error-bar ends (0 = bare bars, no caps).
error.bar.end.length <- 0.00
##########################
# Fundamental Plane graphs
##########################
# Side On Graph
# Edge-on FP combination for Coma; used to fit the reference line added below.
coma.data$lSIGMA_lIE <- (1.3*coma.data$lsigma_cor)-(0.82*coma.data$lIeJB_cor)
# Edge-on view of the fundamental plane:
#   x = log re [kpc],  y = 1.3 log(sigma) - 0.82 log<I>.
# Each sample is drawn twice: a filled point in the sample colour, then a black
# ring (shape 21) on top.  Mean error bars per sample are parked in the
# lower-right of the panel (x = 0.9..1.4, y = -0.5).
# NOTE(review): width/height are supplied inside aes() throughout this script;
# with error.bar.end.length = 0 that has no visible effect, so it is left
# as-is for consistency with the rest of the file.
fundamental_plane_headon <- ggplot() + theme_bw() +
  theme(
    panel.border = element_rect(fill = NA, colour = "black", size = 1),
    panel.grid = element_blank()
  ) +
  # Filled points, one layer per sample.
  geom_point(data = RXJ, aes(x = LREJB_KPC_DEV, y = (1.3*LSIGMA_COR)-(0.82*LIEJB_DEV)), color = "gray", size=2) +
  geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LREJB_KPC_DEV, y = (1.3*LSIGMA_COR)-(0.82*LIEJB_DEV)), color = "red", size=5) +
  geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LREJB_KPC_DEV, y = (1.3*LSIGMA_COR)-(0.82*LIEJB_DEV)), color = "red", size=2) +
  geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LREJB_KPC_DEV, y = (1.3*LSIGMA_COR)-(0.82*LIEJB_DEV)), color = "blue", size=5) +
  geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LREJB_KPC_DEV, y = (1.3*LSIGMA_COR)-(0.82*LIEJB_DEV)), color = "blue", size=2) +
  geom_point(data = coma.data, aes(x = lreJB_kpc_DEV, y = (1.3*lsigma_cor)-(0.82*lIeJB_cor)), color = "yellow", size=2) +
  # Black outlines drawn over the filled points.
  geom_point(data = RXJ, aes(x = LREJB_KPC_DEV, y = (1.3*LSIGMA_COR)-(0.82*LIEJB_DEV)), color = "black", size=2, shape=21) +
  geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LREJB_KPC_DEV, y = (1.3*LSIGMA_COR)-(0.82*LIEJB_DEV)), color = "black", size=5, shape=21) +
  geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LREJB_KPC_DEV, y = (1.3*LSIGMA_COR)-(0.82*LIEJB_DEV)), color = "black", size=2, shape=21) +
  geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LREJB_KPC_DEV, y = (1.3*LSIGMA_COR)-(0.82*LIEJB_DEV)), color = "black", size=5, shape=21) +
  geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LREJB_KPC_DEV, y = (1.3*LSIGMA_COR)-(0.82*LIEJB_DEV)), color = "black", size=2, shape=21) +
  geom_point(data = coma.data, aes(x = lreJB_kpc_DEV, y = (1.3*lsigma_cor)-(0.82*lIeJB_cor)), color = "black", size=2, shape=21) +
  xlab('logre [kpc]') +
  ylab('1.3log(σ) - 0.82log<I>') +
  # Currently calculated by coef(lm(data=coma.data, lSIGMA_lIE ~ lreJB_kpc_DEV))
  geom_abline(intercept = 0.4259036, slope=1.0079013) +
  # Change the tick marks
  scale_x_continuous(breaks = pretty_breaks(n=10), minor_breaks = waiver()) +
  scale_y_continuous(breaks = pretty_breaks(n=10), minor_breaks = waiver()) +
  # Coma Error Bar
  geom_errorbar(aes(x=1.4, ymin=-0.5 - mean(coma.data$e_lsigma, na.rm = TRUE), ymax=-0.5 + mean(coma.data$e_lsigma, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="yellow") +
  # Sample 1 high redshift error
  geom_errorbar(aes(x=1.2, ymin=-0.5 - mean(field.sample.one.HIRDSHFT.data$lSIGMA_lLG_IE_E, na.rm = TRUE), ymax=-0.5 + mean(field.sample.one.HIRDSHFT.data$lSIGMA_lLG_IE_E, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="red") +
  geom_errorbarh(aes(y=-0.5, x=1.2, xmin=1.2 - mean(field.sample.one.HIRDSHFT.data$E_LRE_DEVAF814W, na.rm = TRUE), xmax=1.2 + mean(field.sample.one.HIRDSHFT.data$E_LRE_DEVAF814W, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="red") +
  # Sample 1 low redshift error
  # NOTE(review): this horizontal bar is black while its vertical partner is
  # red (same pattern below for sample 2 low-z) -- confirm that is intended.
  geom_errorbar(aes(x=1.3, ymin=-0.5 - mean(field.sample.one.LORDSHFT.data$lSIGMA_lLG_IE_E, na.rm = TRUE), ymax=-0.5 + mean(field.sample.one.LORDSHFT.data$lSIGMA_lLG_IE_E, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="red") +
  geom_errorbarh(aes(y=-0.5, x=1.3, xmin=1.3 - mean(field.sample.one.LORDSHFT.data$E_LRE_DEVAF814W, na.rm = TRUE), xmax=1.3 + mean(field.sample.one.LORDSHFT.data$E_LRE_DEVAF814W, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="black") +
  # Sample 2 high redshift error
  # FIX: xmax previously averaged sample 1's errors (copy-paste); both halves
  # of this horizontal bar now use sample 2's E_LRE_DEVAF814W.
  geom_errorbar(aes(x=1.1, ymin=-0.5 - mean(field.sample.two.HIRDSHFT.data$lSIGMA_lLG_IE_E, na.rm = TRUE), ymax=-0.5 + mean(field.sample.two.HIRDSHFT.data$lSIGMA_lLG_IE_E, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="blue") +
  geom_errorbarh(aes(y=-0.5, x=1.1, xmin=1.1 - mean(field.sample.two.HIRDSHFT.data$E_LRE_DEVAF814W, na.rm = TRUE), xmax=1.1 + mean(field.sample.two.HIRDSHFT.data$E_LRE_DEVAF814W, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="blue") +
  # Sample 2 low redshift error
  geom_errorbar(aes(x=1.0, ymin=-0.5 - mean(field.sample.two.LORDSHFT.data$lSIGMA_lLG_IE_E, na.rm = TRUE), ymax=-0.5 + mean(field.sample.two.LORDSHFT.data$lSIGMA_lLG_IE_E, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="blue") +
  geom_errorbarh(aes(y=-0.5, x=1.0, xmin=1.0 - mean(field.sample.two.LORDSHFT.data$E_LRE_DEVAF814W, na.rm = TRUE), xmax=1.0 + mean(field.sample.two.LORDSHFT.data$E_LRE_DEVAF814W, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="black") +
  # RXJ error
  geom_errorbar(aes(x=0.9, ymin=-0.5 - mean(RXJ$lSIGMA_lLG_IE_E, na.rm = TRUE), ymax=-0.5 + mean(RXJ$lSIGMA_lLG_IE_E, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="gray") +
  geom_errorbarh(aes(y=-0.5, x=0.9, xmin=0.9 - mean(RXJ$E_LRE_DEVAF814W, na.rm = TRUE), xmax=0.9 + mean(RXJ$E_LRE_DEVAF814W, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="gray")
add.tick.marks(fundamental_plane_headon)
# Face On Graph
# Face-on FP coordinates for Coma; used for the reference line added below.
coma.data$lREJB_lIEJB_lSIGMA <- ((2.22*coma.data$lreJB_kpc_DEV)-(0.82*coma.data$lIeJB_cor)+(1.3*coma.data$lsigma_cor))/2.70
coma.data$lIEJB_lSIGMA <- ((1.3*coma.data$lIeJB_cor)+(0.82*coma.data$lsigma_cor))/1.54
# Face-on view of the fundamental plane:
#   x = (2.22 log re - 0.82 log<I> + 1.3 log(sigma))/2.70
#   y = (1.3 log<I> + 0.82 log(sigma))/1.54
# Same fill-then-black-outline layering as the edge-on plot; mean error bars
# per sample are parked along y = 2.3, x = -0.4..0.1.
fundamental_plane_faceon <- ggplot() + theme_bw() +
  theme(
    panel.border = element_rect(fill = NA, colour = "black", size = 1),
    panel.grid = element_blank()
  ) +
  geom_point(data = RXJ, aes(x = ((2.22*LREJB_KPC_DEV)-(0.82*LIEJB_DEV) + (1.3*LSIGMA_COR))/2.70, y = ((1.3*LIEJB_DEV)+(0.82*LSIGMA_COR))/1.54), color = "gray", size=2) +
  geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = ((2.22*LREJB_KPC_DEV)-(0.82*LIEJB_DEV) + (1.3*LSIGMA_COR))/2.70, y = ((1.3*LIEJB_DEV)+(0.82*LSIGMA_COR))/1.54), color = "red", size=5) +
  geom_point(data = field.sample.one.LORDSHFT.data, aes(x = ((2.22*LREJB_KPC_DEV)-(0.82*LIEJB_DEV) + (1.3*LSIGMA_COR))/2.70, y = ((1.3*LIEJB_DEV)+(0.82*LSIGMA_COR))/1.54), color = "red", size=2) +
  geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = ((2.22*LREJB_KPC_DEV)-(0.82*LIEJB_DEV) + (1.3*LSIGMA_COR))/2.70, y = ((1.3*LIEJB_DEV)+(0.82*LSIGMA_COR))/1.54), color = "blue", size=5) +
  geom_point(data = field.sample.two.LORDSHFT.data, aes(x = ((2.22*LREJB_KPC_DEV)-(0.82*LIEJB_DEV) + (1.3*LSIGMA_COR))/2.70, y = ((1.3*LIEJB_DEV)+(0.82*LSIGMA_COR))/1.54), color = "blue", size=2) +
  geom_point(data = RXJ, aes(x = ((2.22*LREJB_KPC_DEV)-(0.82*LIEJB_DEV) + (1.3*LSIGMA_COR))/2.70, y = ((1.3*LIEJB_DEV)+(0.82*LSIGMA_COR))/1.54), color = "black", size=2, shape=21) +
  geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = ((2.22*LREJB_KPC_DEV)-(0.82*LIEJB_DEV) + (1.3*LSIGMA_COR))/2.70, y = ((1.3*LIEJB_DEV)+(0.82*LSIGMA_COR))/1.54), color = "black", size=5, shape=21) +
  geom_point(data = field.sample.one.LORDSHFT.data, aes(x = ((2.22*LREJB_KPC_DEV)-(0.82*LIEJB_DEV) + (1.3*LSIGMA_COR))/2.70, y = ((1.3*LIEJB_DEV)+(0.82*LSIGMA_COR))/1.54), color = "black", size=2, shape=21) +
  geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = ((2.22*LREJB_KPC_DEV)-(0.82*LIEJB_DEV) + (1.3*LSIGMA_COR))/2.70, y = ((1.3*LIEJB_DEV)+(0.82*LSIGMA_COR))/1.54), color = "black", size=5, shape=21) +
  geom_point(data = field.sample.two.LORDSHFT.data, aes(x = ((2.22*LREJB_KPC_DEV)-(0.82*LIEJB_DEV) + (1.3*LSIGMA_COR))/2.70, y = ((1.3*LIEJB_DEV)+(0.82*LSIGMA_COR))/1.54), color = "black", size=2, shape=21) +
  geom_point(data = coma.data, aes(x = ((2.22*lreJB_kpc_DEV)-(0.82*lIeJB_cor)+(1.3*lsigma_cor))/2.70, y = ((1.3*lIeJB_cor)+(0.82*lsigma_cor))/1.54), color = "yellow", size=2) +
  geom_point(data = coma.data, aes(x = ((2.22*lreJB_kpc_DEV)-(0.82*lIeJB_cor)+(1.3*lsigma_cor))/2.70, y = ((1.3*lIeJB_cor)+(0.82*lsigma_cor))/1.54), color = "black", size=2, shape=21) +
  # FIX: method and se previously sat inside aes(), where ggplot2 ignores them
  # and geom_smooth falls back to its default smoother with a confidence
  # ribbon.  They are geom parameters and belong outside the mapping.
  # (Note the geom_abline below already draws the same lm fit by hand.)
  geom_smooth(data = coma.data, aes(x = ((2.22*lreJB_kpc_DEV)-(0.82*lIeJB_cor)+(1.3*lsigma_cor))/2.70, y = ((1.3*lIeJB_cor)+(0.82*lsigma_cor))/1.54), method = "lm", se = FALSE) +
  xlab('(2.22logre - 0.82log<I>e + 1.3log(σ))/2.70') +
  ylab('(1.3log<I>e + 0.82log(σ))/1.54') +
  # NOTE(review): the original comment said these coefficients came from the
  # edge-on fit (lSIGMA_lIE ~ lreJB_kpc_DEV); presumably they are the Coma
  # face-on fit of lIEJB_lSIGMA ~ lREJB_lIEJB_lSIGMA -- confirm.
  geom_abline(intercept = 3.6978053, slope=-0.6993457) +
  # Change the tick marks
  scale_x_continuous(breaks = pretty_breaks(n=10), minor_breaks = waiver()) +
  scale_y_continuous(breaks = pretty_breaks(n=10), minor_breaks = waiver()) +
  # Coma Error Bar
  geom_errorbarh(aes(y=2.3, x=0.0, xmin=0.0 - mean(coma.data$lSIGMA_lLG_IE_E_154, na.rm = TRUE), xmax=0.0 + mean(coma.data$lSIGMA_lLG_IE_E_154, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="yellow") +
  geom_errorbar(aes(x=0.0, ymin=2.3 - mean(coma.data$lREJB_lIE_lSIGMA_270, na.rm = TRUE), ymax=2.3 + mean(coma.data$lREJB_lIE_lSIGMA_270, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="yellow") +
  # Sample 1 high redshift error
  geom_errorbar(aes(x=-0.1, ymin=2.3 - mean(field.sample.one.HIRDSHFT.data$lSIGMA_lLG_IE_E_154, na.rm = TRUE), ymax=2.3 + mean(field.sample.one.HIRDSHFT.data$lSIGMA_lLG_IE_E_154, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="red") +
  geom_errorbarh(aes(y=2.3, x=-0.1, xmin=-0.1 - mean(field.sample.one.HIRDSHFT.data$lREJB_lIE_lSIGMA_270, na.rm = TRUE), xmax=-0.1 + mean(field.sample.one.HIRDSHFT.data$lREJB_lIE_lSIGMA_270, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="red") +
  # Sample 1 low redshift error
  geom_errorbar(aes(x=-0.2, ymin=2.3 - mean(field.sample.one.LORDSHFT.data$lSIGMA_lLG_IE_E_154, na.rm = TRUE), ymax=2.3 + mean(field.sample.one.LORDSHFT.data$lSIGMA_lLG_IE_E_154, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="red") +
  geom_errorbarh(aes(y=2.3, x=-0.2, xmin=-0.2 - mean(field.sample.one.LORDSHFT.data$lREJB_lIE_lSIGMA_270, na.rm = TRUE), xmax=-0.2 + mean(field.sample.one.LORDSHFT.data$lREJB_lIE_lSIGMA_270, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="black") +
  # Sample 2 high redshift error
  # FIX: xmax previously averaged sample 1's errors (copy-paste); both halves
  # of this horizontal bar now use sample 2's lREJB_lIE_lSIGMA_270.
  geom_errorbar(aes(x=-0.3, ymin=2.3 - mean(field.sample.two.HIRDSHFT.data$lSIGMA_lLG_IE_E_154, na.rm = TRUE), ymax=2.3 + mean(field.sample.two.HIRDSHFT.data$lSIGMA_lLG_IE_E_154, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="blue") +
  geom_errorbarh(aes(y=2.3, x=-0.3, xmin=-0.3 - mean(field.sample.two.HIRDSHFT.data$lREJB_lIE_lSIGMA_270, na.rm = TRUE), xmax=-0.3 + mean(field.sample.two.HIRDSHFT.data$lREJB_lIE_lSIGMA_270, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="blue") +
  # Sample 2 low redshift error
  geom_errorbar(aes(x=-0.4, ymin=2.3 - mean(field.sample.two.LORDSHFT.data$lSIGMA_lLG_IE_E_154, na.rm = TRUE), ymax=2.3 + mean(field.sample.two.LORDSHFT.data$lSIGMA_lLG_IE_E_154, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="blue") +
  geom_errorbarh(aes(y=2.3, x=-0.4, xmin=-0.4 - mean(field.sample.two.LORDSHFT.data$lREJB_lIE_lSIGMA_270, na.rm = TRUE), xmax=-0.4 + mean(field.sample.two.LORDSHFT.data$lREJB_lIE_lSIGMA_270, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="black") +
  # RXJ error
  geom_errorbar(aes(x=0.1, ymin=2.3 - mean(RXJ$lSIGMA_lLG_IE_E_154, na.rm = TRUE), ymax=2.3 + mean(RXJ$lSIGMA_lLG_IE_E_154, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="gray") +
  geom_errorbarh(aes(y=2.3, x=0.1, xmin=0.1 - mean(RXJ$lREJB_lIE_lSIGMA_270, na.rm = TRUE), xmax=0.1 + mean(RXJ$lREJB_lIE_lSIGMA_270, na.rm = TRUE), width = error.bar.end.length, height=error.bar.end.length), color="gray")
add.tick.marks(fundamental_plane_faceon)
##################
# Velocity Dispersion vs log M/L
##################
# Scatter of log(M/L_B) against log(sigma).  Arguments are positional and must
# line up with create.graph's signature (defined earlier in this file).
lsigma.vs.logml <- create.graph(
  RXJ,
  field.sample.one.HIRDSHFT.data, field.sample.one.LORDSHFT.data,
  field.sample.two.HIRDSHFT.data, field.sample.two.LORDSHFT.data,
  coma.data,
  "LSIGMA_COR", "LML_JB_DEV",    # field/RXJ x and y columns
  "lsigma_cor", "lML_JB_DEV",    # Coma x and y columns
  "E_LSIGMA", "E_LML_JB_DEV",    # field/RXJ error columns
  "e_lsigma", "e_lMgb",          # Coma error columns
  2.7, -0.2, -0.26, -0.15,       # error-bar placement (confirm against create.graph)
  "log(σ)", "log(M/Lb) [M/L]"    # axis labels
)
# Currently calculated by coef(lm(data=coma.data, lML_JB_DEV ~ lsigma_cor)) slope: 1.07*log(sigma), -1.560
lsigma.vs.logml <- lsigma.vs.logml + geom_abline(intercept = -0.8569, slope = 0.7535)
add.tick.marks(lsigma.vs.logml)
###############################
# log Mass vs log(Mass/L)
###############################
# Weights for the quantile-regression (rq) fit quoted below: perpendicular
# distance of each Coma point from the line 0.24*lMass + lML + 1.754 = 0.
coma.data$lm_vs_lml_weights <- abs(0.24 * coma.data$lMass_DEV + coma.data$lML_JB_DEV + 1.754) / sqrt(0.24^2 + 1)
# Scatter of log(M/L_B) against log(Mass); positional args as in create.graph.
lm.vs.logml <- create.graph(
  RXJ,
  field.sample.one.HIRDSHFT.data, field.sample.one.LORDSHFT.data,
  field.sample.two.HIRDSHFT.data, field.sample.two.LORDSHFT.data,
  coma.data,
  "LMASS_DEV", "LML_JB_DEV",     # field/RXJ x and y columns
  "lMass_DEV", "lML_JB_DEV",     # Coma x and y columns
  "E_LMASS_DEV", "E_LML_JB_DEV", # field/RXJ error columns
  "e_lsigma", "e_lMgb",          # Coma error columns
  12.3, -0.2, -0.26, -0.15,      # error-bar placement (confirm against create.graph)
  "log(Mass)", "log(M/Lb) [M/L]" # axis labels
)
# Calcuated by quantreg's rq(coma.data$lML_JB_DEV ~ coma.data$lMass_DEV)
lm.vs.logml <- lm.vs.logml + geom_abline(intercept = -1.6587, slope = 0.2262)
add.tick.marks(lm.vs.logml)
###############################
# log Mass vs log(Sigma)
###############################
# Getting the weights for the rq value.
# NOTE(review): this recomputes exactly the same column as the previous
# section; kept for parity with the original script.
coma.data$lm_vs_lml_weights <- abs(0.24 * coma.data$lMass_DEV + coma.data$lML_JB_DEV + 1.754) / sqrt(0.24^2 + 1)
# Scatter of log(sigma) against log(Mass); positional args as in create.graph.
lm.vs.lsigma <- create.graph(
  RXJ,
  field.sample.one.HIRDSHFT.data, field.sample.one.LORDSHFT.data,
  field.sample.two.HIRDSHFT.data, field.sample.two.LORDSHFT.data,
  coma.data,
  "LMASS_DEV", "LSIGMA_COR",     # field/RXJ x and y columns
  "lMass_DEV", "lsigma_cor",     # Coma x and y columns
  "E_LMASS_DEV", "E_LSIGMA",     # field/RXJ error columns
  "", "",                        # no Coma error columns for this plot
  12.5, 2.2, -0.15, -0.3,        # error-bar placement (confirm against create.graph)
  "log(Mass)", "log(σ)"          # axis labels
)
# Line of best fit left disabled in the original:
# lm.vs.lsigma <- lm.vs.lsigma + geom_abline(intercept = 1.9587, slope = 0.2262)
add.tick.marks(lm.vs.lsigma)
##########################################
#
# Spectrometry data Graphs
#
##########################################
#############
# Log(sigma) vs redshift
#############
# Velocity dispersion against redshift for the four field samples.  Each
# sample is drawn as a filled point immediately overdrawn with a black ring
# (shape 21); layer order matches the original hand-written chain.
lsigma.vs.redshift <- ggplot() +
  theme_bw() +
  theme(
    panel.border = element_rect(fill = NA, colour = "black", size = 1),
    panel.grid = element_blank()
  )
for (spec in list(
  list(d = field.sample.one.HIRDSHFT.data, col = "red", sz = 5),
  list(d = field.sample.one.LORDSHFT.data, col = "red", sz = 2),
  list(d = field.sample.two.HIRDSHFT.data, col = "blue", sz = 5),
  list(d = field.sample.two.LORDSHFT.data, col = "blue", sz = 2)
)) {
  lsigma.vs.redshift <- lsigma.vs.redshift +
    geom_point(data = spec$d, aes(x = REDSHIFT, y = LSIGMA_COR), color = spec$col, size = spec$sz) +
    geom_point(data = spec$d, aes(x = REDSHIFT, y = LSIGMA_COR), color = "black", size = spec$sz, shape = 21)
}
lsigma.vs.redshift <- lsigma.vs.redshift +
  xlab("Redshift") +
  ylab("log(σ)") +
  # Denser tick marks than the ggplot defaults.
  scale_x_continuous(breaks = pretty_breaks(n = 10), minor_breaks = waiver()) +
  scale_y_continuous(breaks = pretty_breaks(n = 10), minor_breaks = waiver())
add.tick.marks(lsigma.vs.redshift)
#############
# log(sigma) vs log(C4668)
############
# C4668 index strength against velocity dispersion for the field samples
# (red = sample 1, blue = sample 2; large = z > 0.8, small = z < 0.8; each
# filled point is overdrawn with a black ring, shape 21).
# NOTE(review): the Coma layers below plot lML_JB_DEV on the y axis while
# every field layer plots LC4668_COR -- this looks copy-pasted from the
# log(M/L) plot; confirm which Coma column should be used here.
lsigma.vs.lc4668 <- ggplot() + theme_bw() +
theme(
panel.border = element_rect(fill = NA, colour = "black", size = 1),
panel.grid = element_blank()
) +
geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LSIGMA_COR, y = LC4668_COR), color = "red", size=5) +
geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LSIGMA_COR, y = LC4668_COR), color = "black", size=5, shape=21) +
geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LSIGMA_COR, y = LC4668_COR), color = "red", size=2) +
geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LSIGMA_COR, y = LC4668_COR), color = "black", size=2, shape=21) +
geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LSIGMA_COR, y = LC4668_COR), color = "blue", size=5) +
geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LSIGMA_COR, y = LC4668_COR), color = "black", size=5, shape=21) +
geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LSIGMA_COR, y = LC4668_COR), color = "blue", size=2) +
geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LSIGMA_COR, y = LC4668_COR), color = "black", size=2, shape=21) +
geom_point(data = coma.data, aes(x = lsigma_cor, y = lML_JB_DEV), color = "yellow", size=2) +
geom_point(data = coma.data, aes(x = lsigma_cor, y = lML_JB_DEV), color = "black", size=2, shape=21) +
xlab('log(σ)') +
ylab('log(C4668)') +
# Calcuated by quantreg's rq(coma.data$lML_JB_DEV ~ coma.data$lMass_DEV)
#geom_abline(intercept = -1.6587, slope = 0.2262) +
# Change the tick marks
scale_x_continuous(breaks = pretty_breaks(n=20), minor_breaks = waiver()) +
scale_y_continuous(breaks = pretty_breaks(n=20), minor_breaks = waiver())
add.tick.marks(lsigma.vs.lc4668)
#############
# log(sigma) vs log(Fe4383)
#############
# Fe4383 index strength against velocity dispersion (same layering scheme as
# the C4668 plot above).
# NOTE(review): the Coma layers plot lML_JB_DEV on the y axis while every
# field layer plots LFE4383_COR -- suspected copy-paste; confirm the intended
# Coma column.
lsigma.vs.lfe4383 <- ggplot() + theme_bw() +
theme(
panel.border = element_rect(fill = NA, colour = "black", size = 1),
panel.grid = element_blank()
) +
geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LSIGMA_COR, y = LFE4383_COR), color = "red", size=5) +
geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LSIGMA_COR, y = LFE4383_COR), color = "black", size=5, shape=21) +
geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LSIGMA_COR, y = LFE4383_COR), color = "red", size=2) +
geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LSIGMA_COR, y = LFE4383_COR), color = "black", size=2, shape=21) +
geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LSIGMA_COR, y = LFE4383_COR), color = "blue", size=5) +
geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LSIGMA_COR, y = LFE4383_COR), color = "black", size=5, shape=21) +
geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LSIGMA_COR, y = LFE4383_COR), color = "blue", size=2) +
geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LSIGMA_COR, y = LFE4383_COR), color = "black", size=2, shape=21) +
geom_point(data = coma.data, aes(x = lsigma_cor, y = lML_JB_DEV), color = "yellow", size=2) +
geom_point(data = coma.data, aes(x = lsigma_cor, y = lML_JB_DEV), color = "black", size=2, shape=21) +
xlab('log(σ)') +
ylab('log(Fe4383)') +
# Calcuated by quantreg's rq(coma.data$lML_JB_DEV ~ coma.data$lMass_DEV)
#geom_abline(intercept = -1.6587, slope = 0.2262) +
# Change the tick marks
scale_x_continuous(breaks = pretty_breaks(n=20), minor_breaks = waiver()) +
scale_y_continuous(breaks = pretty_breaks(n=20), minor_breaks = waiver())
add.tick.marks(lsigma.vs.lfe4383)
#############
# log(sigma) vs CN3883
#############
# CN3883 index strength against velocity dispersion (same layering scheme as
# the plots above).
# NOTE(review): the Coma layers plot lML_JB_DEV on the y axis while every
# field layer plots CN3883_COR -- suspected copy-paste; confirm the intended
# Coma column.
lsigma.vs.cn3883 <- ggplot() + theme_bw() +
theme(
panel.border = element_rect(fill = NA, colour = "black", size = 1),
panel.grid = element_blank()
) +
geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LSIGMA_COR, y = CN3883_COR), color = "red", size=5) +
geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LSIGMA_COR, y = CN3883_COR), color = "black", size=5, shape=21) +
geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LSIGMA_COR, y = CN3883_COR), color = "red", size=2) +
geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LSIGMA_COR, y = CN3883_COR), color = "black", size=2, shape=21) +
geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LSIGMA_COR, y = CN3883_COR), color = "blue", size=5) +
geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LSIGMA_COR, y = CN3883_COR), color = "black", size=5, shape=21) +
geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LSIGMA_COR, y = CN3883_COR), color = "blue", size=2) +
geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LSIGMA_COR, y = CN3883_COR), color = "black", size=2, shape=21) +
geom_point(data = coma.data, aes(x = lsigma_cor, y = lML_JB_DEV), color = "yellow", size=2) +
geom_point(data = coma.data, aes(x = lsigma_cor, y = lML_JB_DEV), color = "black", size=2, shape=21) +
xlab('log(σ)') +
ylab('CN3883') +
# Calcuated by quantreg's rq(coma.data$lML_JB_DEV ~ coma.data$lMass_DEV)
#geom_abline(intercept = -1.6587, slope = 0.2262) +
# Change the tick marks
scale_x_continuous(breaks = pretty_breaks(n=20), minor_breaks = waiver()) +
scale_y_continuous(breaks = pretty_breaks(n=20), minor_breaks = waiver())
add.tick.marks(lsigma.vs.cn3883)
#############
# log(sigma) vs H(delta) + H(Gamma)
#############
# Combined H-delta + H-gamma index against velocity dispersion (same layering
# scheme as the plots above).
# NOTE(review): the Coma layers plot lML_JB_DEV on the y axis while every
# field layer plots lHdgA_cor -- suspected copy-paste; confirm the intended
# Coma column.
lsigma.vs.lHdgA <- ggplot() + theme_bw() +
theme(
panel.border = element_rect(fill = NA, colour = "black", size = 1),
panel.grid = element_blank()
) +
geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LSIGMA_COR, y = lHdgA_cor), color = "red", size=5) +
geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LSIGMA_COR, y = lHdgA_cor), color = "black", size=5, shape=21) +
geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LSIGMA_COR, y = lHdgA_cor), color = "red", size=2) +
geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LSIGMA_COR, y = lHdgA_cor), color = "black", size=2, shape=21) +
geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LSIGMA_COR, y = lHdgA_cor), color = "blue", size=5) +
geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LSIGMA_COR, y = lHdgA_cor), color = "black", size=5, shape=21) +
geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LSIGMA_COR, y = lHdgA_cor), color = "blue", size=2) +
geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LSIGMA_COR, y = lHdgA_cor), color = "black", size=2, shape=21) +
geom_point(data = coma.data, aes(x = lsigma_cor, y = lML_JB_DEV), color = "yellow", size=2) +
geom_point(data = coma.data, aes(x = lsigma_cor, y = lML_JB_DEV), color = "black", size=2, shape=21) +
xlab('log(σ)') +
ylab('H(Gamma) + H(Delta)') +
# Calcuated by quantreg's rq(coma.data$lML_JB_DEV ~ coma.data$lMass_DEV)
#geom_abline(intercept = -1.6587, slope = 0.2262) +
# Change the tick marks
scale_x_continuous(breaks = pretty_breaks(n=20), minor_breaks = waiver()) +
scale_y_continuous(breaks = pretty_breaks(n=20), minor_breaks = waiver())
add.tick.marks(lsigma.vs.lHdgA)
#############
# log(sigma) vs log(H(zeta))
#############
# H-zeta index against velocity dispersion (same layering scheme as the plots
# above).
# NOTE(review): the Coma layers plot lML_JB_DEV on the y axis while every
# field layer plots lHzetaA_cor -- suspected copy-paste; confirm the intended
# Coma column.
lsigma.vs.lhzeta <- ggplot() + theme_bw() +
theme(
panel.border = element_rect(fill = NA, colour = "black", size = 1),
panel.grid = element_blank()
) +
geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LSIGMA_COR, y = lHzetaA_cor), color = "red", size=5) +
geom_point(data = field.sample.one.HIRDSHFT.data, aes(x = LSIGMA_COR, y = lHzetaA_cor), color = "black", size=5, shape=21) +
geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LSIGMA_COR, y = lHzetaA_cor), color = "red", size=2) +
geom_point(data = field.sample.one.LORDSHFT.data, aes(x = LSIGMA_COR, y = lHzetaA_cor), color = "black", size=2, shape=21) +
geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LSIGMA_COR, y = lHzetaA_cor), color = "blue", size=5) +
geom_point(data = field.sample.two.HIRDSHFT.data, aes(x = LSIGMA_COR, y = lHzetaA_cor), color = "black", size=5, shape=21) +
geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LSIGMA_COR, y = lHzetaA_cor), color = "blue", size=2) +
geom_point(data = field.sample.two.LORDSHFT.data, aes(x = LSIGMA_COR, y = lHzetaA_cor), color = "black", size=2, shape=21) +
geom_point(data = coma.data, aes(x = lsigma_cor, y = lML_JB_DEV), color = "yellow", size=2) +
geom_point(data = coma.data, aes(x = lsigma_cor, y = lML_JB_DEV), color = "black", size=2, shape=21) +
xlab('log(σ)') +
ylab('log(H(zeta))') +
# Calcuated by quantreg's rq(coma.data$lML_JB_DEV ~ coma.data$lMass_DEV)
#geom_abline(intercept = -1.6587, slope = 0.2262) +
# Change the tick marks
scale_x_continuous(breaks = pretty_breaks(n=20), minor_breaks = waiver()) +
scale_y_continuous(breaks = pretty_breaks(n=20), minor_breaks = waiver())
add.tick.marks(lsigma.vs.lhzeta)
|
c7c27191232a30613a7cfacb0715fa7314bef3fe
|
ef4666f58726e29ea59786b4aa5969d7beecf479
|
/man/plot.competing.risk.Rd
|
1f69772c6a72f7551ad430c958722abc41c33911
|
[] |
no_license
|
ehrlinger/randomForestSRC
|
042e523d05bbba4acdb31aa17a17f9506f5a2533
|
dc22cd40d062ee2fce6b0135a46b933b3469033a
|
refs/heads/master
| 2020-04-15T16:32:29.281631
| 2016-11-28T20:29:22
| 2016-11-28T20:29:22
| 26,964,626
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,820
|
rd
|
plot.competing.risk.Rd
|
\name{plot.competing.risk}
\alias{plot.competing.risk}
\alias{plot.competing.risk.rfsrc}
\title{Plots for Competing Risks}
\description{
Plot the ensemble cumulative incidence function (CIF) and
cause-specific cumulative hazard function (CSCHF) from a competing risk
analysis.
}
\usage{\method{plot.competing.risk}{rfsrc}(x, plots.one.page = FALSE, ...)}
\arguments{
\item{x}{An object of class \code{(rfsrc, grow)} or
\code{(rfsrc, predict)}.}
\item{plots.one.page}{Should plots be placed on one page?}
\item{...}{Further arguments passed to or from other methods.}
}
\details{
Plots the ensemble CSCHF and CIF functions for each event type.
Does not apply to right-censored data.
}
\author{
Hemant Ishwaran and Udaya B. Kogalur
}
\references{
Ishwaran H., Gerds T.A., Kogalur U.B., Moore R.D., Gange S.J. and Lau
B.M. (2014). Random survival forests for competing risks.
\emph{Biostatistics}, 15(4):757-773.
}
\seealso{
\command{\link{follic}},
\command{\link{hd}},
\command{\link{rfsrc}},
\command{\link{wihs}}
}
\examples{
\dontrun{
## ------------------------------------------------------------
## follicular cell lymphoma
## ------------------------------------------------------------
data(follic, package = "randomForestSRC")
follic.obj <- rfsrc(Surv(time, status) ~ ., follic, nsplit = 3, ntree = 100)
plot.competing.risk(follic.obj)
## ------------------------------------------------------------
## competing risk analysis of pbc data from the survival package
## events are transplant (1) and death (2)
## ------------------------------------------------------------
if (library("survival", logical.return = TRUE)) {
data(pbc, package = "survival")
pbc$id <- NULL
plot.competing.risk(rfsrc(Surv(time, status) ~ ., pbc, nsplit = 10))
}
}
}
\keyword{plot}
|
4e327ccda91a7d0320ffae7553dc116bf18110db
|
907fc6b13490775612fcd2c45d4f566cd260f592
|
/backup/data/tv.R
|
44f40618a1569819dc014f328fa1ae3e415d2a44
|
[] |
no_license
|
franziloew/market_definition_2sm
|
dbeefbbb14085c736381c3f87d5ffe982972e3ac
|
f213a00ffd71716c5b39f0d7032f6afad9a70e20
|
refs/heads/master
| 2022-05-04T18:38:26.564337
| 2022-04-15T19:22:30
| 2022-04-15T19:22:30
| 126,069,811
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,016
|
r
|
tv.R
|
# Two-sided market analysis of German TV programme guides
# (TVMovie, TVSpielfilm, TVDigital): reader (retail sales) market and
# advertising (ad pages) market, compared across two sub-periods.
# NOTE(review): `rm(list = ls())` plus the absolute input path make this
# script machine-specific; it is meant to be run top-to-bottom standalone.
rm(list = ls())
library("urca")
library("tseries")
library("seasonal")
library("lmtest")
library("vars")
library("forecast")
library("graphics")
library("Hmisc")
library("xtable")
library("tikzDevice")
library("stargazer")
library("plotrix")
# Included Magazines: TVMovie, TVSpielfilm, TVDigital
tv <- read.csv("/Users/Franzi/Desktop/R/2sm/tv.csv", header = TRUE, stringsAsFactors = FALSE)
# Bi-weekly series (frequency 26), 2003 period 1 through 2016 period 25.
tv <- ts(tv, start=c(2003,1), end = c(2016,25), frequency = 26)
# Assign Globals
# Naming scheme: y3x = retail sales, y4x = total ad pages,
# x4x = ad pages per 100 content pages (ad-to-content ratio).
# Sub1
y31 <- tv[,"retailTVMovie"]
y41 <- tv[,"totaladpageTVMovie"]
x41 <- (tv[,"totaladpageTVMovie"]/tv[,"contentTVMovie"])*100
# Sub2
y32 <- tv[,"retailTVSpielfilm"]
y42 <- tv[,"totaladpageTVSpielfilm"]
x42 <- (tv[,"totaladpageTVSpielfilm"]/tv[,"contentTVSpielfilm"])*100
# Sub3
y33 <- tv[,"retailTVDigital"]
y43 <- tv[,"totaladpageTVDigital"]
x43 <- (tv[,"totaladpageTVDigital"]/tv[,"contentTVDigital"])*100
# --------- Select Sub-Sample
# NOTE(review): the header below says 2004w33 - 2006w33, but the windows
# actually taken run 2005 period 15 through 2008 period 15 -- confirm which
# is intended.
# 1 --- 2004w33 - 2006w33 ---
# Sub1
y31.1 <- window(y31, start=c(2005,15), end = c(2008,15), frequency = 26)
x41.1 <- window(x41, start=c(2005,15), end = c(2008,15), frequency = 26)
y41.1 <- window(y41, start=c(2005,15), end = c(2008,15), frequency = 26)
# Sub2
y32.1 <- window(y32, start=c(2005,15), end = c(2008,15), frequency = 26)
x42.1 <- window(x42, start=c(2005,15), end = c(2008,15), frequency = 26)
y42.1 <- window(y42, start=c(2005,15), end = c(2008,15), frequency = 26)
# Sub3
y33.1 <- window(y33, start=c(2005,15), end = c(2008,15), frequency = 26)
x43.1 <- window(x43, start=c(2005,15), end = c(2008,15), frequency = 26)
y43.1 <- window(y43, start=c(2005,15), end = c(2008,15), frequency = 26)
# 2 --- 2012/15 - 2015/15 ---
# Sub1
y31.2 <- window(y31, start=c(2012,15), end = c(2015,15), frequency = 26)
x41.2 <- window(x41, start=c(2012,15), end = c(2015,15), frequency = 26)
y41.2 <- window(y41, start=c(2012,15), end = c(2015,15), frequency = 26)
# Sub2
y32.2 <- window(y32, start=c(2012,15), end = c(2015,15), frequency = 26)
x42.2 <- window(x42, start=c(2012,15), end = c(2015,15), frequency = 26)
y42.2 <- window(y42, start=c(2012,15), end = c(2015,15), frequency = 26)
# Sub3
y33.2 <- window(y33, start=c(2012,15), end = c(2015,15), frequency = 26)
x43.2 <- window(x43, start=c(2012,15), end = c(2015,15), frequency = 26)
y43.2 <- window(y43, start=c(2012,15), end = c(2015,15), frequency = 26)
# Fill NA
# Simple mean imputation: every missing observation is replaced with the
# series mean of its own window.
y31.1[which(is.na(y31.1))] <- mean(y31.1, na.rm = TRUE)
y32.1[which(is.na(y32.1))] <- mean(y32.1, na.rm = TRUE)
y33.1[which(is.na(y33.1))] <- mean(y33.1, na.rm = TRUE)
y31.2[which(is.na(y31.2))] <- mean(y31.2, na.rm = TRUE)
y32.2[which(is.na(y32.2))] <- mean(y32.2, na.rm = TRUE)
y33.2[which(is.na(y33.2))] <- mean(y33.2, na.rm = TRUE)
#
y41.1[which(is.na(y41.1))] <- mean(y41.1, na.rm = TRUE)
y42.1[which(is.na(y42.1))] <- mean(y42.1, na.rm = TRUE)
y43.1[which(is.na(y43.1))] <- mean(y43.1, na.rm = TRUE)
y41.2[which(is.na(y41.2))] <- mean(y41.2, na.rm = TRUE)
y42.2[which(is.na(y42.2))] <- mean(y42.2, na.rm = TRUE)
y43.2[which(is.na(y43.2))] <- mean(y43.2, na.rm = TRUE)
######################
# Summary and plotting
######################
# Summary statistics (as LaTeX via stargazer) and tikz figures of the raw
# series for both markets and both sub-periods.  Paired figures share y-axis
# limits so period 1 and period 2 are visually comparable.
# ----- Reader Market
combined.y1 <- data.frame(y31.1, y32.1, y33.1)
combined.y1.1 <- data.frame(y31.2, y32.2, y33.2)
colnames(combined.y1) <- c("TVMovie", "TVSpielfilm", "TVDigital")
colnames(combined.y1.1) <- c("TVMovie", "TVSpielfilm", "TVDigital")
stargazer(combined.y1, digits = 1, title = "Summary Statistic: Reader Market (2005-2008)")
stargazer(combined.y1.1, digits = 1, title = "Summary Statistic: Reader Market (2012-2015)")
# ----- Ad Market
combined.y2 <- data.frame(y41.1, y42.1, y43.1)
combined.y2.1 <- data.frame(y41.2, y42.2, y43.2)
colnames(combined.y2) <- c("TVMovie", "TVSpielfilm", "TVDigital")
colnames(combined.y2.1) <- c("TVMovie", "TVSpielfilm", "TVDigital")
stargazer(combined.y2, digits = 1, title = "Summary Statistic: advertising Market (2005-2008)")
stargazer(combined.y2.1, digits = 1, title = "Summary Statistic: advertising Market (2012-2015)")
# ---- Reader Market
# Retail sales (in thousands) per title; legend only on the first panel.
# 1 ---------------
tikz("/Users/Franzi/Desktop/R/circ_tv1.tex",width=5,height=3.5)
par(mfrow=c(1,1))
plot.ts((y31.1)/1000,
type = "l",
col = "red",
xlab = "Weekly Data",
ylab = "Total Retail in tsd",
ylim = c(800, 2200))
lines((y32.1)/1000, type = "l", col = "blue")
lines((y33.1)/1000, type = "l",col = "green")
legend("bottomleft", c("TVMovie", "TVSpielfilm", "TVDigital"),
lty = c(1,1,1),
lwd = c(2.5,2.5,2.5),
col = c("red", "blue", "green"),
bty = "n",
horiz = FALSE)
dev.off()
# 2 ----------------------
tikz("/Users/Franzi/Desktop/R/circ_tv2.tex",width=5,height=3.5)
par(mfrow=c(1,1))
plot.ts((y31.2)/1000,
type = "l",
col = "red",
yaxt = "n",
ann = FALSE,
ylim = c(800, 2200))
lines((y32.2)/1000, type = "l", col = "blue")
lines((y33.2)/1000, type = "l", col = "green")
dev.off()
#-------- Ad Market
# Total ad pages per title; legend only on the second panel.
# 1 ---------------
tikz("/Users/Franzi/Desktop/R/ads_tv1.tex",width=5,height=3.5)
par(mfrow=c(1,1))
plot.ts(y41.1,
type = "l",
col = "red",
xlab = "Fortnightly Data",
ylab = "Total ad pages",
ylim = c(10, 100))
lines(y42.1, type = "l", col = "blue")
lines(y43.1, type = "l",col = "green")
dev.off()
# 2 ----------------------
tikz("/Users/Franzi/Desktop/R/ads_tv2.tex",width=5,height=3.5)
par(mfrow=c(1,1))
plot.ts(y41.2,
type = "l",
col = "red",
yaxt = "n",
ann = FALSE,
ylim = c(10, 100))
lines(y42.2, type = "l", col = "blue")
lines(y43.2, type = "l",col = "green")
legend("topleft", c("TVMovie", "TVSpielfilm", "TVDigital"),
lty = c(1,1,1),
lwd = c(2.5,2.5,2.5),
col = c("red", "blue", "green"),
bty = "n",
horiz = FALSE)
dev.off()
#######
# ACF #
#######
# Autocorrelation functions of the raw series (20 lags).
# NOTE: par(mfrow=c(3,2)) gives 6 panels but 12 Acf() calls follow, so the
# reader-market and ad-market panels land on two successive pages/devices.
par(mfrow=c(3,2))
Acf(y31.1, 20, main = "TVMovie (R.M.1)")
Acf(y31.2, 20, main = "TVMovie (R.M.2)")
Acf(y32.1, 20, main = "TVSpielfilm (R.M.1)")
Acf(y32.2, 20, main = "TVSpielfilm (R.M.2)")
Acf(y33.1, 20, main = "TVDigital (R.M.1)")
Acf(y33.2, 20, main = "TVDigital (R.M.2)")
Acf(y41.1, 20, main = "TVMovie (A.M.1)")
Acf(y41.2, 20, main = "TVMovie (A.M.2)")
Acf(y42.1, 20, main = "TVSpielfilm (A.M.1)")
Acf(y42.2, 20, main = "TVSpielfilm (A.M.2)")
Acf(y43.1, 20, main = "TVDigital (A.M.1)")
Acf(y43.2, 20, main = "TVDigital (A.M.2)")
###################################
# Phillips \& Perron Unit Root Test
###################################
# Null-Hypothesis: Series is non-stationary. If the test statistic is bigger than the critical value, we cannot reject the Null and Series is non-stationary.
# All tests use the Z-tau statistic with a trend and a short lag truncation.
# ------- Reader Market
# 1 ------------------
y31.1.p <- (ur.pp(y31.1, type = "Z-tau", model = "trend", lags = "short"))
y32.1.p <- (ur.pp(y32.1, type = "Z-tau", model = "trend", lags = "short"))
y33.1.p <- (ur.pp(y33.1, type = "Z-tau", model = "trend", lags = "short"))
# 2 ------------------
y31.2.p <- (ur.pp(y31.2, type = "Z-tau", model = "trend", lags = "short"))
y32.2.p <- (ur.pp(y32.2, type = "Z-tau", model = "trend", lags = "short"))
y33.2.p <- (ur.pp(y33.2, type = "Z-tau", model = "trend", lags = "short"))
# ------- Ad Market
# 1 ------------------
y41.1.p <- (ur.pp(y41.1, type = "Z-tau", model = "trend", lags = "short"))
y42.1.p <- (ur.pp(y42.1, type = "Z-tau", model = "trend", lags = "short"))
y43.1.p <- (ur.pp(y43.1, type = "Z-tau", model = "trend", lags = "short"))
# 2 ------------------
y41.2.p <- (ur.pp(y41.2, type = "Z-tau", model = "trend", lags = "short"))
y42.2.p <- (ur.pp(y42.2, type = "Z-tau", model = "trend", lags = "short"))
y43.2.p <- (ur.pp(y43.2, type = "Z-tau", model = "trend", lags = "short"))
# ------ Latex Table
# Rows: sales statistics, ad-page statistics, significance levels, and the
# critical values taken from the first sales test object.
# 2004-2006---------
c.y1p <- data.frame(y31.1.p@teststat, y32.1.p@teststat, y33.1.p@teststat)
c.y1p[nrow(c.y1p)+1,]<-c(y41.1.p@teststat, y42.1.p@teststat, y43.1.p@teststat)
c.y1p[nrow(c.y1p)+1,]<-c("1pct","5pct","10pct")
c.y1p[nrow(c.y1p)+1,]<-y31.1.p@cval
colnames(c.y1p) <- c("TVMovie", "TVSpielfilm", "TVDigital")
rownames(c.y1p) <- c("Sales", "Ad pages","Sig. Level", "Critical Values")
stargazer(c.y1p, summary = FALSE, digits = 1, title = "Unit Root: 2005-2008")
# 2013-2015--------
c.y2p <- data.frame(y31.2.p@teststat, y32.2.p@teststat, y33.2.p@teststat)
c.y2p[nrow(c.y2p)+1,]<-c(y41.2.p@teststat, y42.2.p@teststat, y43.2.p@teststat)
c.y2p[nrow(c.y2p)+1,]<-c("1pct","5pct","10pct")
c.y2p[nrow(c.y2p)+1,]<-y31.2.p@cval
colnames(c.y2p) <- c("TVMovie", "TVSpielfilm", "TVDigital")
rownames(c.y2p) <- c("Sales", "Ad pages","Sig. Level", "Critical Values")
stargazer(c.y2p, summary = FALSE, digits = 1, title = "Unit Root: 2012-2015")
######################
# Prewhitening / ARIMA
######################
# Each title's sales series is fitted by auto.arima() with the two rival
# titles' sales as exogenous regressors; the fit's residuals are the
# prewhitened series used for cross-correlation further below.  The
# commented-out arima() calls record the manually chosen orders.
# ------ Reader Markt
# 1 ----------------
# TVMovie
Y31.1 <- auto.arima(y31.1, xreg = cbind(y32.1, y33.1))
Y31.1
#Y31.1 <- arima(y31.1, order = c(2,0,1), seasonal = list(order=c(1L,0L,0L)), xreg = cbind(y32.1, y33.1))
resid31.1 <- Y31.1$residuals
# TVSpielfilm
Y32.1 <- auto.arima(y32.1, xreg = cbind(y31.1, y33.1))
Y32.1
#Y32.1 <- arima(y32.1, c(1,1,1), seasonal = list(order=c(1L,0L,0L)), xreg = cbind(y31.1, y33.1))
resid32.1 <- Y32.1$residuals
# TVDigital
Y33.1 <- auto.arima(y33.1, xreg = cbind(y31.1, y32.1))
Y33.1
#Y33.1 <- arima(y33.1, order = c(1,0,2), xreg = cbind(y31.1, y32.1))
resid33.1 <- Y33.1$residuals
# Latex Table
# stargazer(Y31.1, Y32.1, Y33.1, title = "Regression Results: Reader Market (2005-2008)", align = TRUE, dep.var.labels = c("TVMovie", "TVSpielfilm", "TVDigital", "TVDigital"), omit.stat = c("LL"), no.space = TRUE)
# Plotting residuals
#tikz("/Users/Franzi/Desktop/R/arima_circ_tv1.tex",width=5,height=3.5)
par(mfrow=c(1,1))
plot.ts((resid31.1)/1000,
type = "l",
col = "red",
ylim = c(-200,200),
xlab = "Weekly Data",
ylab = "Residuals")
lines((resid32.1)/1000, type = "l", col = "blue")
lines((resid33.1)/1000, type = "l", col = "green")
abline(a = 0, b = 0)
#dev.off()
# 2 ------------
# TVMovie
Y31.2 <- auto.arima(y31.2, xreg = cbind(y32.2, y33.2))
Y31.2
#Y31.2 <- arima(y31.2, order = c(1,1,0), seasonal = list(order=c(1L,0L,0L)), xreg = cbind(y32.2, y33.2))
resid31.2 <- Y31.2$residuals
# TVSpielfilm
Y32.2 <- auto.arima(y32.2, xreg = cbind(y31.2, y33.2))
Y32.2
#Y32.2 <- arima(y32.2, c(1,1,0), seasonal = list(order=c(1L,0L,0L)), xreg = cbind(y31.2, y33.2))
resid32.2 <- Y32.2$residuals
# TVDigital
Y33.2 <- auto.arima(y33.2, xreg = cbind(y31.2, y32.2))
Y33.2
#Y33.2 <- arima(y33.2, c(1,0,0), seasonal = list(order=c(1L,0L,0L)), xreg = cbind(y31.2, y32.2))
resid33.2 <- Y33.2$residuals
# Latex Table
#stargazer(Y31.2, Y32.2, Y33.2, title = "Regression Results: Reader Market (2012-2015)", align = TRUE, dep.var.labels = c("TVMovie", "TVSpielfilm", "TVDigital"), omit.stat = c("LL"), no.space = TRUE)
# Plotting Residuals
#tikz("/Users/Franzi/Desktop/R/arima_circ_tv2.tex",width=5,height=3.5)
plot.ts((resid31.2)/1000,
type = "l",
col = "red",
ylim = c(-200,200),
yaxt = "n",
ann = FALSE)
lines((resid32.2)/1000, type = "l", col = "blue")
lines((resid33.2)/1000, type = "l", col = "green")
abline(a = 0, b = 0)
legend("bottomleft",
c("TVMovie", "TVSpielfilm", "TVDigital"),
lty = c(1,1,1,1),
lwd = c(2.5,2.5,2.5,2.5),
col = c("red", "blue", "green"),
bty = "n",
horiz = FALSE)
#dev.off()
##################
# CrossCorrelation
##################
# -------- Reader Market
# Cross-correlate the prewhitened (ARIMA residual) sales series pairwise,
# mirroring the ad-market block below.  BUG FIX: the xcorr1xy objects were
# previously referenced without ever being computed, which made this section
# error out; the Ccf() calls reconstructing them follow the ad-market pattern.
par(mfrow = c(2, 3))
# 1 ------------------- (2005-2008)
xcorr112.1 <- Ccf(resid31.1, resid32.1,
                  lag.max = 6,
                  xlim = c(-6, 6),
                  ylim = c(-1, 1),
                  main = "TVMovie & TVSpielfilm (R.M.1)")
xcorr113.1 <- Ccf(resid31.1, resid33.1,
                  lag.max = 6,
                  xlim = c(-6, 6),
                  ylim = c(-1, 1),
                  main = "TVMovie & TVDigital (R.M.1)")
xcorr123.1 <- Ccf(resid32.1, resid33.1,
                  lag.max = 6,
                  xlim = c(-6, 6),
                  ylim = c(-1, 1),
                  main = "TVSpielfilm & TVDigital (R.M.1)")
# 2 ------------------- (2012-2015)
xcorr112.2 <- Ccf(resid31.2, resid32.2,
                  lag.max = 6,
                  xlim = c(-6, 6),
                  ylim = c(-1, 1),
                  main = "TVMovie & TVSpielfilm (R.M.2)")
xcorr113.2 <- Ccf(resid31.2, resid33.2,
                  lag.max = 6,
                  xlim = c(-6, 6),
                  ylim = c(-1, 1),
                  main = "TVMovie & TVDigital (R.M.2)")
xcorr123.2 <- Ccf(resid32.2, resid33.2,
                  lag.max = 6,
                  xlim = c(-6, 6),
                  ylim = c(-1, 1),
                  main = "TVSpielfilm & TVDigital (R.M.2)")
# Latex tables of the cross-correlations.
xcorr1.1 <- matrix(c(xcorr112.1$lag, xcorr112.1$acf, xcorr113.1$acf, xcorr123.1$acf), ncol = 4)
colnames(xcorr1.1) <- c("Lags", "TVMovie + TVSpielfilm", "TVMovie + TVDigital", "TVSpielfilm + TVDigital")
stargazer(xcorr1.1, summary = FALSE, title = "Cross Correlation Reader Market")
xcorr1.2 <- matrix(c(xcorr112.2$lag, xcorr112.2$acf, xcorr113.2$acf, xcorr123.2$acf), ncol = 4)
colnames(xcorr1.2) <- c("Lags", "TVMovie + Spielfilm", "TVMovie + TVDigital", "TVSpielfilm + TVDigital")
stargazer(xcorr1.2, summary = FALSE)
# --------Ad Market
# Same prewhitening as the reader market: auto.arima() of each title's ad
# pages with the rivals' ad pages as exogenous regressors.
# 1 ----------------
# TVMovie
Y41.1 <- auto.arima(y41.1, xreg = cbind(y42.1, y43.1))
Y41.1
#Y41.1 <- arima(y41.1, order = c(4,1,1), seasonal = list(order=c(0L,0L,1L)), xreg = cbind(y42.1, y43.1))
resid41.1 <- Y41.1$residuals
# TVSpielfilm
Y42.1 <- auto.arima(y42.1, xreg = cbind(y41.1, y43.1))
Y42.1
#Y42.1 <- arima(y42.1, c(2,1,0), xreg = cbind(y41.1, y43.1))
resid42.1 <- Y42.1$residuals
# TVDigital
Y43.1 <- auto.arima(y43.1, xreg = cbind(y41.1, y42.1))
Y43.1
#Y43.1 <- arima(y43.1, order = c(0,0,0), seasonal = list(order=c(1L,0L,0L)), xreg = cbind(y41.1, y42.1))
resid43.1 <- Y43.1$residuals
# ## Latex Table
#stargazer(Y41.1, Y42.1, Y43.1, title = "Regression Results: Ad Market (2005-2008)", align = TRUE, dep.var.labels = c("TVMovie", "TVSpielfilm", "TVDigital"), omit.stat = c("LL"), no.space = TRUE)
## Plotting residuals
#tikz("/Users/Franzi/Desktop/R/arima_ads_tv1.tex",width=5,height=3.5)
par(mfrow=c(1,1))
plot.ts(resid41.1,
type = "l",
col = "red",
ylim = c(-30,30),
xlab = "Weekly Data",
ylab = "Residuals")
lines(resid42.1, type = "l", col = "blue")
lines(resid43.1, type = "l", col = "green")
abline(a = 0, b = 0)
#dev.off()
# 2 --------------
# TVMovie
Y41.2 <- auto.arima(y41.2, xreg = cbind(y42.2,y43.2))
Y41.2
#Y41.2 <- arima(y41.2, c(1,0,0), xreg = cbind(y42.2,y43.2))
resid41.2 <- Y41.2$residuals
# TVSpielfilm
Y42.2 <- auto.arima(y42.2, xreg = cbind(y41.2, y43.2))
Y42.2
# NOTE(review): unlike every other manual arima() call in this script, the
# next line is NOT commented out, so it overwrites the auto.arima fit for
# TVSpielfilm in period 2 -- confirm this is intentional.
Y42.2 <- arima(y42.2, c(2,0,0), seasonal = list(order=c(1L,0L,0L)), xreg = cbind(y41.2, y43.2))
resid42.2 <- Y42.2$residuals
# TVDigital
Y43.2 <- auto.arima(y43.2, xreg = cbind(y41.2, y42.2))
Y43.2
#Y43.2 <- arima(y43.2, c(1,0,1), xreg = cbind(y41.2, y42.2))
resid43.2 <- Y43.2$residuals
## Latex Table
#stargazer(Y41.2, Y42.2, Y43.2, title = "Regression Results: Ad Market 2012-2015", align = TRUE, dep.var.labels = c("TVMovie", "TVSpielfilm", "TVDigital"), omit.stat = c("LL"), no.space = TRUE)
# 2----------------
#tikz("/Users/Franzi/Desktop/R/arima_ads_tv2.tex",width=5,height=3.5)
par(mfrow=c(1,1))
plot.ts(resid41.2,
type = "l",
col = "red",
ylim = c(-30,30),
yaxt = "n",
ann = FALSE)
lines(resid42.2, type = "l", col = "blue")
lines(resid43.2, type = "l", col = "green")
abline(a = 0, b = 0)
legend("topleft",
c("TVMovie", "TVSpielfilm", "TVDigital"),
lty = c(1,1,1,1),
lwd = c(2.5,2.5,2.5,2.5),
col = c("red", "blue", "green"),
bty = "n",
horiz = FALSE)
#dev.off()
###################
# Cross Correlation
###################
# Pairwise cross-correlations of the prewhitened ad-page series, 6 lags,
# for both sub-periods, followed by LaTeX tables via stargazer.
# 1 ----------------
par(mfrow=c(2,3))
xcorr212.1 <- Ccf(resid41.1, resid42.1,
lag.max = 6,
xlim = c(-6,6),
ylim=c(-1,1),
main = "TVMovie & TVSpielfilm (A.M.1)")
xcorr213.1 <- Ccf(resid41.1, resid43.1,
lag.max = 6,
xlim = c(-6,6),
ylim=c(-1,1),
main = "TVMovie & TVDigital (A.M.1)")
xcorr223.1 <- Ccf(resid42.1, resid43.1,
lag.max = 6,
xlim = c(-6,6),
ylim=c(-1,1),
main = "TVSpielfilm & TVDigital (A.M.1)")
xcorr212.2 <- Ccf(resid41.2, resid42.2,
lag.max = 6,
xlim = c(-6,6),
ylim=c(-1,1),
main = "TVMovie & TVSpielfilm (A.M.2)")
xcorr213.2 <- Ccf(resid41.2, resid43.2,
lag.max = 6,
xlim = c(-6,6),
ylim=c(-1,1),
main = "TVMovie & TVDigital (A.M.2)")
xcorr223.2 <- Ccf(resid42.2, resid43.2,
lag.max = 6,
xlim = c(-6,6),
ylim=c(-1,1),
main = "TVSpielfilm & TVDigital (A.M.2)")
# Latex Table
# NOTE(review): xcorr2.1 omits the lag column that xcorr2.2 includes --
# confirm whether that asymmetry is intentional.
xcorr2.1 <- matrix(c(xcorr212.1$acf,xcorr213.1$acf,xcorr223.1$acf), ncol = 3)
colnames(xcorr2.1) <- c("TVMovie + Spielfilm", "TVMovie + TVDigital","TVSpielfilm + TVDigital")
stargazer(xcorr2.1, summary = FALSE)
xcorr2.2 <- matrix(c(xcorr212.2$lag, xcorr212.2$acf,xcorr213.2$acf,xcorr223.2$acf), ncol = 4)
colnames(xcorr2.2) <- c("Lags", "TVMovie + Spielfilm", "TVMovie + TVDigital","TVSpielfilm + TVDigital")
stargazer(xcorr2.2, summary = FALSE)
#############
# Residual AC
#############
# Autocorrelation of the ARIMA residuals (18 lags) -- a whiteness check on
# the prewhitened series used above.
# --- Reader Market
par(mfrow=c(3,2))
Acf(resid31.1, 18, main = "TVMovie (R.M.1)")
Acf(resid31.2, 18, main = "TVMovie (R.M.2)")
Acf(resid32.1, 18, main = "TVSpielfilm (R.M.1)")
Acf(resid32.2, 18, main = "TVSpielfilm (R.M.2)")
Acf(resid33.1, 18, main = "TVDigital (R.M.1)")
Acf(resid33.2, 18, main = "TVDigital (R.M.2)")
# ------- Ad Market
Acf(resid41.1, 18, main = "TVMovie (A.M.1)")
Acf(resid41.2, 18, main = "TVMovie (A.M.2)")
Acf(resid42.1, 18, main = "TVSpielfilm (A.M.1)")
Acf(resid42.2, 18, main = "TVSpielfilm (A.M.2)")
Acf(resid43.1, 18, main = "TVDigital (A.M.1)")
Acf(resid43.2, 18, main = "TVDigital (A.M.2)")
###################
# Granger Causality
###################
# Export the residual panels, pick a lag order with VARselect, then fit the
# VARs used for the impulse responses below.
# NOTE(review): VAR() is called without a `p` argument, i.e. with its default
# lag order, regardless of what VARselect reports -- confirm intended.
# 1 --------------------
write.csv(cbind(resid31.1, resid32.1, resid33.1, resid41.1, resid42.1, resid43.1), file="granger_tv1.csv")
VARselect(cbind(resid31.1, resid32.1, resid33.1), lag.max = 12, type = "none")
VARselect(cbind(resid41.1, resid42.1, resid43.1), lag.max = 12, type = "none")
var1.1 <- VAR(cbind(resid31.1, resid32.1, resid33.1))
var2.1 <- VAR(cbind(resid41.1, resid42.1, resid43.1))
# 2 --------------------
write.csv(cbind(resid31.2, resid32.2, resid33.2, resid41.2, resid42.2, resid43.2), file="granger_tv2.csv")
VARselect(cbind(resid31.2, resid32.2, resid33.2), lag.max = 12, type = "none")
VARselect(cbind(resid41.2, resid42.2, resid43.2), lag.max = 12, type = "none")
var1.2 <- VAR(cbind(resid31.2, resid32.2, resid33.2))
var2.2 <- VAR(cbind(resid41.2, resid42.2, resid43.2))
# Impulse response functions of the fitted VARs.
# BUG FIX: the VAR objects are named var1.1/var1.2 (reader market) and
# var2.1/var2.2 (ad market); the previous `var.1.1`-style spellings
# referenced objects that were never created.
# Reader Market
plot(irf(var1.1))
plot(irf(var1.2))
# Ad Market
plot(irf(var2.1))
plot(irf(var2.2))
|
a88e4dfdf78f4b07eb4baec0e934f15606f11749
|
e0024ae63e5769377dd1bb64e5e3fbc818faa118
|
/code/scripts/eda-images.R
|
e8d1e00e25366887dcfa3e1aca893fd999130221
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
ss-chang/stat159-project02
|
a44638e394757cbda10a57f3afe0701a4248e7fd
|
1120493cd60626e2b5f4ec86aa5397c1abee0bdb
|
refs/heads/master
| 2021-01-11T03:44:19.966596
| 2016-11-05T01:43:49
| 2016-11-05T01:43:49
| 71,296,660
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,211
|
r
|
eda-images.R
|
#====================================================================================
# title: eda-images.R
# author: Shannon Chang and Nura Kawa
#
# description:
# + generates histograms of quantitative predictors
# + generates boxplots of quantitative predictors
# + generates scatterplot matrix of quantitative predictors
# + generates conditional boxplots of qualitative predictors
# + creates barcharts of qualitative predictors
# + all images are saved as .PDF and .png formats
#====================================================================================
# ==============================================================================
# Read in Credit.csv data set
# ==============================================================================
# NOTE(review): library(readr) is loaded but base read.csv() is used below;
# the readr attach appears unnecessary.
library(readr)
credit <- read.csv(file = "../../data/Credit.csv", row.names = 1)
# ==============================================================================
# Separate Qualitative and Quantitative Data
# ==============================================================================
# create separate data frame of just qualitative data: "quals"
# NOTE(review): under R >= 4.0 read.csv defaults to stringsAsFactors = FALSE,
# so character columns would NOT be factors and `quals` could come back
# empty -- confirm the intended R version / add stringsAsFactors = TRUE.
quals <- credit[,sapply(credit, is.factor)]
quals <- cbind(credit$Balance, quals)
names(quals)[1] = "balance"
# create separate data frame of just quantitative data: "quants"
quants <- credit[,sapply(credit, is.numeric)]
# ==============================================================================
# Function to select saving method
# ==============================================================================
# Open a graphics device of the requested type ("pdf" or "png") writing to
# paste0(image, extension).  The caller is responsible for plotting and for
# closing the device with dev.off().
#
# Args:
#   method    - device name, one of "pdf" or "png"
#   image     - output path without extension
#   extension - file extension to append (e.g. ".png")
save_image <- function(method, image, extension)
{
  # Validate up front: the original bare switch() silently returned NULL for
  # an unknown method, so no device was opened and the subsequent plot went
  # to the wrong device.  match.arg() errors loudly instead.
  method <- match.arg(method, c("pdf", "png"))
  switch(method,
         "pdf" = pdf(paste0(image, extension)),
         "png" = png(paste0(image, extension)))
}
# ==============================================================================
# Generating histograms for Quantitative Data
# ==============================================================================
# Axis-label suffixes, positionally aligned with the columns of `quants`.
x_captions <- c(" (in thousands of dollars)",
                " (credit limit)",
                " (credit rating)",
                " (number of credit cards)",
                "",
                " (years of education)",
                " (average credit card debt for a number of individuals)")
# function to plot quantitative histograms
# Draws one histogram per column of the global `quants` data frame and saves
# it under images/quantitative/histograms/ via save_image().
#   method    - "pdf" or "png" (forwarded to save_image)
#   extension - file extension matching the method (e.g. ".png")
plot_quant_hist <- function(method, extension)
{
for (i in 1:ncol(quants)){
save_image(method,
paste0("../../images/quantitative/histograms/histogram-",casefold(names(quants[i]))),
extension)
hist(quants[,i],
xlab = paste0(names(quants[i]), x_captions[i]),
main = paste0(names(quants[i])))
dev.off()
}
}
# plotting png and pdf quantitative histograms
plot_quant_hist("png", ".png")
plot_quant_hist("pdf", ".pdf")
# ==============================================================================
# Generating boxplots
# ==============================================================================
# ------------------------------------------------------------------------------
# Boxplots for quantitative data:
# ------------------------------------------------------------------------------
# Draw one boxplot per column of the global `quants` data frame and save it
# under images/quantitative/boxplots/ using the requested graphics device.
#   method    - "pdf" or "png" (forwarded to save_image)
#   extension - file extension matching the method (e.g. ".png")
plot_quant_box <- function(method, extension)
{
  for (col_idx in seq_len(ncol(quants)))
  {
    col_name <- names(quants)[col_idx]
    target <- paste0("../../images/quantitative/boxplots/boxplot-",
                     casefold(col_name))
    save_image(method, target, extension)
    boxplot(quants[, col_idx],
            ylab = paste0(x_captions[col_idx]),
            main = paste0(col_name))
    dev.off()
  }
}
plot_quant_box("png", ".png")
plot_quant_box("pdf", ".pdf")
# ------------------------------------------------------------------------------
# Conditional Boxplots for Qualitative Data:
# --------------------------------------------------------------------------------
# plot_qual_boxplots function:
# Draws balance conditioned on each qualitative column of the global `quals`
# data frame (column 1 is balance itself, hence the loop starting at 2) and
# saves each plot under images/qualitative/<folder>.
#   method    - "pdf" or "png" (forwarded to save_image)
#   extension - file extension matching the method
#   folder    - subdirectory (with trailing slash), e.g. "boxplots/"
plot_qual_boxplots <- function(method, extension, folder)
{
for(i in 2:ncol(quals)) #column 1 is balance
{
boxplot_name <- colnames(quals)[i]
#boxplot_name <- tolower(colnames(quals)[i])
# save image (file name is lower-cased; plot title keeps original case)
save_image(method,
paste0("../../images/qualitative/",
folder,
"conditional-boxplot-",
tolower(boxplot_name)),
extension)
boxplot(balance ~ quals[,i], data = quals,
main = paste0("boxplot of balance with respect to ",
boxplot_name),
col = "white")
dev.off()
rm(boxplot_name)
}
}
plot_qual_boxplots("png", ".png", "boxplots/")
plot_qual_boxplots("pdf", ".pdf", "boxplots/")
# ==============================================================================
# Generating barcharts for Qualitative data
# ==============================================================================
# table of frequencies with proportion
# NOTE(review): apply() on a data frame coerces it to a (character) matrix,
# so the balance column in position 1 is tabulated as text; that entry is
# never plotted (the loop below starts at column 2), but consider lapply()
# to keep column types.
individual_frequencies <- apply(quals, 2, table)
total_frequencies <- ftable(table(quals))
make_prop_table <- function(x){prop.table(table(x))}
prop_table_individual_frequencies <- apply(quals,
2,
make_prop_table)
prop_table_total_frequencies <- prop.table(total_frequencies)
# Draws one barchart of level proportions per qualitative column (skipping
# balance) and saves it under images/qualitative/<folder>.
#   method    - "pdf" or "png" (forwarded to save_image)
#   extension - file extension matching the method
#   folder    - subdirectory (with trailing slash), e.g. "barcharts/"
plot_qual_barcharts <- function(method, extension, folder)
{
for(i in 2:ncol(quals)) #ignoring "balance"
{
chart_name <- colnames(quals)[i]
# save image in the requested format
save_image(method,
paste0("../../images/qualitative/",
folder,
"barchart-",
tolower(chart_name)),
extension)
barplot(prop_table_individual_frequencies[[i]],
col = "white",
main = paste0("barchart: ", chart_name))
dev.off()
rm(chart_name)
}
}
plot_qual_barcharts("png", ".png", "barcharts/")
plot_qual_barcharts("pdf", ".pdf", "barcharts/")
# ==============================================================================
# Generating scatterplot matrix
# ==============================================================================
# Save the pairwise scatterplot matrix of the quantitative variables in both
# raster (png) and vector (pdf) form.
for (ext in c("png", "pdf")) {
  open_device <- match.fun(ext)
  open_device(paste0("../../images/scatterplot-matrix.", ext))
  plot(quants)
  dev.off()
}
|
c369db2b2e2b7140db7bdaa611d68621cdf1f0da
|
5cb215dd1d269b4471b91efea988d842bf55de40
|
/man/synGetMembershipStatus.Rd
|
d99d45b82b3630fba2c4af4ba9b0f4e827f97ce0
|
[
"Apache-2.0"
] |
permissive
|
Sage-Bionetworks/synapser
|
0d308dba0a4a993a1e8f609c25c75b072de78cdc
|
c9ed6ca9fb5247d56167ff8812ddc780de013127
|
refs/heads/master
| 2023-06-24T23:10:43.914336
| 2023-06-14T22:33:35
| 2023-06-14T22:33:35
| 34,292,371
| 31
| 16
|
Apache-2.0
| 2023-09-10T04:16:43
| 2015-04-20T23:33:04
|
R
|
UTF-8
|
R
| false
| false
| 587
|
rd
|
synGetMembershipStatus.Rd
|
\name{synGetMembershipStatus}
\alias{synGetMembershipStatus}
\docType{methods}
\title{
synGetMembershipStatus
}
\description{
Retrieve a user's Team Membership Status bundle.
https://docs.synapse.org/rest/GET/team/id/member/principalId/membershipStatus.html
}
\usage{
synGetMembershipStatus(userid, team, user=NULL)
}
\arguments{
\item{userid}{The ID of the user whose Team membership status is requested.}
\item{team}{ A Team object or a\cr
team's ID.}
\item{user}{optional named parameter: Synapse user ID\cr
}
}
\value{
dict of TeamMembershipStatus
}
\examples{
\dontrun{
synGetMembershipStatus(user_id, team_id)$isMember
}
}
|
8f28d8bd00993efc3bddd7cf1c3262caaf1a88cc
|
ed2530b7c73ad80b86a0e60db075aaacdf53dbea
|
/missense_for_provean.R
|
a556b75364d52a1d001fbe5d66edc59023f8bff5
|
[] |
no_license
|
BrianRitchey/qtl
|
7444a409d4d07abda3f13b5cfa2794113f93c0e9
|
9792fef3dfa7ecdd62857d58ca3f9966456ae6b8
|
refs/heads/master
| 2023-07-08T01:38:25.485945
| 2017-09-11T18:44:29
| 2017-09-11T18:44:29
| 98,877,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,969
|
r
|
missense_for_provean.R
|
# missense_for_provean function
#
# written by Brian Ritchey
#
# Builds PROVEAN-ready protein variant strings (e.g. "Y284H") for missense
# SNPs, one group of rs identifiers per gene:
#   1. query dbSNP (via rentrez) until an rs record carrying an "NP_"
#      protein accession is found for the gene,
#   2. fetch that protein's FASTA sequence from the NCBI protein database,
#   3. extract the amino-acid position plus reference/mutant residues for
#      every SNP and orient them against the fetched sequence,
#   4. record SNPs that could not be matched and prune them from gene_table.
#
# Arguments:
#   rs_list    - named list (one element per gene) of dbSNP rs identifiers
#   gene_table - data frame with a `dbSNP` column; rows for unmatched SNPs
#                are removed before it is appended to the result
# Returns: a list with one character vector per gene (gene name, FASTA
#   header, protein sequence, then the variant strings), plus the pruned
#   gene_table as the final element.
# NOTE(review): requires network access to NCBI and a `list_to_df()` helper
# defined elsewhere; the hard-coded fallbacks ("NP_033256.2", "rs30510802")
# assume a mouse Soat1 / AKR-vs-DBA context -- confirm before reuse.
function(rs_list= rs_list, gene_table = gene_table){
  require(rentrez)
  final <- vector("list")
  no_match <- vector("list")
  genes <- names(rs_list)
  for(rs_i in seq_along(rs_list)){
    missense_SNPs <- rs_list[[rs_i]]
    # Probe the gene's SNPs until one yields a protein ("NP_") accession.
    for(i in seq_along(missense_SNPs)){
      rs <- entrez_fetch(db = "snp",
                         id = missense_SNPs[i],
                         rettype = "flt")
      rs <- strsplit(rs, "residue")
      rs_split <- strsplit(rs[[1]][3], split = "\\|")
      pro_acc <- gsub(pattern = " prot_acc=",
                      replacement = "",
                      x = rs_split[[1]][4])
      pro_acc <- strsplit(pro_acc, split = "\n")
      pro_acc <- pro_acc[[1]][1]
      # break loop once an accession "NP_" is found
      if(length(grep(pattern = "NP_", x = pro_acc)) == 1){
        break
      }
    }
    # Get protein sequence and accession name.
    # If pro_seq doesn't exist, "NP_033256.2" (a Soat1 accession which is
    # the same in AKR + DBA/2) is forced in as a fallback.
    pro_seq <- tryCatch({
      entrez_fetch(db = "protein",
                   id = pro_acc,
                   rettype = "fasta")},
      error = function(e){
        entrez_fetch(db = "protein",
                     id = "NP_033256.2",
                     rettype = "fasta")
      })
    pro_seq <- gsub(pro_seq, pattern = "\n", replacement = "")
    pro_seq_split <- strsplit(pro_seq, split = "]")
    pro_name <- pro_seq_split[[1]][1]
    pro_seq <- pro_seq_split[[1]][2]
    pro_seq_string <- strsplit(pro_seq, "")
    # Get amino acid position of residue change for each rs (AKR vs. DBA),
    # along with the reference and mutant alleles.
    aa_positions <- character(length = length(missense_SNPs))
    ref_alleles <- character(length = length(missense_SNPs))
    mut_alleles <- character(length = length(missense_SNPs))
    # "rs30510802" is the fallback rs number that matches pro_acc above
    for(i in seq_along(missense_SNPs)){
      rs <- tryCatch({
        entrez_fetch(db = "snp",
                     id = missense_SNPs[i],
                     rettype = "flt")},
        error = function(e){
          entrez_fetch(db = "snp",
                       id = "rs30510802",
                       rettype = "flt")
        })
      rs <- strsplit(rs, "residue")
      rs_split <- strsplit(rs[[1]][3], split = "\\|")
      aa_positions[i] <- trimws(
        gsub(pattern = "aa_position=",
             replacement = "",
             x = rs_split[[1]][2]))
      ref_alleles[i] <- trimws(
        gsub(pattern = "=",
             replacement = "",
             x = rs_split[[1]][1]))
      mutant_allele <- gsub(pattern = "=",
                            replacement = "",
                            x = rs[[1]][2])
      mutant_allele <- strsplit(mutant_allele, split = "\\|")
      mut_alleles[i] <- trimws(mutant_allele[[1]][1])
    }
    # Concatenate into Provean variant syntax (i.e. Y284H).
    # Account for no-match conditions (NA) and for which of the two alleles
    # actually matches the fetched sequence at that position.
    ref_vec <- ref_alleles == pro_seq_string[[1]][as.integer(aa_positions)]
    mut_vec <- mut_alleles == pro_seq_string[[1]][as.integer(aa_positions)]
    pro_variants <- character(length = length(missense_SNPs))
    none <- character(length = length(missense_SNPs))
    for(i in seq_along(missense_SNPs)){
      if(is.na(ref_vec[i])){
        pro_variants[i] <- NA
        none[i] <- missense_SNPs[i]
      }else if(ref_vec[i]){
        pro_variants[i] <- paste0(ref_alleles[i], aa_positions[i], mut_alleles[i])
      }else if(mut_vec[i]){
        pro_variants[i] <- paste0(mut_alleles[i], aa_positions[i], ref_alleles[i])
      }else{
        pro_variants[i] <- NA
        none[i] <- missense_SNPs[i]
      }
    }
    pro_variants <- na.exclude(pro_variants)
    attributes(pro_variants) <- NULL
    no_match[[rs_i]] <- none
    pro_name <- paste0(pro_name, "]")
    output <- character(length = (3 + length(pro_variants)))
    output <- c(genes[rs_i], pro_name, pro_seq, pro_variants)
    final[[rs_i]] <- output
  }
  names(no_match) <- genes
  no_match
  names(final) <- genes
  final_df <- list_to_df(final)
  rownames(final_df) <- NULL
  final_rm <- which(is.na(final_df[,4]))
  if(length(final_rm) > 0 ){
    final <- final[-final_rm]
  }
  # BUG FIX: the original used `gene_table$dbSNP == no_match`, which recycles
  # the shorter vector elementwise and silently mis-flags rows whenever the
  # two lengths differ; %in% performs the intended set-membership test.
  # Empty strings in no_match (SNPs that matched) can never equal a real rs
  # identifier, so they are harmless here.
  no_match <- unlist(no_match, use.names = FALSE)
  nm_rm <- which(gene_table$dbSNP %in% no_match)
  # In the case everything matches, nm_rm is empty and gene_table is kept.
  if(length(nm_rm) > 0){
    gene_table <- gene_table[-nm_rm,]
  }
  rownames(gene_table) <- NULL
  final[[length(final) + 1]] <- gene_table
  # final[[length(final) + 1]] <- no_match
  # names(final) <- c(genes, "gene_table", "no_rs_match")
  final
}
|
cfef9e41d3f7766f330fa658a1bc14ec23727362
|
736d35dfbe750b872865ff123a863529b07ccf98
|
/R/plot.R
|
c9fe740fca1a4f7c8311ec60dcc88bfe15e1f4b7
|
[] |
no_license
|
seananderson/glmmfields
|
27e9dc767c8fd128ec986e52007b49e73defa501
|
92b44f1499834e5815823bde243183967990f1c1
|
refs/heads/master
| 2023-03-21T23:27:11.484397
| 2023-03-10T19:43:54
| 2023-03-10T19:43:54
| 73,511,815
| 42
| 10
| null | 2023-09-08T21:53:53
| 2016-11-11T21:12:07
|
R
|
UTF-8
|
R
| false
| false
| 1,992
|
r
|
plot.R
|
#' Plot predictions from an glmmfields model
#'
#' @param x An object returned by \code{\link{glmmfields}}
#' @param type Type of plot
#' @param link Logical: should the plots be made on the link scale
#'   or on the natural scale?
#' @param ... Other arguments passed to \code{\link{predict.glmmfields}}
#'
#' @importFrom ggplot2 ggplot aes_string facet_wrap geom_point
#'   scale_color_gradient2 geom_smooth geom_hline facet_wrap
#' @export
#' @examples
#' \donttest{
#' # Spatiotemporal example:
#' set.seed(1)
#' s <- sim_glmmfields(n_draws = 12, n_knots = 12, gp_theta = 1.5,
#'   gp_sigma = 0.2, sd_obs = 0.1)
#' # options(mc.cores = parallel::detectCores()) # for parallel processing
#' m <- glmmfields(y ~ 0, time = "time",
#'   lat = "lat", lon = "lon", data = s$dat,
#'   nknots = 12, iter = 600, chains = 1)
#' x <- plot(m, type = "prediction")
#' x
#' x + ggplot2::scale_color_gradient2()
#' plot(m, type = "spatial-residual")
#' plot(m, type = "residual-vs-fitted")
#' }
plot.glmmfields <- function(x,
  type = c("prediction", "spatial-residual", "residual-vs-fitted"),
  link = TRUE, ...) {
  type <- match.arg(type)

  # Predictions on the requested scale: `link = TRUE` keeps everything on
  # the linear-predictor scale, otherwise the response scale is used.
  pred_scale <- if (link) "link" else "response"
  preds <- predict(x, type = pred_scale, ...)
  plot_dat <- data.frame(x$data, preds)

  # Residuals are observed minus estimated, with the observations mapped
  # through the family link when plotting on the link scale.
  obs <- x$y
  if (link) obs <- do.call(x$family$link, list(obs))
  plot_dat$residual <- obs - preds$estimate

  if (type == "prediction") {
    out <- ggplot(plot_dat, aes_string(x$lon, x$lat, colour = "estimate")) +
      geom_point(size = 2) +
      facet_wrap(x$time)
  } else if (type == "spatial-residual") {
    out <- ggplot(plot_dat, aes_string(x$lon, x$lat, colour = "residual")) +
      geom_point(size = 2) +
      scale_color_gradient2() +
      facet_wrap(x$time)
  } else {
    out <- ggplot(plot_dat, aes_string("estimate", "residual")) +
      geom_point() +
      facet_wrap(x$time) +
      geom_hline(yintercept = 0, lty = 2) +
      geom_smooth(method = "loess", se = FALSE, colour = "red")
  }
  out
}
|
1ef471813ecfa734b2a64b4197f5f2a72bc2d60c
|
8782c1d5956c6c2fe181cd59abf845adf763ee1f
|
/man/printResiduals.Rd
|
bbca8677991d02e300ea6574dc994413898c762f
|
[] |
no_license
|
cran/featurefinder
|
842e04793156acab3e06ef497f08ab31436d3450
|
808414cdabb6417c8c3e3ad309aa170002286651
|
refs/heads/master
| 2020-03-08T20:49:14.806784
| 2018-12-03T04:20:03
| 2018-12-03T04:20:03
| 128,392,367
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,216
|
rd
|
printResiduals.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/featurefinder.r
\name{printResiduals}
\alias{printResiduals}
\title{printResiduals}
\usage{
printResiduals(fileConn, all, dat, runname, levelname,
treeSummaryResidualThreshold, treeSummaryMinBucket,
treeSummaryResidualMagnitudeThreshold, ...)
}
\arguments{
\item{fileConn}{A file connection}
\item{all}{A dataframe}
\item{dat}{The dataset}
\item{runname}{A string corresponding to the name of the factor being analysed}
\item{levelname}{A string corresponding to the factor level being analysed}
\item{treeSummaryResidualThreshold}{The minimum residual threshold}
\item{treeSummaryMinBucket}{The minimum volume per leaf}
\item{treeSummaryResidualMagnitudeThreshold}{Minimum residual magnitude}
\item{...}{Additional parameters to be passed through}
}
\value{
Residuals are printed and also saved in a simplified format.
}
\description{
This function generates a residual tree on a subset of the data
}
\examples{
require(featurefinder)
data(examples)
printResiduals(fileConn,splitlist[t][[1]],dat, runname, names[t],
treeSummaryResidualThreshold,treeSummaryMinBucket,
treeSummaryResidualMagnitudeThreshold)
}
\keyword{saveTree}
|
9f7d88ca3eb093bb7572f5b0fcf926becc2dd6a2
|
0c1892c4e0747936e662efb472257409b5985130
|
/CKD Code.R
|
512bbd59c914a3db6c20874952dfb7ac9b61e3e1
|
[] |
no_license
|
suriya3193/Prediction-of-the-presence-of-CKD-
|
ae63cee52d4c9183f0ba6f2ddab5b08b80ac3b2b
|
550cf00fddb6ef91c0e1331dc7842bc2b21b8e38
|
refs/heads/master
| 2020-12-13T16:38:28.907775
| 2020-01-17T05:04:36
| 2020-01-17T05:04:36
| 234,474,339
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,526
|
r
|
CKD Code.R
|
# Chronic Kidney Disease (CKD) prediction script.
# Workflow: load data -> factor conversion -> correlation screen -> train/test
# split -> class-imbalance correction (ROSE/SMOTE) -> univariate screening
# (ANOVA / chi-square) -> logistic regression -> accuracy check -> prediction
# on the held-out set.
library(ROSE)
library(caTools)
library(corrplot)
# Loading the data (absolute Windows path; NOTE(review): not portable —
# consider a relative path or file.choose()).
CKD_data<-read.csv("E:/Healthcare project/Chronic Kidney Disease Dataset.csv", header = TRUE,sep = ",")
View (CKD_data)
# Keep only rows with a valid (positive) ID.
CKD_data<-subset(CKD_data, CKD_data$ID >=1)
# Converting the categorical variables to factors so that modelling functions
# treat them as discrete levels rather than numbers.
CKD_data$Female<-as.factor(CKD_data$Female)
CKD_data$Racegrp<-as.factor(CKD_data$Racegrp)
CKD_data$Educ<-as.factor(CKD_data$Educ)
CKD_data$Unmarried<-as.factor(CKD_data$Unmarried)
CKD_data$Income<-as.factor(CKD_data$Income)
CKD_data$Insured<-as.factor(CKD_data$Insured)
CKD_data$Obese<-as.factor(CKD_data$Obese)
CKD_data$Dyslipidemia<-as.factor(CKD_data$Dyslipidemia)
CKD_data$PVD<-as.factor(CKD_data$PVD)
CKD_data$Activity<-as.factor(CKD_data$Activity)
CKD_data$PoorVision<-as.factor(CKD_data$PoorVision)
CKD_data$Smoker<-as.factor(CKD_data$Smoker)
CKD_data$Hypertension<-as.factor(CKD_data$Hypertension)
CKD_data$Fam.Hypertension<-as.factor(CKD_data$Fam.Hypertension)
CKD_data$Diabetes<-as.factor(CKD_data$Diabetes)
CKD_data$Fam.Diabetes<-as.factor(CKD_data$Fam.Diabetes)
CKD_data$Stroke<-as.factor(CKD_data$Stroke)
CKD_data$CVD<-as.factor(CKD_data$CVD)
CKD_data$Fam.CVD<-as.factor(CKD_data$Fam.CVD)
CKD_data$CHF<-as.factor(CKD_data$CHF)
CKD_data$Anemia<-as.factor(CKD_data$Anemia)
CKD_data$CKD<-as.factor(CKD_data$CKD)
# Correlation matrix of the remaining numeric columns (complete observations
# only) and a visual check via corrplot.
my_num_data <- CKD_data[, sapply(CKD_data, is.numeric)]
res<-cor(my_num_data, use = "complete.obs")
View(res)
corrplot(res)
# Splitting the data by ID: first 6000 rows train, the rest held out for
# final prediction. NOTE(review): an ID-based split assumes IDs are randomly
# ordered — confirm.
CKD_train <- subset(CKD_data, CKD_data$ID <= 6000)
CKD_test <- subset(CKD_data, CKD_data$ID > 6000)
# Drop training rows with any missing values before resampling.
CKD_sampling<-CKD_train[complete.cases(CKD_train), ]
View(CKD_sampling)
table(CKD_sampling$CKD)
# Treating class imbalance: ROSE generates a synthetic balanced sample.
CKD_rose <- ROSE(CKD ~ ., data = CKD_sampling, seed = 1)$data
table(CKD_rose$CKD)
library(DMwR)
# SMOTE alternative kept for comparison; only CKD_rose is used downstream.
# NOTE(review): DMwR::SMOTE has no 'seed' argument — verify this call.
CKD_smote <- SMOTE(CKD ~., CKD_sampling, seed = 2)
# ANOVA test: one-way ANOVA of each numeric predictor against the CKD class.
# NOTE(review): the result is assigned to 'aov', shadowing stats::aov() for
# the rest of the session.
aov<- aov(CKD_rose$Age ~ CKD_rose$CKD)
summary(aov)
aov<- aov(CKD_rose$Weight ~ CKD_rose$CKD)
summary(aov)
aov<- aov(CKD_rose$Height ~ CKD_rose$CKD)
summary(aov)
aov<- aov(CKD_rose$BMI ~ CKD_rose$CKD)
summary(aov)
aov<- aov(CKD_rose$Waist ~ CKD_rose$CKD)
summary(aov)
aov<- aov(CKD_rose$SBP ~ CKD_rose$CKD)
summary(aov)
aov<- aov(CKD_rose$DBP ~ CKD_rose$CKD)
summary(aov)
aov<- aov(CKD_rose$HDL ~ CKD_rose$CKD)
summary(aov)
aov<- aov(CKD_rose$LDL ~ CKD_rose$CKD)
summary(aov)
aov<- aov(CKD_rose$Total.Chol ~ CKD_rose$CKD)
summary(aov)
# Chi-square tests of independence between CKD and each categorical predictor.
chisq.test(CKD_rose$CKD,CKD_rose$Hypertension)
chisq.test(CKD_rose$CKD,CKD_rose$Female)
chisq.test(CKD_rose$CKD,CKD_rose$Racegrp)
chisq.test(CKD_rose$CKD,CKD_rose$Educ)
chisq.test(CKD_rose$CKD,CKD_rose$Unmarried)
chisq.test(CKD_rose$CKD,CKD_rose$Income)
chisq.test(CKD_rose$CKD,CKD_rose$Insured)
chisq.test(CKD_rose$CKD,CKD_rose$Obese)
chisq.test(CKD_rose$CKD,CKD_rose$Dyslipidemia)
chisq.test(CKD_rose$CKD,CKD_rose$PVD)
chisq.test(CKD_rose$CKD,CKD_rose$Activity)
chisq.test(CKD_rose$CKD,CKD_rose$PoorVision)
chisq.test(CKD_rose$CKD,CKD_rose$Smoker)
chisq.test(CKD_rose$CKD,CKD_rose$Fam.Hypertension)
chisq.test(CKD_rose$CKD,CKD_rose$Diabetes)
chisq.test(CKD_rose$CKD,CKD_rose$Fam.Diabetes)
chisq.test(CKD_rose$CKD,CKD_rose$CVD)
chisq.test(CKD_rose$CKD,CKD_rose$Fam.CVD)
chisq.test(CKD_rose$CKD,CKD_rose$Stroke)
chisq.test(CKD_rose$CKD,CKD_rose$CHF)
chisq.test(CKD_rose$CKD,CKD_rose$Anemia)
# Drop columns not used in modelling. NOTE(review): 'CHM' does not appear in
# the dataset columns above — possibly a typo for 'CHF'; confirm.
CKD_rose$CHM<-NULL
CKD_rose$Unmarried<-NULL
CKD_rose$CareSource<-NULL
# Train and test: random 70/30 split of the balanced data.
set.seed(123)
smp_size <- floor(0.70 * nrow(CKD_rose))
train_ind <- sample(seq_len(nrow(CKD_rose)), size = smp_size)
CKD_rosetrain <- CKD_rose[train_ind, ]
CKD_rosetest <- CKD_rose[-train_ind, ]
# Logistic regression model with the screened predictors.
model <- glm (CKD ~ Age+Female+Racegrp+Educ+Unmarried+Income+Insured+BMI+SBP+DBP+HDL+LDL+PVD+Activity+PoorVision+Smoker+Hypertension+Fam.Hypertension+Diabetes+Stroke+CVD+Fam.CVD+CHF+Anemia, data = CKD_rosetrain, family = binomial)
summary(model)
# Odds ratios.
exp(coef(model))
# Classify at a 0.5 probability cutoff and report accuracy on the rose test
# split.
predict_result <- predict(model,CKD_rosetest,type = 'response')
fitted.results <- ifelse(predict_result > 0.5,1,0)
misClasificError <- mean(fitted.results != CKD_rosetest$CKD)
print(paste('Accuracy',1-misClasificError))
# Predict on the held-out CKD_test set (its CKD labels are unknown/NA).
CKD_test$CKD<-NULL
CKD_test<-CKD_test[complete.cases(CKD_test), ]
CKD_test$CKD<-NA
View(CKD_test)
predict_result <- predict(model,CKD_test,type = 'response')
fitted.results <- ifelse(predict_result > 0.5,1,0)
View(fitted.results)
table(fitted.results)
|
bdeaecde7de10c2b3bbc9a62d0c6025530131573
|
2e194fc4b9d9d43b57f6697aea6229e9cc652cf7
|
/cachematrix.R
|
c21f9f9e7baeb003b88f5d500e3cf97b73d4def8
|
[] |
no_license
|
lingani/ProgrammingAssignment2
|
9bbfa7779deec73ee5ebdf80bdaee2946a6f2164
|
58b4faa281c08519ac1e740f225493a093ec5111
|
refs/heads/master
| 2021-01-15T18:09:54.602477
| 2014-08-12T17:21:03
| 2014-08-12T17:21:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,984
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
##This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" object that can cache its own inverse.
## Returns a list of four closures sharing the matrix `x` and the cached
## inverse: set/get the matrix, and setSolve/getSolve for the inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  ## Replace the stored matrix and invalidate any cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }

  ## Return the stored matrix.
  get <- function() x

  ## Store a computed inverse in the cache.
  setSolve <- function(inv) cached_inverse <<- inv

  ## Return the cached inverse (NULL if not yet computed).
  getSolve <- function() cached_inverse

  list(set = set, get = get, setSolve = setSolve, getSolve = getSolve)
}
##This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
##If the inverse has already been calculated (and the matrix has not changed),
##then cacheSolve should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## A previously computed inverse is reused from the cache (with a message);
## otherwise the inverse is computed with solve(), cached, and returned.
## `x` is the list of closures (set/get/setSolve/getSolve); `...` is passed
## on to solve(). The supplied matrix is assumed to be invertible.
cacheSolve <- function(x, ...) {
  cached <- x$getSolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  mat <- x$get()
  ## solve() with a single matrix argument returns its inverse.
  inverse <- solve(mat, ...)
  x$setSolve(inverse)
  inverse
}
|
3ab5c45cf61b630143f0d14a41df10f72baab5fa
|
f6badfd96b3c593b1e08179846ecce25dd7d73b9
|
/Scripts/Final_Project.R
|
a317d37d496211a21a27a2d95b01a01b242c9915
|
[] |
no_license
|
kkdobbin/SGMA_DACparticipation
|
488509f9d060e7531b835aac8dd9044da6150f80
|
d3b67bfe0dc743fb40eb2f9a43e1bf40baa59f3c
|
refs/heads/master
| 2022-02-24T01:16:07.156687
| 2022-01-20T17:45:18
| 2022-01-20T17:45:18
| 124,990,554
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,024
|
r
|
Final_Project.R
|
# Final Project Process
# Analysis of DAC (disadvantaged community) participation in GSAs under SGMA.
# Workflow: merge datasets -> clean/recode -> build a 0-5 participation DV ->
# exploratory plots -> OLS models -> regression diagnostics -> transformations.
# Set up
library(dplyr)
library(tidyverse)
library(foreign)
library(car)
library(MASS)
library(lmtest)
library(tseries)
library(broom)
# Creating master data ------------------------------------------------
# Load data
DACP <- read_csv("Data/DACP_GSApostings_3.16.18.csv")
IRWM <- read_csv("Outputs/DACP_IRWM.csv")
DEM <- read_csv("Outputs/DEM_by_DACO_viaBG_all.csv")
# Clarify some variables before joining (rename by position; NOTE(review):
# positional renames break silently if the CSV column order changes).
colnames(DACP)[17] <- "GSA_Int_a_"
colnames(DACP)[18] <- "GSA_Int_Percent"
# Create master dataframe
Data <- left_join(DACP, DEM, by = c("DACP_IDNumber" = "DACP_Place_IDNumber"))
# removign this because it is duplicating my GSA data when they are in more than one IRWM! Data <- left_join(Data, IRWM, by = c("DACP_IDNumber" = "DACPlace_ID_Number"))
# Clean up data -----------------------------------------------------------
# Deal with surrogates
# Consider all Surrogates ("S") as Ns
table(Data$GSAMember) ## First count to be sure
Data$GSAMember[Data$GSAMember == "S"] <- "N"
table(Data$GSAMember) # It works!
table(Data$DecisionMaker)
Data$DecisionMaker[Data$DecisionMaker == "S"] <- "N"
table(Data$DecisionMaker)
table(Data$GSA_eligible_entity)
Data$GSA_eligible_entity[Data$GSA_eligible_entity == "S"] <- "N"
table(Data$GSA_eligible_entity)
# Make variable types correct
Data$GSAType <- as.factor(Data$GSAType)
Data$GSAMember <- as.factor(Data$GSAMember)
Data$DecisionMaker <- as.factor(Data$DecisionMaker)
Data$Listed_IP <- as.factor(Data$Listed_IP)
Data$Incorporated <- as.factor(Data$Incorporated)
# NOTE(review): the rest of the script mostly uses 'DACP_Population';
# confirm whether 'DAC_Population' is a real column or a typo here and in
# the lm() calls below.
Data$DAC_Population <- as.numeric(Data$DAC_Population)
Data$GSA_eligible_entity <- as.factor(Data$GSA_eligible_entity)
# removing this because I took out IRWM Data$Any_Pilot <- as.factor(Data$Any_Pilot)
Data$AdvisoryCommittee <- as.factor(Data$AdvisoryCommittee)
# Make MOU and MOA the same level
Data$GSAType[Data$GSAType == "MOA"] <- "MOU"
table(Data$GSAType)
Data$GSAType <- factor(Data$GSAType) # drop the now-unused "MOA" level
table(Data$GSAType)
# NEED TO DEAL WITH MHI zeros (zero income codes are missing data)
Data$DACP_MHI[Data$DACP_MHI == 0] <- NA
# NEED TO DEAL WITH POP zero
Data$DACP_Population[Data$DACP_Population == 0] <- NA
#Data has 286 observations total before limiting it any more than just small DACs and intersections ten percent or greater. Plus nine NAs. ANy other NAs or observations removed? what is the total universe here? narrowed it down to have just 9 NAs (one observation has NAs in two categories)
# Figure out how many unique DACP in this 286 master dataset
uniqueDACPData <- unique(Data$DACP_IDNumber)
length(uniqueDACPData) #243
# Figure out how many joined to more than one GSA
tableunique <- as.data.frame(table(Data$DACP_IDNumber))
count(tableunique %>% filter(tableunique$Freq >1)) # 39 DACPs joined with more than one GSA.
count(tableunique %>% filter(tableunique$Freq == 2)) #35 joined with two
max(tableunique$Freq)
# Limit to those that have GSA eligible entities
Data <- Data %>% filter(Data$GSA_eligible_entity == "Y")
#Lets make a new dataframe with just the variables I care about to figure this out
Variables <- Data %>% dplyr::select(DACP_MHI, GSAType, DACP_Population, Percent_latino, Percent_white_not_hispanic, AREAofDACP_a_, GSA_Int_Percent, Listed_IP, Incorporated, AdvisoryCommittee)
Variables2 <- na.omit(Variables) # ugh there is tons of NAs in IP varaible due to decision makers
# Change NA is Listed_IP variable to yes's
Data$Listed_IP[is.na(Data$Listed_IP)] <- "Y"
#now a lot fewer NAs but three in advisory committee column that should be Ns
Data$AdvisoryCommittee[is.na(Data$AdvisoryCommittee)] <- "N"
table(Data$AdvisoryCommittee)
# goign back to variables dataframe now and omitting NAs there is only nine lost which are the 9 with MHI of zero (another one had pop zero but also have MHI zero)
#Ends with final data set with 121 observations, one of which has an NA
# Create Dependent Variables ----------------------------------------------
# Add participation dependent variable (DV) DV = master 0 to 5 composite:
# 0 = no involvement ... 5 = member and voting/executive decision maker.
Data$DV <- NA
Data$DV <- ifelse(Data$GSAMember == "N" & Data$DecisionMaker == "N", 0, NA)
Data$DV <- ifelse(Data$GSAMember == "Y" & Data$DecisionMaker == "N", 1, Data$DV)
Data$DV <- ifelse(Data$GSAMember == "N" & Data$DecisionMaker == "Y" & Data$Voting_type == "S", 2, Data$DV)
Data$DV <- ifelse(Data$GSAMember == "N" & Data$DecisionMaker == "Y" & Data$Voting_type == "V", 3, Data$DV)
Data$DV <- ifelse(Data$GSAMember == "N" & Data$DecisionMaker == "Y" & Data$Voting_type == "E", 3, Data$DV)
Data$DV <- ifelse(Data$GSAMember == "Y" & Data$DecisionMaker == "Y" & Data$Voting_type == "S", 4, Data$DV)
Data$DV <- ifelse(Data$GSAMember == "Y" & Data$DecisionMaker == "Y" & Data$Voting_type == "V", 5, Data$DV)
Data$DV <- ifelse(Data$GSAMember == "Y" & Data$DecisionMaker == "Y" & Data$Voting_type == "E", 5, Data$DV)
#Deal with those DACPs not in IRWMs DONT NEED TO DO BECAUSE ELIMINATED IRWM
# Data$Any_Pilot[is.na(Data$Any_Pilot)] <- "N"
#table(Data$Any_Pilot)
# Data Visualization and Exploration --------------------------------------
# DV histogram
hist(Data$DV, breaks = 6, xlab = "6 Category Participation Variable", main = "Histogram of Dependent Variable", ylim = c(0, 100))
# IV individual Scatterplots and transformations
# MHI (cubed to reduce skew)
scatterplot(Data$DV ~ Data$DACP_MHI) # scewed
Data$Trans_DACP_MHI <- (Data$DACP_MHI)^3
scatterplot(Data$DV ~ (Data$Trans_DACP_MHI))
# Population (squared to reduce skew)
scatterplot(Data$DV ~ Data$DACP_Population) # scewed
Data$Trans_DACP_Population <- (Data$DACP_Population)^2
scatterplot(Data$DV ~ Data$Trans_DACP_Population)
# GSA Type
scatterplot(Data$DV ~ Data$GSAType)
# DEM (demographics)
scatterplot(Data$DV ~ Data$Percent_white_not_hispanic)
scatterplot(Data$DV ~ Data$Percent_latino)
# scatterplot(Data$DV ~ Data$Any_Pilot) eliminated IRWM
scatterplot(Data$DV ~ Data$AREAofDACP_a_) # scewed Only one that maybe isn't a monotone but close
# percent int
scatterplot(Data$DV ~ Data$GSA_Int_Percent)
# area of DACP
scatterplot(Data$DV ~ Data$AREAofDACP_a_) # major outlier
xoutData <- as.data.frame(Data %>% filter(Data$AREAofDACP_a_ < 20000)) # identify outlier
scatterplot(xoutData$DV ~ xoutData$AREAofDACP_a_)
# IP
scatterplot(Data$DV ~ Data$Listed_IP)
#Incorproated
scatterplot(Data$DV ~ Data$Incorporated)
# Advisory committee
scatterplot(Data$DV ~ Data$AdvisoryCommittee)
# exploring pairwise graphs
pairs(Variables) #dammit that is impossible to read
# DAC more social characteristics
# NOTE(review): Any_Pilot was never created (IRWM join removed above), so
# this select() will likely error — verify.
VariablesDAC <- Data %>% select(DACP_MHI, DACP_Population, Percent_latino, Any_Pilot, AREAofDACP_a_, Incorporated)
pairs(VariablesDAC)
# More GSA level characteristics
VariablesGSA <- Data %>% select(GSAType, GSA_Int_Percent, Listed_IP, AdvisoryCommittee, GSA_eligible_entity)
pairs(VariablesGSA)
# scatterplot matrices
pairs(~ Data$GSA_Int_Percent + Data$DACP_MHI + Data$DACP_Population + Data$Percent_latino)
with(Data, pairs(~ GSA_Int_Percent + DACP_MHI + DACP_Population + Percent_latino))
with( Data, scatterplotMatrix(~ DV + GSA_Int_Percent + Trans_DACP_MHI + DACP_Population + Percent_latino + Data$AREAofDACP_a_, Data = Data, main = "Scatterplot Matrix")) # promising?
with( Data, scatterplotMatrix(~ DV + GSA_Int_Percent + Trans_DACP_MHI + DACP_Population + Percent_latino + Data$AREAofDACP_a_ + GSAType + Incorporated, Data = Data, main = "Scatterplot Matrix"))
# Spread level plots. Can't get this to work so ignore
spreadLevelPlot(DV + 1 ~ Incorporated, Data)
boxplot(DV ~ Incorporated, Data)
spreadLevelPlot(DV + 1 ~ Incorporated, Data)
spreadLevelPlot(DV + 1 ~ GSAType, Data)
table(Data$Incorporated)
table(Data$GSAType)
# OLS --------------------------------------------------
# DV regressed on GSA and community characteristics.
# NOTE(review): 'Data$DAC_Population' vs 'Data$DACP_Population' — see note
# above; confirm the intended column.
OLS <- lm(Data$DV ~ Data$GSAType + Data$GSA_Int_Percent + Data$Trans_DACP_MHI + Data$DAC_Population + Data$Incorporated + Data$Percent_latino + Data$AREAofDACP_a_)
summary(OLS)
OLS2 <- lm(Data$DV ~ Data$GSAType + Data$GSA_Int_Percent + Data$Trans_DACP_MHI + Data$DAC_Population + Data$Incorporated)
summary(OLS2)
#just to test function form against one without transformed MHI
OLS3 <- lm(Data$DV ~ Data$GSAType + Data$GSA_Int_Percent + Data$DACP_MHI + Data$DAC_Population + Data$Incorporated)
summary(OLS3)
tidy(OLS2)
write_csv(tidy(OLS2), "Outputs/coefs_OLS2.csv")
# F test to compare
var.test(OLS, OLS2)
# More Data Visualization and Exploration --------------------------------------
# Normality diagnostics for OLS2
# Distribution of residuals
sresid <- studres(OLS2)
hist(sresid, freq=FALSE,
main="Distribution of Studentized Residuals")
xfit<-seq(min(sresid),max(sresid),length=120)
yfit<-dnorm(xfit)
lines(xfit, yfit) # normal
jarque.bera.test(OLS2$residuals) # because it is significant everythign is now okay not normally distributed
shapiro.test(OLS2$residuals) # Shapiro-wilk test.
# Quantile comparison plots (NOTE(review): qq.plot is the deprecated name of
# car::qqPlot — may not exist in current car versions)
qq.plot(OLS2, main = "Quantile-Comparison Plot")
#just for fun to see how this is different than the non transformed version
qq.plot(OLS3) # not much different.
# Linearity: the expected mean value of residuals is 0
mean(sresid) # All looks well?
# RESET (functional form) test
Data2 <- Data %>% filter(!is.na(Data$DACP_MHI))
Data2$fit <- OLS2$fitted.values # in original data set save fitted values
reset <- lm(Data2$DV ~ Data2$GSAType + Data2$GSA_Int_Percent + Data2$DACP_MHI + Data2$DACP_Population + Data2$Incorporated + I(Data2$fit^2) + I(Data2$fit^3))
summary(reset)
anova(OLS2, reset) # compare original model to new model with added fitted vallues (squares and cubes) see if non-linear combinations of predictors explain response. if you can reject the null (0), model is misspecifieds. # if it is significant we are worried it isn't linear. In this case we are okay if we are using .05 as threshold # Not linear fuck
resettest(OLS2, power = 2:3, type = c("fitted", "regressor", "princomp"))
resettest(OLS3, power = 2:3, type = c("fitted", "regressor", "princomp")) # cool to see it is worse
# Nonstochastic regressors - x values are independent of the error term
residualPlots(OLS2, terms = ~ ., type = "rstudent", fitted = F) # Overall on the outlier front looking okay. doesn't seem like a ton more than 5% are falling outside of the plus or minus two range. Only significant result for lack of fit test is Percent_latino. For the distribution of residuals. Things look good except maybe not GSAType, incorporated or percent latino. Only percent latino is significant for lack of fit test.
residualPlots(OLS2, terms = ~ 1, type = "rstudent", fitted = T) # I have no idea how to interpret this one.
marginalModelPlot(OLS2)
avPlots(OLS2)
# homoscedacitity
# plot residuals on fitted values (if spread of residuals differs, may have heteroskedasticity)
plot(OLS2$residuals ~ OLS2$fitted.values, xlab="Fitted Values", ylab="Residuals")
### heteroskedasticity ###
bptest(OLS2, studentize=F) # Breusch-Pagan test if is is significant it is heteroskedastic
bptest(OLS2) # isn't anymore when I take out the F studentized part....
# with(Data, bwplot(DV ~ GSAType))
# Multicolinearity
vif(OLS2) # NOT A PROBLEM! YAY!
# Outliers and influential observations
# studentized residuals
plot(rstudent(OLS2))
table(rstudent(OLS2) > 2)
table(rstudent(OLS2) < -2)
# Exploring leverage
head(hatvalues(OLS2))
influenceIndexPlot(OLS2, vars= "hat", id.n=4)
mean(hatvalues(OLS2))
table(hatvalues(OLS2) > mean(hatvalues(OLS2))*2)
table(hatvalues(OLS2) > mean(hatvalues(OLS2))*3) # looking good but just realized this isn't what they mean by average h bar.... h bar is equal to k plus 1 / n
# Grace gave a different cut off so here that is
abline(h = 2 * length(coef(OLS2)) / nrow(Data2)) # cutoff for hat values calculated based on # of estimates and observations.
# exploring influence
head(cooks.distance(OLS2))
max(cooks.distance(OLS2))
influenceIndexPlot(OLS2, vars="Cook", id.n=5)
cutoff <- 4/(120-5-1) # cutoff point = 4/(n-k-1). # Change k here
abline(h = 4/(120-5-1))
plot(OLS2, which=4, cook.levels=cutoff)
# cutoff for cook's generally 1 but can miss influential data about 17 above the cut off line here
# all together
influenceIndexPlot(OLS2, vars=c("Cook", "Studentized", "hat"), id.n=5)
# cutoff for studentized residuals is generally abs(3) (absolute value)
influencePlot(OLS2, id.n=2, main = "Bubble Plot") # only one verticle dashed line because 3h cut off for hat values is outside of all observations. circle is proportional to cook's D
# Predict values
predicted <- predict(OLS2)
range(predicted)
table(predicted > 5)
table(predicted < 0)
# Transformed OLS ---------------------------------------------------------
# NOTE(review): OLS2/OLS3 are REASSIGNED here, clobbering the models used in
# the diagnostics above — rerunning earlier sections after this point gives
# different results. Also Any_Pilot reappears here (see note above).
#Trying to either transform Y down or X up to get rid of bulging
OLS2 <- lm( sqrt(Data$DV) ~ Data$GSAType + Data$GSA_Int_Percent + Data$DACP_MHI + Data$DAC_Population + Data$Incorporated + Data$Percent_latino + Data$Any_Pilot)
summary(OLS2)
qq.plot(OLS2) # That looks worse
# NOTE(review): inside an lm formula, x^2 is an interaction shorthand, not a
# square — wrap in I() if squaring was intended.
OLS3 <- lm(Data$DV ~ Data$GSAType + Data$GSA_Int_Percent + Data$DACP_MHI^2 + Data$DAC_Population^2 + Data$Incorporated + Data$Percent_latino + Data$Any_Pilot)
summary(OLS3)
qq.plot(OLS3) # Doesn't seem to have done much
var.test(OLS, OLS2) # F test to compare two variances - how to interpret?
|
1e5a5d85b1b69e0d3edfd89b8da993dabc41f0a5
|
42ac78fed8e8494cc54a533e6cb9b4c18ca51369
|
/branches/Matrix-mer2/R/Auxiliaries.R
|
6dfca2b93991b86445230a08d050f9f50f30ebfe
|
[] |
no_license
|
LTLA/Matrix
|
8a79cac905cdb820f95190e99352cd9d8f267558
|
2b80087cfebc9f673e345000aeaf2170fc15b506
|
refs/heads/master
| 2020-08-07T20:22:12.075155
| 2019-09-28T21:21:10
| 2019-09-28T21:21:10
| 213,576,484
| 0
| 1
| null | 2019-10-13T00:56:38
| 2019-10-08T07:30:49
|
C
|
UTF-8
|
R
| false
| false
| 7,442
|
r
|
Auxiliaries.R
|
#### "Namespace private" Auxiliaries such as method functions
#### (called from more than one place --> need to be defined early)

## For %*% (M = Matrix; v = vector (double or integer {complex maybe?}):
## These re-dispatch the current generic after promoting the plain vector to
## a one-column (or one-row) matrix, so Matrix %*% vector works.
.M.v <- function(x, y) callGeneric(x, as.matrix(y))
.v.M <- function(x, y) callGeneric(rbind(x), y)

.has.DN <- ## has non-trivial Dimnames slot?
    function(x) !identical(list(NULL,NULL), x@Dimnames)

## Error helpers for generics with no method implemented yet;
## cl / cl1 / cl2 are the class names of the offending arguments.
.bail.out.1 <- function(fun, cl) {
    stop(gettextf('not-yet-implemented method for %s(<%s>)', fun, cl),
         call. = FALSE)
}
.bail.out.2 <- function(fun, cl1, cl2) {
    stop(gettextf('not-yet-implemented method for %s(<%s>, <%s>)',
                  fun, cl1, cl2), call. = FALSE)
}

## chol() via "dpoMatrix": coerce to the positive-definite class and only
## factor when the coerced object validates; 'pivot' and 'LINPACK' are
## accepted for the chol() signature but not used here.
cholMat <- function(x, pivot, LINPACK) {
    px <- as(x, "dpoMatrix")
    if (isTRUE(validObject(px, test=TRUE))) chol(px)
    else stop("'x' is not positive definite -- chol() undefined.")
}
## Verify that 'a' and 'b' have identical dimensions.
## Returns the common dim() vector; on mismatch it signals an error that
## names the *caller* of dimCheck (via sys.call(sys.parent())).
dimCheck <- function(a, b) {
  dim_a <- dim(a)
  dim_b <- dim(b)
  if (!all(dim_a == dim_b)) {
    stop(gettextf("Matrices must have same dimensions in %s",
                  deparse(sys.call(sys.parent()))),
         call. = FALSE)
  }
  dim_a
}
## Merge the dimnames of 'a' and 'b' for a binary matrix operation.
## Assumes dimCheck() has already verified equal dimensions.
## Rules: if only one operand has non-trivial dimnames, use those; if both
## do, take a's and fill in any NULL components from b, warning (in the
## caller's name) when both are non-NULL and disagree. Returns
## list(NULL, NULL) when neither operand has dimnames.
dimNamesCheck <- function(a, b) {
    ## assume dimCheck() has happened before
    nullDN <- list(NULL,NULL)
    ## h.a / h.b: does a / b carry non-trivial dimnames?
    ## (the assignments to dna/dnb happen inside the conditions)
    h.a <- !identical(nullDN, dna <- dimnames(a))
    h.b <- !identical(nullDN, dnb <- dimnames(b))
    if(h.a || h.b) {
        if (!h.b) dna
        else if(!h.a) dnb
        else { ## both have non-trivial dimnames
            r <- dna # "default" result
            for(j in 1:2) {
                dn <- dnb[[j]]
                if(is.null(r[[j]]))
                    r[[j]] <- dn
                else if (!is.null(dn) && any(r[[j]] != dn))
                    warning(gettextf("dimnames [%d] mismatch in %s", j,
                                     deparse(sys.call(sys.parent()))),
                            call. = FALSE)
            }
            r
        }
    }
    else
        nullDN
}
## Verify that 'a' and 'b' have the same number of rows.
## Returns the common row count; on mismatch it signals an error naming the
## caller of rowCheck.
rowCheck <- function(a, b) {
  nrow_a <- dim(a)[1]
  nrow_b <- dim(b)[1]
  if (nrow_a != nrow_b) {
    stop(gettextf("Matrices must have same number of rows in %s",
                  deparse(sys.call(sys.parent()))),
         call. = FALSE)
  }
  ## return the common nrow()
  nrow_a
}
## Verify that 'a' and 'b' have the same number of columns.
## Returns the common column count; on mismatch it signals an error naming
## the caller of colCheck.
colCheck <- function(a, b) {
  ncol_a <- dim(a)[2]
  ncol_b <- dim(b)[2]
  if (ncol_a != ncol_b) {
    stop(gettextf("Matrices must have same number of columns in %s",
                  deparse(sys.call(sys.parent()))),
         call. = FALSE)
  }
  ## return the common ncol()
  ncol_a
}
## Blank out the column names of 'x' while keeping the row names.
## Useful for compact printing of (parts of) sparse matrices, where the
## column labels would otherwise dominate the display.
emptyColnames <- function(x) {
  row_labels <- dimnames(x)[[1]]   # possibly NULL
  blank_cols <- character(dim(x)[2])
  dimnames(x) <- list(row_labels, blank_cols)
  x
}
## Print a triangular Matrix, masking the structurally-zero triangle with
## "." (modeled along stats:::print.dist). 'x' is assumed to carry an
## @uplo slot ("U" or "L").
prTriang <- function(x, digits = getOption("digits"),
                     justify = "none", right = TRUE)
{
    ## modeled along stats:::print.dist
    diag <- TRUE
    upper <- x@uplo == "U"

    m <- as(x, "matrix")
    cf <- format(m, digits = digits, justify = justify)
    ## Replace the entries of the unstored triangle with "." markers.
    if(upper)
        cf[row(cf) > col(cf)] <- "."
    else
        cf[row(cf) < col(cf)] <- "."
    print(cf, quote = FALSE, right = right)
    invisible(x)
}

## Print a Matrix with a class/dimension header. Large matrices (more than
## getOption("max.print") cells) are abbreviated to head and tail rows.
prMatrix <- function(x, digits = getOption("digits")) {
    d <- dim(x)
    cl <- class(x)
    cat(sprintf('%d x %d Matrix of class "%s"\n', d[1], d[2], cl))
    maxp <- getOption("max.print")
    if(prod(d) <= maxp) {
        if(is(x, "triangularMatrix"))
            prTriang(x, digits = digits)
        else
            print(as(x, "matrix"), digits = digits)
    }
    else { ## d[1] > maxp / d[2] >= nr :
        ## Show roughly half the printable rows from the top, half from the
        ## bottom, with an ellipsis in between.
        m <- as(x, "matrix")
        nr <- maxp %/% d[2]
        n2 <- ceiling(nr / 2)
        print(head(m, max(1, n2)))
        cat("\n ..........\n\n")
        print(tail(m, max(1, nr - n2)))
    }
    ## DEBUG: cat("str(.):\n") ; str(x)
    invisible(x)# as print() S3 methods do
}
## For sparseness handling

## Indices of the non-zero entries of 'x'.
## For a plain numeric vector: 0-based positions of the non-zero elements.
## For a sparseMatrix: an (i, j) two-column matrix of non-zero positions —
## directly from the triplet slots for TsparseMatrix, otherwise via the
## compiled helper for compressed (C/R sparse) forms.
non0ind <- function(x) {
    if(is.numeric(x))
        return(if((n <- length(x))) (0:(n-1))[x != 0] else integer(0))
    ## else return a (i,j) matrix of non-zero-indices

    stopifnot(is(x, "sparseMatrix"))
    if(is(x, "TsparseMatrix"))
        return(unique(cbind(x@i,x@j)))
    ## column- vs row-compressed is detected by the presence of an "i" slot
    isCol <- function(M) any("i" == slotNames(M))
    .Call("compressed_non_0_ij", x, isCol(x), PACKAGE = "Matrix")
}

### There is a test on this in ../tests/dgTMatrix.R !

## Canonicalize a triplet ("gT") sparse matrix so (i, j) pairs are sorted
## and unique, by round-tripping through the compressed representation.
## Non-triplet matrices are already unique and are returned unchanged.
uniq <- function(x) {
    if(is(x, "TsparseMatrix")) {
        ## Purpose: produce a *unique* triplet representation:
        ##            by having (i,j) sorted and unique
        ## -----------------------------------------------------------
        ## The following is  *not* efficient {but easy to program}:
        if(is(x, "dgTMatrix")) as(as(x, "dgCMatrix"), "dgTMatrix")
        else if(is(x, "lgTMatrix")) as(as(x, "lgCMatrix"), "lgTMatrix")
        else stop("not implemented for class", class(x))

    } else x      # not 'gT' ; i.e. "uniquely" represented in any case
}

## Disabled draft of an in-place efficient version (never evaluated because
## of the if(FALSE) guard; kept for reference).
if(FALSE) ## try an "efficient" version
uniq_gT <- function(x)
{
    ## Purpose: produce a *unique* triplet representation:
    ##		by having (i,j) sorted and unique
    ## ----------------------------------------------------------------------
    ## Arguments: a "gT" Matrix
    stopifnot(is(x, "gTMatrix"))
    if((n <- length(x@i)) == 0) return(x)
    ii <- order(x@i, x@j)
    if(any(ii != 1:n)) {
        x@i <- x@i[ii]
        x@j <- x@j[ii]
        x@x <- x@x[ii]
    }
    ij <- x@i + nrow(x) * x@j
    if(any(dup <- duplicated(ij))) {

    }
    ### We should use a .Call() based utility for this!
}
## Transpose a general dense ("geMatrix") object by transposing the packed
## data vector and swapping the Dim/Dimnames slots.
t_geMatrix <- function(x) {
    x@x <- as.vector(t(array(x@x, dim = x@Dim))) # no dimnames here
    x@Dim <- x@Dim[2:1]
    x@Dimnames <- x@Dimnames[2:1]
    ## FIXME: how to set factors?
    x
}

## t( [dl]trMatrix ) and t( [dl]syMatrix ) :
## Transpose a triangular or symmetric dense matrix; the stored triangle
## flips ("U" <-> "L"), while the 'diag' slot (triangular case) is kept.
t_trMatrix <- function(x) {
    x@x <- as.vector(t(as(x, "matrix")))
    x@Dim <- x@Dim[2:1]
    x@Dimnames <- x@Dimnames[2:1]
    x@uplo <- if (x@uplo == "U") "L" else "U"
    # and keep x@diag
    x
}

## Copy the structural slots (uplo, and diag for triangular classes) from
## 'from' onto a freshly-built dense matrix 'm' of the matching class.
fixupDense <- function(m, from) {
    if(is(m, "triangularMatrix")) {
        m@uplo <- from@uplo
        m@diag <- from@diag
    } else if(is(m, "symmetricMatrix")) {
        m@uplo <- from@uplo
    }
    m
}

## -> ./ldenseMatrix.R :
## Coerce a logical ("l*") dense Matrix to the corresponding double ("d*")
## class, converting the data slot with as.double().
l2d_Matrix <- function(from) {
    stopifnot(is(from, "lMatrix"))
    fixupDense(new(sub("^l", "d", class(from)),
                   x = as.double(from@x),
                   Dim = from@Dim, Dimnames = from@Dimnames,
                   factors = list()), ## FIXME: treat 'factors' smartly
               from)
}

## Disabled helper: run a generic on the double version of a logical Matrix
## and coerce the result back (never evaluated; if(FALSE) guard).
if(FALSE)# unused
l2d_meth <- function(x) {
    cl <- class(x)
    as(callGeneric(as(x, sub("^l", "d", cl))), cl)
}

## -> ./ddenseMatrix.R :
## Coerce a double ("d*") dense Matrix to the corresponding logical ("l*")
## class. NOTE(review): unlike l2d_Matrix, no 'x' slot is supplied here —
## confirm whether the data slot is meant to be dropped.
d2l_Matrix <- function(from) {
    stopifnot(is(from, "dMatrix"))
    fixupDense(new(sub("^d", "l", class(from)),
                   Dim = from@Dim, Dimnames = from@Dimnames,
                   factors = list()), ## FIXME: treat 'factors' smartly
               from)
}
## Try to coerce 'x' to the first class in 'classes' for which a coercion
## exists; return 'x' unchanged when none applies. Unless tryAnyway is TRUE,
## non-Matrix objects are returned untouched.
try_as <- function(x, classes, tryAnyway = FALSE) {
    if(!tryAnyway && !is(x, "Matrix"))
        return(x)
    ## else
    ok <- canCoerce(x, classes[1])
    while(!ok && length(classes <- classes[-1])) {
        ok <- canCoerce(x, classes[1])
    }
    if(ok) as(x, classes[1]) else x
}

## MM thinks the following should become part of 'methods' :
canCoerce <- function(object, Class) {
  ## Purpose: test if 'object' is coercable to 'Class', i.e.,
  ##	      as(object, Class) will {typically} work
  ## ----------------------------------------------------------------------
  ## Author: John Chambers, Date:  6 Oct 2005
  ## TRUE when the object already is of the class, or when a coerce method
  ## from its class (inheritance allowed on the 'from' side only) exists.
   is(object, Class) ||
   !is.null(selectMethod("coerce", c(class(object), Class),
			 optional = TRUE,
			 useInherited = c(from = TRUE, to = FALSE)))
}
|
bfe183693edc6b94fc96ae4f8ee30b6be8f084b4
|
fa90f1a971b81858e12529de41a3566cbbc79b47
|
/shinyApp/ui.R
|
ba337961b966490473bde99715f76685fb53dbe0
|
[] |
no_license
|
SBIMB/AWIGEN-1-Post-QC
|
de5b8875e86a8ee072fd98fe2b1d22cbbb2bc37c
|
00c1c04ba6f0b107e3622832af96a40340f432d1
|
refs/heads/master
| 2020-08-31T19:08:13.476188
| 2020-03-16T10:13:25
| 2020-03-16T10:13:25
| 218,762,592
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,390
|
r
|
ui.R
|
# Shiny UI for the AWI-Gen 1 post-QC dashboard.
# Layout: a dashboard with a sidebar (CSV upload + menu) and two tabs —
# "Home" (summary messages) and "Data" (categorical and numerical variable
# exploration: crosstabs, plots, missingness, summary stats, outliers).
library(shiny)
library(shinydashboard)
library(plotly)
library(shinyalert)

# Project-local sources: data loading (site_data, group_by) and the
# sectionLabels() UI helper.
source('data.R')
source('tab-modules.R')

shinyUI(
  dashboardPage(
    # header
    dashboardHeader(title = "AWI-Gen 1 QC"),
    # side bar
    dashboardSidebar(
      # importing dataset
      helpText("Import dataset here. Make sure it is a comma separated file"),
      fileInput("file", "Upload the csv here", accept = c(".csv")),
      br(),
      tags$hr(),
      # side bar menu items
      sidebarMenu(
        # data entry window
        menuItem("Home", tabName = "home"), # This menu Item displays all the important messages to user
        # data here..
        menuItem("Data", tabName = "data")
      )
    ),
    # body
    dashboardBody(
      # tab items
      tabItems(
        tabItem( tabName = "home", div(uiOutput("launch_summary"), align="center")),
        # data: This is the main screen where users search for variables for analyis
        # There are two sections: Categorical and Numerical sections.
        # All column enteries MUST belong to any of those sections.
        tabItem( tabName = "data",
          sectionLabels(),
          # top row: left = categorical variables (crosstabs/plot),
          # right = numerical variables (missingness, stats, outliers)
          fluidRow(
            column(6,
              tabBox(height = "500px", width = "250px",
                tabPanel("Variables",
                  helpText("Select one or 3 variables here"),
                  uiOutput("launch_columns"),
                  hr(),
                  selectInput("a_dataInput","Choose dataset here:", c(site_data))
                ),
                tabPanel("Crosstabs",
                  div(style = 'overflow-y:scroll;height:500px;',
                      verbatimTextOutput("a_crosstab_summary"))
                ),
                tabPanel("Plot", plotOutput("a_bar_plot"))
              )
            ),
            column(6,
              tabBox(height = "500px", width = "250px",
                tabPanel("Variables",
                  box(title = "Select numeric here ", status = "primary", solidHeader = T,
                      uiOutput("num_columns"),
                      selectInput("awigen2", "Choose variable",sort(group_by)))
                ),
                tabPanel("Missing", tableOutput("a_missing")),
                tabPanel("Not Missing", tableOutput("a_not_missing")),
                tabPanel("Mean", dataTableOutput("a_stats_mean")),
                tabPanel("Median", dataTableOutput("a_stats_median")),
                tabPanel("Summary", verbatimTextOutput("summary_of_selected_awigen")),
                tabPanel("Outliers",
                  div(style = 'overflow-y:scroll;height:5000px;',
                      dataTableOutput("a_return_outliers")
                  )
                )
              )
            )
          ),
          # bottom row: codebook text (left) and the numeric-variable plot
          # (right)
          fluidRow(
            column(6,
              tabBox(height = "500px",width = "250px",
                tabPanel("Codebook",
                  div(style = 'overflow-y:scroll;height:400px;',
                      verbatimTextOutput("aCodebook"))
                )
              )
            ),
            column(6,
              tabBox(height = "500px",width = "250px",
                tabPanel("Plot", plotOutput("plot_awigen"))
              )
            )
          )
        )
      )
    )
  )
)
|
5a20a78a39321d77c24419238cc6501e2e023d72
|
5f754675e68444db1fa7f077eff5dc6ee622a0be
|
/R/indicators.R
|
50349701633ab31ea3a302f6cec2f70c406208e1
|
[] |
no_license
|
flr/mydas
|
893c3e8f93957109377329682fc99eee4518cbc0
|
726126972e5bca4d6a99ba774f05ea63d60f7b87
|
refs/heads/master
| 2023-06-22T22:43:10.246612
| 2023-06-16T05:07:31
| 2023-06-16T05:07:31
| 221,472,392
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,028
|
r
|
indicators.R
|
#' @export indicators
#'
## Simulate an FLR equilibrium/stock projection for a set of life-history
## parameters and fishing-mortality multipliers, and return summary
## indicators plus catch-at-age numbers and derived lengths.
## params: FLPar of life-history parameters (must include 'a' and 'b' of the
##         length-weight relationship, used to back-calculate lengths).
## m:      natural-mortality model name passed to lhEql().
## f:      F multiplier(s) applied to Fmsy (numeric or FLPar).
## srDev:  recruitment deviates (FLQuant); its 6th (iter) dimension sets the
##         number of projection iterations.
## fbar:   relative F trajectory (defaults to a flat series shaped like srDev).
indicators<-function(params,
                     m    ="gislason",
                     f    =1,
                     srDev=FLQuant(rep(1,121)),
                     fbar =srDev%=%1){
  ##set up equilibrium object
  ## Promote a plain numeric f to an FLPar so downstream dim() calls work.
  if ("numeric"%in%is(f))
    f=FLPar(f=array(f,c(1,length(f))))

  ## need to add interactions for f and par
  ## Cross every parameter set with every f value when both have >1 column.
  if (dim(params)[2]>1&dim(f)[2]>1){
    npar=dim(params)[2]
    params=as(mdply(seq(dim(f)[2]), with,
                    cbind(model.frame(params)))[,-c(1,dim(params)[1]+2)],"FLPar")
    f  =rep(c(f),each=npar)
    f  =FLPar(array(f,c(1,length(f))))
  }

  eql=lhEql(params,m=m)

  ## convert to FLStock with constant F
  eq=eql
  fbar(eq)=fbar

  ## Scale the F trajectory by f * Fmsy (taken from the msy reference point).
  mlt=FLPar(f=array(c(f)*c(eq@refpts["msy","harvest"]),
                    c(1,length(c(f)*c(eq@refpts["msy","harvest"])))))
  fbar(eq)=fbar(eq)%*%mlt

  ## Project forward with recruitment deviates; one iteration per srDev iter.
  stk=fwd(propagate(as(eq,"FLStock"),dim(srDev)[6]),fbar=fbar(eq)[,-1],
          sr=eq,residuals=srDev)

  ## Other stuff
  #srr=model.frame(FLQuants(eql,"ssb"=ssb,"rec"=rec),drop=TRUE)
  #srp=model.frame(FLQuants("rp"=setPlusGroup(stock.n(eq)[,1]*stock.wt(eq)[,1]*eq@mat[,1],12)),drop=T)
  #ts =model.frame(FLQuants(stk,"ssb"=ssb,"biomass"=stock,"rec"=rec,"catch"=catch,
  #                             "dev"=function(x) propagate(srDev,dim(x)[6])),drop=T)

  ## Summary stats
  ind=mydas:::omSmry(stk,eql,params)
  refs=model.frame(popdyn(params,eq=lhEql(params,m=m)))
  key=cbind(model.frame(params),f=c(f))

  ## cln inverts the length-weight relation W = a * L^b to recover lengths
  ## from catch weights-at-age.
  list(ind=ind,refs=refs,key=key,
       ctn=catch.n(stk),
       cln=exp(log(catch.wt(stk)%/%params["a"])%/%params["b"]))
}
#' $L_{max5\%}$ Mean length of largest 5\%
#' $L_{95\%}$ $95^{th}$ 95thpercentile
#' $P_{mega}$ Proportion of individuals above $L_{opt} + 10\%$
#' $L_{25\%}$ $25^{th}$ percentile of length distribution
#' $L_{c}$ Length at $50\%$ of modal abundance
#' $L_{mean}$ Mean length of individuals $> L_c$
#' $L_{max_{y}}$ Length class with maximum biomass in catch
#' $L_{mean}$ Mean length of individuals $> L$
|
33dfee038cd1836d383f584d9e4f1792868a912b
|
8c266b8d14fc5ee1aead7e69d6cf3bb7f5d84ccf
|
/rfile.R
|
0d3f90c97805360e7177caf2b6a8c5f4895ddfa7
|
[] |
no_license
|
krinya/corona_simulation
|
1536b6440afe110533f60d1f777c90b425fd398d
|
dd6dfeb322fc77912686b0fa4ca16bb945bd8a33
|
refs/heads/master
| 2022-04-27T01:40:31.661332
| 2020-04-24T13:50:49
| 2020-04-24T13:50:49
| 257,967,430
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,469
|
r
|
rfile.R
|
library(data.table)
library(tidyr)
library(ggplot2)
library(gganimate)
library(ggrepel)

theme_set(theme_bw())

# Initial parameters for the SEIRD simulation --------------------------------
# Number of days
t_max <- 100
# Euler integration step: 0.1 day, i.e. 10 sub-steps per day for a smoother
# curve (1000 steps total over 100 days)
dt <- 0.1
# time grid
t <- seq(from = 0, to = t_max, by = dt)

# alpha: infection rate (0-1), i.e. rate at which exposed become infectious
# beta:  contact rate (persons)
# gamma: recovery rate (0-1)
# rho:   social distancing factor (0 everyone, 1 base situation)
# dr:    death rate
# NOTE(review): `gamma` and `t` shadow base R functions of the same name;
# harmless here but worth renaming eventually.
alpha = 0.2
beta = 1.75
gamma = 0.5
rho = 0.0
dr <- 0.05
N <- 10000

# Normalized population values (fractions of N); start with one exposed
# individual and everyone else susceptible
S_0 <- 1-1/N
E_0 <- 1/N
I_0 <- 0
R_0 <- 0
D_0 <- 0
#' Simulate a SEIRD epidemic with explicit Euler integration.
#'
#' Uses the global parameters alpha (incubation rate), beta (contact rate),
#' gamma (recovery rate), rho (social-distancing factor; 0 disables the
#' adjustment, matching the original behaviour), dr (death rate), the time
#' grid `t` with step `dt`, and the normalised initial state
#' S_0 / E_0 / I_0 / R_0 / D_0.
#'
#' Fixes over the original: states are preallocated instead of growing the
#' table with rbindlist() every iteration (O(n^2) copies), the `population`
#' sanity column is computed once instead of being recomputed over the whole
#' table at every step, the per-step print() debug output is removed, and a
#' length-1 time grid no longer indexes out of range via t[2:length(t)].
#'
#' @return data.table with columns t, S, E, I, R, D and a `population`
#'   check column (the compartments should always sum to ~1).
create_data <- function(){
  n_steps <- length(t)

  # Preallocate one numeric vector per compartment.
  S <- E <- I <- R <- D <- numeric(n_steps)
  S[1] <- S_0; E[1] <- E_0; I[1] <- I_0; R[1] <- R_0; D[1] <- D_0

  # rho == 0 means "no social-distancing adjustment": the contact rate is
  # used as-is; otherwise it is scaled by rho (kept from the original code).
  contact <- if (rho == 0) beta else rho * beta

  for (i in seq_len(n_steps - 1)) {
    S[i + 1] <- S[i] - (contact * S[i] * I[i]) * dt
    E[i + 1] <- E[i] + (contact * S[i] * I[i] - alpha * E[i]) * dt
    I[i + 1] <- I[i] + (alpha * E[i] - gamma * I[i] - dr * I[i]) * dt
    R[i + 1] <- R[i] + (gamma * I[i]) * dt
    D[i + 1] <- D[i] + (dr * I[i]) * dt
  }

  dataSimulation <- data.table(t = t, S = S, E = E, I = I, R = R, D = D)
  # Sanity check: compartments must sum to the (normalised) total population.
  dataSimulation[, population := S + E + I + R + D]
  return(dataSimulation)
}
### Run the simulation -------------------------------------------------------
dataSimulation <- create_data()

### Plot values
# Reshape wide -> long so every compartment (S, E, I, R, D, population)
# becomes a (type, value) pair that can be mapped to a colour.
longData <- gather(dataSimulation, type, value, S:population, factor_key=TRUE)
setDT(longData)

# Static plot: one line per compartment, with value labels at t = 50
ggplot(longData, aes(x = t, y = value)) +
  geom_line(aes(color = type), size = 1) +
  geom_text_repel(data = longData[t == 50], aes(x = t , y = value ,label = round(value, 2)))

# Animated version: reveal the lines over time (gganimate)
ggplot(longData, aes(x = t, y = value)) +
  geom_line(aes(color = type), size = 1) +
  transition_reveal(t)
|
ced375987fea71ad7b0d25639963f652c6da6c3c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GSAQ/examples/totqtlhit.Rd.R
|
6f54a2ea2e1811fd8843e0445cecf119c967e193
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 317
|
r
|
totqtlhit.Rd.R
|
library(GSAQ)

### Name: totqtlhit
### Title: Computation of total number of qtl-hits found in the whole gene
###   space
### Aliases: totqtlhit
### Keywords: genelist qtl qtlhit

### ** Examples

# Load the example gene list and the salt-tolerance QTL data shipped with
# GSAQ, coerce both to plain data frames, then count the QTL hits found in
# the whole gene space.
data(genelist)
data(qtl_salt)
genelist <- as.data.frame(genelist)
qtl <- as.data.frame(qtl_salt)
totqtlhit(genelist, qtl)
|
3a7adfad8ca22395e7553501ff14aa91a4c81337
|
dcd35737536a86172791f29ff762c965af6a5262
|
/helper-functions/Scatterplot_Function.R
|
3d6ee5f0827a23a4b8c02a849c9609a2e16889bb
|
[] |
no_license
|
jakepscott/Monash_Spotify_Dashboard
|
6e119a42862646e4acf2fb3b426b5f055b392179
|
886d2f13f7a609ec47b326989fa3577e64babc71
|
refs/heads/master
| 2023-07-30T21:43:38.741580
| 2021-09-23T02:16:52
| 2021-09-23T02:16:52
| 373,667,176
| 0
| 0
| null | 2021-06-12T01:34:07
| 2021-06-03T23:23:15
|
R
|
UTF-8
|
R
| false
| false
| 10,548
|
r
|
Scatterplot_Function.R
|
# Build an interactive (ggiraph) scatterplot of one Spotify audio feature
# against another.
#
# main_variable:       column name plotted on the y axis (string)
# comparison_variable: column name plotted on the x axis (string)
# playlist_or_track:   "playlist" -> one point per playlist (median values);
#                      "track"    -> one point per track
# data:                data frame of playlist- or track-level features; for
#                      the "minutes" and date columns it must carry the
#                      pre-built duration_label / Song_Saved_Label /
#                      Track_Release_Label columns used in tooltips
#
# Zero values on either axis are dropped before plotting; the median of each
# axis is drawn as a reference line.  Returns a ggplot object.
scatterplot_function <- function(main_variable, comparison_variable, playlist_or_track, data){
  if (playlist_or_track=="playlist") {
    # Set labels --------------------------------------------------------------
    #If variable=="minutes", use the custom made "duration_label", otherwise use a generic label
    data_scatter <- data %>%
      rowwise() %>%
      mutate(x_label=case_when(comparison_variable=="minutes" ~ duration_label,
                               comparison_variable=="Date_Song_Saved" ~ Song_Saved_Label,
                               comparison_variable=="Track_Release_Date" ~ Track_Release_Label,
                               TRUE ~ as.character(!!as.symbol(comparison_variable))),
             y_label=case_when(main_variable=="minutes"~duration_label,
                               main_variable=="Date_Song_Saved" ~ Song_Saved_Label,
                               main_variable=="Track_Release_Date" ~ Track_Release_Label,
                               TRUE ~ as.character(!!as.symbol(main_variable))),
             label=glue("Playlist: {str_to_title(playlist_name)} \nMedian {str_to_title(str_replace_all(main_variable, '_', ' '))}: {y_label} \nMedian {str_to_title(str_replace_all(comparison_variable, '_', ' '))}: {x_label}"))

    # PLot --------------------------------------------------------------------
    # Grey point per playlist, hoverable via data_id; green trend line.
    data_scatter %>%
      filter(!!as.symbol(main_variable)!=0) %>%
      filter(!!as.symbol(comparison_variable)!=0) %>%
      ggplot(aes(!!as.symbol(comparison_variable),!!as.symbol(main_variable))) +
      geom_vline(xintercept = (data %>% pull(comparison_variable) %>% median(na.rm = T))) +
      geom_hline(yintercept = (data %>% pull(main_variable) %>% median(na.rm = T))) +
      geom_point_interactive(color="grey",
                             alpha = 0.5,
                             aes(tooltip=label,
                                 data_id=playlist_id)) +
      geom_smooth(se=F,
                  color="#1DB954") +
      labs(title = glue("Median *{str_to_title(str_replace_all(main_variable,'_', ' '))}* Versus Median *{str_to_title(str_replace_all(comparison_variable, '_', ' '))}*"),
           subtitle = "By playlist",
           x=glue("Median {str_to_title(str_replace_all(comparison_variable,'_', ' '))}"),
           y=glue("Median {str_to_title(str_replace_all(main_variable,'_', ' '))}")) +
      theme(plot.title.position = "plot",
            plot.title = element_markdown(size=rel(1.25)),
            plot.subtitle = element_text(color = "grey30"))

  # If the data is all the tracks: ------------------------------------------
  } else if (playlist_or_track=="track") {
    # Track-level data may or may not carry a playlist_name column.
    if (!is.null(data$playlist_name)) {
      #If it is more than one playlist and each track within those playlists plotted as color coded points
      # highlight every song in a a plylist
      if (length(unique(data$playlist_name))>1) {
        # Set labels --------------------------------------------------------------
        #If variable=="minutes", use the custom made "duration_label", otherwise use a generic label
        # Tooltip includes the playlist name since several playlists are shown.
        data_scatter <- data %>%
          rowwise() %>%
          mutate(x_label=case_when(comparison_variable=="minutes" ~ duration_label,
                                   comparison_variable=="Date_Song_Saved" ~ Song_Saved_Label,
                                   comparison_variable=="Track_Release_Date" ~ Track_Release_Label,
                                   TRUE ~ as.character(!!as.symbol(comparison_variable))),
                 y_label=case_when(main_variable=="minutes"~duration_label,
                                   main_variable=="Date_Song_Saved" ~ Song_Saved_Label,
                                   main_variable=="Track_Release_Date" ~ Track_Release_Label,
                                   TRUE ~ as.character(!!as.symbol(main_variable))),
                 label=glue("Track: {str_to_title(track_name)} \nPlaylist: {playlist_name} \n{str_to_title(str_replace_all(main_variable, '_', ' '))}: {y_label} \n{str_to_title(str_replace_all(comparison_variable, '_', ' '))}: {x_label} \n Album: {track_album_name} \n Artist: {artist_name}"))

        # PLot --------------------------------------------------------------------
        # Points are colour-coded by playlist; no trend line in this mode.
        data_scatter %>%
          filter(!!as.symbol(main_variable)!=0) %>%
          filter(!!as.symbol(comparison_variable)!=0) %>%
          ggplot(aes(!!as.symbol(comparison_variable),!!as.symbol(main_variable))) +
          geom_vline(xintercept = (data %>% pull(comparison_variable) %>% median(na.rm = T))) +
          geom_hline(yintercept = (data %>% pull(main_variable) %>% median(na.rm = T))) +
          geom_point_interactive(alpha = 0.5,
                                 show.legend=T,
                                 aes(tooltip=label,
                                     data_id=playlist_id,
                                     color=playlist_name)) +
          # geom_smooth(se=F,
          #             aes(color=playlist_name),
          #             show.legend = F) +
          labs(title = glue("*{str_to_title(str_replace_all(main_variable,'_', ' '))}* versus *{str_to_title(str_replace_all(comparison_variable, '_', ' '))}*"),
               x=str_to_title(str_replace_all(comparison_variable,"_", " ")),
               y=str_to_title(str_replace_all(main_variable,"_", " ")),
               color=NULL) +
          theme(plot.title.position = "plot",
                plot.title = element_markdown(size=rel(1.5)))
      } else {
        # Set labels --------------------------------------------------------------
        #If variable=="minutes", use the custom made "duration_label", otherwise use a generic label
        # Single playlist: tooltip omits the playlist name.
        data_scatter <- data %>%
          rowwise() %>%
          mutate(x_label=case_when(comparison_variable=="minutes" ~ duration_label,
                                   comparison_variable=="Date_Song_Saved" ~ Song_Saved_Label,
                                   comparison_variable=="Track_Release_Date" ~ Track_Release_Label,
                                   TRUE ~ as.character(!!as.symbol(comparison_variable))),
                 y_label=case_when(main_variable=="minutes"~duration_label,
                                   main_variable=="Date_Song_Saved" ~ Song_Saved_Label,
                                   main_variable=="Track_Release_Date" ~ Track_Release_Label,
                                   TRUE ~ as.character(!!as.symbol(main_variable))),
                 label=glue("Track: {str_to_title(track_name)} \n{str_to_title(str_replace_all(main_variable, '_', ' '))}: {y_label} \n{str_to_title(str_replace_all(comparison_variable, '_', ' '))}: {x_label} \n Album: {track_album_name} \n Artist: {artist_name}"))

        # PLot --------------------------------------------------------------------
        # Grey point per track plus green trend line.
        data_scatter %>%
          filter(!!as.symbol(main_variable)!=0) %>%
          filter(!!as.symbol(comparison_variable)!=0) %>%
          ggplot(aes(!!as.symbol(comparison_variable),!!as.symbol(main_variable))) +
          geom_vline(xintercept = (data %>% pull(comparison_variable) %>% median(na.rm = T))) +
          geom_hline(yintercept = (data %>% pull(main_variable) %>% median(na.rm = T))) +
          geom_point_interactive(color="grey",
                                 alpha = 0.5,
                                 aes(tooltip=label,
                                     data_id=track_id)) +
          geom_smooth(se=F,
                      color="#1DB954") +
          labs(title = glue("*{str_to_title(str_replace_all(main_variable,'_', ' '))}* versus *{str_to_title(str_replace_all(comparison_variable, '_', ' '))}*"),
               x=str_to_title(str_replace_all(comparison_variable,"_", " ")),
               y=str_to_title(str_replace_all(main_variable,"_", " "))) +
          theme(plot.title.position = "plot",
                plot.title = element_markdown(size=rel(1.5)))
      }
    } else {
      # Set labels --------------------------------------------------------------
      #If variable=="minutes", use the custom made "duration_label", otherwise use a generic label
      # No playlist column at all: same styling as the single-playlist case
      # (grey points keyed by track_id, green trend line).
      data_scatter <- data %>%
        rowwise() %>%
        mutate(x_label=case_when(comparison_variable=="minutes" ~ duration_label,
                                 comparison_variable=="Date_Song_Saved" ~ Song_Saved_Label,
                                 comparison_variable=="Track_Release_Date" ~ Track_Release_Label,
                                 TRUE ~ as.character(!!as.symbol(comparison_variable))),
               y_label=case_when(main_variable=="minutes"~duration_label,
                                 main_variable=="Date_Song_Saved" ~ Song_Saved_Label,
                                 main_variable=="Track_Release_Date" ~ Track_Release_Label,
                                 TRUE ~ as.character(!!as.symbol(main_variable))),
               label=glue("Track: {str_to_title(track_name)} \n{str_to_title(str_replace_all(main_variable, '_', ' '))}: {y_label} \n{str_to_title(str_replace_all(comparison_variable, '_', ' '))}: {x_label} \n Album: {track_album_name} \n Artist: {artist_name}"))

      # PLot --------------------------------------------------------------------
      data_scatter %>%
        filter(!!as.symbol(main_variable)!=0) %>%
        filter(!!as.symbol(comparison_variable)!=0) %>%
        ggplot(aes(!!as.symbol(comparison_variable),!!as.symbol(main_variable))) +
        geom_vline(xintercept = (data %>% pull(comparison_variable) %>% median(na.rm = T))) +
        geom_hline(yintercept = (data %>% pull(main_variable) %>% median(na.rm = T))) +
        geom_point_interactive(color="grey",
                               alpha = 0.5,
                               aes(tooltip=label,
                                   data_id=track_id)) +
        geom_smooth(se=F,
                    color="#1DB954") +
        labs(title = glue("*{str_to_title(str_replace_all(main_variable,'_', ' '))}* versus *{str_to_title(str_replace_all(comparison_variable, '_', ' '))}*"),
             x=str_to_title(str_replace_all(comparison_variable,"_", " ")),
             y=str_to_title(str_replace_all(main_variable,"_", " "))) +
        theme(plot.title.position = "plot",
              plot.title = element_markdown(size=rel(1.5)))
    }
  }
}
# scatterplot_function(main_variable = "energy",
# comparison_variable = "valence",
# playlist_or_track = "track",
# data=playlist_tracks)
|
1492805c372c63dba9c074f5a6f30267cdd017d4
|
07c6be5bb6a2cfa8f5736c74d62d3e32f2231207
|
/1. Basic syntax R/string.R
|
c5a90158ba9e7fbf5862be6bd8f66f527c628482
|
[] |
no_license
|
samurainote/R-Basics
|
5e34486f4ba5676517dd8a69e8b26d2101fa8d13
|
3f2ab15e2a1081efa030f6fd11eaef4693a8fb30
|
refs/heads/master
| 2020-04-21T20:38:02.596505
| 2019-02-09T09:52:14
| 2019-02-09T09:52:14
| 169,852,099
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 540
|
r
|
string.R
|
# String-manipulation practice script.
# Fixes over the original: the bare zero-argument calls (nchar(), tolower(),
# toupper(), grepl(), sub(), gsub()) errored at runtime and now get example
# arguments; paste(b, split = c) passed a non-existent `split` argument;
# sapply() was applied to an undefined `mname`; `=` assignments replaced
# with `<-`.

#text data
a <- "Batman"
b <- "Bat-man"
# NOTE(review): `c` shadows base::c(); calls to c(...) still resolve to the
# function, but the variable deserves a rename eventually.
c <- "Bat/man"

substr(a, start = 2, stop = 5)   # "atma"
nchar(a)                         # number of characters
tolower(a)
toupper(a)
strsplit(c, split = "/")         # split on "/"
paste(b, c)                      # concatenate the two strings
grep("-", b)                     # index of matching elements
grepl("-", b)                    # logical match
sub("-", " ", b)                 # replace first occurrence
gsub("-", " ", b)                # replace all occurrences

datay <- mtcars
names(datay)
head(datay)
mfc <- rownames(mtcars)
head(mfc)
# Split "Mazda RX4"-style names on the space and keep the manufacturer
# (first word). The original applied sapply() to an undefined `mname`.
mname <- strsplit(mfc, " ")
sapply(mname, "[", 1)

# test: normalise revenue strings such as "M 3.2" (millions) and "B 2.6"
# (billions) onto a single numeric scale
reve <- c("M 3.2", "B 2.6", "M 6.7", "B 2.4")
class(reve)
data <- data.frame(reve)
data
index <- which(grepl("M ", data$reve))
data$reve <- gsub("M ", "", data$reve)
data$reve <- gsub("B ", "", data$reve)
data$reve <- as.numeric(data$reve)
data$reve[index] <- data$reve[index] / 1000
data
|
739714feafa0b0c22b278ba0d0da815a0d7a8379
|
dbfe5ce272e204a8e1663ced35c9d48ef4870496
|
/man/str_to_han.Rd
|
a0a364391cb52e9737d1a38a559cb31130771a38
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hmito/hmRLib
|
fac91a4e2ddfcd899283ec0b63c87c31965fb17f
|
f2cfd54ea491ee79d64f7dd976a94086092b8ef5
|
refs/heads/master
| 2023-08-31T07:21:31.825394
| 2023-08-28T10:02:07
| 2023-08-28T10:02:07
| 41,907,654
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 357
|
rd
|
str_to_han.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/character.R
\name{str_to_han}
\alias{str_to_han}
\title{Replace zenkaku alphabet, number and symbols by hankaku}
\usage{
str_to_han(x)
}
\arguments{
\item{x}{target character}
}
\value{
replaced character
}
\description{
Replace zenkaku alphabet, number and symbols by hankaku
}
|
58ab8e46d341f2f10189936fb6e84306ad66ed3c
|
eedeaa3b876cf388f027f727a900346da1b52d34
|
/man/col_hasHistogramValue.Rd
|
1d9d8a0ec31fdbc51370b1bbda9a90612030ab0f
|
[
"MIT"
] |
permissive
|
EvgenyPetrovsky/deeque
|
e59b32a2fc0ea89f235976e337e564be566b4f60
|
8c0efdb102169abf4004e104ba4977ef735bbdc3
|
refs/heads/master
| 2020-08-26T15:53:38.697404
| 2020-01-21T18:43:56
| 2020-01-21T18:43:56
| 217,062,624
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 566
|
rd
|
col_hasHistogramValue.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/validation-functions-column.R
\name{col_hasHistogramValue}
\alias{col_hasHistogramValue}
\title{Has histogram value}
\usage{
col_hasHistogramValue(data, column, udf, ...)
}
\arguments{
\item{data}{dataframe to check}
\item{column}{column name to check}
\item{udf}{user-defined function to apply to histogram value found}
\item{...}{other parameters that are passed to \code{hist} function}
}
\description{
Function to build histogram object and examine it with user-defined function
}
|
09472ced4e04775f99a46a281fc7eee63eb48865
|
d5a2871028d84a02de0271633c7b0997ab8541fb
|
/man/get_social.Rd
|
372b8e8d31d305bbb15cb072a0baef6e802cfc5f
|
[
"MIT"
] |
permissive
|
ThomasPepperz/cryptor
|
5954e411b5cdbab34be75b1021df20f50495de85
|
3331f57e761825948dbaf80b9ec13d4b04fdf147
|
refs/heads/master
| 2020-07-31T17:06:20.689582
| 2019-09-24T19:55:48
| 2019-09-24T19:55:48
| 210,685,889
| 0
| 0
| null | 2019-09-24T19:52:26
| 2019-09-24T19:52:26
| null |
UTF-8
|
R
| false
| true
| 1,427
|
rd
|
get_social.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/public_api.R
\name{get_social}
\alias{get_social}
\title{Gets social data for a given coin id.}
\usage{
get_social(id)
}
\arguments{
\item{id}{integer. Coin id. Can be retrieved using \code{\link{get_coins}}.}
}
\value{
A list with the following structure:
\itemize{
\item \strong{similar_items} A tibble outlining coins similar to the coin provided.
\item \strong{cryptopian_followers} A tibble containing details about followers of
the provided coin on cryptocompare.
\item \strong{page_views} A tibble outlining number of page views for the coin on
different pages.
\item \strong{crypto_compare_summary} Aggregated cryptocompare data for the given coin.
\item \strong{social_media} A list containing the following three elements:
\itemize{
\item \strong{Twitter} A tibble containing Twitter data.
\item \strong{Reddit} A tibble containing Reddit data.
\item \strong{Facebook} A tibble containing Facebook data.
}
\item \strong{repo_summary} A tibble with details about various code repositories
associated with the given coin.
}
}
\description{
\code{get_social} returns a list containing several tibbles that provide
social details for the specified coin.
}
\examples{
\dontrun{
# Get bitcoin social data
get_social(1182)
}
}
\references{
\url{https://www.cryptocompare.com/api#-api-data-socialstats-}
}
|
6eae6262e54548c6e0187d492ed4e647e02623bb
|
79b935ef556d5b9748b69690275d929503a90cf6
|
/man/Penttinen.Rd
|
a41ad34e6b799dc0cfdc0ff9f5f2fc449f2824c6
|
[] |
no_license
|
spatstat/spatstat.core
|
d0b94ed4f86a10fb0c9893b2d6d497183ece5708
|
6c80ceb9572d03f9046bc95c02d0ad53b6ff7f70
|
refs/heads/master
| 2022-06-26T21:58:46.194519
| 2022-05-24T05:37:16
| 2022-05-24T05:37:16
| 77,811,657
| 6
| 10
| null | 2022-03-09T02:53:21
| 2017-01-02T04:54:22
|
R
|
UTF-8
|
R
| false
| false
| 2,688
|
rd
|
Penttinen.Rd
|
\name{Penttinen}
\alias{Penttinen}
\title{Penttinen Interaction}
\description{
Creates an instance of the Penttinen pairwise interaction
point process model, which can then be fitted to point pattern data.
}
\usage{
Penttinen(r)
}
\arguments{
\item{r}{circle radius}
}
\value{
An object of class \code{"interact"}
describing the interpoint interaction
structure of a point process.
}
\details{
Penttinen (1984, Example 2.1, page 18), citing Cormack (1979),
described the pairwise interaction point process with interaction factor
\deqn{
h(d) = e^{\theta A(d)} = \gamma^{A(d)}
}{
h(d) = exp(theta * A(d)) = gamma^(A(d))
}
between each pair of points separated by a distance $d$.
Here \eqn{A(d)} is the area of intersection between two discs
of radius \eqn{r} separated by a distance \eqn{d}, normalised so that
\eqn{A(0) = 1}.
The scale of interaction is controlled by the disc radius \eqn{r}:
two points interact if they are closer than \eqn{2 r}{2 * r} apart.
The strength of interaction is controlled by the
canonical parameter \eqn{\theta}{theta}, which
must be less than or equal to zero, or equivalently by the
parameter \eqn{\gamma = e^\theta}{gamma = exp(theta)},
which must lie between 0 and 1.
The potential is inhibitory, i.e.\ this model is only appropriate for
regular point patterns.
For \eqn{\gamma=0}{gamma=0} the model is
a hard core process with hard core diameter \eqn{2 r}{2 * r}.
For \eqn{\gamma=1}{gamma=1} the model is a Poisson process.
The irregular parameter
\eqn{r} must be given in the call to
\code{Penttinen}, while the
regular parameter \eqn{\theta}{theta} will be estimated.
This model can be considered as a pairwise approximation
to the area-interaction model \code{\link{AreaInter}}.
}
\seealso{
\code{\link{ppm}},
\code{\link{ppm.object}},
\code{\link{Pairwise}},
\code{\link{AreaInter}}.
}
\examples{
fit <- ppm(cells ~ 1, Penttinen(0.07))
fit
reach(fit) # interaction range is circle DIAMETER
}
\references{
Cormack, R.M. (1979)
Spatial aspects of competition between individuals.
Pages 151--212 in \emph{Spatial and Temporal Analysis in Ecology},
eds. R.M. Cormack and J.K. Ord, International Co-operative
Publishing House, Fairland, MD, USA.
Penttinen, A. (1984)
\emph{Modelling Interaction in Spatial Point Patterns:
Parameter Estimation by the Maximum Likelihood Method.}
\ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}
Studies in Computer Science, Economics and Statistics \bold{7},
University of \ifelse{latex}{\out{Jyv\"askyl\"a}}{Jyvaskyla}, Finland.
}
\author{
\spatstatAuthors
}
\keyword{spatial}
\keyword{models}
|
4499f64b7d386d72be87e170c0556433d320db4b
|
6e64a7abaec7bb14df5cd61b3700bfe070929535
|
/Project/Dashboard_Trail_Connection.R
|
c79ba7f8581f01102125a0092ff6228d24876221
|
[
"MIT"
] |
permissive
|
Ellie190/Database_Systems_Tutor
|
2748b9dfb88807b2793d780c94fa47ab498d3c28
|
66194a7b9c1cb1b4c1e0369dad02436a90f829d3
|
refs/heads/master
| 2023-01-08T00:14:32.964396
| 2020-11-05T16:43:55
| 2020-11-05T16:43:55
| 297,327,190
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 266
|
r
|
Dashboard_Trail_Connection.R
|
## Database Server connection
# Opens a connection to the local MySQL "employees" database used by the
# dashboard.
# SECURITY NOTE(review): the user and password are hard-coded in a committed
# file; they should be read from environment variables or a config file kept
# out of version control (e.g. Sys.getenv("DB_PASSWORD")), and the committed
# password rotated.
library(RMySQL)
con <- dbConnect(RMySQL::MySQL(),
                 dbname = "employees",
                 host = "localhost",
                 port = 3306,
                 user = "CTO",
                 password = "PiE2mo@RdressZ99")
|
a6313e572689097d147c986e45b9f05a7ab44d2b
|
a0f1ae3847220088493821bf7eaf71aeb95e6453
|
/man/rankingPlot.Rd
|
60e78e3ecdf416189ae93960cd5e3b96a4d6533a
|
[
"BSD-3-Clause"
] |
permissive
|
kuhsibiris/MIDEPlots
|
c93e1b3e9bd1c0673f77fdd60177843d14eb4cb4
|
e3fa587fe064c017958b6ba501c6f644686d7ef4
|
refs/heads/master
| 2021-01-19T22:15:25.293591
| 2017-04-20T16:02:33
| 2017-04-20T16:02:33
| 88,784,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,043
|
rd
|
rankingPlot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/horizontal plot.R
\name{rankingPlot}
\alias{rankingPlot}
\title{Ranking Plot}
\usage{
rankingPlot(data, name, num, limits, colores, n, paleta)
}
\arguments{
\item{data}{a dataframe with the info}
\item{name}{the name of a column to be used as the names of the elements}
\item{num}{the name of the numeric variable to be compared}
\item{limits}{range of the plot should be in the format c(min,max)}
\item{colores}{name of a column giving information of the color of each point}
\item{n}{show the top n cases}
\item{paleta}{colors to be used in the points}
}
\value{
a ggplot graph
}
\description{
This function makes a plot that is intented to be used for
comparison of the scores in a single variable
}
\details{
This function takes a dataframe and two columns, one holding the names
and the other a numeric attribute. It orders the
data and makes a horizontal point plot that makes it easy to compare
the results between units.
}
\author{
Andrés Mejía
}
|
1883d8e0bd6316ab970d09e1a27b90ca1b675596
|
ea49e39c5ab2913fc372e3d0bc3292e4ae235d69
|
/R/utils.R
|
959a7b38f52f9f854eb3b0b3dbaf6d5dcb49f32a
|
[] |
no_license
|
zerononee/flexdashboard
|
5ba1af4fce665ede679e0b463406dbde7059d328
|
bb9bd147faa94022cfee1aa07cf7abc7f56922fa
|
refs/heads/master
| 2023-08-13T04:44:07.692026
| 2021-10-14T14:27:59
| 2021-10-14T14:27:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,307
|
r
|
utils.R
|
# Write a character vector to a temporary .html file and return its path;
# returns NULL for zero-length input.
as_tmpfile <- function(str) {
  if (length(str) == 0) {
    return(NULL)
  }
  path <- tempfile("rmarkdown-str", fileext = ".html")
  handle <- file(path, open = "w+", encoding = "native.enc")
  # Encode to UTF-8 ourselves and write raw bytes, bypassing any
  # re-encoding by the connection.
  writeLines(enc2utf8(str), con = handle, useBytes = TRUE)
  close(handle)
  path
}
# Like rmarkdown::relative_to(), but normalizes both paths first
# (forward slashes, no existence requirement).
normalized_relative_to <- function(dir, file) {
  norm_dir <- normalizePath(dir, winslash = "/", mustWork = FALSE)
  norm_file <- normalizePath(file, winslash = "/", mustWork = FALSE)
  rmarkdown::relative_to(norm_dir, norm_file)
}
# Render `input` in devel mode: forwards devel = TRUE through
# output_options and suppresses console output. Extra arguments are
# accepted (and currently unused) for call compatibility.
knit_devel <- function(input, ...) {
  devel_opts <- list(devel = TRUE)
  rmarkdown::render(input, output_options = devel_opts, quiet = TRUE)
}
# TRUE when the single value `x` is one of the Bootstrap contextual
# ("accent") color names.
is_accent_color <- function(x) {
  stopifnot(length(x) == 1)
  valid <- accent_colors()
  x %in% valid
}
# The Bootstrap contextual color names recognized by the dashboard.
accent_colors <- function() {
  c(
    "primary",
    "info",
    "success",
    "warning",
    "danger"
  )
}
"%||%" <- function(x, y) {
if (is.null(x)) y else x
}
# Remove NULL elements from a list, preserving names and order.
dropNulls <- function(x) {
  Filter(function(el) !is.null(el), x)
}
# Is `package` installed, optionally at or above `version`?
is_available <- function(package, version = NULL) {
  # system.file() returns "" when the package cannot be found
  found <- nzchar(system.file(package = package))
  if (!is.null(version)) {
    return(found && isTRUE(utils::packageVersion(package) >= version))
  }
  found
}
|
096a206fbb324f8995b1fb8d7f0f0129441704ef
|
92f1abd9a4a7383ddf41b089f5aea6b8caf43b03
|
/man/propr.Rd
|
d19774c8c38eba80d31e14cf7a9688f368dcfdef
|
[] |
no_license
|
samleenz/propr
|
29cdf5cd5b9da62416206b3d6605d392e1df695e
|
922b8a01bc009aa1f469138e9c72d79033ddb443
|
refs/heads/master
| 2021-08-23T13:35:29.566988
| 2017-11-29T08:06:01
| 2017-11-29T08:06:01
| 111,872,374
| 0
| 0
| null | 2017-11-24T03:34:21
| 2017-11-24T03:34:21
| null |
UTF-8
|
R
| false
| true
| 3,164
|
rd
|
propr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\docType{class}
\name{propr}
\alias{propr}
\alias{propr-class}
\alias{show,propr-method}
\alias{subset,propr-method}
\alias{[,propr,ANY,ANY,ANY-method}
\alias{[,propr-method}
\alias{plot,propr,missing-method}
\alias{simplify}
\alias{adjacent}
\title{The propr Package}
\usage{
\S4method{show}{propr}(object)
\S4method{subset}{propr}(x, subset, select)
\S4method{[}{propr,ANY,ANY,ANY}(x, i = "all", j, tiny = FALSE)
\S4method{plot}{propr,missing}(x, y, prompt = TRUE, plotly = FALSE)
simplify(object)
adjacent(object)
}
\arguments{
\item{object, x}{An object of class \code{propr}.}
\item{subset}{Subsets via \code{object@counts[subset, ]}.
Use this argument to rearrange subject order.}
\item{select}{Subsets via \code{object@counts[, select]}.
Use this argument to rearrange feature order.}
\item{i}{Operation used for the subset indexing. Select from
"==", "=", ">", ">=", "<", "<=", "!=", or "all".}
\item{j}{Reference used for the subset indexing. Provide a numeric
value to which to compare the proportionality measures in the
\code{@matrix} slot.}
\item{tiny}{A logical scalar. Toggles whether to pass the indexed
result through \code{\link{simplify}}.}
\item{y}{Missing. Ignore. Leftover from the generic
method definition.}
\item{prompt}{A logical scalar. Set to \code{FALSE} to disable
the courtesy prompt when working with big data.}
\item{plotly}{A logical scalar. Set to \code{TRUE} to produce
a dynamic plot using the \code{plotly} package.}
}
\description{
Welcome to the \code{propr} package!
To learn more about calculating proportionality, see
\code{\link{proportionality}}.
To learn more about visualizing proportionality, see
\code{\link{visualize}}.
To learn more about \code{ALDEx2} package integration, see
\code{\link{aldex2propr}}.
To learn more about differential proportionality, see
\code{\link{propd}}.
To learn more about compositional data analysis, and its relevance
to biological count data, see the bundled vignette.
}
\section{Slots}{
\describe{
\item{\code{counts}}{A matrix. Stores the original "count matrix" input.}
\item{\code{logratio}}{A matrix. Stores the log-ratio transformed "count matrix".}
\item{\code{matrix}}{A matrix. Stores the proportionality matrix calculated by
\code{phiRcpp} or \code{rhoRcpp}.}
\item{\code{pairs}}{A vector. Indexes the proportionality metrics of interest.}
}}
\section{Methods (by generic)}{
\code{show:} Method to show \code{propr} object.
\code{subset:} Method to subset \code{propr} object.
\code{[:} Method to subset \code{propr} object.
\code{plot:} Method to plot \code{propr} object.
}
\section{Functions}{
\code{simplify:}
This convenience function takes an indexed \code{propr} object
and subsets the object based on that index. Then, it populates the
\code{@pairs} slot of the new object with an updated version
of the original index. You can call \code{simplify} from within the
\code{[} method using the argument \code{tiny}.
\code{adjacent:}
This function uses pairs indexed in the \code{@pairs}
slot to build a symmetric adjacency matrix.
}
|
7a4d8373a777658eb11dd29b5ce733832fad2a54
|
66d51b94552394adc66c9b07bac89eb90eed5e03
|
/ic.R
|
10aed9968f8a27c46b9456b347bfd800ab9763b9
|
[] |
no_license
|
gomesfellipe/functions
|
4f83083c4b97ae5c638e2b68f773bb8e6f9f96b8
|
656b167ce08d67bdb572f063bda9e1814e6988e3
|
refs/heads/master
| 2021-05-05T21:39:05.277794
| 2021-01-15T03:21:53
| 2021-01-15T03:21:53
| 115,950,878
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 317
|
r
|
ic.R
|
# Source: http://www.leg.ufpr.br/Rpira/Rpira/node11.html
# Compute a t-based confidence interval for the mean of a numeric vector.
#
# x:    numeric sample (needs length >= 2 so the sample variance exists)
# conf: confidence level in (0, 1); default 0.95
#
# Returns a length-2 numeric vector c(lower, upper).
# Fix: the original function body was missing its closing brace, which made
# the file un-parseable.
ic.m <- function(x, conf = 0.95){
  n <- length(x)
  media <- mean(x)
  variancia <- var(x)
  # symmetric Student-t quantiles for the two tails
  quantis <- qt(c((1 - conf) / 2, 1 - (1 - conf) / 2), df = n - 1)
  ic <- media + quantis * sqrt(variancia / n)
  return(ic)
}
|
aedec1449df205222a0d092aaedde94ce5502c63
|
5aa23d590a93494d76b1267e7c15457fb76be061
|
/scripts/data_analysis/append_stats_dfrm.r
|
3369a870369e1b592ec98134ad47d233b500d069
|
[] |
no_license
|
palakpsheth/Trovagene
|
4d3625ede95e887c83591480bd1c9d46c25658f8
|
bcc19e325283a5b429f74df4467b5570d1e67e97
|
refs/heads/master
| 2021-01-22T20:26:26.877313
| 2017-04-05T04:29:41
| 2017-04-05T04:29:41
| 85,323,181
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 535
|
r
|
append_stats_dfrm.r
|
# Append one result row to `stats_dfrm` and return the extended data frame.
# The Tool value is carried over from the existing rows.
append_stats_dfrm <- function(stats_dfrm,runid,flag,assay,input,source,std_group,type,metric,comments,status=NA) {
  new_row <- data.frame(
    RunId = runid,
    # NOTE: the field 'Tool' needs to be unique!
    Tool = unique(stats_dfrm$Tool),
    FLAG = flag,
    Assay = assay,
    Input = input,
    Source = source,
    StandardGroup = std_group,
    Type = type,
    Metric = metric,
    Comments = comments,
    Status = status
  )
  rbind(stats_dfrm, new_row)
}
|
2e53f4b51b66067da9a27f25e7c56368f6afe82d
|
6f339891e5f97af487b2dbde35e8b79699d831ac
|
/man/Loc.Rd
|
ae359711dc87d620ee608644abd06c45297388e8
|
[] |
no_license
|
cran/gstar
|
74558eadaf182aadbf2b806630a0a24641815877
|
1b368845090cbcc2be906f30569409e3bf77b887
|
refs/heads/master
| 2020-12-22T00:05:46.420741
| 2019-06-28T14:10:06
| 2019-06-28T14:10:06
| 236,609,217
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 560
|
rd
|
Loc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Loc}
\alias{Loc}
\title{Coordinates of several regions in Indonesia}
\format{A data frame with 4 rows and 3 variables:
\describe{
\item{City}{Name of region/city}
\item{latitude}{The latitude coordinate of each location}
\item{longitude}{The longitude coordinate of each location}
}}
\usage{
data(Loc)
}
\description{
A dataset containing the coordinates of several regions in Indonesia, i.e. Semarang, Surakarta, Tegal and Purwokerto.
}
\keyword{datasets}
|
1a76118dbb7bdc72176eadb4b5c6286cdaa9507e
|
fced4b5a08001c0a186c49a1bcc60031349521a1
|
/vignettes/figure1.R
|
a6340f699b7b1a77ee6857d9dc6a6370bdf38763
|
[] |
no_license
|
adimajo/scoringTools
|
470577a9adafced24fc364264bb298c31d49a49e
|
2bc2c29b0ecebecaf1b5a69f4a515d0e833111a7
|
refs/heads/master
| 2023-02-13T03:37:41.735293
| 2021-01-10T14:42:41
| 2021-01-10T14:42:41
| 84,586,749
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,586
|
r
|
figure1.R
|
## ----libraries----------------------------------------------------------------
library(MASS)
library(scoringTools)
# library(tikzDevice)

## ----mean_vectors-------------------------------------------------------------
# Class-conditional Gaussians: class 0 centred at 0, class 1 at 1,
# both with unit variance (1-dimensional here).
mu0 <- array(0, c(1, 1))
mu1 <- array(1, c(1, 1))
sigma0 <- diag(1)
sigma1 <- diag(1)

## ----data_generation----------------------------------------------------------
# Draw m labels ~ Bernoulli(0.5), then sample the feature from the
# class-conditional Gaussian; the result is stored as (y, x) columns.
m <- 10000
set.seed(21)
y <- rbinom(m, 1, 0.5)
data_test_pred <- array(0, c(m, 2))
for (n in 1:1) {
  x <- array(0, c(m, n))
  x[y == 0, ] <- mvrnorm(n = sum(y == 0), mu0[1:n], sigma0[1:n, 1:n])
  x[y == 1, ] <- mvrnorm(n = sum(y == 1), mu1[1:n], sigma1[1:n, 1:n])
  data_test_pred[, 1:(n + 1)] <- as.matrix(cbind.data.frame(y = y, x = x))
  rm(x)
}
rm(y)

## ----oracle-------------------------------------------------------------------
# Logistic regression on the full sample: the "oracle" model.
oracle <- glm(V1 ~ V2, data = as.data.frame(data_test_pred), family = binomial(link = "logit"))

## ----reject-------------------------------------------------------------------
# Applicants with predicted probability below 0.3 are "rejected"
# (not financed), simulating the selection bias studied in the vignette.
reject <- predict(oracle, as.data.frame(data_test_pred), type = "response") < 0.3

## ----reclassification---------------------------------------------------------
# Fit the reclassification (CEM) reject-inference model on financed vs
# rejected applicants.
reclass_model <- reclassification(
  as.matrix(data_test_pred[!reject, 2]),
  as.matrix(data_test_pred[reject, 2]),
  data_test_pred[!reject, 1]
)

## ----plot---------------------------------------------------------------------
# Compare p(1 | x) from the oracle, the financed-only model, and the CEM
# inferred model (tikz output lines kept commented for the vignette).
# path_to_tex <- file.path(dirname(rstudioapi::getActiveDocumentContext()$path), "../TEX_CODE/biais_CEM.tex")
# tikz(file = path_to_tex, width = 6, height = 3, engine = "pdftex", pointsize = 14)
plot(x = data_test_pred[, 2], y = predict(reclass_model@financed_model, data.frame(x = data_test_pred[, 2]), type = "response"), type = "n", ylab = "$p(1 \\mid x)$", xlab = "Feature $x$", xlim = c(-2, 4))
lines(x = data_test_pred[order(data_test_pred[, 2]), 2], y = predict(oracle, data.frame(V2 = data_test_pred[order(data_test_pred[, 2]), 2]), type = "response"), col = "blue", lwd = 5, lty = 2)
lines(x = data_test_pred[order(data_test_pred[, 2]), 2], y = predict(reclass_model@financed_model, data.frame(x = data_test_pred[order(data_test_pred[, 2]), 2]), type = "response"), col = "orange", lwd = 5, lty = 3)
lines(x = data_test_pred[order(data_test_pred[, 2]), 2], y = predict(reclass_model@infered_model, data.frame(x = data_test_pred[order(data_test_pred[, 2]), 2]), type = "response"), col = "red", lwd = 3.5)
legend(-2, 1,
       pch = c(-1, -1, -1), lty = c(2, 3, 1), lwd = (2.5),
       col = c("blue", "orange", "red"),
       legend = c("Oracle", "Financed", "CEM"),
       cex = 1
)
# dev.off()
|
8480939a0d9578379ffa8ac4ad25a22d2a89957b
|
b85da7484aa060f8a58e2370add65150464bf3bb
|
/ui.R
|
22963a87909e246925df8707b10fe74008785241
|
[
"Apache-2.0"
] |
permissive
|
jbpost2/DeltaMethod
|
53a18877ee1ff5f6146e331811ae832ec5132c54
|
81188360817f6d0f7a11748e4d4ccc98999ddbd9
|
refs/heads/master
| 2021-01-17T22:57:43.473447
| 2017-03-24T04:18:51
| 2017-03-24T04:18:51
| 84,207,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,505
|
r
|
ui.R
|
###########################################################################
## R Shiny App to investigate the Delta method
## Justin Post - 2017
###########################################################################
# Load packages
library(shiny)
library(shinydashboard)
# UI definition: a two-tab shinydashboard page ("About" and "Application").
# The "About" tab documents the math (MathJax/LaTeX); the "Application" tab
# holds the interactive controls, plot and approximation table.
dashboardPage(skin="red",
  dashboardHeader(title="Comparison of First and Second Order Delta Method",titleWidth=750),
  # define sidebar items: one menu entry per tab
  dashboardSidebar(sidebarMenu(
    menuItem("About", tabName = "about", icon = icon("archive")),
    menuItem("Application", tabName = "app", icon = icon("laptop"))
  )),
  # define the body of the app
  dashboardBody(
    tabItems(
      # First tab content: description of the app and usage instructions
      tabItem(tabName = "about",
        fluidRow(
          # add in latex functionality if needed
          withMathJax(),
          # two columns for each of the two items
          column(6,
            # Description of App
            h1("What does this app do?"),
            # box to contain description
            box(background="red",width=12,
              h4("This applet allows for the visualization of the first and second order delta method approximations of a function."),
              h4("For this applet the Random Variable, X, is assumed to follow a Gamma distribution. The function to be approximated is Y=1/X."),
              h4("The exact distribution of this function can be derived to follow the Inverse Gamma distribution."),
              h4("The true mean of this distribution is compared to the first order and second order Delta method approximations and a measure of the error in approximation is given."),
              h4("The mean of the random variable X is given by $$E(X)=\\alpha/\\lambda$$ and the variance is given by $$Var(X)=\\alpha/\\lambda^2$$"),
              h4("The exact mean of \\(Y=1/X\\) is given by $$E(Y)=\\lambda/(\\alpha-1) \\mbox{ for }\\alpha>1 \\mbox{ (undefined otherwise)}$$"),
              h4("The first order approximation is given by $$E(Y)\\approx g(\\mu_X)=1/\\mu_X = \\lambda/\\alpha$$"),
              h4("The second order approximation is given by $$E(Y)\\approx g(\\mu_X)+\\frac{1}{2}g''(\\mu_X)\\sigma^2_X = \\lambda(\\alpha+1)/\\alpha^2$$")
            )
          ),
          column(6,
            # How to use the app
            h1("How to use the app?"),
            # box to contain description
            box(background="red",width=12,
              h4("The controls for the app are located to the left, the visualization appears in the middle, and the approximation information is given on the right."),
              h4("The parameters of the Gamma random variable can be adjusted using the boxes on the left."),
              h4("The graph in the middle displays this distribution, the function \\(y=\\frac{1}{x}\\), and the first and second order Taylor polynomials about the mean of the Gamma distribution."),
              h4("The box on the right displays the true mean of the distribution of \\(Y=1/X\\) and the approximations.")
            )
          )
        )
      ),
      # actual app layout: inputs (left) | plot (middle) | table (right)
      tabItem(tabName = "app",
        fluidRow(
          column(3,
            # Gamma shape/rate inputs consumed by the server as input$alpha / input$lambda
            box(width=12,title="Parameters of the Gamma distribution",
              background="red",solidHeader=TRUE,
              h5("(Set to 1 if blank.)"),
              numericInput("alpha",label=h5("Alpha Value (> 0, 'Shape')"),value=1,min=0,step=0.25),
              numericInput("lambda",label=h5("Lambda Value (> 0, 'Rate')"),value=1,min=0,step=0.25)
            )
          ),
          column(width=6,
            fluidRow(
              # main visualization rendered by output$plots on the server side
              box(width=12,
                plotOutput("plots"),
                br(),
                h4("The plot above displays the function 1/X, the first and second order Taylor approximations to 1/X and overlays the assumed distribution of X (not to scale).")
              )
            )
          ),
          column(width=3,
            fluidRow(
              # comparison table rendered by output$vals on the server side
              box(width=12,
                tableOutput("vals"),
                br(),
                h4("The first column provides the Method of approximation (or truth)."),
                h4("The second column provides the approximation to the mean."),
                h4("The third column provides the percent difference (Estimate-Truth)/Truth*100%.")
              )
            )
          )
        )
      )
    )
  )
)
|
5ce5a2d9ca6ff6e3be3070f620647779a52387c3
|
3f40b243e5cdd0936e6371389a61ed8f08f75a2b
|
/Script_TFG_V2.R
|
c0357b217cf6a1b9bed478318ec9b8e89e532eca
|
[] |
no_license
|
sote131295/Script_TFG
|
4f86842b4732d4190b4bec8ab142e0e1f53d9025
|
5658f0b35a0f8ad5ef077526d80afc30cecb921b
|
refs/heads/master
| 2020-05-06T13:22:00.310277
| 2019-04-08T11:22:59
| 2019-04-08T11:22:59
| 180,131,814
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 14,509
|
r
|
Script_TFG_V2.R
|
# --- Setup: workspace, packages and data import ---------------------------
# NOTE(review): rm(list=ls()) and a hard-coded setwd() are fragile in shared
# scripts; kept as-is because the rest of the script relies on this layout.
rm(list=ls())
cat("\014") # clears the RStudio console
# change the working directory
setwd("C:/Users/MSI/Documents/TFG")
source('C:/Users/MSI/Documents/TFG/ur.test.R')
library(urca)
library(zoo)
library(tseries)
library(readxl)
library(normtest)
library(DistributionUtils)
library(forecast)
library(ggplot2)
library(ggfortify)
library(scales)
library(strucchange)
library(faraway)
library(lmtest)
# import the series and name them
series = read_excel("Selecció20.xlsx", sheet = "Todas las Series")
lleg = series$TOT # total tourist arrivals
# drop missing values before building the monthly ts object
lleg = na.omit(lleg)
lleg = ts(lleg, start = c(1999,1), end = c(2018,11), frequency = 12)
str(lleg)
d.lleg = diff(log(lleg)) # log-differences (approximate monthly growth rate)
# plot of the number of tourist arrivals
autoplot(lleg, ts.colour="black")+
scale_y_continuous(breaks=seq(0,3500000,500000))+
scale_x_date(date_breaks = "2 years", labels = date_format("%Y"))+
labs(x="tiempo",y="nº de llegadas")+
theme(axis.title.x = element_text(size=rel(1.5)))+
theme(axis.title.y = element_text(size=rel(1.5)))+
theme(panel.background = element_rect(fill ="white"))+
theme_test()
############################################################
# Structural change tests (Bai & Perron breakpoints, Chow)
############################################################
bp_lleg = breakpoints(lleg~1)
summary(bp_lleg) # reports the estimated break dates and their confidence intervals
ic_lleg = confint(bp_lleg) # store the confidence intervals of the breakpoints
# Plot of the structural breaks selected by the Bai & Perron test.
# BUG FIX: the original used type = "1" (digit one), which is not a valid
# plot type and errors; type = "l" (lines) was intended.
plot(lleg, type = "l", xlim=c(1999,2019))
axis(1, at=2000:2018, labels = 2000:2018);axis(2);box()
lines(lleg, col=9)
lines(bp_lleg) # vertical lines at the estimated break dates
lines(ic_lleg) # their confidence intervals
title(xlab = "años", ylab = "nº de llegadas")
# Chow test for a known break at observation 196, against a linear trend
nchow = length(lleg)
Trend = c(1:nchow)
sctest(lleg~Trend, type="Chow", point=196)
#####################################################################
# Descriptive statistics of the series (levels, first difference,
# and log-difference), plus Jarque-Bera normality tests
#####################################################################
summary(lleg)
sd(lleg)
skewness(lleg)
kurtosis(lleg)# 4th-moment check: values above 3 indicate heavy tails
# density curve of the number-of-arrivals series
ggplot(series, aes(x=TOT))+
geom_density(alpha =.2, fill="blue")+
labs(x="llegadas de turistas",y="distribución")+
theme(axis.title.x = element_text(size=rel(3)),axis.text.x = element_text(size = rel(2)))+
theme(axis.title.y = element_text(size=rel(3)),axis.text.y = element_text(size = rel(2)))+
theme(panel.background = element_rect(fill ="white"))+
scale_x_continuous(breaks=seq(0,3500000,500000))
# p-value close to 0: reject the null hypothesis that the series is normal
jb.norm.test(lleg, nrepl = 3000) #
# same analysis for the first difference of arrivals
D.lleg=series$DIF
D.lleg = na.omit(D.lleg)
D.lleg = ts(D.lleg, start = c(1999,2), end = c(2018,11), frequency = 12)
# density curve of the differenced series
ggplot(series, aes(x=DIF))+
geom_density(alpha =.3, fill="red")+
labs(x=" variación de llegadas de turistas",y="distribución")+
theme(axis.title.x = element_text(size=rel(3)))+
theme(axis.title.y = element_text(size=rel(3)))+
theme(panel.background = element_rect(fill ="white"))+
scale_x_continuous(breaks=seq(-1500000, 1500000,500000))
summary(D.lleg)
sd(D.lleg)
skewness(D.lleg)
kurtosis(D.lleg)
jb.norm.test(D.lleg, nrepl = 3000)
# same analysis for the log-difference of arrivals
sd(diff(log(lleg)))
mean(diff(log(lleg)))
skewness(diff(log(lleg)))
kurtosis(diff(log(lleg)))
summary(d.lleg)
# density curve of the log-differenced series
ggplot(series, aes(x=DL))+
geom_density(alpha =.3, fill="red")+
labs(x="diferencia logarítmica de llegadas",y="densidad")+
theme(axis.title.x = element_text(size=rel(3)), axis.text.x = element_text(size = rel(2)))+
theme(axis.title.y = element_text(size=rel(3)), axis.text.y =element_text(size = rel(2)))+
theme(panel.background = element_rect(fill ="white"))+
scale_x_continuous(breaks=seq(-2,1, 0.5))+
xlim(-2,2)
#####################################################################
# Unit root tests (ADF and Zivot-Andrews) and seasonal dummies
#####################################################################
# ADF: null hypothesis = unit root (series is non-stationary)
lleg.df = ur.df(y = log(lleg), type = "trend", lags = 24, selectlags = "BIC")
summary(lleg.df)
# under the null there are unit roots, i.e. the series is not stationary
adf.test(lleg)
adf.test(d.lleg)
d.lleg.df = ur.df(y = diff(log(lleg)), type = "trend", lags = 24, selectlags = "BIC")
# Zivot-Andrews test (allows one endogenous structural break)
# choose the optimal lag by minimising BIC over lags 1..12
bic.test<- matrix(NA, 12,1)
for (i in 1:12) {
za<-ur.za(y=diff(log(lleg)), model = 'both', lag = i)
bic.test[i]=BIC(eval(attributes(za)$testreg))
}
which(bic.test==min(bic.test))
lleg.za<-ur.za(y=log(lleg), model = 'both', lag = 12)
summary(lleg.za)
# do not reject H0: unit root present, |-3.67| < |-5.08| at the 5% level
d.lleg.za<-ur.za(y=diff(log(lleg)), model = 'both', lag = 11)
summary(d.lleg.za)
# reject H0: no unit root, |-8.63| > |-5.08| at the 5% level
#####################################################################
# Create seasonal (monthly) dummy variables
#####################################################################
meses = seasonaldummy(lleg)
meses
d.lleg = diff(log(lleg))
meses =seasonaldummy(d.lleg)
summary(lm(d.lleg ~meses))
M1=lm(d.lleg~meses) # baseline model kept for later comparison (anova/AIC/BIC)
#####################################################################
# Classical decomposition and Holt-Winters exponential smoothing
#####################################################################
# decompose() splits the series into trend, seasonal and irregular components
mod = decompose(lleg)
mod
mod$figure
plot(mod)
# same decomposition, multiplicative form
mod2 = decompose(lleg, "multiplicative")
mod2
mod2$figure
plot(mod2)
# Plot the original series against the seasonally adjusted series.
# BUG FIX: the original used type = "1" (digit one), an invalid plot type
# that errors at runtime; type = "l" (lines) was intended.
plot(lleg, type="l", xlim=c(1999,2019))
axis(1, at=2000:2018, labels=2000:2018);axis(2);box()
lines(lleg, col=9)
lines(seasadj(mod), col = 4)
lines(seasadj(mod2), col = 10)
legend("topleft",col=c(9, 4, 10),
legend =c("serie original","descomposición aditiva","descomposición multiplicativa"),
lwd=3, bty = "n")
title(xlab = "años", ylab = "nº de llegadas")
# additive Holt-Winters smoothing
mod.hw = hw(lleg)
mod.hw
# plot with the additive Holt-Winters forecast
autoplot(mod.hw, ts.colour="black")+
scale_y_continuous(breaks=seq(0,3500000,500000))+
labs(x="tiempo",y="nº visitantes")+
theme(axis.title.x = element_text(size=rel(1.5)))+
theme(axis.title.y = element_text(size=rel(1.5)))+
theme(panel.background = element_rect(fill ="white"))+
scale_x_date(date_breaks = "2 years", labels = date_format("%Y"))+
theme_test()
mod.hw$fitted
# Out-of-sample prediction (h = 11 periods): refit on data up to Dec 2017
# and compare the forecast with the held-out 2018 observations
n = length(lleg)
hh = 11
lleg.2 = lleg[1:(n-hh)]
lleg.2 = ts(lleg.2, start = c(1999,1), end = c(2017,12), frequency = 12)
pred.f.hw = hw(lleg.2, h = hh)
plot(pred.f.hw)
plot(lleg)
lines(pred.f.hw$mean, col=2)
error.hw = pred.f.hw$mean - lleg[(n-hh+1):n]
rmse.hw = sqrt(mean(error.hw^2))
# multiplicative Holt-Winters smoothing
mod.hw2 = hw(lleg, seasonal = "multiplicative")
mod.hw2
autoplot(mod.hw2, ts.colour="black")+
scale_y_continuous(breaks=seq(0,10000000,500000))+
labs(x="año",y="nº visitantes")+
theme(axis.title.x = element_text(size=rel(1.5)))+
theme(axis.title.y = element_text(size=rel(1.5)))+
theme(panel.background = element_rect(fill ="white"))+
scale_x_date(date_breaks = "2 years", labels = date_format("%Y"))+
theme_test()
mod.hw2$fitted
# BUG FIX: type = "1" -> type = "l" (see note above)
plot(lleg, type="l", xlim=c(1999,2019))
axis(1, at=2000:2018, labels=2000:2018);axis(2);box()
lines(lleg, col=9)
lines(seasadj(mod), col = 4)
lines(seasadj(mod2), col = 10)
lines(mod.hw$fitted, col = 6)
lines(mod.hw2$fitted, col = 11)
legend("topleft",col=c(9, 4, 10, 6, 11),legend =c("serie original", "datos sin el componente estacional de la descomposición aditiva","datos sin el componente estacional de la descomposición multiplicativa", "valores ajustados descomposición H-W aditivo", "valores ajustados descomposición H-W multiplicativa"),
lwd=3, bty = "n")
title(xlab = "tiempo", ylab = "nº de llegadas")
# Out-of-sample prediction (h = 11 periods), multiplicative variant
n = length(lleg)
hh = 11
lleg.2 = lleg[1:(n-hh)]
lleg.2 = ts(lleg.2, start = c(1999,1), end = c(2017,12), frequency = 12)
pred.f.hw2 = hw(lleg.2, h = hh, seasonal = "multiplicative")
plot(pred.f.hw2)
plot(lleg)
lines(pred.f.hw2$mean, col=2)
error.hw2 = pred.f.hw2$mean - lleg[(n-hh+1):n]
rmse.hw2 = sqrt(mean(error.hw2^2))
#####################################################################
# ARIMA - SARIMA (seasonality) modelling of the log-differenced series
#####################################################################
d.lleg = diff(log(lleg))
acf(d.lleg)# seasonality visible every 6 periods
d.lleg = ts(d.lleg, start = c(1999,2), end = c(2018,11), frequency = 12)
pacf(d.lleg) # suggests an AR(1) (add lags until the remaining spikes vanish)
par(mfrow=c(1,2))
acf(d.lleg)
pacf(d.lleg)
par(mfrow=c(1,1))
fit1=Arima(d.lleg, order=c(1,0,1))
summary(fit1)
coeftest(fit1)
acf(resid(fit1))
# SARIMA with a seasonal MA(1) on the 12-month seasonal difference
fit2=arima(d.lleg, order=c(1,0,12), seasonal=list(order=c(0,1,1), period=12))
summary(fit2)
coeftest(fit2)
par(mfrow=c(1,2))
acf(resid(fit2))
pacf(resid(fit2))
par(mfrow=c(1,1))
# forecast plot of the SARIMA
fit2 %>% forecast %>%
autoplot+
labs(x="años",y="variación visitantes relativa")+
theme(axis.title.x = element_text(size=rel(1.5)))+
theme(axis.title.y = element_text(size=rel(1.5)))+
theme(panel.background = element_rect(fill ="white"))+
scale_x_date(date_breaks = "2 years", labels = date_format("%Y"))+
theme_test()
# Out-of-sample prediction (h = 11 periods): refit on data up to Dec 2017
fit2=Arima(window(d.lleg, end= c(2017,12)), order=c(1,0,12), seasonal=list(order=c(0,1,1), period=12),
lambda =NULL)
accuracy(forecast(fit2, h = hh, lambda = NULL), window(d.lleg, start= c(2018,1)))
pred.f.fit2 = forecast(fit2, h = hh, lambda = NULL)$mean
predicciones_2019<-forecast(fit2,h=13)$mean
# Undo the log-difference transformation month by month to recover levels.
# NOTE(review): indices 239 and 228 into series$LOG are hard-coded positions
# (presumably Nov 2018 and Dec 2017) — verify against the spreadsheet layout.
diciembre_arima18=exp(series$LOG[239]+predicciones_2019[1])
enero_arima19 = exp(log(diciembre_arima18)+(predicciones_2019[2]))
febrero_arima19 = exp(log(enero_arima19)+(predicciones_2019[3]))
marzo_arima19 = exp(log(febrero_arima19)+(predicciones_2019[4]))
abril_arima19=exp(log(marzo_arima19)+(predicciones_2019[5]))
mayo_arima19=exp(log(abril_arima19)+(predicciones_2019[6]))
junio_arima19=exp(log(mayo_arima19)+(predicciones_2019[7]))
julio_arima19=exp(log(junio_arima19)+(predicciones_2019[8]))
agosto_arima19=exp(log(julio_arima19)+(predicciones_2019[9]))
septiembre_arima19=exp(log(agosto_arima19)+(predicciones_2019[10]))
octubre_arima19=exp(log(septiembre_arima19)+(predicciones_2019[11]))
noviembre_arima19=exp(log(octubre_arima19)+(predicciones_2019[12]))
diciembre_arima19=exp(log(noviembre_arima19)+predicciones_2019[13])
pr<-c(enero_arima19,febrero_arima19,marzo_arima19, abril_arima19, mayo_arima19, junio_arima19,
julio_arima19, agosto_arima19, septiembre_arima19, octubre_arima19, noviembre_arima19,diciembre_arima19)
table(pr)
# same back-transformation for the 2018 hold-out months
enero_arima = exp(series$LOG[228]+(pred.f.fit2[1]))
febrero_arima = exp(log(enero_arima)+(pred.f.fit2[2]))
marzo_arima = exp(log(febrero_arima)+(pred.f.fit2[3]))
abril_arima=exp(log(marzo_arima)+(pred.f.fit2[4]))
mayo_arima=exp(log(abril_arima)+(pred.f.fit2[5]))
junio_arima=exp(log(mayo_arima)+(pred.f.fit2[6]))
julio_arima=exp(log(junio_arima)+(pred.f.fit2[7]))
agosto_arima=exp(log(julio_arima)+(pred.f.fit2[8]))
septiembre_arima=exp(log(agosto_arima)+(pred.f.fit2[9]))
octubre_arima=exp(log(septiembre_arima)+(pred.f.fit2[10]))
noviembre_arima=exp(log(octubre_arima)+(pred.f.fit2[11]))
matriz_prediccionesArima = matrix(nrow = 11, ncol = 1, c(enero_arima,febrero_arima,marzo_arima,
abril_arima,mayo_arima,junio_arima,julio_arima,
agosto_arima,septiembre_arima,octubre_arima,
noviembre_arima))
pred.f.fit2.ent = ts(matriz_prediccionesArima, start = c(2018,1), frequency=12)
############################################################
# Out-of-sample ARIMA plots
############################################################
# BUG FIX (3x below): the original used type = "1" (digit one), an invalid
# plot type that errors at runtime; type = "l" (lines) was intended.
plot(lleg, type="l", xlim=c(1999,2019), ylim=c(0,3600000))
axis(1, at=2000:2018, labels=2000:2018);axis(2);box()
lines(lleg, col=9)
lines(pred.f.fit2.ent, col='red')
legend("topleft",col=c(9, 10),legend =c("serie original", "predicción fuera de la muestra"),
lwd=3, bty = "n")
title(xlab = "tiempo", ylab = "nº de llegadas")
plot(d.lleg, type="l", xlim=c(1999,2019), ylim=c(-2,1))
axis(1, at=2000:2018, labels=2000:2018);axis(2);box()
lines(d.lleg, col=9)
lines(pred.f.fit2, col='red')
legend("topleft",col=c(9, 10),legend =c("serie original", "predicción fuera de la muestra"),
lwd=3, bty = "n")
title(xlab = "tiempo", ylab = "variación relativa visitantes")
error.ar = pred.f.fit2.ent-lleg[(n-hh+1):n]
rmse.ar = sqrt(mean(error.ar^2))
######## Out-of-sample forecast comparison: H-W (both) vs SARIMA ############
plot(lleg, type="l", xlim=c(1999,2019), ylim=c(0,5300000))
axis(1, at=2000:2018, labels=2000:2018);axis(2, at=c(1500000,3000000,4500000));box()
lines(lleg, col=9)
lines(pred.f.hw2$mean, col='red')
lines(pred.f.hw$mean, col=4)
lines(pred.f.fit2.ent, col=3)
legend("topleft",col=c(9, 10, 4, 3),legend =c("serie original", "predicción fuera de la muestra H-W Multiplicativo",
"predicción fuera de la muestra H-W Aditivo","predicción fuera de la muestra del modelo SARIMA" ),
lwd=3, bty = "n")
title(xlab = "tiempo", ylab = "nº de llegadas")
############# Linear model on the log-difference, compared with M1 ##########
lm_est<-lm(series$DL~series$X__1)
summary(lm_est)
M2=lm_est
anova(M1,M2)
AIC(M1, M2)
BIC(M1,M2)
logLik(M1)
logLik(M2)
|
f56120b6d7f38e0e52c41d32150b0e28d210fa1c
|
d93c4c0ab2fc54b7da002a788f96a0e8a70a0ada
|
/man/eline.Rd
|
49c8952528aa5378dd0bb35e7c80ccce9f8549cf
|
[] |
no_license
|
JohnCoene/echarts
|
08dd5cb6d9d053d7eff1214da415dc4d38893f7b
|
ce98035230dfe5d9a497db2d7e303e623a69cf24
|
refs/heads/master
| 2021-06-12T11:30:36.952046
| 2021-05-16T15:32:01
| 2021-05-16T15:32:01
| 83,905,099
| 48
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,288
|
rd
|
eline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add.R, R/add_.R
\name{eline}
\alias{eline}
\alias{eline}
\alias{eline_}
\title{Add lines}
\usage{
eline(p, serie, name = NULL, stack = NULL, clickable = TRUE,
xAxisIndex = 0, yAxisIndex = 0, symbol = NULL, symbolSize = "2 | 4",
symbolRotate = NULL, showAllSymbol = FALSE, smooth = TRUE,
legendHoverLink = TRUE, dataFilter = "nearest", z = 2, zlevel = 0,
tooltip, ...)
eline_(p, serie, name = NULL, stack = NULL, clickable = TRUE,
xAxisIndex = 0, yAxisIndex = 0, symbol = NULL, symbolSize = "4",
symbolRotate = NULL, showAllSymbol = FALSE, smooth = TRUE,
legendHoverLink = TRUE, dataFilter = "nearest", z = 2, zlevel = 0,
tooltip, ...)
}
\arguments{
\item{p}{an echart object.}
\item{serie}{value column name to plot.}
\item{name}{of serie.}
\item{stack}{name of the stack.}
\item{clickable}{whether plot is clickable.}
\item{xAxisIndex, yAxisIndex}{axis indexes.}
\item{symbol}{symbol for point marker, see details for valid values.}
\item{symbolSize}{of symbol.}
\item{symbolRotate}{angle by which symbol is rotated, i.e.: \code{30}.}
\item{showAllSymbol}{By default, a symbol will show only when its corresponding axis label does.}
\item{smooth}{whether to smooth line.}
\item{legendHoverLink}{enables legend hover link to the chart.}
\item{dataFilter}{ECharts data filtering strategy, see details.}
\item{z, zlevel}{first and second grade cascading control, the higher z the closer to the top.}
\item{tooltip}{style of tooltip.}
\item{...}{any other argument to pass to the serie.}
}
\description{
Add line serie.
}
\details{
Valid values for \code{symbol}:
\itemize{
\item{\code{circle}}
\item{\code{rectangle}}
\item{\code{triangle}}
\item{\code{diamond}}
\item{\code{emptyCircle}}
\item{\code{emptyRectangle}}
\item{\code{emptyTriangle}}
\item{\code{emptyDiamond}}
\item{\code{heart}}
\item{\code{droplet}}
\item{\code{pin}}
\item{\code{arrow}}
\item{\code{star}}
}
\code{dataFilter}: ECharts will optimize for the situation when data number is much larger than viewport width.
It will filter the data showed in one pixel width. And this option is for data filtering strategy.
Valid values for \code{dataFilter} are:
\itemize{
\item{\code{nearest} (default)}
\item{\code{min}}
\item{\code{max}}
\item{\code{average}}
}
}
\examples{
\dontrun{
df <- data.frame(x = 1:50, y = runif(50, 5, 10), z = runif(50, 7, 12), w = runif(50, 10, 13))
df \%>\%
echart(x) \%>\%
eline(y) \%>\%
eline(z)
# JS sizing function
sizing <- htmlwidgets::JS("function(value){ return value[1]/1.5}")
df \%>\%
echart_("x") \%>\%
eline_("y", "w",
symbolSize = sizing,
showAllSymbol = TRUE,
symbol = "emptyCircle") \%>\%
etooltip() \%>\%
etheme("helianthus")
df \%>\%
echart_("x") \%>\%
eline_("y", stack = "grp") \%>\%
eline_("z", stack = "grp", symbol = "emptyDroplet", showAllSymbol = TRUE, symbolSize = 5) \%>\%
eline_("w", showAllSymbol = TRUE, symbolSize = 4, symbol = "emptyHeart", stack = "grp2") \%>\%
etooltip() \%>\%
elegend() \%>\%
etoolbox_magic(type = list("line", "bar"))
}
}
\seealso{
\href{http://echarts.baidu.com/echarts2/doc/option-en.html#series-i(line)}{official line options docs}
}
|
31c89cfb263fbac6e6b7078034d49b2e00c192f9
|
fa633a6294d56b6a788996dabb8c1d4d21ab8c95
|
/R/app.R
|
4a75d841b181211a0bb2f9732df5750c354339dc
|
[
"MIT"
] |
permissive
|
pr2database/metapr2-shiny
|
08432e2dbc0b56fe30875ace4ee1724bc015f6fe
|
65d3f17aeac9745de2ff1ab4a536e424bf39a4f6
|
refs/heads/main
| 2023-06-08T13:07:36.740518
| 2023-06-04T10:30:33
| 2023-06-04T10:30:33
| 410,160,328
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,769
|
r
|
app.R
|
#metapr2App -------------------------------------------------------
#' @title Launch metapr2 shiny app
#' @examples
#' # Starts shiny application
#'
#' \dontrun{
#' metapr2::run_app()
#' }
#'
#' @export
run_app <- function() {
  # Launches the metaPR2 shiny application: builds the UI (authentication
  # screen + sidebar/main layout) and the server (one module per panel),
  # then starts the app with shinyApp().
  # Build the whole dataset ---------------------------------------------------------
  # Function for computing object sizes
  # obj_size <- function(x) {
  #  cat("Object:",deparse(substitute(x)), "- size: ", round(pryr::object_size(x)/10**6, 2), " Mb \n")
  #  }
  # Remove warnings: -1 remove, 0 save, 1 print
  # https://docs.tibco.com/pub/enterprise-runtime-for-R/5.0.0/doc/html/Language_Reference/base/stop.html#:~:text=The%20warn%20option%20(see%20options,by%20calling%20the%20function%20warnings.
  options(warn = - 1)
  # UI fragments reused by several panels when a selection yields no rows
  messages <- list()
  messages$no_data = tags$div(
    tags$h4(tags$b("No data for this taxon in selected samples:")),
    # tags$br(),
    tags$span(style="color:red","You may want to change minimum number of reads or select more samples")
  )
  # Relabel the shinymanager login screen (it is reused as a dataset chooser)
  shinymanager::set_labels(
    language = "en",
    "Please authenticate" = "Choose datasets ",
    "Username:" = "Datasets (Leave blank for public version):",
    "Password:" = "Password (Leave blank for public version):",
    "Login" = "Enter metaPR2"
  )
  # User interface ----------------------------------------------------------
  ui <- fluidPage(
    # Booststrap theme:https://rstudio.github.io/shinythemes/
    # theme = bslib::bs_theme(bootswatch = "yeti"),
    # Tracking not necessary in ui
    # shinylogs::use_tracking(),
    # Script to close the windows after some inactivity - ACTIVATE for web application
    # NOTE(review): `inactivity` is defined elsewhere in the package
    tags$script(inactivity),
    # To activate shinyjs
    # shinyjs::useShinyjs(),
    # To include the favicon.ico
    tags$head(tags$link(rel="shortcut icon", href="img/favicon.ico")),
    # tags$head(tags$link(rel="shortcut icon", href=system.file("img", 'favicon.ico', package = "metapr2"))),
    # Authentification
    shinymanager::auth_ui(
      id = "auth",
      # add image on top ?
      tags_top =
        tags$div(
          # tags$h1("metaPR2", style = "align:center"),
          # tags$img(src = "img/metapr2_logo.png", width = 80)
          tags$img(src = "https://github.com/pr2database/metapr2-shiny/blob/main/inst/img/metapr2_logo.png?raw=true", width = 80)
        ),
      # add information on bottom ?
      tags_bottom = tags$div(
        checkboxInput("asv_clustered", "Use clustered ASVs (see Help)", value = TRUE, width = NULL),
        tags$p(" "),
        tags$h4("metaPR2 version: 2.1.1"),
        tags$br(),
        tags$h4("Datasets version: 2.1"),
        tags$h5("Datasets #: 59 (identical to version 2.0)"),
        tags$h5("Assignment: PR2 version 5.0.0"),
        tags$br(),
        tags$p("No password needed. For other datasets, please contact ",
               tags$a(href = "mailto:vaulot@gmail.com", target="_top", "Daniel Vaulot")
        )
      )
    ),
    # Message for disconnection
    shinydisconnect::disconnectMessage(
      text = "Server lost connection.",
      refresh = "Reload now"
    ),
    # Title
    title = "MetaPR2",
    # titlePanel(div(img(src='img/metapr2_logo.png', width="80"),"The MetaPR2 database")),
    # titlePanel(div(img(src=system.file("img", 'metapr2_logo.png', package = "metapr2"), width="80"),"The MetaPR2 database")),
    # --- Side bar layout (sidebar() and mainpanel() are package helpers)
    sidebarLayout( sidebar(),
                   mainpanel()
    )
  )
  # Server ------------------------------------------------------------------
  server <- function(input, output, session) {
    # Stop the application of the session is closed (after 30 min) - ACTIVATE for web application
    session$onSessionEnded(stopApp)
    # To track usage
    shinylogs::track_usage(storage_mode = shinylogs::store_sqlite(path = "logs/"))
    # Authentification
    authentification <- callModule(module = shinymanager::auth_server,
                                   id = "auth",
                                   check_credentials = shinymanager::check_credentials(credentials))
    # Disconnection
    observeEvent(input$button_disconnect, {session$close() } )
    # Validate the sample selection
    # See: https://rstudio.github.io/shinyvalidate/articles/shinyvalidate.html
    # Datasets - Reformat the datasets and creates output for download
    asv_set <- dataServer("data", taxo, authentification, input$asv_clustered)
    # Print version info and whether ASVs are clustered or not
    display_info_server("info", authentification, input$asv_clustered)
    # Utils - Dynamic taxonomy boxes
    taxo <- taxoServer("taxo", asv_set$fasta_all)
    # Panel - Download
    downloadServer("download", asv_set$datasets_selected, asv_set$samples_selected, asv_set$df_selected, asv_set$fasta_selected, taxo, messages)
    # Panel - Treemap
    treemapServer("treemap", asv_set$df_selected, taxo, messages)
    # Panel - Leaflet map
    mapServer("map", asv_set$df_selected, asv_set$samples_selected, taxo)
    # Panels - Barplot
    barplotServer("barplot", asv_set$df_selected, asv_set$samples_selected, taxo, messages)
    # Panels - Alpha and beta diversity
    phyloseqServer("phyloseq", asv_set$samples_selected, asv_set$df_selected, asv_set$fasta_selected, taxo, messages)
    # Panel - Matching ASV
    queryServer("query", asv_set$samples_selected, asv_set$df_all, asv_set$fasta_all)
    # Panel - Taxonomy table
    taxo_table_Server("taxo_table", asv_set$fasta_selected)
    # cat("Server: ")
    # print(pryr::mem_used())
    # Debug
    # output$test1 <- renderText(getwd())
    # output$test2 <- renderText(nrow(asv_set$df))
    # output$test3 <- DT::renderDT(asv_set$df)
  }
  # Run the shiny app -------------------------------------------------------
  shinyApp(ui, server)
}
|
57844d039789e5c02356b84b11237f1bdd0d390e
|
6f21d1b8365183e5708be7e71c003a0d2fc0d3b8
|
/R/create_bins.R
|
0ffc7bc18dde8d376ead82938654b15baf2bd65c
|
[] |
no_license
|
cran/Distance
|
d2700127a013bf8bc5b0d362f6c2f04aacf22020
|
4755df1c9a2ae2e7225b140097449f2db5ccb554
|
refs/heads/master
| 2023-07-24T23:16:56.004912
| 2023-07-17T11:30:02
| 2023-07-17T12:56:55
| 17,678,843
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,447
|
r
|
create_bins.R
|
#' Create bins from a set of binned distances and a set of cutpoints.
#'
#' This is an internal routine and shouldn't be necessary in normal analyses.
#'
#' @param data `data.frame` with at least the column `distance`.
#' @param cutpoints vector of cutpoints for the bins
#'
#' @return argument `data` with two extra columns `distbegin` and
#' `distend`.
#'
#' @author David L. Miller
#' @export
#' @examples
#' \dontrun{
#' library(Distance)
#' data(minke)
#'
#' # put the minke data into bins 0-1, 1-2, 2-3 km
#' minke_cuts <- create_bins(minke[!is.na(minke$distance),], c(0,1,2,3))
#' }
create_bins <- function(data, cutpoints){
  # Keep rows whose distance is NA or lies within [first, last] cutpoint;
  # warn about (and drop) any distances outside the binning range.
  keep <- is.na(data$distance) |
            (data$distance >= cutpoints[1] & data$distance <= cutpoints[length(cutpoints)])
  if(any(!keep)){
    warning("Some distances were outside bins and have been removed.")
  }
  data <- data[keep, , drop=FALSE]
  # Map each distance to its bin index (NA distances stay NA); include.lowest
  # makes the first interval closed on the left.
  bin_index <- cut(data$distance, breaks=cutpoints, include.lowest=TRUE, labels = FALSE)
  # Lower and upper edges of each bin, indexed by bin number
  lower <- cutpoints[-length(cutpoints)]
  upper <- cutpoints[-1]
  # Attach the per-row bin boundaries as new columns
  data$distbegin <- lower[bin_index]
  data$distend <- upper[bin_index]
  data <- data.frame(data)
  return(data)
}
|
8f925d61fb30df6b9fb88f82985e301c3c8d0197
|
b520550c46c76ba7855b7f651d1cdaa9596bdd5a
|
/man/ODBCConnect.Rd
|
4a85c2b17706014fdc348cede10ee62c0d7649ad
|
[] |
no_license
|
Tom-Reilly/FSS
|
76fb0a8e35bcc2ee035b1b598bd8500cab05e0ef
|
eeaacd442f84d428ec65c81b3f371f0ca4842473
|
refs/heads/master
| 2023-07-08T22:22:48.721264
| 2023-06-29T14:03:22
| 2023-06-29T14:03:22
| 220,976,725
| 0
| 0
| null | 2023-06-29T14:02:37
| 2019-11-11T12:28:56
|
R
|
UTF-8
|
R
| false
| false
| 1,118
|
rd
|
ODBCConnect.Rd
|
\name{ODBCConnect}
\alias{ODBCConnect}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Create Connection to FSS
}
\description{
Create an Online Database Connection (ODBC) to the Fisheries Survey System (FSS) Database. It is necessary to run this function prior to any further attempts to query the FSS database.
}
\usage{
ODBCConnect(name, user, password)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{name}{
character, the name of the database, in this case "FSS".
}
\item{user}{
character, the user name defined when setting up the DSN; this will differ between locations (OSE or Scotia).
}
\item{password}{
character, the password defined when setting up the DSN; this will differ between locations (OSE or Scotia).
}
}
\examples{
# Connect to FSS from the OSE
ODBCConnect(name = "FSS", user = "FSSuser", password = "pwd")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
9172970d460715f8be4995766c1201a1ebf94845
|
dd12bef775f15cba4a23a80d6ce9405a719cf0f2
|
/1 Developing the SIR Model/solving ODE.ipynb.r
|
0c45aa4f430b9280d50996c57d3c8e3891eec45c
|
[] |
no_license
|
antoniovitorvb/ICL-Infectious-Disease-Modelling
|
674d37bbcc3d3bfd0b445ec2d3c612c655094aaf
|
e03f8749ab0063527ba401591eea832c7391cc05
|
refs/heads/main
| 2023-02-27T05:57:34.534055
| 2021-02-05T01:08:05
| 2021-02-05T01:08:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,537
|
r
|
solving ODE.ipynb.r
|
# If you are working in RStudio, you will need to install if you haven't already:
# install.packages("ggplot2")
# install.packages("deSolve")
# Run the library() function to load these packages
library(ggplot2)
library(deSolve)
require(deSolve) # NOTE(review): redundant after library(deSolve) above
# NOTE(review): this first ode() call is shown for exposition only — it
# references state_vars/times/exp_pop_fn/parms, which are defined further
# down, so it errors if the script is run strictly top-to-bottom.
result <- ode( y = state_vars # initial state variables: state variables at first timepoint
, times = times # a vector of timepoints
, func = exp_pop_fn # the differential equation itself, in the form of a function
, parms = parms) # parameters for the equation in func
# Now to see what each input actually is.
?ode
# y: named vector of initial state variable values
state_vars <- c(N = 1) # a vector with a named element
# times: the solver reports the solution at each of these timepoints
times <- seq(0, 40, by = 0.5) # the shorter the timesteps, the more accurate a solution
exp_pop_fn <- function(time, state, parameters) {
  # Exponential growth model in the form deSolve::ode() expects:
  # dN/dt = alpha * N.
  # `state` carries the current N; `parameters` carries alpha (both are
  # supplied by ode() from its `y` and `parms` arguments).
  growth <- parameters['alpha'] * state['N']
  # ode() requires the derivatives wrapped in a list, in the same order
  # as the state variables were given.
  list(c(growth))
}
# Remember that this function is an argument into another function; it doesn't do a lot by itself.
# The inputs come from running the ode() function, the output goes back into the ode() function.
# parms: parameter vector handed to the model function by ode()
parms <- c(alpha = log(2)) # alpha has been chosen so the population doubles each timestep
parms['alpha'] # you can see "parms" is a vector with one named element, alpha.
# this argument 'parms' gets fed, by ode(), into the function that you specify to use as func
# so it needs to contain whatever variables that function is expecting.
# For this example:
result <- ode(y = state_vars # contains initial N
, times = times # the vector of timepoints
, func = exp_pop_fn # the exponential equation, written as a function
, parms = parms) # parameters for the exponential equation: here, just alpha
head(as.data.frame(result)) # shows the first few lines of the results
result <- as.data.frame(result) # turn the output into a data.frame
# use ggplot to create a graph of times against the population size N
require(ggplot2) # if not already loaded
expplot <- ggplot(data = result)
expplot <- expplot + geom_line(aes(x = time, y = N)
, colour = "blue")
expplot <- expplot + labs(x = "time (days)")
expplot # shows the graph
logistic_fn <- function(t, state, parameters) { # You'll need a K in the parameters argument
  # Logistic growth model for deSolve::ode():
  # dN/dt = alpha * N * (1 - N / K), where K is the carrying capacity.
  pop <- state['N']
  deriv <- parameters['alpha'] * pop * (1 - (pop / parameters['K']))
  # ode() expects a list of derivatives, one per state variable
  list(c(deriv))
}
# --- Logistic growth example ----------------------------------------------
parms['K'] <- 1000000
# the vector 'parms' now includes an element named K, assigned the value 1000000 to represent carrying capacity
result_K <- ode( y = state_vars # still only contains initial N
                 , times = times # the vector of timepoints we already have
                 , func = logistic_fn # the logistic equation function
                 , parms = parms) # parms now includes K
result_K <- as.data.frame(result_K)
# check the output, and plot
tail(result_K) # to view the last 6 rows; note that N approaches K
logplot <- ggplot(data = result_K)
logplot <- logplot + geom_line(aes(x = time, y = N)
                               , colour = "blue")
logplot <- logplot + labs(x = "time (days)")
logplot
# Demonstration of how with() resolves names from a list:
with(as.list(c(state_vars, parms)), { # give with() a list made of state_vars and parms
  print(alpha) # to find anything referenced within the curly brackets,
  print(N) # R looks for names within the object given to with()
})
# Logistic growth model for deSolve::ode(), using with() so that the state
# variable (N) and the parameters (alpha, K) can be referenced by name
# instead of being extracted individually.
logistic_fn <- function(t, state, parameters) {
  env <- as.list(c(state, parameters))
  with(env, {
    # dN/dt = alpha * N * (1 - N / K)
    list(c(alpha * N * (1 - N / K)))
  })
}
# --- Check the numerical solution against the analytic answer -------------
exp(parms['alpha'])
# alpha was chosen so that at each whole timestep, the population doubles
t <- seq(1, 40, by = 1) # vector of times at which to calculate the population size
# these don't have to be the same as the timepoints as the ode() output was generated at
N_calc <- state_vars['N'] * 2^t # every day, the population doubles
# R automatically vectorises this expression, applying it to each element of 't' in turn, to create vector N_calc
# N_calc should be the same length as t. Make them into a dataframe
pop_df <- data.frame(times = t, N = N_calc)
require(ggplot2)
# Overlay the analytic points (red crosses) on top of the numerical solution.
expplot <- expplot +
  geom_point(data = pop_df, # specify different dataframe here
             aes(y = N_calc, x = t)
             , shape = 4
             , size = 2
             , colour = "red")
expplot
|
4b2208bb11cfae7a89ba31aed6b0d6c012632494
|
bd4d459aca02be3900cbccd5f1f8a4abe1d30314
|
/man/filter_2pts_gaps.Rd
|
170aaf06f32623684cc6e6f0918868cdbe0eb697
|
[] |
no_license
|
augusto-garcia/onemap
|
98ff53e2825c8cf626cff6c433acd78311ac5fa3
|
d71d3c4800ddb00848a15a8635e08f8f1428bd1d
|
refs/heads/master
| 2023-07-07T02:23:26.351957
| 2022-11-25T19:27:50
| 2022-11-25T19:27:50
| 31,918,564
| 35
| 27
| null | 2023-06-28T16:57:49
| 2015-03-09T19:37:05
|
R
|
UTF-8
|
R
| false
| true
| 1,397
|
rd
|
filter_2pts_gaps.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{filter_2pts_gaps}
\alias{filter_2pts_gaps}
\title{Filter markers based on 2pts distance}
\usage{
filter_2pts_gaps(input.seq, max.gap = 10)
}
\arguments{
\item{input.seq}{object of class sequence with ordered markers}
\item{max.gap}{maximum gap measured in kosambi centimorgans allowed between adjacent markers.
Markers that presents the defined distance between both adjacent neighbors will be removed.}
}
\value{
New sequence object of class \code{sequence}, which is a list containing the
following components: \item{seq.num}{a \code{vector} containing the
(ordered) indices of markers in the sequence, according to the input file.}
\item{seq.phases}{a \code{vector} with the linkage phases between markers
in the sequence, in corresponding positions. \code{-1} means that there are
no defined linkage phases.} \item{seq.rf}{a \code{vector} with the
recombination frequencies between markers in the sequence. \code{-1} means
that there are no estimated recombination frequencies.}
\item{seq.like}{log-likelihood of the corresponding linkage map.}
\item{data.name}{object of class \code{onemap} with the raw
data.} \item{twopt}{object of class \code{rf_2pts} with the
2-point analyses.}
}
\description{
Filter markers based on 2pts distance
}
\author{
Cristiane Taniguti, \email{chtaniguti@tamu.edu}
}
|
76a555ee1ee421c62523f320d1dd23ceceefedcd
|
f313cd982f70daffa643a2f2b1147d284607d613
|
/install_packages.R
|
0b371d943090b98cfbe6008c3eb314e2d6070645
|
[
"Apache-2.0"
] |
permissive
|
xuzhenwu/PML-shiny
|
a0cc59f55c51edc8fb6f278029db038d11faf2b4
|
6d22b310cc264690cff5894b7fc763593540f20f
|
refs/heads/master
| 2023-02-17T17:46:52.206485
| 2021-01-20T09:55:30
| 2021-01-20T09:55:30
| 284,678,124
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 683
|
r
|
install_packages.R
|
#' Attach a package, installing it first if it is not available.
#'
#' @param fun Unquoted package name (captured via substitute()).
#' @return The (invisible) result of library(); called for its side effect
#'   of attaching the package to the search path.
slibrary <- function(fun){
  # Capture the unquoted package name as a string.
  fun_name <- as.character(substitute(fun))
  # requireNamespace() is far cheaper than scanning installed.packages(),
  # which reads the metadata of every installed package.
  if (!requireNamespace(fun_name, quietly = TRUE)) {
    install.packages(fun_name)
  }
  # library(character.only = TRUE) replaces the fragile eval(parse(...)) idiom.
  library(fun_name, character.only = TRUE)
}
# Dependencies: attach every package the app needs, installing any that are
# missing. NOTE(review): leaflet is listed twice (here and further down);
# the second call is a harmless no-op.
slibrary(shiny)
slibrary(raster)
slibrary(rgdal)
slibrary(leaflet)
slibrary(RColorBrewer)
slibrary(reshape2)
slibrary(plotly)
slibrary(gapminder)
slibrary(ggplot2)
slibrary(rsconnect)
slibrary(shinyFiles)
slibrary(shinyWidgets)
slibrary(exactextractr)
slibrary(data.table)
slibrary(sf)
slibrary(ncdf4)
slibrary(stringr)
slibrary(leaflet)
slibrary(DT)
slibrary(lubridate)
slibrary(waiter)
slibrary(shinycssloaders)
|
90c37b22e5c4c0d43b6e6c894ba61d69723cb764
|
e31c1eac8e57a4f51879f4d19935ad6eee25946d
|
/man/ec_timeline.Rd
|
0326622b9ce350465d1839c1c4842381b9b6ffc6
|
[] |
no_license
|
jeevanyue/echarter
|
29a482c595a45bf659cb41e94b7212a1bb4b7d0b
|
ab80bda2c1f3aa24f32ff0b5325b04e481e6cd31
|
refs/heads/master
| 2021-06-29T13:11:08.646249
| 2020-10-12T16:08:22
| 2020-10-12T16:08:22
| 155,544,727
| 19
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 501
|
rd
|
ec_timeline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api-ec.R
\name{ec_timeline}
\alias{ec_timeline}
\title{timeline}
\usage{
ec_timeline(ec, ..., baseoption = TRUE, add = FALSE)
}
\arguments{
\item{ec}{An \code{echarter} object as returned by \code{\link{echart}}.}
\item{...}{Additional arguments for the timeline
(\url{https://echarts.apache.org/zh/option.html#timeline}).}
\item{baseoption}{default TRUE}
\item{add}{default FALSE}
}
\description{
timeline component.
}
|
123c8564dde42ba9eb2ceccaa7c1b5cfa245e9cf
|
988f267e20a965a9a9983f5d2b2dbaafe62f0845
|
/R/ble-attr.R
|
eccd1ba7813af76a966495cd3d98483bc1738f77
|
[
"MIT"
] |
permissive
|
bjcairns/stubble
|
245d3f01c85fee7439a701c8815f436311155b2d
|
d082e1516236afeed1e5c83ece537be17fae8e8e
|
refs/heads/master
| 2023-04-01T14:23:48.497769
| 2021-02-01T11:15:19
| 2021-02-01T11:15:19
| 214,469,085
| 0
| 0
|
NOASSERTION
| 2021-02-03T13:42:33
| 2019-10-11T15:20:54
|
R
|
UTF-8
|
R
| false
| false
| 1,124
|
r
|
ble-attr.R
|
#' @title
#' Decode vector attributes
#'
#' @description
#' Internal function to decode list attributes into a vector.
#'
#' @param x Attribute list describing one column; the fields read here are
#'   \code{n} (default element count) and \code{sim$method} (simulation
#'   method: "agnostic", "sample" or "spline").
#' @param elements Number of elements to synthesise; defaults to
#'   \code{x[["n"]]} when missing.
#' @param index Index forwarded to \code{stubble_ctrl()}.
#' @param ... Further control options forwarded to \code{stubble_ctrl()}.
#' @param ctrl Pre-existing control list, merged by \code{stubble_ctrl()}.
#'
#' @return A synthetic vector with \code{NA}s imputed at rate \code{ctrl$p_na}.
#'
#' @concept empirical
#' @concept ecdf
#' @concept sample
#' @concept simulate
#' @concept simulated
#' @concept simulation
#'
#' @keywords datagen
### ble_attr() ###
#' @noRd
ble_attr <- function(x, elements, index = 1L, ..., ctrl = list()){
  ## Set Control Parameters ##
  ctrl <- stubble_ctrl(..., old_ctrl = ctrl, index = index)
  ## Extract Params ##
  elements <- if (missing(elements)) x[["n"]] else elements
  method <- x[["sim"]][["method"]]
  ## Set RNG ##
  # Switch to the requested RNG kind for reproducibility and restore the
  # caller's RNG kind on exit (even on error) via on.exit().
  rng_kind <- ctrl[["rng_kind"]]
  old_kind <- RNGkind()[1]
  on.exit(RNGkind(kind = old_kind))
  RNGkind(kind = rng_kind)
  ## Generate syn_col ##
  # Dispatch on the simulation method. NOTE(review): an unrecognised method
  # makes switch() return NULL -- confirm upstream validation guarantees one
  # of these three values.
  syn_col <- switch(
    EXPR = method,
    agnostic = ble_agnostic(x = x, elements = elements, ctrl = ctrl),
    sample = ble_sample(x = x, elements = elements, ctrl = ctrl),
    spline = ble_spline(x = x, elements = elements, ctrl = ctrl)
  )
  ## Impute NA Values ##
  syn_col <- impute_na(syn_col, p_na = ctrl[["p_na"]])
  ## Output ##
  return(syn_col)
}
|
7fbb73105cbbad2f0fe9a6c2565ac9f47777bf4d
|
3b5dc61bb3e3245adf6c5f7b5b2c787348582ec3
|
/R/4_misc_dopplershift.R
|
82cd4431c9bcef680efcd1931409797d8f7e474f
|
[] |
no_license
|
jzeyl/AcousticCalcs
|
c2e321aa6a6c1233db22135b0593c020d3f20401
|
baa2d87a02e831f614fc634d8221641422114e98
|
refs/heads/master
| 2023-02-08T14:39:12.505689
| 2023-01-21T22:18:29
| 2023-01-21T22:18:29
| 204,171,734
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,419
|
r
|
4_misc_dopplershift.R
|
#' dopplershift
#'
#' Calculates the Doppler-shifted frequency for relative motion towards or
#' away. The output gives the new frequency and the percent frequency shift.
#' @param v1 speed of the moving object (m/s)
#' @param v2 speed of the second object (m/s). NOTE(review): currently unused
#'   by the calculation -- confirm whether it should enter the formula.
#' @param c speed of sound in the medium (m/s)
#' @param freq original frequency (Hz)
#' @param direction "towards" or "away"; any other value returns invisible NULL
#' @return A list with elements "Frequency" (shifted frequency, Hz) and
#'   "Percent frequency shift".
#' @examples dopplershift(v1 = 10, v2 = 0, c = 343, freq = 300, direction = "towards")
#' #$Frequency
#' #[1] 308.7464
#' #
#' #$`Percent frequency shift`
#' #[1] 2.915452
dopplershift<-function(v1 = 10, v2 = 0, c = 340, freq = 300, direction = "towards"){
  # Approaching raises the observed frequency; receding lowers it.
  if(direction == "towards"){
    newHz <- ((c + v1) / c) * freq
  } else if(direction == "away"){
    newHz <- ((c - v1) / c) * freq
  } else {
    # Preserve the original behaviour for unrecognised directions.
    return(invisible(NULL))
  }
  # Shift expressed as a percentage of the original frequency.
  Hzshift <- ((newHz - freq) / freq) * 100
  list("Frequency" = newHz, "Percent frequency shift" = Hzshift)
}
#dopplershift(direction = "away")
# ((c+v))
#11mps = 40 km/h
#Foriginal <- 1
#DopplerFtowards <- ((340+11)/340)*Foriginal
#DopplerFaway <- (340+-11/340)*Foriginal
#PercentHzshift<-((DopplerFtowards-Foriginal)/Foriginal)*100
#PercentHzshift
#equals a 3% shift
|
8fc31328395910a1f45b65a4941a09269b1ffb66
|
41f3601582fb30846d564c60975f7771c5181a1e
|
/codes/Mental_Accounting_Initial_Data_Cleaning.R
|
2263d6671a3d5732c3944ca1871deca2ce05d350
|
[] |
no_license
|
edikaQT/Mental-Accounting
|
c69b74a3a28dc4094aee82bf0bc51dcb477a32bc
|
0e836fb761dfc1f4790ce265dc6966c27e5debb5
|
refs/heads/master
| 2021-07-18T00:28:05.018709
| 2020-05-16T22:12:07
| 2020-05-16T22:12:07
| 264,536,992
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,291
|
r
|
Mental_Accounting_Initial_Data_Cleaning.R
|
# Initial cleaning of the raw credit-card cycle data (data.table throughout).
# NOTE(review): rm(list = ls()) and setwd() in a script are fragile -- they
# wipe the caller's workspace and hard-code a Windows path; memory.limit()
# is Windows-only.
rm(list = ls(all=TRUE))
library(data.table)
wd <- as.character("C:/credit_card/")
setwd(wd)
memory.limit(size=10000)
memory.limit()
data <- fread("complete_20160408.csv")
# Force the amount/rate columns to numeric (fread may read them as character).
data[, Cyc_Xxx_PaymentMinimumDueAmount:=as.numeric(Cyc_Xxx_PaymentMinimumDueAmount)]
data[, Cyc_Xxx_BalanceBeginning:=as.numeric(Cyc_Xxx_BalanceBeginning)]
data[, Cyc_Xxx_BalanceEnding:=as.numeric(Cyc_Xxx_BalanceEnding)]
data[, Cyc_Xxx_CreditLimitTotal:=as.numeric(Cyc_Xxx_CreditLimitTotal)]
data[, Cyc_Xxx_AprMerchant:=as.numeric(Cyc_Xxx_AprMerchant)]
data[, Cyc_Xxx_AprCash:=as.numeric(Cyc_Xxx_AprCash)]
data[, Cyc_Xxx_BalanceAdb:=as.numeric(Cyc_Xxx_BalanceAdb)]
data[, Cyc_Xxx_BalanceRevolving:=as.numeric(Cyc_Xxx_BalanceRevolving)]
# Copy the long Cyc_Xxx_* names to short working names, then drop the originals.
data[, beg:=Cyc_Xxx_BalanceBeginning]
data[, bal:=Cyc_Xxx_BalanceEnding]
data[, min:=Cyc_Xxx_PaymentMinimumDueAmount]
data[, cl:=Cyc_Xxx_CreditLimitTotal]
data[, mer_APR:=Cyc_Xxx_AprMerchant]
data[, cash_APR:=Cyc_Xxx_AprCash]
data[, adb:=Cyc_Xxx_BalanceAdb]
data[, rev:=Cyc_Xxx_BalanceRevolving]
data[, Cyc_Xxx_PaymentMinimumDueAmount:=NULL]
data[, Cyc_Xxx_BalanceBeginning:=NULL]
data[, Cyc_Xxx_BalanceEnding:=NULL]
data[, Cyc_Xxx_CreditLimitTotal:=NULL]
data[, Cyc_Xxx_AprMerchant:=NULL]
data[, Cyc_Xxx_AprCash:=NULL]
data[, Cyc_Xxx_BalanceAdb:=NULL]
data[, Cyc_Xxx_BalanceRevolving:=NULL]
#_______________________________________________
#
# Dates
#_______________________________________________
# CyclePeriod is YYYYMM; append "01" to parse it as the first of the month.
data[, Cycle:=paste(as.character(CyclePeriod),"01", sep="")]
data[, Cycle:=as.Date(Cycle, "%Y%m%d")]
data[, year:=year(Cycle)]
data[, .(AccountNumber, CyclePeriod, Cycle, year)]
data[, OpenDate:=as.Date(OpenDate, "%Y-%m-%d")]
#_______________________________________________
#
# Exclude accounts with multiple opendata
#_______________________________________________
# An account should have one opening date; more than one suggests a data
# error, so those accounts are dropped entirely.
data[,num_opendate:=length(unique(.SD[,OpenDate])), by=AccountNumber]
ex.ac <- unique(data[num_opendate>1, AccountNumber])
data <- data[!which(AccountNumber %in% ex.ac)]
#_______________________________________________
#
# Amount pay
#_______________________________________________
# Total payment = non-direct-debit + direct-debit. The lag_* columns shift
# each payment series up by one cycle (next cycle's payment on this row),
# padding the final cycle with NA via the "NA" string + as.numeric round-trip.
data[, amt_pay:=amt_pay_non_DD + amt_pay_DD]
data[, lag_amt_pay_non_DD:=c(.SD[2:nrow(.SD) ,amt_pay_non_DD], "NA"), by=AccountNumber]
data[, lag_amt_pay_DD:=c(.SD[2:nrow(.SD) ,amt_pay_DD], "NA"), by=AccountNumber]
data[, lag_amt_pay_non_DD:=as.numeric(lag_amt_pay_non_DD)]
data[, lag_amt_pay_DD:=as.numeric(lag_amt_pay_DD)]
data[, lag_amt_pay:=lag_amt_pay_non_DD + lag_amt_pay_DD]
#_______________________________________________
#
# Exclude the repetition problem (some accounts have repeated rows with repeated end dates)
#_______________________________________________
# Keep only accounts whose cycle count equals their distinct end-date count.
data[, num_end_days:=length(unique(Cyc_Xxx_CycleEndDate)), by=AccountNumber]
data[, num_cycles:=nrow(.SD), by=AccountNumber]
inc_ac <- unique(data[num_end_days==num_cycles, AccountNumber])
length(inc_ac) #313948
data <- data[AccountNumber %in% inc_ac]
#_______________________________________________
#
# Exclude missing minimum account
#_______________________________________________
ex.ac <- unique(data[bal>0 & min<=0, AccountNumber])
data <- data[!which(AccountNumber %in% ex.ac)]
#_______________________________________________
#
# Exclude accounts who ever had min greater than balance with positive balance
#_______________________________________________
ex.ac <- unique(data[bal>0 & bal<min, AccountNumber])
data <- data[!which(AccountNumber %in% ex.ac)]
#_______________________________________________
#
# Exclude accounts closed or charged-off
#_______________________________________________
ex.ac <- unique(data[Status=="Closed" | Status=="Charged-off" | cl==0, AccountNumber])
data <- data[!AccountNumber %in% ex.ac]
#_______________________________________________
#
# Exclude accounts with trancated transaction data without Closed status
# (so we exclude accounts with empty repayment on a day different than 2014M12,
# i.e, those accounts that do not have data until the last month in the data period)
#_______________________________________________
dd <- data[is.na(lag_amt_pay), .(AccountNumber, Cycle, bal)]
ex.ac <- unique(dd[Cycle!="2014-12-01", AccountNumber])
data <- data[!which(AccountNumber %in% ex.ac)]
#_______________________________________________
#
# new_ac dummy
#----------------------
# Flag accounts opened from 2013 onwards as "new".
data[,new_ac:=ifelse(OpenDate>="2013-01-01", 1, 0)]
nrow(data[new_ac==1])
#_______________________________________________
#
# Total purchase
#_______________________________________________
# Sum purchases across all 28 merchant-category (mcc) amount columns.
data[,amt_purchase:=
       amt_mcc0+amt_mcc1+amt_mcc2+amt_mcc3+amt_mcc4+amt_mcc5+amt_mcc6+amt_mcc7+amt_mcc8+amt_mcc9+
       amt_mcc10+amt_mcc11+amt_mcc12+amt_mcc13+amt_mcc14+amt_mcc15+amt_mcc16+amt_mcc17+amt_mcc18+amt_mcc19+
       amt_mcc20+amt_mcc21+amt_mcc22+amt_mcc23+amt_mcc24+amt_mcc25+amt_mcc26+amt_mcc27]
#_______________________________________________
#
# Exclude accounts with balance transfer
#_______________________________________________
# theo_bal reconstructs the ending balance from its components; a gap of
# $10 or more versus the recorded balance indicates unobserved activity
# (e.g. balance transfers), so those accounts are flagged and dropped.
data[,theo_bal:=beg + amt_purchase+
       amt_fc_purchase + amt_blank + amt_fee_overlim +
       amt_fee_late + amt_fee_forecur +
       amt_fc_ca + amt_ca + amt_fee_ca + amt_fc_btcc + amt_fee_btcc +
       amt_fee_NFS + amt_fee_others + amt_fee_ma + amt_pay]
ex.ac1 <- unique(data[abs(bal-theo_bal)>=10, AccountNumber])
# Accounts with blank transactions
ex.ac2 <- unique(data[amt_blank!=0, AccountNumber])
ex.ac <- unique(c(ex.ac1, ex.ac2))
data[,BT_ac:=ifelse(AccountNumber %in% ex.ac, 1, 0)]
data[,theo_bal:=NULL]
data <- data[BT_ac==0]
#_______________________________________________
#
# Accounts who ever had zero mer_APR
#_______________________________________________
zero.apr <- unique(data[mer_APR==0, AccountNumber])
data[,zero_APR_ac:=ifelse(AccountNumber %in% zero.apr, 1, 0)]
length(unique(data[new_ac==1 & zero_APR_ac==1, AccountNumber])) #186819
#_______________________________________________
#
# Cycle month number
#_______________________________________________
# Sequential cycle index (1, 2, ...) within each account.
data <- data[order(AccountNumber, CyclePeriod)]
data[, month:=seq(1, nrow(.SD), by=1), by=AccountNumber]
#_______________________________________________
#
# Exclude last month for each account
#_______________________________________________
unique(data[num_cycles==month, lag_amt_pay])
nrow(data[num_cycles!=month & is.na(amt_pay)])
data <- data[!is.na(lag_amt_pay)]
#_______________________________________________
#
# Exclude new accounts with (1st cycle-OpenDate)>30
# (i.e., excluding accounts for which we do not observe the first months of transactions)
#_______________________________________________
data[,OpenDate:=as.Date(OpenDate, "%Y-%m-%d")]
data[,Cycle:=as.Date(Cycle, "%Y-%m-%d")]
dat <- data[month==1]
dat[,diff_open_cycle:=Cycle-OpenDate]
ex.ac <- dat[new_ac==1 & diff_open_cycle>30, AccountNumber]
data <- data[!AccountNumber %in% ex.ac]
#_______________________________________________
#
# Purchase category
#_______________________________________________
# Total number of purchase transactions across all mcc columns.
data[,num_purchase:=num_mcc0 + num_mcc1 + num_mcc2 + num_mcc3 + num_mcc4 + num_mcc5 + num_mcc6 + num_mcc7 + num_mcc8 + num_mcc9 + num_mcc10 +
       num_mcc11 + num_mcc12 + num_mcc13 + num_mcc14 + num_mcc15 + num_mcc16 + num_mcc17 + num_mcc18 + num_mcc19 + num_mcc20 +
       num_mcc21 + num_mcc22 + num_mcc23 + num_mcc24 + num_mcc25 + num_mcc26 + num_mcc27]
# Classify each cycle: 200 = no purchase, k = all spending in category k,
# 100 = spending split across several categories ("Mix").
data[,purchase_category:=ifelse(amt_purchase<=0, 200,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc0, 0,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc1, 1,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc2, 2,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc3, 3,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc4, 4,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc5, 5,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc6, 6,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc7, 7,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc8, 8,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc9, 9,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc10, 10,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc11, 11,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc12, 12,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc13, 13,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc14, 14,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc15, 15,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc16, 16,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc17, 17,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc18, 18,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc19, 19,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc20, 20,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc21, 21,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc22, 22,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc23, 23,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc24, 24,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc25, 25,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc26, 26,
                         ifelse(amt_purchase>0 & amt_purchase==amt_mcc27, 27, 100)))))))))))))))))))))))))))))]
data <- data[purchase_category!=200] #200 is no purchase
# Transaction count in the chosen category (num_purchase for "Mix"), used
# below to keep only cycles where all transactions fall in that category.
data[,num_cate:=ifelse(purchase_category==0, num_mcc0,
                ifelse(purchase_category==1, num_mcc1,
                ifelse(purchase_category==2, num_mcc2,
                ifelse(purchase_category==3, num_mcc3,
                ifelse(purchase_category==4, num_mcc4,
                ifelse(purchase_category==5, num_mcc5,
                ifelse(purchase_category==6, num_mcc6,
                ifelse(purchase_category==7, num_mcc7,
                ifelse(purchase_category==8, num_mcc8,
                ifelse(purchase_category==9, num_mcc9,
                ifelse(purchase_category==10, num_mcc10,
                ifelse(purchase_category==11, num_mcc11,
                ifelse(purchase_category==12, num_mcc12,
                ifelse(purchase_category==13, num_mcc13,
                ifelse(purchase_category==14, num_mcc14,
                ifelse(purchase_category==15, num_mcc15,
                ifelse(purchase_category==16, num_mcc16,
                ifelse(purchase_category==17, num_mcc17,
                ifelse(purchase_category==18, num_mcc18,
                ifelse(purchase_category==19, num_mcc19,
                ifelse(purchase_category==20, num_mcc20,
                ifelse(purchase_category==21, num_mcc21,
                ifelse(purchase_category==22, num_mcc22,
                ifelse(purchase_category==23, num_mcc23,
                ifelse(purchase_category==24, num_mcc24,
                ifelse(purchase_category==25, num_mcc25,
                ifelse(purchase_category==26, num_mcc26,
                ifelse(purchase_category==27, num_mcc27,
                ifelse(purchase_category==100, num_purchase, NA)))))))))))))))))))))))))))))]
data <- data[num_cate==num_purchase]
#_______________________________________________
#
# Purchase category
#_______________________________________________
# Human-readable labels for the purchase_category codes assigned above.
data[, cate:=
       ifelse(purchase_category==0, "N/A",
       ifelse(purchase_category==1, "Airlines",
       ifelse(purchase_category==2, "Auto Rental",
       ifelse(purchase_category==3, "Hotel/Motel",
       ifelse(purchase_category==4, "Restaurants/Bars",
       ifelse(purchase_category==5, "Travel Agencies",
       ifelse(purchase_category==6, "Other Transportation",
       ifelse(purchase_category==7, "Department Stores",
       ifelse(purchase_category==8, "Discount Stores",
       ifelse(purchase_category==9, "Clothing Stores",
       ifelse(purchase_category==10, "Hardware Stores",
       ifelse(purchase_category==11, "Drug Stores",
       ifelse(purchase_category==12, "Gas Stations",
       ifelse(purchase_category==13, "Mail Orders",
       ifelse(purchase_category==14, "Food Stores",
       ifelse(purchase_category==15, "Vehicles",
       ifelse(purchase_category==16, "Interior Furnishing Stores",
       ifelse(purchase_category==17, "Electric Appliance Stores",
       ifelse(purchase_category==18, "Sporting Goods/Toy Stores",
       ifelse(purchase_category==19, "Other Retail",
       ifelse(purchase_category==20, "Health Care",
       ifelse(purchase_category==21, "Recreation",
       ifelse(purchase_category==22, "Education",
       ifelse(purchase_category==23, "Utilities",
       ifelse(purchase_category==24, "Professional Services",
       ifelse(purchase_category==25, "Repair Shops",
       ifelse(purchase_category==26, "Other Services",
       ifelse(purchase_category==27, "Quasi Cash",
       ifelse(purchase_category==100,"Mix",
       NA)))))))))) )))))))))) )))))))))]
data[,purchase_category_name:=as.factor(cate)]
data[,purchase_category_name:=relevel(purchase_category_name, ref="Airlines")]
#_______________________________________________
#
# Non-DD observations
#_______________________________________________
# Keep only cycles whose next-month payment was not made by direct debit.
data[,DD_flag:=ifelse(lag_amt_pay_DD<0, 1, 0)]
data <- data[DD_flag==0]
#_______________________________________________
#
# Beginning balance 0, purchase amount>5, and positive balance
#_______________________________________________
data <- data[beg==0 & amt_ca==0 & lag_amt_pay<=0 & amt_purchase>5 & bal>0 & min>0]
#_______________________________________________
#
# Utilisation
#_______________________________________________
data[, utilisation:=bal/cl]
#_______________________________________________
#
# Repayment_purchase ratio
#_______________________________________________
# Fraction of the cycle's purchases repaid next cycle (payments are negative).
data[,RP_ratio:=(-lag_amt_pay)/amt_purchase]
#_______________________________________________
#
# Charge-off rate
#_______________________________________________
data[,charge_off_rate:=Cyc_Xxx_UnitChargeOffRate]
#_______________________________________________
#
# Preliminary data cleaned
#_______________________________________________
# Keep cycles where the ending balance equals the purchase amount, then
# select the analysis columns and write the cleaned file.
data <- data[abs(bal-amt_purchase)<.001]
data[,obs_month:=month]
d <- data[,.(AccountNumber, CustomerNumber, obs_month, OpenDate, mer_APR, cash_APR, charge_off_rate, utilisation, cl, beg, bal, min, amt_purchase, num_purchase,
             lag_amt_pay, purchase_category, purchase_category_name,
             RP_ratio, CyclePeriod, new_ac, zero_APR_ac,
             amt_mcc0, amt_mcc1, amt_mcc2, amt_mcc3, amt_mcc4, amt_mcc5, amt_mcc6, amt_mcc7, amt_mcc8, amt_mcc9,
             amt_mcc10, amt_mcc11, amt_mcc12, amt_mcc13, amt_mcc14, amt_mcc15, amt_mcc16, amt_mcc17, amt_mcc18, amt_mcc19,
             amt_mcc20, amt_mcc21, amt_mcc22, amt_mcc23, amt_mcc24, amt_mcc25, amt_mcc26, amt_mcc27,
             num_mcc0, num_mcc1, num_mcc2, num_mcc3, num_mcc4, num_mcc5, num_mcc6, num_mcc7, num_mcc8, num_mcc9,
             num_mcc10, num_mcc11, num_mcc12, num_mcc13, num_mcc14, num_mcc15, num_mcc16, num_mcc17, num_mcc18, num_mcc19,
             num_mcc20, num_mcc21, num_mcc22, num_mcc23, num_mcc24, num_mcc25, num_mcc26, num_mcc27)]
output_file <- file.path(wd, "data_for_mental_accounting_20170530.csv")
write.table(d, file=output_file, sep=',', row.names=F, col.names=T)
|
424d17fd26a98bfbc83f0bda1ae1ca5e19206f6f
|
c8f5a54afc75dc2a628c7a784703211b7d2b424e
|
/clustering/uprefs-dt.R
|
94243242d2378cdcf86ebedddfc9fb1d97a35086
|
[] |
no_license
|
pearsonca/montreal-digest
|
8b4391b252d871c5ec5dcdccc1185c22243ff9c4
|
692274dafa28e514999afe4f314822a462b69385
|
refs/heads/master
| 2020-12-14T14:41:00.263537
| 2016-08-07T14:23:02
| 2016-08-07T14:23:02
| 51,030,681
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 887
|
r
|
uprefs-dt.R
|
## take clustering computations and make a user preferences dt
# Reads three RDS inputs (visit detail, location clusters, user references),
# computes each user's share of visits per location, attaches the location
# cluster labels, and streams the result as RDS to stdout via pipe("cat","wb").
# NOTE(review): rm(list=ls()) wipes the calling workspace and require() only
# warns if data.table is missing -- both are fragile in a script.
rm(list=ls())
args <- commandArgs(trailingOnly = T)
# args <- c("input/digest/filter/detail_input.rds", "input/digest/clustering/locrefs.rds", "input/digest/clustering/userrefs.rds")
require(data.table)
srcs <- lapply(args, readRDS)
names(srcs) <- c("censor.dt","loccluster.dt","userref.dt")
saveRDS(with(srcs,{
  # Per-user visit counts by location, then each location's share of the
  # user's total visits (pref); re-keyed by location_id for the join below.
  intermediate <- censor.dt[
    user_id %in% userref.dt$user_id, list(visits = .N), by=list(user_id, location_id)
  ][,
    list(location_id, pref=visits/sum(visits)), keyby=user_id
  ][,
    list(user_id, pref), keyby=location_id
  ]
  # Join on location_id to attach the cluster labels, then keep the label
  # columns plus pref, keyed by user.
  intermediate[
    loccluster.dt[
      location_id %in% unique(intermediate$location_id),
      list(lifetime_cat, pwr_clust, vMFcluster), keyby=location_id
    ]
  ][,
    list(lifetime_cat, pwr_clust, vMFcluster, pref),
    keyby=user_id
  ]
}), pipe("cat","wb"))
|
f471d8d589b045ecb5fe3b22b01d43938b42fdd0
|
a8e88daba53369ffd3acb502fd71cd38a4eaf5ca
|
/src/RScript/hw5.r
|
17715df28685041842f1919816d3345919a850ba
|
[] |
no_license
|
Younhong/DataScience
|
dce1e271539605924fdd236cfc23abbcbaac9108
|
6ee05128b23c3253e5a05d25ffac0d7a4ea3874c
|
refs/heads/master
| 2023-01-29T20:38:09.480954
| 2020-12-11T10:54:03
| 2020-12-11T10:54:03
| 258,997,531
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,389
|
r
|
hw5.r
|
# Load the prepared homework datasets (student.* and credit_* objects) and
# ROCR, which provides prediction()/performance() for the AUC computation.
load('hw5_student.RData')
library(ROCR)
# Root-mean-squared error between observed labels and model estimates.
calcRMSE <- function(label, estimation){
  squared_err <- (label - estimation) ^ 2
  sqrt(mean(squared_err))
}
# Coefficient of determination: 1 - RSS / total sum of squares.
calcR2 <- function(label, estimation){
  residual_ss <- sum((label - estimation) ^ 2)
  total_ss <- sum((label - mean(label)) ^ 2)
  1 - residual_ss / total_ss
}
# Recall (true positives / actual positives) on the training set at
# classification threshold i. Relies on the global `credit_train` having
# columns `pred` and `default.payment.next.month`.
getRecall <- function(i) {
  conf <- table(pred = credit_train$pred > i,
                actual = credit_train$default.payment.next.month)
  conf[2, 2] / sum(conf[, 2])
}
# Area under the ROC curve (via ROCR) for predicted scores vs binary targets.
calAUC <- function(predCol, targetCol){
  roc_pred <- prediction(predCol, targetCol)
  as.numeric(performance(roc_pred, 'auc')@y.values)
}
# Part 1 -- linear regression on the student grade data
## 1 -- baseline model with all predictors
student_model <- lm(G3 ~ ., student.train)
student_model
summary(student_model)
## 2 -- predictions and training-set fit
student.train$pred <- predict(student_model, newdata = student.train)
student.test.nolabel$pred <- predict(student_model, newdata = student.test.nolabel)
calcRMSE(student.train$G3, student.train$pred)
calcR2(student.train$G3, student.train$pred)
## 4 -- add quadratic terms for studytime and health
student_model <- lm(G3 ~ . + I(studytime^2)+I(health^2), student.train)
student.train$pred <- predict(student_model, newdata = student.train)
calcRMSE(student.train$G3, student.train$pred)
calcR2(student.train$G3, student.train$pred)
## 5 -- model with a hand-picked subset of predictors
student_model <- lm(G3 ~ school + address + famsize + Pstatus + Medu + Fedu + Mjob + Fjob + reason
                    + guardian + traveltime + studytime + failures + schoolsup + famsup + paid + nursery
                    + higher + internet + romantic + famrel + freetime + goout + Dalc + Walc + health
                    + absences + class, student.train)
student.train$pred <- predict(student_model, newdata = student.train)
calcRMSE(student.train$G3, student.train$pred)
calcR2(student.train$G3, student.train$pred)
## 6 -- final model (quadratic terms); save the test predictions
student_model <- lm(G3 ~ . + I(studytime^2)+I(health^2), student.train)
student.train$pred <- predict(student_model, newdata = student.train)
student.test.nolabel$pred <- predict(student_model, newdata = student.test.nolabel)
pred_grade_test <- student.test.nolabel$pred
save(pred_grade_test, file="st21400022.RData")
# Part 2 -- logistic regression on the credit-default data
## 1 -- fit and score
fmla <- "default.payment.next.month~."
credit_model <- glm(fmla, data=credit_train, family = binomial(link='logit'))
credit_model
summary(credit_model)
coefficients(credit_model)
credit_train$pred <- predict(credit_model, newdata = credit_train, type="response")
credit_test$pred <- predict(credit_model, newdata = credit_test, type="response")
## 2 -- training-set AUC
calAUC(credit_train$pred, credit_train$default.payment.next.month)
## 3 -- precision / recall / accuracy at threshold 0.5
threshold <- 0.5
conf.credit.train <- table(pred=credit_train$pred>threshold, actual=credit_train$default.payment.next.month)
credit_train_prec <- conf.credit.train[2,2]/sum(conf.credit.train[2,])
credit_train_prec
credit_train_rec <- conf.credit.train[2,2]/sum(conf.credit.train[,2])
credit_train_rec
credit_train_acc <- sum(diag(conf.credit.train)) / sum(conf.credit.train)
credit_train_acc
# Recall as a function of the classification threshold.
thres_list <- seq(0.01, 0.50, 0.01)
rec_list <- sapply(thres_list, getRecall)
plot(x=thres_list, rec_list, xlab="Threshold", ylab="Recall", type="l")
# NOTE(review): the next line looks like a bug -- it overwrites thres_list
# (the threshold grid) with 0.01, but the confusion matrix below still uses
# `threshold` (0.5). `threshold <- 0.01` was probably intended.
thres_list <- 0.01
conf.credit.train <- table(pred=credit_train$pred>threshold, actual=credit_train$default.payment.next.month)
credit_train_prec <- conf.credit.train[2,2]/sum(conf.credit.train[2,])
credit_train_prec
credit_train_rec <- conf.credit.train[2,2]/sum(conf.credit.train[,2])
credit_train_rec
credit_train_acc <- sum(diag(conf.credit.train)) / sum(conf.credit.train)
credit_train_acc
## 4 -- add quadratic terms to the logistic model and re-evaluate
fmla <- "default.payment.next.month~.+I(AGE^2)+I(EDUCATION^2)+I(MARRIAGE^2)+I(PAY_AMT2^2)+I(PAY_AMT3^2)"
credit_model <- glm(fmla, data=credit_train, family = binomial(link='logit'))
credit_train$pred <- predict(credit_model, newdata = credit_train, type="response")
credit_test$pred <- predict(credit_model, newdata = credit_test, type="response")
calAUC(credit_train$pred, credit_train$default.payment.next.month)
conf.credit.train <- table(pred=credit_train$pred>threshold, actual=credit_train$default.payment.next.month)
credit_train_prec <- conf.credit.train[2,2]/sum(conf.credit.train[2,])
credit_train_prec
credit_train_rec <- conf.credit.train[2,2]/sum(conf.credit.train[,2])
credit_train_rec
credit_train_acc <- sum(diag(conf.credit.train)) / sum(conf.credit.train)
credit_train_acc
## 5 -- save test-set probabilities and class predictions
prob_default_test <- credit_test$pred
pred_default_test <- credit_test$pred > threshold
save(prob_default_test, pred_grade_test, pred_default_test, file="st21400022.RData")
|
ff8d8c07aba3eedde3dfa983d750f706218acf35
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ecoval/examples/msk.hydrol.2011.create.Rd.R
|
4bd26548234aaa5ed3912e741742ab6fe47e6b3a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 283
|
r
|
msk.hydrol.2011.create.Rd.R
|
# Extracted example from the ecoval package documentation.
library(ecoval)
### Name: msk.hydrol.2011.create
### Title: Creates a Value Function for River Hydrology
### Aliases: msk.hydrol.2011.create
### ** Examples
# Build the river-hydrology value function with default labels and plot it.
hydrol <- msk.hydrol.2011.create()
plot(hydrol)
# The same value function with German labels.
hydrol.german <- msk.hydrol.2011.create("Deutsch")
plot(hydrol.german)
|
3fa203792e918e0b97f5469b8dfe5e7a63d03719
|
f5071dae06aa2e84a6514b3642b6890405bdf38c
|
/Code/Ejercicio6/Tukey.R
|
c60f6c6e7defd47e68081885559d1c1d876f3343
|
[] |
no_license
|
CristianPachacama/DisenioExperimental
|
2be6075550474a39eea9a24898672912fe45a706
|
9b4b937a4234c1b46ba2f1e844342b3e69feda70
|
refs/heads/master
| 2020-04-12T09:16:52.522685
| 2018-12-19T07:36:18
| 2018-12-19T07:36:18
| 162,397,365
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 285
|
r
|
Tukey.R
|
# Run the ANOVA script, then perform Tukey HSD post-hoc comparisons for each
# factor only when the ANOVA F-test rejects at the 5% level.
source("Code/Ejercicio6/Anovas.R",local = TRUE)
# First p-value of the ANOVA table produced by Anovas.R (object `Anv`).
p_valor=Anv$`Pr(>F)`[1]
if(p_valor<0.05){
  verif = "rechaza"
  # Pairwise comparisons for each factor of the fitted model `modelo`
  # (defined in Anovas.R).
  tuk = TukeyHSD(x=aov(modelo), 'Parte')
  tuk2 = TukeyHSD(x=aov(modelo), 'Operador')
  tuk3 = TukeyHSD(x=aov(modelo), 'Ensayo')
}else{
  verif = "no_rechaza"
  # NOTE(review): tuk/tuk2/tuk3 stay undefined on this branch; downstream
  # code must check `verif` before using them.
}
# return(tuk)
|
4bf48eb648ccf8d008a6e8e10fb761ff94e94f2f
|
6f3c9d73dc7bb0e22a702cd6856dc550c105998e
|
/hybrid_paper/illustration_params.R
|
ee0e734bb0bc7585e86015ec4bd2cdf4ed49bb91
|
[] |
no_license
|
kapelner/GreedyExperimentalDesign
|
00862ce598d6b5676fc8c88dbbeeb8b214394751
|
565db6345e18d29c228a1ae5ec66abf7241b8581
|
refs/heads/master
| 2023-07-25T01:48:05.242114
| 2023-07-11T04:46:36
| 2023-07-11T04:46:36
| 29,105,366
| 1
| 2
| null | 2020-03-13T18:55:02
| 2015-01-11T20:42:45
|
R
|
UTF-8
|
R
| false
| false
| 225
|
r
|
illustration_params.R
|
# Canonical ordering of the simulation model codes used in plots/tables.
model_codes_properly_ordered = c("Z", "L", "LsNL", "LNL", "NL", "HL", "HLNL", "HNL")
# Canonical ordering of the experimental design levels.
design_levels_properly_ordered = c("BCRD", "R", "G", "M", "MR", "MG")
# One plot colour per design level, in the same order as above.
manual_colors = c("black", "purple", "red", "green", "blue", "orange")
|
b807c0530ce7ea09b4ac2cd032cb6274dbb93e94
|
99f23e5d2f7fdff9e80f0ad774d835b4b6d0ce53
|
/ThesisScriptNouse.R
|
ce57db7f328761adb1a2c539298e1c731ad9f164
|
[] |
no_license
|
krishzinx/MastersThesis
|
da1577ca4b9f288b294dc7b760c0f345851109d0
|
aba0f840ee1aa910939031a8d639726fca3fc382
|
refs/heads/master
| 2020-07-03T17:42:38.673114
| 2019-08-12T19:04:23
| 2019-08-12T19:04:23
| 201,991,172
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 883
|
r
|
ThesisScriptNouse.R
|
# Reads two survey exports, builds politeness feature matrices, and scores the
# politeness of every reply, line by line.
#
# NOTE(review): the original file did not parse — it contained a bare
# `$condition` argument and a Python-style `for reply in Reply_vector:` loop.
# This version is syntactically valid R with the apparent intent preserved.
# The source of the condition vector (MyData1$condition here vs.
# phone_offers$condition below) is inconsistent in the original — confirm
# which data frame actually carries the experimental condition.
MyData <- read.csv(file="C:/Users/krish/Downloads/DataCollection1.csv", header=TRUE, sep=",")
MyData
MyData1 <- read.csv(file="C:/Users/krish/Downloads/DataCollection2.csv", header=TRUE, sep=",")

# Politeness feature matrices: comments (training side) and replies (holdout).
polite.data <- politeness::politeness(MyData1$Comment, parser="spacy")
polite.holdout <- politeness::politeness(MyData1$Replies, parser="spacy")

# Project the holdout replies onto the politeness dimension learned from the
# comments and their condition labels.
project <- politeness::politenessProjection(polite.data,
                                            MyData1$condition,
                                            polite.holdout)

# Score each line of every row's Replies field individually, then report the
# per-row scores and their mean.
for (row in seq_len(nrow(MyData1))) {
  Replies <- MyData1[row, "Replies"]
  # stri_split_lines returns a list; take the vector for this single string.
  Reply_vector <- stringi::stri_split_lines(Replies, omit_empty = FALSE)[[1]]
  pol_score <- numeric(0)
  for (reply in Reply_vector) {
    # NOTE(review): the original overwrote pol_score on every iteration;
    # collecting the scores makes the mean() below meaningful.
    score <- politeness::politenessProjection(polite.data,
                                              phone_offers$condition,
                                              reply)
    pol_score <- c(pol_score, score)
  }
  print(pol_score)
  print(mean(pol_score))
}
|
c16109a2129752cf75985cce98e670edda86b774
|
7797ee6832f469768dfaffbf63d7dae6bf3426ae
|
/R/EUAC.R
|
33454b7d35332c0422dfe58461247f24c58fd2d9
|
[] |
no_license
|
AmateurECE/economics
|
27dd67e0a04bdfd999d5e124d2114230ab518358
|
37109c8a8376627e840dc061b6fc802a64e8ff30
|
refs/heads/master
| 2020-05-02T23:39:26.440627
| 2020-02-04T19:02:35
| 2020-02-04T19:02:35
| 178,286,376
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
EUAC.R
|
# Equivalent Uniform Annual Cost.
#
# Annualizes the initial cost (capital recovery via annuityPByA) and the
# salvage value (sinking fund via annuityFByA), then adds the recurring
# annual cost. The annuity helpers are defined elsewhere in this package.
#
# ic - initial cost
# sv - salvage value at the end of the horizon
# ac - recurring annual cost
# i  - interest rate per period
# yr - number of periods (years)
EUAC <-
function(ic, sv, ac, i, yr) {
    annualizedInitial <- ic / annuityPByA(interestRate=i, maturity=yr)
    annualizedSalvage <- sv / annuityFByA(interestRate=i, maturity=yr)
    ac + (annualizedInitial - annualizedSalvage)
}
|
76e9ce83ea3e235d4df61eabb615631072e1745f
|
d801bed8afe736bd3f8f616dc3043fbee58df1e7
|
/GOVHACK/Shiny Apps/disease/server.R
|
e36034e733477d7ebd29d51f61d1f29ac58789a7
|
[] |
no_license
|
cyckuan/2gather
|
bba3018c0c8e8091c40fb8d6007779faf0931980
|
8735dadbf908bd1c805e81bdbba7451e75b01069
|
refs/heads/master
| 2021-01-01T20:08:39.688478
| 2017-07-30T07:11:48
| 2017-07-30T07:11:48
| 98,775,479
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 494
|
r
|
server.R
|
# Shiny server for the disease dashboard: renders a time series of the
# local disease score read from data/disease.csv.
library(shiny)
library(ggplot2)
# Define server logic required to draw a histogram
# (NOTE(review): despite the comment above, this actually draws a
# line-and-point time series, not a histogram).
disease <- read.csv("data/disease.csv")
shinyServer(function(input, output) {
output$distPlot <- renderPlot({
# Parse the date column (read.csv delivers it as character/factor).
disease$date <- as.Date(disease$date)
# Blue line with open circular markers; group=1 forces a single series.
ggplot(data=disease, aes(x=date, y=disease_score, group=1)) +
xlab("Date") + ylab("Local Disease Score") +
geom_line(colour="blue", size=1.5) +
geom_point(colour="blue", size=4, shape=21, fill="white")
})
})
|
4ead94ddd306115509bf9c94ae2d352b7b175cce
|
d237c1376cf0e418d9fe73506c8952460fe9d214
|
/Data/Tidying_Case_Study_1.R
|
78d473fcf92846277e312cbfb5560441952bb013
|
[] |
no_license
|
pefthimion/Case-Study-1
|
b9b9690feebf2c2489ce2cb2f56f7862c632916f
|
d2e42209aa54bfe9071d5039ba4dbb5ea3a05a10
|
refs/heads/master
| 2021-05-02T18:07:48.206553
| 2016-11-03T03:41:50
| 2016-11-03T03:41:50
| 72,381,808
| 0
| 0
| null | 2016-11-01T23:05:45
| 2016-10-30T23:25:11
|
R
|
UTF-8
|
R
| false
| false
| 1,680
|
r
|
Tidying_Case_Study_1.R
|
# Phillip Efthimion
# Intro to Data Science
# Case Study 1
# We will be cleaning data with this code.
# First we make copies of the original downloaded files so we do not change
# the originals; `GDP` and `Edu` must already be loaded in the workspace.
GDP2 <- GDP
Edu2 <- Edu
# Remove blank rows and columns from the GDP data:
# the first 4 rows are all blank, then drop the empty columns.
GDPdata <- GDP2[5:330,]
GDPdata <- GDPdata[,1:5]
GDPdata <- GDPdata[,-3]
# Name the columns
names(GDPdata) <- c("CountryCode", "Ranking", "Table.Name", "GDP")
# Remove NAs and empty values from every required column. This also removes
# listed countries that do not have a GDP or a ranking.
# BUG FIX: the original subset `GDPdata` (not the running result) on every
# line, so each assignment discarded the previous filters and only the final
# Ranking condition actually took effect. Filtering the running `GDPdata2`
# applies all of the conditions cumulatively, as intended.
GDPdata2 <- GDPdata
for (col in c("CountryCode", "Ranking", "Table.Name", "GDP")) {
  GDPdata2 <- GDPdata2[!is.na(GDPdata2[[col]]) & GDPdata2[[col]] != "", ]
}
# Keep the 190 ranked economies.
GDPdata2 <- GDPdata2[1:190,]
# Begin tidying the Edu2 data set:
# remove excess columns that we will not be using in our analysis.
Edu2 <- Edu2[,-5:-29]
Edu2 <- Edu2[,-4]
Edu2 <- Edu2[,-5]
Edu2 <- Edu2[,-2]
# Remove any NAs or blank values in the remaining columns (the original
# already filtered Edu2 cumulatively; this is the same logic, condensed).
for (col in c("Income.Group", "CountryCode", "Table.Name")) {
  Edu2 <- Edu2[!is.na(Edu2[[col]]) & Edu2[[col]] != "", ]
}
|
d190ccfca275c1a0bb0ed35e91ae1283522fa8b9
|
cbf97af589ec6bd7f342442e49bf54cf35294d01
|
/plot2.R
|
6458ab8776432b180a2c8506369cfdf0ff026578
|
[] |
no_license
|
Reynel6891/ExData_Plotting1
|
84cbfe6750cbc112370277e4dd84d27fc7446229
|
8539afce2ad975fe14e49c1e02dd0b4e61960d8b
|
refs/heads/master
| 2021-01-12T10:45:43.573337
| 2016-11-06T18:52:03
| 2016-11-06T18:52:03
| 72,680,310
| 0
| 0
| null | 2016-11-02T20:51:46
| 2016-11-02T20:51:45
| null |
UTF-8
|
R
| false
| false
| 436
|
r
|
plot2.R
|
## Script for Plot2
## Expects household_power_consumption.txt in the working directory.
## NOTE(review): the original had REPL `>` prompts pasted into the script
## (so it could not be sourced) and referenced undefined objects
## `table_dates` / `table_dates_active_power`; this version reads the data
## once into `table1` and uses it consistently.
table1 <- read.table("household_power_consumption.txt", sep = ";", header = TRUE)
## Combine the Date and Time columns into a single timestamp.
table_times <- strptime(paste(table1$Date, table1$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
## code for plot2
png("Plot2.png", width = 480, height = 480, units = "px")
plot(x = table_times, y = table1$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
75074f4d9c8b2c0c8071af14c952c158edc5721f
|
4babea068edd360a3b71cceca213e4322de9ecec
|
/pollutantmean.R
|
34c54ddeda9a819265bd5605c7a1c63b4746755d
|
[] |
no_license
|
joancardonasa/rprog-project-1
|
b4adfd9d06ab5bbb7429f5021cb0299e939e244d
|
f9e01e83264e799dfc0d3453185d690a487cd088
|
refs/heads/master
| 2016-09-10T11:08:05.294225
| 2015-09-04T15:36:09
| 2015-09-04T15:36:09
| 41,922,961
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 515
|
r
|
pollutantmean.R
|
# Mean of a pollutant across a set of monitor files.
#
# Reads the CSV monitor files in `directory` selected (by position in the
# sorted file listing) via `id`, and returns the mean of the requested
# pollutant column with NA values removed.
#
# directory - path to the folder containing the monitor CSV files
# pollutant - either "sulfate" or "nitrate"
# id        - integer vector of monitor ids (positions in the file list)
# Returns the mean, or (original behavior kept) the string
# "The given pollutant is not correct" for any other pollutant name.
pollutantmean <- function(directory, pollutant, id = 1:332)
{
  files <- list.files(path = directory, full.names = TRUE) # all files, sorted
  # Read only the requested monitors and bind once. The original grew the
  # data frame with rbind() inside a loop, which copies it on every
  # iteration (quadratic); lapply + do.call(rbind, ...) binds a single time.
  df <- do.call(rbind, lapply(files[id], read.csv))
  if (pollutant %in% c("sulfate", "nitrate"))
  {
    # Column lookup by name replaces the two duplicated if/else branches.
    mean(df[[pollutant]], na.rm = TRUE)
  }
  else
  {
    "The given pollutant is not correct"
  }
}
|
a9e92b212158b412bd2efe31b0fca2cb766d2596
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query58_query27_1344/query58_query27_1344.R
|
9c929b91305f6ac50b8a64b5e47efae00e03aeed
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
query58_query27_1344.R
|
7e5db1db8531c5ba2a8c2a4fbcb9c09c query58_query27_1344.qdimacs 1805 3981
|
4c35100a3ad5240892f13d5ea8fdb60a911b7497
|
4ecc3c39c7c5b4d7d72c881381b3b2af6da50edb
|
/R/sintegral.R
|
5f89e1dc52eefb21b73f82db2c7ce1614dabc8c5
|
[] |
no_license
|
cran/Bolstad2
|
e1a089ef39275817172e106e80efb2dd913d6b1f
|
34d0854c3b8af01aac374d6911975ebecdb293b1
|
refs/heads/master
| 2022-04-30T06:26:54.804699
| 2022-04-11T08:22:32
| 2022-04-11T08:22:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,809
|
r
|
sintegral.R
|
#' Numerical integration using Simpson's Rule
#'
#' Takes a vector of \eqn{x} values and a corresponding set of positive
#' \eqn{f(x)=y} values and evaluates the area under the curve
#' \deqn{\int{f(x)dx}}. The points are first resampled onto an equally
#' spaced grid by linear interpolation, then Simpson's rule is applied
#' panel by panel.
#'
#' @param x a sequence of \eqn{x} values.
#' @param fx the value of the function to be integrated at \eqn{x}.
#' @param n.pts the number of points to be used in the integration.
#' @return returns a list with the following elements \item{x}{the x-values at
#' which the integral has been evaluated} \item{y}{the cummulative integral}
#' \item{int}{the value of the integral over the whole range}
#' @keywords misc
#' @examples
#'
#' ## integrate the normal density from -3 to 3
#' x=seq(-3,3,length=100)
#' fx=dnorm(x)
#' estimate=sintegral(x,fx)$int
#' true.val=diff(pnorm(c(-3,3)))
#' cat(paste('Absolute error :',round(abs(estimate-true.val),7),'\n'))
#' cat(paste('Relative percentage error :', 100*round((abs(estimate-true.val)/true.val),6),'%\n'))
#'
#' @export sintegral
sintegral = function(x, fx, n.pts = 256) {
    ## x and fx must describe the same set of points.
    if (length(x) != length(fx))
        stop("Unequal input vector lengths")
    ## Use at least 64 panels and never fewer panels than input points
    ## (single expression equivalent to the original pair of adjustments).
    n.pts = max(n.pts, 64, length(x))
    ## Linear interpolation onto 2*n.pts+1 equally spaced abscissae so that
    ## every Simpson panel has a left end, a midpoint and a right end.
    grid = approx(x, fx, n = 2 * n.pts + 1)
    h = diff(grid$x)[1]
    k = 1:n.pts
    ## Per-panel Simpson areas: h/3 * (f(left) + 4 f(mid) + f(right)).
    panels = h * (grid$y[2 * k - 1] + 4 * grid$y[2 * k] + grid$y[2 * k + 1])/3
    list(x = grid$x[2 * k], y = cumsum(panels), int = sum(panels))
}
|
45b09500b4b075787ef2896b63c8acbfe4dfe4ce
|
f701867ae202db12d82e82083303ff3f926516ab
|
/server.R
|
7c2cbd31206996fab7ceaf38ec91c54f41724711
|
[] |
no_license
|
yuejuny/DataProduct
|
c78def60f4818118c3788b8cea71bc7ea0efff34
|
2d21766f4eec329a423280776d39cf27ba126e3d
|
refs/heads/master
| 2021-01-19T09:37:50.231212
| 2017-04-10T07:08:42
| 2017-04-10T07:08:42
| 87,774,356
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 663
|
r
|
server.R
|
# Shiny server for the height/weight explorer. Loads `DT` from
# HeightWeight.Rdata (NOTE(review): the .Rdata file is assumed to define a
# data frame `DT` with columns Index, Height and Weight — confirm) and
# serves a sampled scatter plot, a text summary, and a filtered histogram.
shinyServer(function(input,output){
load("HeightWeight.Rdata")
library(ggplot2)
# Scatter of a random sample of rows; input$percentage controls the sample
# size as a share of 25000 rows.
output$scatterPlot <- renderPlot({
id <- sample(DT$Index,size=input$percentage/100*25000,replace=FALSE)
df <- DT[id,]
ggplot(data=df,aes(Height,Weight))+geom_point(alpha=0.1,colour='blue')+
labs(x="Height, inch",y="Weight, pound")
})
# Echo the selected height range back to the UI.
output$text1 <- renderText({
paste("Height range:",input$HeightBand[1],"-",input$HeightBand[2],"inches.")
})
# Histogram of Weight restricted to rows within the selected height band.
output$HeightHist <- renderPlot({
id2 <- DT$Height >= input$HeightBand[1]&
DT$Height <= input$HeightBand[2]
df2 <- DT[id2,]
ggplot(data=df2,aes(Weight))+geom_histogram()
})
})
|
60162aabf492ab9814342f367ec3c3b871378086
|
40ca43dd95d960135f518185dbc81a6afa997a68
|
/MachineLearning.R
|
11b53e9c11b1fb216f1ad69108f18fd01f3d8ee3
|
[] |
no_license
|
steph-d/Machine_Learning
|
a89319658b94e8b708ac1e893b683a2c71604051
|
49e31da6e14671fa52b7797f2f1f1c2f5359bcca
|
refs/heads/master
| 2021-01-10T03:46:05.103374
| 2015-09-27T23:32:59
| 2015-09-27T23:32:59
| 43,268,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,329
|
r
|
MachineLearning.R
|
setwd("D:/R/R-travail/MOOC R programming march2015/8-Machine Learning")
getwd()
library(caret)
library(kernlab)
data(spam)
inTrain <- createDataPartition(y=spam$type, p=0.75, list=FALSE) # to create a training and test dataset
training <- spam[inTrain,]
testing <- spam[-inTrain,]
dim(training)
# K-folds
set.seed(32323)
folds <- createFolds(y=spam$type, k=10, list=TRUE, returnTrain=TRUE)
sapply(folds,length)
folds[[1]][1:10]
# to return the test set
set.seed(32323)
folds <- createFolds(y=spam$type, k=10, list=TRUE, returnTrain=FALSE)
sapply(folds,length)
folds[[1]][1:10]
# resampling
set.seed(32323)
folds <- createResample(y=spam$type, times=10, list=TRUE)
sapply(folds,length)
folds[[1]][1:10]
# time slices
set.seed(32323)
tme <- 1:1000 # time vector
folds <- createTimeSlices(y=tme, initialWindow=20, horizon=10) # 20 samples per window, horizon=10 is to predict the next 10 samples after the initial window
names(folds)
folds$train[[1]]
folds$test[[1]]
# training options
library(caret)
library(kernlab)
data(spam)
inTrain <- createDataPartition(y=spam$type, p=0.75, list=FALSE) # to create a training and test dataset
training <- spam[inTrain,]
testing <- spam[-inTrain,]
modelFit <- train(type~.,data=training, method="glm")
args(train.default)
args(trainControl)
# Plotting predictors
library(ISLR)
library(ggplot2)
library(caret)
data(Wage)
summary(Wage)
# get training/test sets
inTrain <- createDataPartition(y=Wage$wage, p=0.7, list=FALSE)
training <- Wage[inTrain,]
testing <- Wage[-inTrain,]
dim(training); dim(testing)
featurePlot(x=training[,c("age","education","jobclass")], y=training$wage, plot="pairs")
qplot(age,wage,data=training)
qplot(age,wage,colour=jobclass,data=training)
qq <- qplot(age,wage,colour=education,data=training)
qq + geom_smooth(method="lm", formula=y~x)
library(Hmisc)
cutWage <- cut2(training$wage, g=3)
table(cutWage)
p1 <- qplot(cutWage, age, data=training, fill=cutWage, geom=c("boxplot"))
p1
p2 <- qplot(cutWage, age, data=training, fill=cutWage, geom=c("boxplot","jitter"))
library(gridExtra)
grid.arrange(p1,p2,ncol=2)
t1 <- table(cutWage, training$jobclass)
t1
prop.table(t1,1)
qplot(wage, colour=education, data=training, geom="density")
# basic preprocessing
library(caret)
library(kernlab)
data(spam)
inTrain <- createDataPartition(y=spam$type, p=0.75, list=FALSE) # to create a training and test dataset
training <- spam[inTrain,]
testing <- spam[-inTrain,]
hist(training$capitalAve, main="", xlab="ave. capital run length")
mean(training$capitalAve)
sd(training$capitalAve)
trainCapAve <- training$capitalAve
trainCapAveS <- (trainCapAve - mean(trainCapAve))/sd(trainCapAve)
mean(trainCapAveS)
sd(trainCapAveS)
# on the test set:
testCapAve <- testing$capitalAve
testCapAveS <- (testCapAve - mean(trainCapAve))/sd(trainCapAve)
mean(testCapAveS)
sd(testCapAveS)
# with preProcess:
preObj <- preProcess(training[,-58], method=c("center","scale"))
trainCapAveS <- predict(preObj, training[,-58])$capitalAve
mean(trainCapAveS)
sd(trainCapAveS)
# on the test set:
testCapAveS <- predict(preObj, testing[,-58])$capitalAve
mean(testCapAveS)
sd(testCapAveS)
# apply in train directly:
set.seed(32343)
modelFit <- train(type ~., data=training, preProcess=c("center","scale"), method="glm")
modelFit
# boxcox transformation
preObj <- preProcess(training[,-58], method=c("BoxCox"))
trainCapAveS <- predict(preObj, training[,-58])$capitalAve
par(mfrow=c(1,2))
hist(trainCapAveS)
qqnorm(trainCapAveS)
# NA
set.seed(13343)
# make some values NA
training$capAve <- training$capitalAve
selectNA <- rbinom(dim(training)[1], size=1, prob=0.05)==1
training$capAve[selectNA] <- NA
# impute and standardize
preObj <- preProcess(training[,-58], method="knnImpute")
capAve <- predict(preObj, training[,-58])$capAve
# standardize true values
capAveTruth <- training$capitalAve
capAveTruth <- (capAveTruth-mean(capAveTruth))/sd(capAveTruth)
# to see the difference between real and umpute values:
quantile(capAve - capAveTruth)
quantile((capAve-capAveTruth)[selectNA])
quantile((capAve-capAveTruth)[!selectNA])
# covariate creation
library(ISLR)
library(caret)
data(Wage)
inTrain <- createDataPartition(y=Wage$wage, p=0.7, list=FALSE)
training <- Wage[inTrain,]
testing <- Wage[-inTrain,]
table(training$jobclass)
dummies <- dummyVars(wage ~ jobclass, data=training)
head(predict(dummies, newdata=training))
nsv <- nearZeroVar(training, saveMetrics=TRUE)
nsv
library(splines)
bsBasis <- bs(training$age, df=3) # 3rd degree polynomial
head(bsBasis)
lm1 <- lm(wage~bsBasis, data=training)
par(mfrow=c(1,1))
plot(training$age, training$wage, pch=19, cex=0.5)
points(training$age, predict(lm1, newdata=training), col="red", pch=19, cex=0.5)
head(predict(bsBasis, age=testing$age))
# PCA preProcess
library(caret)
library(kernlab)
data(spam)
inTrain <- createDataPartition(y=spam$type, p=0.75, list=FALSE) # to create a training and test dataset
training <- spam[inTrain,]
testing <- spam[-inTrain,]
M <- abs(cor(training[,-58])) # 58= spam/ham
diag(M) <- 0 # diag=cor(x1 by x1)=1 so not interesting. So put = 0
which(M > 0.8, arr.ind=T)
names(spam)[c(34,32)]
plot(spam[,34], spam[,32])
X <- 0.71*training$num415 + 0.71*training$num857
Y <- 0.71*training$num415 - 0.71*training$num857
plot(X,Y)
smallSpam <- spam[,c(34,32)] # just num857 and num415
prComp <- prcomp(smallSpam)
plot(prComp$x[,1],prComp$x[,2])
prComp$rotation
typeColor <- ((spam$type=="spam")*1+1) # give 1 or 2, to have black or red
prComp <- prcomp(log10(spam[,-58]+1))
plot(prComp$x[,1], prComp$x[,2], col=typeColor,xlab="PC1",ylab="PC2")
# PCA in caret
preProc <- preProcess(log10(spam[,-58]+1), method="pca", pcaComp=2)
spamPC <- predict(preProc, log10(spam[,-58]+1))
plot(spamPC[,1], spamPC[,2], col=typeColor)
# on training set
preProc <- preProcess(log10(training[,-58]+1), method="pca", pcaComp=2)
trainPC <- predict(preProc, log10(training[,-58]+1))
modelFit <- train(training$type ~ ., method="glm", data=trainPC)
testPC <- predict(preProc, log10(testing[,-58]+1))
confusionMatrix(testing$type, predict(modelFit, testPC))
# alternative
modelFit <- train(training$type ~ ., method="glm", preProcess="pca", data=training)
confusionMatrix(testing$type, predict(modelFit, testing))
# predicting with regression
library(caret)
data(faithful)
set.seed(333)
inTrain <- createDataPartition(y=faithful$waiting, p=0.5, list=FALSE)
trainFaith <- faithful[inTrain,]
testFaith <- faithful[-inTrain,]
head(trainFaith)
plot(trainFaith$waiting, trainFaith$eruptions, pch=19, col="blue", xlab="Waiting",ylab="Duration")
lm1 <- lm(eruptions ~ waiting, data=trainFaith)
summary(lm1)
lines(trainFaith$waiting, lm1$fitted, lwd=3)
coef(lm1)[1] + coef(lm1)[2]*80
newdata <- data.frame(waiting=80)
predict(lm1, newdata)
par(mfrow=c(1,2))
plot(trainFaith$waiting, trainFaith$eruptions, pch=19, col="blue", xlab="Waiting",ylab="Duration")
lines(trainFaith$waiting, lm1$fitted, lwd=3)
plot(testFaith$waiting, testFaith$eruptions, pch=19, col="green", xlab="Waiting",ylab="Duration")
lines(testFaith$waiting, predict(lm1, newdata=testFaith), lwd=3)
par(mfrow=c(1,1))
sqrt(sum((lm1$fitted-trainFaith$eruptions)^2)) # calculate RMSE on training
sqrt(sum((predict(lm1,newdata=testFaith)-testFaith$eruptions)^2)) # calculate RMSE on test
pred1 <- predict(lm1, newdata=testFaith, interval="prediction") # we want an interval
ord <- order(testFaith$waiting) # ordering the values of test set
plot(testFaith$waiting, testFaith$eruptions, pch=19, col="blue")
matlines(testFaith$waiting[ord],pred1[ord,],type="l",col=c(1,2,2),lty=c(1,1,1),lwd=3)
modFit <- train(eruptions ~waiting, data=trainFaith, method="lm")
summary(modFit$finalModel)
# Predicting with regression Multiple Covariates
library(ISLR)
library(ggplot2)
library(caret)
data(Wage)
Wage <- subset(Wage, select=-c(logwage))
summary(Wage)
inTrain <- createDataPartition(y=Wage$wage, p=0.7, list=FALSE)
training <- Wage[inTrain,]
testing <- Wage[-inTrain,]
dim(training)
dim(testing)
featurePlot(x=training[,c("age","education","jobclass")],y=training$wage, plot="pairs")
qplot(age,wage,data=training)
qplot(age,wage,colour=jobclass, data=training)
qplot(age,wage,colour=education, data=training)
modFit <- train(wage~age+jobclass+education, method="lm", data=training)
finMod <- modFit$finalModel
print(modFit)
plot(finMod,1,pch=19,cex=0.5,col="#00000010")
qplot(finMod$fitted, finMod$residuals,colour=race,data=training)
plot(finMod$residuals,pch=19)
pred <- predict(modFit, testing)
qplot(wage,pred,colour=year,data=testing)
modFitAll <- train(wage~., method="lm", data=training)
pred <- predict(modFitAll, testing)
qplot(wage,pred,data=testing)
#-----------QUIZZZZZZ ----------------------
library(AppliedPredictiveModeling)
library(caret)
d=data(AlzheimerDisease)
names(d)
library(AppliedPredictiveModeling)
data(concrete)
head(mixtures)
library(caret)
set.seed(1000)
inTrain = createDataPartition(mixtures$CompressiveStrength, p = 3/4)[[1]]
training = mixtures[ inTrain,]
testing = mixtures[-inTrain,]
summary(concrete)
library(caret)
library(AppliedPredictiveModeling)
set.seed(3433)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
data <- training[,grep("^IL", names(training))]
names(data)
preProc <- preProcess(data, method="pca", thresh=0.91)
names(data2)
data2 <- cbind(training$diagnosis,data)
names(data2)[1] <- "diagnosis"
model1 <- train(diagnosis ~ ., method="glm", data=data2)
confusionMatrix(testing$diagnosis, predict(model1, testing))
# Acc modelnon PCA = 0.6463
preProc <- preProcess(data2[,-1], method="pca", thresh=0.81)
trainPC <- predict(preProc, data2[,-1])
model2 <- train(diagnosis~., method="glm", data=trainPC)
datatest <- testing[,grep("^IL", names(testing))]
datatest <- cbind(testing$diagnosis,datatest)
names(datatest)[1] <- "diagnosis"
testPC <- predict(preProc, datatest[,-1])
confusionMatrix(testing$diagnosis, predict(model2, testPC))
names(testing)
data(iris)
library(ggplot2)
names(iris)
table(iris$Species)
# training / test sets
inTrain <- createDataPartition(y=iris$Species, p=0.7, list=FALSE)
training <- iris[inTrain,]
testing <- iris[-inTrain,]
dim(training); dim(testing)
qplot(Petal.Width, Sepal.Width, colour=Species, data=training)
library(rpart)
modFit <- train(Species ~ ., method="rpart", data=training)
print(modFit$finalModel)
par(mar=c(2,2,2,2))
plot(modFit$finalModel, uniform=TRUE, main="Classification Tree")
text(modFit$finalModel, use.n=TRUE, all=TRUE, cex=0.5)
library(rattle)
fancyRpartPlot(modFit$finalModel)
predict(modFit, newdata=testing)
# Bagging
library(ElemStatLearn)
data(ozone)
ozone <- ozone[order(ozone$ozone),]
head(ozone)
ll <- matrix(NA, nrow=10, ncol=155)
for(i in 1:10){
ss <- sample(1:dim(ozone)[1],replace=TRUE)
ozone0 <- ozone[ss,]
ozone0 <- ozone0[order(ozone0$ozone),]
loess0 <- loess(temperature ~ ozone, data=ozone0, span=0.2)
ll[i,] <- predict(loess0, newdata=data.frame(ozone=1:155))
}
plot(ozone$ozone, ozone$temperature, pch=19, cex=0.5)
for(i in 1:10){
lines(1:155, ll[i,],col="grey", lwd=2)
}
lines(1:155, apply(ll,2,mean),col="red",lwd=2)
predictors <- data.frame(ozone=ozone$ozone)
temperature <- ozone$temperature
treebag <- bag(predictors, temperature, B=10, bagControl = bagControl(fit=ctreeBag$fit, predict=ctreeBag$pred, aggregate=ctreeBag$aggregate))
plot(ozone$ozone, temperature, col="lightgrey", pch=19)
points(ozone$ozone, predict(treebag$fits[[1]]$fit,predictors),pch=19,col="red")
points(ozone$ozone, predict(treebag,predictors),pch=19,col="blue")
ctreeBag$fit
ctreeBag$pred
ctreeBag$aggregate
# random forests
data(iris)
library(ggplot2)
inTrain <- createDataPartition(y=iris$Species, p=0.7, list=FALSE)
training <- iris[inTrain,]
testing <- iris[-inTrain,]
modFit <- train(Species~., data=training,method="rf",prox=TRUE)
modFit
getTree(modFit$finalModel, k=2)
irisP <- classCenter(training[,c(3,4)], training$Species, modFit$finalModel$prox)
irisP <- as.data.frame(irisP)
irisP$Species <- rownames(irisP)
p <- qplot(Petal.Width, Petal.Length, col=Species, data=training)
p + geom_point(aes(x=Petal.Width, y=Petal.Length, col=Species),size=5,shape=4,data=irisP)
pred <- predict(modFit, testing)
testing$predRight <- pred==testing$Species
table(pred,testing$Species)
qplot(Petal.Width, Petal.Length, colour=predRight, data=testing, main="newdata predictions")
# boosting
library(ISLR)
data(Wage)
library(ggplot2)
library(caret)
Wage <- subset(Wage, select=-c(logwage))
inTrain <- createDataPartition(y=Wage$wage,p=0.7,list=FALSE)
training <- Wage[inTrain,]
testing <- Wage[-inTrain,]
modFit <- train(wage~., method="gbm",data=training,verbose=FALSE)
print(modFit)
qplot(predict(modFit,testing),wage,data=testing)
# model based prediction
data(iris)
library(ggplot2)
names(iris)
table(iris$Species)
inTrain <- createDataPartition(y=iris$Species, p=0.7, list=FALSE)
training <- iris[inTrain,]
testing <- iris[-inTrain,]
modlda <- train(Species ~ ., data=training, method="lda")
modnb <- train(Species ~ ., data=training, method="nb")
plda <- predict(modlda,testing)
pnb <- predict(modnb, testing)
table(plda,pnb)
equalPredictions <- (plda==pnb)
qplot(Petal.Width, Sepal.Width, colour=equalPredictions, data=testing)
# ----------------- QUIZZZZ 3 -------------------
library(AppliedPredictiveModeling)
data(segmentationOriginal)
library(caret)
?segmentationOriginal
training <- subset(segmentationOriginal, segmentationOriginal$Case == "Train")
testing <- subset(segmentationOriginal, segmentationOriginal$Case == "Test")
set.seed(125)
library(rpart)
mod1 <- train(Class~., method="rpart", data=training)
print(mod1$finalModel)
plot(mod1$finalModel, uniform=TRUE)
text(mod1$finalModel, use.n=TRUE, all=TRUE, cex=0.5)
# Answer 1: Total=23000 => PS / Total=50000+fiber=10 => WS / Total = 57000 + Fiber=8 ==> PS / Fiber => not possible
# Answer 2: K small: more bias, less variance
# The bias is larger and the variance is smaller. Under leave one out cross validation K is equal to the sample size.
library(pgmm)
data(olive)
olive = olive[,-1]
head(olive)
mod2 <- train(Area~., method="rpart", data=olive)
print(mod2$finalModel)
plot(mod2$finalModel, uniform=TRUE)
text(mod2$finalModel, use.n=TRUE, all=TRUE, cex=0.5)
predict(mod2, newdata= as.data.frame(t(colMeans(olive))))
# Answer 3: 2.783. It is strange because Area should be a qualitative variable - but tree is reporting the average value of Area as a numeric variable in the leaf predicted for newdata
library(ElemStatLearn)
data(SAheart)
set.seed(8484)
train = sample(1:dim(SAheart)[1],size=dim(SAheart)[1]/2,replace=F)
trainSA = SAheart[train,]
testSA = SAheart[-train,]
set.seed(13234)
mod4 <- glm(chd~age+alcohol+obesity+tobacco+typea+ldl, family="binomial", data=trainSA)
summary(mod4)
missClass = function(values,prediction){sum(((prediction > 0.5)*1) != values)/length(values)}
missClass(trainSA$chd, predict(mod4, newdata=trainSA, type="response"))
missClass(testSA$chd, predict(mod4, newdata=testSA, type="response"))
# Answer 4: train=0.27 and test=0.31
library(ElemStatLearn)
data(vowel.train)
data(vowel.test)
head(vowel.test)
vowel.train$y <- as.factor(vowel.train$y)
vowel.test$y <- as.factor(vowel.test$y)
set.seed(33833)
mod5 <- train(y~., data=vowel.train, method="rf", prox=TRUE)
varImp(mod5)
|
4f55c1c64805e107e89411a7d7ed8af19b181142
|
6a8d92fd37f50e71f0067cc04b7aa1620f152305
|
/R/filterOut.R
|
f3a8f4c01401b3f9ba3311a79e3cfbed1a90fc01
|
[
"MIT"
] |
permissive
|
jtheorell/flowSpecs
|
81c119cc345fe35d7ef46a2f154f6945e2a941b4
|
d18e16b3e48f705cc895b3a56ca1ba0bff0b7749
|
refs/heads/master
| 2023-04-13T18:03:59.902738
| 2023-04-05T14:05:59
| 2023-04-05T14:05:59
| 208,816,662
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,051
|
r
|
filterOut.R
|
# Select events based on filter
#
# Subsets a flow object to the events inside (or outside) a binary filter
# channel, where events inside the filter carry the value 1 in that channel
# and events outside it carry 0.
#
# @param flowObj The fcs object to be subsetted. Both flowFrames and flowSets
# are accepted.
# @param filterName The name of the filter variable in the flow object to be
# used for the subsetting.
# @param withinFilter Should the events within or outside of the filter be
# selected?
# @return The flowObj, now smaller, as it only contains the data within,
# or outside of, the filterName filter.
#' @importFrom flowCore fsApply
filterOut <- function(flowObj, filterName, withinFilter = TRUE) {
    # Value to match in the filter channel: 1 = inside, 0 = outside.
    gateVal <- if (withinFilter) 1 else 0
    # Row indices of the events to keep for a single flowFrame.
    keepRows <- function(frame) {
        which(exprs(frame[, filterName])[, 1] == gateVal)
    }
    if (inherits(flowObj, "flowSet")) {
        return(fsApply(flowObj, function(frame) {
            frame[keepRows(frame), ]
        }))
    }
    if (inherits(flowObj, "flowFrame")) {
        return(flowObj[keepRows(flowObj), ])
    }
    stop("The flowObj needs to be either a flowSet or a flowFrame")
}
|
8531fac716088264d86e4ec75bbad9fef13ed01a
|
7d9e74e8449aa58a1ea9c06f77e075748b1a6d98
|
/R/pal_lancet_custom.R
|
f7f7878aa7926b594ad5a171273f5a24ea502d93
|
[] |
no_license
|
kuppal2/xmsPANDA
|
7bb7ca6f7aacb7d95a559fdabef12690189c6e47
|
3f90abe24892e82f99dcc290b41e860f3354ca1a
|
refs/heads/master
| 2023-05-05T21:01:38.557988
| 2021-05-14T22:17:15
| 2021-05-14T22:17:15
| 268,897,144
| 1
| 9
| null | 2021-03-02T07:24:22
| 2020-06-02T20:04:48
|
R
|
UTF-8
|
R
| false
| false
| 679
|
r
|
pal_lancet_custom.R
|
# Custom Lancet colour palette generator.
#
# Builds a manual palette function over the Lancet Oncology colours with
# optional alpha transparency (relies on scales::manual_pal being available,
# as in the original).
#
# palette - palette name; only "lanonc" is available.
# alpha   - opacity in (0, 1]; 1 is fully opaque.
# Returns a palette function yielding hex colour strings.
pal_lancet_custom <-
function (palette = c("lanonc"), alpha = 1)
{
    palette <- match.arg(palette)
    # BUG FIX: the original used the vectorized `|` inside `if`; the scalar
    # short-circuit `||` is correct here (and `|` errors for length > 1
    # conditions on R >= 4.3 via the if() check).
    if (alpha > 1L || alpha <= 0L)
        stop("alpha must be in (0, 1]")
    raw_cols <- c(
        "CongressBlue" = "#00468B", "Red" = "#ED0000",
        "Apple" = "#42B540", "BondiBlue" = "#0099B4",
        "TrendyPink" = "#925E9F", "MonaLisa" = "#FDAF91",
        "Carmine" = "#AD002A", "Edward" = "#ADB6B6",
        "CodGray" = "#1B1919"
    )
    # Re-encode each colour with the requested alpha channel applied.
    raw_cols_rgb <- col2rgb(raw_cols)
    alpha_cols <- rgb(raw_cols_rgb[1L, ], raw_cols_rgb[2L, ],
        raw_cols_rgb[3L, ], alpha = alpha * 255L, names = names(raw_cols),
        maxColorValue = 255L)
    manual_pal(unname(alpha_cols))
}
|
c61faf12cdd91ae5ce680eabbb54b27dd8ed791f
|
2bb7ad3c5739373511421b4476eb2b0955e73b85
|
/plot4.R
|
e87749c409289faf8246d54223ab325b857b19cd
|
[] |
no_license
|
ShannonHolgate/ExData_Plotting1
|
267071ad866abbe95cda848659faa91759ab2eef
|
59a4359d4591006abfde9eee366f074004afa0b4
|
refs/heads/master
| 2021-01-19T23:41:33.885246
| 2015-04-09T20:31:31
| 2015-04-09T20:31:31
| 33,690,317
| 0
| 0
| null | 2015-04-09T20:12:55
| 2015-04-09T20:12:55
| null |
UTF-8
|
R
| false
| false
| 945
|
r
|
plot4.R
|
## Retrieve the shared data setup functions
## (setup_files()/setup_data() are defined in plot_data_setup.R; presumably
## they obtain and load the power-consumption data — confirm there)
source('plot_data_setup.R')
## Setup the initial data
setup_files()
chart_data <- setup_data()
## Build up the final 2x2 grid of plots to show relationships over time
png(filename="plot4.png", width = 480, height = 480)
par(mfrow=c(2,2))
## Top-left: global active power
plot(Global_active_power ~ Date_time, chart_data, type="l", ylab="Global Active Power (Kilowatts)", xlab="")
## Top-right: voltage
plot(Voltage ~ Date_time, chart_data, type="l", ylab="Voltage", xlab="datetime")
## Bottom-left: the three sub-metering series overlaid on an empty frame
plot(Sub_metering_1 ~ Date_time,chart_data, type="n", ylab="Energy sub metering", xlab="")
points(Sub_metering_1 ~ Date_time,chart_data, type="l", col="black")
points(Sub_metering_2 ~ Date_time,chart_data, type="l", col="red")
points(Sub_metering_3 ~ Date_time,chart_data, type="l", col="blue")
legend("topright",lty=1, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),col=c("black", "red", "blue") )
## Bottom-right: global reactive power
plot(Global_reactive_power ~ Date_time, chart_data, type="l")
dev.off()
|
5b8da558b982beb37659503e07bf3bb694edee9d
|
f81ac43a1d02013a9cb9eebc2a7d92da4cae9169
|
/man/criteria.Rd
|
acc87d16a5de4949c530172cb081e83e4de9c8e2
|
[] |
no_license
|
gdemin/expss
|
67d7df59bd4dad2287f49403741840598e01f4a6
|
668d7bace676b555cb34d5e0d633fad516c0f19b
|
refs/heads/master
| 2023-08-31T03:27:40.220828
| 2023-07-16T21:41:53
| 2023-07-16T21:41:53
| 31,271,628
| 83
| 15
| null | 2022-11-02T18:53:17
| 2015-02-24T17:16:42
|
R
|
UTF-8
|
R
| false
| true
| 6,908
|
rd
|
criteria.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/criteria_functions.R
\name{criteria}
\alias{criteria}
\alias{as.criterion}
\alias{is.criterion}
\alias{equals}
\alias{eq}
\alias{not_equals}
\alias{ne}
\alias{neq}
\alias{less}
\alias{lt}
\alias{less_or_equal}
\alias{le}
\alias{lte}
\alias{greater}
\alias{gt}
\alias{greater_or_equal}
\alias{ge}
\alias{gte}
\alias{thru}
\alias{\%thru\%}
\alias{when}
\alias{is_max}
\alias{is_min}
\alias{contains}
\alias{like}
\alias{fixed}
\alias{perl}
\alias{regex}
\alias{has_label}
\alias{from}
\alias{to}
\alias{items}
\alias{not_na}
\alias{is_na}
\alias{other}
\alias{and}
\alias{or}
\alias{not}
\title{Criteria functions}
\usage{
as.criterion(crit)
is.criterion(x)
equals(x)
not_equals(x)
less(x)
less_or_equal(x)
greater(x)
greater_or_equal(x)
thru(lower, upper)
lower \%thru\% upper
when(x)
is_max(x)
is_min(x)
contains(
pattern,
ignore.case = FALSE,
perl = FALSE,
fixed = TRUE,
useBytes = FALSE
)
like(pattern)
fixed(
pattern,
ignore.case = FALSE,
perl = FALSE,
fixed = TRUE,
useBytes = FALSE
)
perl(
pattern,
ignore.case = FALSE,
perl = TRUE,
fixed = FALSE,
useBytes = FALSE
)
regex(
pattern,
ignore.case = FALSE,
perl = FALSE,
fixed = FALSE,
useBytes = FALSE
)
has_label(x)
from(x)
to(x)
items(...)
not_na(x)
is_na(x)
other(x)
and(...)
or(...)
not(x)
}
\arguments{
\item{crit}{vector of values/function which returns logical or logical vector. It will be
converted to function of class criterion.}
\item{x}{vector}
\item{lower}{vector/single value - lower bound of interval}
\item{upper}{vector/single value - upper bound of interval}
\item{pattern}{character string containing a regular expression (or character
string for \code{'fixed'}) to be matched in the given character vector.
Coerced by as.character to a character string if possible.}
\item{ignore.case}{logical see \link[base:grep]{grepl}}
\item{perl}{logical see \link[base:grep]{grepl}}
\item{fixed}{logical see \link[base:grep]{grepl}}
\item{useBytes}{logical see \link[base:grep]{grepl}}
\item{...}{numeric indexes of desired items for items, logical vectors or criteria for boolean functions.}
}
\value{
function of class 'criterion' which tests its argument against
condition and return logical value
}
\description{
Produce criteria which could be used in the different situations - see
'\link{recode}', '\link{na_if}', '\link{count_if}', '\link{match_row}',
'\link{\%i\%}' and etc. For example, \code{'greater(5)'} returns function
which tests whether its argument greater than five. \code{'fixed("apple")'}
returns function which tests whether its argument contains "apple". For
criteria logical operations (|, &, !, xor) are defined, e. g. you can write
something like: \code{'greater(5) | equals(1)'}.
List of functions:
\itemize{
\item{comparison criteria - \code{'equals'}, \code{'greater'} and etc.}{ return
functions which compare its argument against value.}
\item{\code{'thru'}}{ checks whether a value is inside interval.
\code{'thru(0,1)'} is equivalent to \code{'x>=0 & x<=1'}}
\item{\code{'\%thru\%'}}{ is infix version of \code{'thru'}, e. g. \code{'0
\%thru\% 1'}}
\item{\code{'is_max'} and \code{'is_min'}}{ return TRUE where the vector value
is equal to the maximum or minimum.}
\item{\code{'contains'}}{ searches for the pattern in the strings. By default,
it works with fixed patterns rather than regular expressions. For details
about its arguments see \link[base:grep]{grepl}}
\item{\code{'like'}}{ searches for the Excel-style pattern in the strings. You
can use wildcards: '*' means any number of symbols, '?' means single symbol.
Case insensitive.}
\item{\code{'fixed'}}{ alias for contains.}
\item{\code{'perl'}}{ such as \code{'contains'} but the pattern is perl-compatible
regular expression (\code{'perl = TRUE'}). For details see \link[base:grep]{grepl}}
\item{\code{'regex'}}{ use POSIX 1003.2 extended regular expressions
(\code{'fixed = FALSE'}). For details see \link[base:grep]{grepl}}
\item{\code{'has_label'}}{ searches values which have supplied label(-s). We
can used criteria as an argument for 'has_label'.}
\item{\code{'to'}}{ returns function which gives TRUE for all elements of
vector before the first occurrence of \code{'x'} and for \code{'x'}.}
\item{\code{'from'}}{ returns function which gives TRUE for all elements of
vector after the first occurrence of \code{'x'} and for \code{'x'}.}
\item{\code{'not_na'}}{ returns TRUE for all non-NA vector elements.}
\item{\code{'other'}}{ returns TRUE for all vector elements. It is intended
for usage with \code{'recode'}.}
\item{\code{'items'}}{ returns TRUE for the vector elements with the given
sequential numbers.}
\item{\code{'and'}, \code{'or'}, \code{'not'}}{ are spreadsheet-style boolean functions.}
}
Shortcuts for comparison criteria:
\itemize{
\item{'equals'}{ - \code{'eq'}}
\item{'not_equals'}{ - \code{'neq'}, \code{'ne'}}
\item{'greater'}{ - \code{'gt'}}
\item{'greater_or_equal'}{ - \code{'gte'}, \code{'ge'}}
\item{'less'}{ - \code{'lt'}}
\item{'less_or_equal'}{ - \code{'lte'}, \code{'le'}}
}
}
\examples{
# operations on vector, '\%d\%' means 'diff'
1:6 \%d\% greater(4) # 1:4
1:6 \%d\% (1 | greater(4)) # 2:4
# '\%i\%' means 'intersect
1:6 \%i\% (is_min() | is_max()) # 1, 6
# with Excel-style boolean operators
1:6 \%i\% or(is_min(), is_max()) # 1, 6
letters \%i\% (contains("a") | contains("z")) # a, z
letters \%i\% perl("a|z") # a, z
letters \%i\% from("w") # w, x, y, z
letters \%i\% to("c") # a, b, c
letters \%i\% (from("b") & to("e")) # b, d, e
c(1, 2, NA, 3) \%i\% not_na() # c(1, 2, 3)
# examples with count_if
df1 = data.frame(
a=c("apples", "oranges", "peaches", "apples"),
b = c(32, 54, 75, 86)
)
count_if(greater(55), df1$b) # greater than 55 = 2
count_if(not_equals(75), df1$b) # not equals 75 = 3
count_if(greater(32) & less(86), df1$b) # greater than 32 and less than 86 = 2
count_if(and(greater(32), less(86)), df1$b) # the same result
# infix version
count_if(35 \%thru\% 80, df1$b) # greater than or equals to 35 and less than or equals to 80 = 2
# values that started on 'a'
count_if(like("a*"), df1) # 2
# the same with Perl-style regular expression
count_if(perl("^a"), df1) # 2
# count_row_if
count_row_if(perl("^a"), df1) # c(1,0,0,1)
# examples with 'n_intersect' and 'n_diff'
data(iris)
iris \%>\% n_intersect(to("Petal.Width")) # all columns up to 'Species'
# 'Sepal.Length', 'Sepal.Width' will be left
iris \%>\% n_diff(from("Petal.Length"))
# except first column
iris \%n_d\% items(1)
# 'recode' examples
qvar = c(1:20, 97, NA, NA)
recode(qvar, 1 \%thru\% 5 ~ 1, 6 \%thru\% 10 ~ 2, 11 \%thru\% hi ~ 3, other ~ 0)
# the same result
recode(qvar, 1 \%thru\% 5 ~ 1, 6 \%thru\% 10 ~ 2, greater_or_equal(11) ~ 3, other ~ 0)
}
\seealso{
\link{recode}, \link{count_if},
\link{match_row}, \link{na_if}, \link{\%i\%}
}
|
89b4f298fa1fa27fd18e6ed1f0248f612f94d94c
|
ee35c0bffcbf2a03b16bf6de8b144e8a60e0fba4
|
/parser.R
|
0fd1a6edf5f35c6ae0b6dbb06fa6e9d8a7cb848a
|
[] |
no_license
|
panios/VR_BPM
|
8be3c2ac47255c27602604b8788287d7d233fabd
|
060d88601e7b4bbc5648829b0a2df2bbc16778a5
|
refs/heads/master
| 2022-11-11T16:20:17.577815
| 2020-07-01T20:33:31
| 2020-07-01T20:33:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,908
|
r
|
parser.R
|
# parser.R — load raw per-user BPM (heart-rate) CSV logs, align timestamps,
# and attach experiment metadata.
library(plyr)
library(readr)
library(dplyr)
library(tidyverse)
# One CSV per user under data_BMP/raw; each file has two unnamed columns
# (timestamp, heart-rate value).
files <- list.files("data_BMP/raw", full.names = TRUE)
# sapply(..., simplify = FALSE) is deliberate: USE.NAMES = TRUE names each
# list element after its file path, which bind_rows(.id = "id") turns into
# the per-user id column (lapply() would leave the names empty).
# Fixed: the argument was spelled `col_name`; read_csv's argument is
# `col_names` and the original only worked via R's partial argument matching.
tbl <- sapply(files, read_csv, col_names = c("time", "value"), simplify = FALSE) %>%
bind_rows(.id = "id")
# Strip the directory prefix so the id is just "user4", "user5", ...
tbl$id <- stringr::str_replace(tbl$id, "data_BMP/raw/", "")
# Convert timezone of the recorded timestamps to local (Helsinki) time.
attr(tbl$time, "tzone") <- "Europe/Helsinki"
# Relative time: per-user sample counter (row number within each user's trace).
tbl <- tbl %>% group_by(id) %>% mutate(counter = row_number())
# Map raw file names ("user4", ...) onto plain numeric factor labels (4, ...);
# the levels vector also fixes the plotting order.
tbl$id = factor(tbl$id, levels = c('user4', 'user5', "user6", "user7",
"user8", "user9",'user10', 'user11', "user12", "user13",
"user14", "user15","user16", "user17","user18","user19", "user20",
"user28", "user29",
"user30", "user31","user32", "user33","user34","user35", "user36"),
labels = c(4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,28,29,30,31,32,33,34,35,36))
# Session metadata: start/end times of each user's experiment block.
meta_data <- read_tsv("meta_data.csv", col_names = TRUE)
meta_data$Start_time <- strptime(meta_data$Start_time, format = "%m.%d.%Y %H.%M.%S")
meta_data$End_time <- strptime(meta_data$End_time, format = "%m.%d.%Y %H.%M.%S")
# Per-user extraction --------------------------------------------------------
# For each participant: filter their trace, inspect meta_data start/end times,
# and look up the `counter` indices matching those timestamps. The numeric
# x-intercepts in the (mostly commented-out) plots below were read off these
# filter() results by hand.
# NOTE(review): all timestamp literals and vline indices are hard-coded; if
# the raw data changes they must be re-derived manually.
# Plot time series with vertical lines
user4<- tbl %>% filter(id == "4")
#start 2020-10-02 02:52:56
meta_data[ ,1:3]
user4 %>% filter(time == "2020-2-10 14:52:56")
user4 %>% filter(time == "2020-2-10 14:58:58")
# ggplot(data=user4, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(626,988),colour="#BB0000")
# Plot time series with vertical lines
user5<- tbl %>% filter(id == "5")
meta_data[ ,1:3]
user5 %>% filter(time == "2020-2-10 15:16:45")
user5 %>% filter(time == "2020-2-10 15:19:06")
# plot(user5$counter, user5$value, type='l')
# abline(v=c(448,589), lwd=2, col='red')
# ggplot(data=user5, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(448,589),colour="#BB0000")
# Plot time series with vertical lines
user6<- tbl %>% filter(id == "6")
meta_data[ ,1:3]
user6 %>% filter(time == "2020-2-10 15:37:34")
user6 %>% filter(time == "2020-2-10 15:40:22")
# plot(user6$counter, user6$value, type='l')
# abline(v=c(455,623), lwd=2, col='red')
# ggplot(data=user6, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(455,623),colour="#BB0000")
# Plot time series with vertical lines
user7<- tbl %>% filter(id == "7")
meta_data[ ,1:3]
user7 %>% filter(time == "2020-2-10 15:57:48")
user7 %>% filter(time == "2020-2-10 15:59:10")
# plot(user7$counter, user7$value, type='l')
# abline(v=c(455,623), lwd=2, col='red')
# ggplot(data=user7, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(448,530),colour="#BB0000")
# Plot time series with vertical lines
user8<- tbl %>% filter(id == "8")
meta_data[ ,1:3]
user8 %>% filter(time == "2020-2-10 16:23:57")
user8 %>% filter(time == "2020-2-10 16:25:49")
# plot(user8$counter, user8$value, type='l')
# abline(v=c(552,664), lwd=2, col='red')
#
# ggplot(data=user8, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(552,664),colour="#BB0000")
# Plot time series with vertical lines
user9<- tbl %>% filter(id == "9")
meta_data[ ,1:3]
user9 %>% filter(time == "2020-2-10 16:47:32")
user9 %>% filter(time == "2020-2-10 16:49:04")
# plot(user9$counter, user9$value, type='l')
# abline(v=c(598,690), lwd=2, col='red')
#
# ggplot(data=user9, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(598,690),colour="#BB0000")
# Plot time series with vertical lines
user10<- tbl %>% filter(id == "10")
meta_data[ ,1:3]
user10 %>% filter(time == "2020-2-11 12:26:54")
user10 %>% filter(time == "2020-2-11 12:28:35")
# plot(user10$counter, user10$value, type='l')
# abline(v=c(607,708), lwd=2, col='red')
# ggplot(data=user10, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(607,708),colour="#BB0000")
# Plot time series with vertical lines
user11<- tbl %>% filter(id == "11")
meta_data[ ,1:3]
user11 %>% filter(time == "2020-2-11 12:52:13")
user11 %>% filter(time == "2020-2-11 12:56:16")
# plot(user11$counter, user11$value, type='l')
# abline(v=c(532,775), lwd=2, col='red')
# ggplot(data=user11, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(532,775),colour="#BB0000")
# Plot time series with vertical lines
user12<- tbl %>% filter(id == "12")
meta_data[ ,1:3]
user12 %>% filter(time == "2020-2-11 13:20:44")
user12 %>% filter(time == "2020-2-11 13:22:35")
# plot(user12$counter, user12$value, type='l')
# abline(v=c(574,685), lwd=2, col='red')
# ggplot(data=user12, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(574,685),colour="#BB0000")
# Plot time series with vertical lines
user13<- tbl %>% filter(id == "13")
meta_data[ ,1:3]
user13 %>% filter(time == "2020-2-11 13:43:46")
user13 %>% filter(time == "2020-2-11 13:46:49")
# plot(user13$counter, user13$value, type='l')
# abline(v=c(556,739), lwd=2, col='red')
#
# ggplot(data=user13, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(556,739),colour="#BB0000")
# Plot time series with vertical lines
user14<- tbl %>% filter(id == "14")
meta_data[ ,1:3]
user14 %>% filter(time == "2020-2-11 14:07:32")
user14 %>% filter(time == "2020-2-11 14:09:20")
# plot(user14$counter, user14$value, type='l')
# abline(v=c(528,636), lwd=2, col='red')
# ggplot(data=user14, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(528,636),colour="#BB0000")
# Plot time series with vertical lines
user15<- tbl %>% filter(id == "15")
meta_data[ ,1:3]
user15 %>% filter(time == "2020-2-11 14:47:32")
user15 %>% filter(time == "2020-2-11 14:48:45")
# plot(user15$counter, user15$value, type='l')
# abline(v=c(660,733), lwd=2, col='red')
# ggplot(data=user15, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(660,733),colour="#BB0000")
# Plot time series with vertical lines
user16<- tbl %>% filter(id == "16")
meta_data[ ,1:3]
user16 %>% filter(time == "2020-2-11 15:11:40")
user16 %>% filter(time == "2020-2-11 15:14:41")
# plot(user16$counter, user16$value, type='l')
# abline(v=c(607,788), lwd=2, col='red')
#
# ggplot(data=user16, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(607,788),colour="#BB0000")
# Plot time series with vertical lines
user17<- tbl %>% filter(id == "17")
meta_data[ ,1:3]
user17 %>% filter(time == "2020-2-11 15:40:15")
user17 %>% filter(time == "2020-2-11 15:44:43")
# plot(user17$counter, user17$value, type='l')
# abline(v=c(626,894), lwd=2, col='red')
#
# # Plot time series with vertical lines
user18<- tbl %>% filter(id == "18")
meta_data[ ,1:3]
user18 %>% filter(time == "2020-2-12 14:38:18")
user18 %>% filter(time == "2020-2-12 14:44:47")
# plot(user18$counter, user18$value, type='l')
# abline(v=c(569,958), lwd=2, col='red')
user19<- tbl %>% filter(id == "19")
meta_data[ ,1:3]
user19 %>% filter(time == "2020-2-12 15:05:28")
user19 %>% filter(time == "2020-2-12 15:06:28")
# plot(user19$counter, user19$value, type='l')
# abline(v=c(415,475), lwd=2, col='red')
user20<- tbl %>% filter(id == "20")
meta_data[ ,1:3]
user20 %>% filter(time == "2020-2-12 15:42:09")
user20 %>% filter(time == "2020-2-12 15:51:27")
# plot(user20$counter, user20$value, type='l')
# abline(v=c(480,1038), lwd=2, col='red')
#
# Second batch of participants (users 28-36); their metadata lives in rows
# 18:26 of meta_data.
# NOTE(review): the labs(x = "User20", ...) below is copy-pasted across all
# of these plots — the axis labels do not match the user being plotted.
user28<- tbl %>% filter(id == "28")
meta_data[ 18:26,1:3]
user28 %>% filter(time == "2020-02-27 13:20:20")
user28 %>% filter(time == "2020-02-27 13:24:49")
#ggplot(data=user28, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(587,856),colour="#BB0000") + labs(x = "User20", y = "")
user29<- tbl %>% filter(id == "29")
meta_data[ 18:26,1:3]
user29 %>% filter(time == "2020-02-27 13:55:03")
user29 %>% filter(time == "2020-02-27 13:57:03")
#ggplot(data=user29, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(509,629),colour="#BB0000") + labs(x = "User20", y = "")
user30<- tbl %>% filter(id == "30")
meta_data[ 18:26,1:3]
user30 %>% filter(time == "2020-02-27 14:26:06")
user30 %>% filter(time == "2020-02-27 14:28:06")
ggplot(data=user30, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(610,730),colour="#BB0000") + labs(x = "User20", y = "")
# NOTE(review): the two timestamp literals below have a leading space
# (" 2020-02-27 ..."), which looks accidental — verify the filters actually
# match any rows.
user31<- tbl %>% filter(id == "31")
meta_data[ 18:26,1:3]
user31 %>% filter(time == " 2020-02-27 15:20:35")
user31 %>% filter(time == " 2020-02-27 15:21:35")
ggplot(data=user31, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(469,529),colour="#BB0000") + labs(x = "User20", y = "")
user32<- tbl %>% filter(id == "32")
meta_data[ 18:26,1:3]
user32 %>% filter(time == "2020-02-28 13:22:05")
user32 %>% filter(time == " 2020-02-28 13:23:16")
ggplot(data=user32, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(497,568),colour="#BB0000") + labs(x = "User20", y = "")
# user33<- tbl %>% filter(id == "33")
# meta_data[ 18:26,1:3]
# user33 %>% filter(time == "2020-02-28 14:15:50")
# user33 %>% filter(time == "2020-02-28 14:19:33")
# ggplot(data=user33, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(479,702),colour="#BB0000") + labs(x = "User20", y = "")
user34<- tbl %>% filter(id == "34")
meta_data[ 18:26,1:3]
user34 %>% filter(time == "2020-02-28 15:09:40")
user34 %>% filter(time == "2020-02-28 15:12:16")
ggplot(data=user34, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(428,584),colour="#BB0000") + labs(x = "User20", y = "")
user35<- tbl %>% filter(id == "35")
meta_data[ 18:26,1:3]
user35 %>% filter(time == "2020-03-03 12:41:00")
user35 %>% filter(time == "2020-03-03 12:42:08")
ggplot(data=user35, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(519,587),colour="#BB0000") + labs(x = "User20", y = "")
user36<- tbl %>% filter(id == "36")
meta_data[ 18:26,1:3]
user36 %>% filter(time == "2020-03-03 13:12:00")
user36 %>% filter(time == "2020-03-03 13:15:46")
ggplot(data=user36, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(420,646),colour="#BB0000") + labs(x = "User20", y = "")
#Multi Plot ------------------------------------------------------------------
# Build one annotated HR trace per user (vlines mark the hand-derived
# experiment window from the section above), then arrange them into two
# figure panels: control group and experimental group.
library(gridExtra)
library(ggplot2)
p4<- ggplot(data=user4, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(626,988),colour="#BB0000") + labs(x = "User1", y = "")
p5 <- ggplot(data=user5, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(448,589),colour="#BB0000") + labs(x = "User2", y = "")
p6 <- ggplot(data=user6, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(455,623),colour="#BB0000") + labs(x = "User3", y = "")
p7 <- ggplot(data=user7, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(448,530),colour="#BB0000") + labs(x = "User4", y = "")
p8 <- ggplot(data=user8, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(552,664),colour="#BB0000") + labs(x = "User5", y = "")
p9 <- ggplot(data=user9, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(598,690),colour="#BB0000") + labs(x = "User6", y = "")
p10 <- ggplot(data=user10, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(607,708),colour="#BB0000") + labs(x = "User7", y = "")
p11 <- ggplot(data=user11, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(532,775),colour="#BB0000") + labs(x = "User8", y = "")
p12 <- ggplot(data=user12, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(574,685),colour="#BB0000") + labs(x = "User9", y = "")
p13<- ggplot(data=user13, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(556,739),colour="#BB0000") + labs(x = "User10", y = "")
p14 <- ggplot(data=user14, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(528,636),colour="#BB0000") + labs(x = "User11", y = "")
p15 <- ggplot(data=user15, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(660,733),colour="#BB0000") + labs(x = "User12", y = "")
p16 <- ggplot(data=user16, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(607,788),colour="#BB0000") + labs(x = "User13", y = "")
p17 <- ggplot(data=user17, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(626,894),colour="#BB0000") + labs(x = "User14", y = "")
p18 <- ggplot(data=user18, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(569,958),colour="#BB0000") + labs(x = "User15", y = "")
p19 <- ggplot(data=user19, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(415,475),colour="#BB0000") + labs(x = "User16", y = "")
p20 <- ggplot(data=user20, aes (x=counter, y=value)) + geom_line() + geom_vline(xintercept = c(480,1038),colour="#BB0000") + labs(x = "User17", y = "")
# Control-group panel.
png("figures/Fig5.png", units="px", width=3000, height=1600, res=300)
grid.arrange(p5,p9,p12,p13,p15,p16,p17,p18,p19,p20, ncol=3,top="Control Group",
left="HR")
dev.off()
# Experimental-group panel.
png("figures/Fig5_btm.png", units="px", width=3000, height=1600, res=300)
grid.arrange(p4,p7, p8,p14, p6,p10,p11, ncol=3,top="Experimental Group", bottom="Users",
left="HR")
dev.off()
#HR ---------------------------------------------------------------------------
# Slice each user's trace down to the experiment window (same hand-derived
# counter indices as the vlines above).
# NOTE(review): the tsuser* objects are not used further in this file —
# presumably consumed by a downstream analysis script; verify before removing.
tsuser4<- user4 %>% filter(counter %in% (626:988))
tsuser5<- user5 %>% filter(counter %in% (448:589))
tsuser6<- user6 %>% filter(counter %in% (455:623))
tsuser7<- user7 %>% filter(counter %in% (448:530))
tsuser8<- user8 %>% filter(counter %in% (552:664))
tsuser9<- user9 %>% filter(counter %in% (598:690))
tsuser10<- user10 %>% filter(counter %in% (607:708))
tsuser11<- user11 %>% filter(counter %in% (532:775))
tsuser12<- user12 %>% filter(counter %in% (574:685))
tsuser13<- user13 %>% filter(counter %in% (556:739))
tsuser14<- user14%>% filter(counter %in% (528:636))
tsuser15<- user15 %>% filter(counter %in% (660:733))
tsuser16<- user16 %>% filter(counter %in% (607:788))
tsuser17<- user17 %>% filter(counter %in% (626:894))
tsuser18<- user18 %>% filter(counter %in% (569:958))
tsuser19<- user19 %>% filter(counter %in% (415:475))
tsuser20<- user20 %>% filter(counter %in% (480:1038))
# Summary plots across all users ----------------------------------------------

# Relative-time traces, one facet per user, with a linear trend line.
# Fixed: the original used tbl$counter / tbl$value inside aes(); $-access
# bypasses ggplot2's data masking and is a documented anti-pattern that can
# silently break with facetting/grouping. Bare column names are the
# supported form and produce the same plot here.
library(ggplot2)
ggplot(tbl, aes(x = counter, y = value, colour = id)) +
  geom_line() +
  geom_smooth(method = "lm") +
  facet_wrap( ~ id)
dev.copy(png,'./figures/all.png')
dev.off()
# Distribution of HR per user: plain boxplots.
library(ggplot2)
ggplot(data = tbl, mapping = aes(x = id, y = value)) +
  geom_boxplot()
dev.copy(png,'./figures/Box1.png')
dev.off()
# Boxplot outlines with jittered raw points on top.
ggplot(data = tbl, mapping = aes(x = id, y = value)) +
  geom_boxplot(alpha = 0) +
  geom_jitter(alpha = 0.1, color = "tomato")
dev.copy(png,'./figures/Box2.png')
dev.off()
# Violin basic
library(hrbrthemes)
library(viridis)
tbl %>%
  ggplot( aes(x=id, y=value, fill=id)) +
  geom_violin() +
  scale_fill_viridis(discrete = TRUE, alpha=0.6, option="A") +
  theme_ipsum() +
  theme(
    legend.position="none",
    plot.title = element_text(size=11)
  ) +
  ggtitle("Violin chart") +
  xlab("")
dev.copy(png,'./figures/violin.png')
dev.off()
# Joyplot (ridgeline) of HR distributions, colour-graded along the HR axis.
# NOTE(review): ..density.. / ..x.. are the legacy stat-variable spelling;
# ggplot2 >= 3.4 prefers after_stat(density) / after_stat(x). Kept as-is
# since the project's ggplot2 version is unknown.
library(ggjoy)
bpm.min <- min(tbl$value, na.rm = T)
bpm.max <- max(tbl$value, na.rm = T)
# Colour-gradient breakpoints along the HR axis.
# NOTE(review): the mixed constants (70, 109, 80, 90, 107) look like
# hand-picked heart-rate zone boundaries — confirm against the write-up.
breaks <- c(
  bpm.min,
  (bpm.min + 70) / 2,
  (109 + 80) / 2,
  (80 + 90) / 2,
  (90 + 107) / 2,
  bpm.max
)
ggplot(
  tbl,
  aes(x = value, y = id, height = ..density.., fill = ..x..)
) +
  scale_fill_gradientn(
    colors = c("royalblue", "royalblue", "green", "yellow", "orange", "red"),
    breaks = breaks
  ) +
  geom_joy_gradient(na.rm = TRUE, col = "grey70", scale = 1) +
  theme_joy(font_size = 10) +
  theme(
    legend.position = "none"
  )
dev.copy(png,'./figures/Hist1.png')
dev.off()
|
57f2c0d5b522e4c77c6ca04d729f9657f77e1192
|
bfdd25aee18dd0d22fce534c29217aae5111553a
|
/10_Capstone_Project_Next_Word_Prediction/Language Model/model.R
|
468b82dbe9106dcc777ca15ebaeb1c24e8ca13a7
|
[] |
no_license
|
nasimulhasan/Coursera_Data_Science_Specialization
|
508ae912b1de710ba0be9ecd598bc78cd955f702
|
7d89e770ea724a58c3e3852c8329bfd9da68e2b4
|
refs/heads/master
| 2020-03-18T08:43:51.630036
| 2016-09-25T15:14:13
| 2016-09-25T15:14:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,455
|
r
|
model.R
|
# model.R — build a 4-gram Markov-chain next-word prediction model.
# NOTE(review): setwd() with an absolute machine-specific path is a known
# anti-pattern; the script will only run on this one machine as written.
setwd("F:/Taufeeq/ML/Data Science/10. Capstone/Task0")
getwd()
# required library
library(data.table)
library(parallel)
library(tm)
library(stringr)
library(SnowballC) # Provides wordStem() for stemming
library(RWeka)
library(stylo)
library(markovchain)
dir()
options(mc.cores = 2)
# ***************************************************
#
# Building 4-gram model
# Load the combined 4-gram frequency table and keep n-grams seen at least
# twice (cuts noise and model size).
gram.4 <- fread("4_gram.csv")
gram.4 <- data.frame(gram.4)
dim(gram.4)
colnames(gram.4)
View(gram.4)
train.gram.4 <- gram.4[gram.4$frequency >= 2, ]
dim(train.gram.4)
train.gram.4 <- as.vector(train.gram.4$term)
# create single token from quad token
train.token <- scan_tokenizer(train.gram.4)
head(train.token)
length(train.token)
train.token
# Persist/reload the token stream so the expensive tokenisation can be skipped
# on later runs.
save(train.token, file = "train.token.RData")
load("train.token.RData")
length(train.token)
# *******************************************************
# Alternative build: merge per-source (twitter/news/blogs) 4-gram tables.
twitter <- fread("F:/Taufeeq/ML/Data Science/10. Capstone/Dataset/final/gram_4/twitter.4.gram.csv")
twitter <- data.frame(twitter)
dim(twitter)
colnames(twitter)
twitter.v1 <- twitter[twitter$frequency >= 2, ]
dim(twitter.v1)
news <- fread("F:/Taufeeq/ML/Data Science/10. Capstone/Dataset/final/gram_4/news.4.gram.csv")
news <- data.frame(news)
dim(news)
colnames(news)
news.v1 <- news[news$frequency >= 2, ]
dim(news.v1)
blogs <- fread("F:/Taufeeq/ML/Data Science/10. Capstone/Dataset/final/gram_4/blogs.4.gram.csv")
blogs <- data.frame(blogs)
dim(blogs)
colnames(blogs)
# NOTE(review): blogs uses `> 2` while twitter/news use `>= 2` — possibly a
# deliberate stricter cut for the (larger) blogs corpus, possibly a typo;
# confirm which threshold was intended.
blogs.v1 <- blogs[blogs$frequency > 2, ]
dim(blogs.v1)
gram.4 <- rbind(twitter.v1,
news.v1,
blogs.v1
)
dim(gram.4)
# ******************************************************
train.gram.4 <- as.vector(gram.4$term)
# create single token from quad token
train.token <- scan_tokenizer(train.gram.4)
head(train.token)
length(train.token)
# *******************************************************
#prediction model
# Fit a first-order Markov chain on the token stream with Laplace smoothing.
model.fit <- markovchainFit(data = train.token,
method = "laplace",
laplacian = .01
)
# splitting the prediction text into individual words
x <- "I can't deal with unsymetrical things. I can't even hold an uneven number of bags of groceries in each "
prediction.text <- as.vector(unlist(strsplit(x, " ", fixed = TRUE)))
# Predict the single next word following the supplied text.
# NOTE(review): the `prediction <- "the"` placeholder is immediately
# overwritten by predict(); it appears to be leftover scaffolding.
prediction <- "the"
prediction <- predict(model.fit$estimate,
newdata = prediction.text,
n.ahead = 1)
prediction
|
fce6fe42782c992bce90a193be2070bd3169d29a
|
77626deb51b6785f43241084a7c77aa158c00298
|
/R/prepareJSON.r
|
13afc28818deaaa3379208c41920e3aea2c9690e
|
[] |
no_license
|
cran/filterviewR
|
49b56f84ea69b0418eaf40c3178fa5dc1d2846c9
|
3cd48114b33d2400eb3b861f94e57a13f1da860d
|
refs/heads/master
| 2016-09-05T15:45:47.218523
| 2012-02-28T00:00:00
| 2012-02-28T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 561
|
r
|
prepareJSON.r
|
#' Returns a JSON object from a data frame.
#'
#' Optionally subsets and renames the columns, then serialises the data
#' frame (as a list of columns) to a JSON string via \code{rjson::toJSON}.
#'
#' @param df input dataframe
#' @param cols numeric vector of columns to use (defaults to all columns)
#' @param cnames string vector of column names (defaults to existing names)
#' @return a JSON string with one array per selected column
#' @export
#' @examples
#' library(rjson)
#' three_cluster <- data.frame(fromJSON(file = system.file("data2", "three_cluster.json", package="filterviewR")))
#' prepareJSON(three_cluster)
prepareJSON <- function(df, cols, cnames){
  # seq_len() instead of 1:ncol(df): 1:0 yields c(1, 0) and would fail on a
  # zero-column data frame.
  if(missing(cols)) cols <- seq_len(ncol(df))
  if(missing(cnames)) cnames <- names(df)
  # drop = FALSE keeps a data frame even when a single column is selected.
  df <- df[, cols, drop = FALSE]
  names(df) <- cnames
  toJSON(as.list(df))
}
|
3fecf6a4f4a9066b901e6d107c6c418ef3e54bd2
|
adc6d6ee825cbc93f0573d88c5222c2fffb6fa3f
|
/serend_pipeline.R
|
3ae8e343a4a43bc75e24dd2a7e4c6a4cd7b4b529
|
[] |
no_license
|
dschlauch/workspace
|
53dc2021ca8cd2fa59e7cbf6e3dfbf36175ae048
|
4788a672e50b02d701b6a2e73aae4a73f48c7133
|
refs/heads/master
| 2020-05-21T04:25:09.113078
| 2017-04-28T00:38:28
| 2017-04-28T00:38:28
| 43,386,682
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 782
|
r
|
serend_pipeline.R
|
# serend_pipeline.R — convert yeast motif data into the per-TF input files
# expected by SEREND: one motif-score file per transcription factor, a TF
# list, and a direct-evidence table.
yeastDir <- "~/gd/Harvard/Research/data/YeastData/"
# Columns (by use below): V1 = transcription factor, V2:V3 = target/score info.
motifs <- read.table(file.path(yeastDir,"YeastData_Motif.txt"), stringsAsFactors=F)
transFactors <- as.character(unique(motifs[,1]))
# One tab-separated file per TF; the constants (100, 1000, "forward") fill
# SEREND's fixed score/length/strand columns.
# NOTE(review): lapply is used purely for its side effect (writing files);
# the returned list is discarded.
lapply(transFactors, function(tf){
write.table(cbind(motifs[motifs[,1]==tf,2:3], 100, 1000, "forward"), file=file.path("~/gd/Harvard/Research/SEREND/YeastCC/MotifScores", paste(tf,".txt",sep="")), quote=F, row.names=F, col.names=F, sep="\t")
})
# Newline-separated list of all TFs.
write.table(transFactors, file=file.path("~/gd/Harvard/Research/SEREND/YeastCC/", "TFlist.txt"), quote=F, row.names=F, col.names=F, sep="\n")
# Direct-evidence table: same TF/target pairs with the third column forced
# to the '+-' evidence code.
directEvidence <- motifs
directEvidence[,3] <- '+-'
write.table(directEvidence, file=file.path("~/gd/Harvard/Research/SEREND/YeastCC/", "directEvidence.txt"), quote=F, row.names=F, col.names=F, sep="\t")
|
5cd9f1358ca17981c085a6c99f07b42f651a90bc
|
a69d03eb40a4b015eceb68fe173968006681c5ca
|
/cachematrix.R
|
16e29ca1dfab82c2d0bb289220b0f063d09f6897
|
[] |
no_license
|
dfelker11/ProgrammingAssignment2
|
ea97247fc1af7a0972c1bc0afcf4b1614ecdf1e6
|
7c37d3f30e4e2c22dd595a8f76b3924cf90a7832
|
refs/heads/master
| 2020-12-03T05:28:01.685901
| 2015-10-19T20:42:14
| 2015-10-19T20:42:14
| 44,559,472
| 0
| 0
| null | 2015-10-19T19:47:04
| 2015-10-19T19:47:03
| null |
UTF-8
|
R
| false
| false
| 1,141
|
r
|
cachematrix.R
|
## This code creates functions to calculate and cache the inverse of a matrix
## Creates a special matrix object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Closure-based "matrix object": stores the matrix plus a lazily filled
  # cache of its inverse, exposed through four accessor functions.
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  # Return the stored matrix.
  get <- function() {
    x
  }
  # Store a precomputed inverse in the cache.
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  # Return the cached inverse (NULL if not computed yet).
  getinverse <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Computes the inverse of the special "matrix" returned by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix has not
## changed), the cached value is returned instead of recomputing it.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  # Cache hit: announce and short-circuit.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: compute the inverse with solve() and store it for next time.
  source_matrix <- x$get()
  result <- solve(source_matrix, ...)
  x$setinverse(result)
  result
}
|
52afdbb0c74fdc765c8880234d177751325379d6
|
513e99e0b775fbe404234c15c06f01a61b5beca8
|
/man/encoding.Rd
|
80061a771c0cb7396c31b90e25e360003c1aac59
|
[] |
no_license
|
cran/tau
|
726f57c1ef88ec69674ae47a781edfbc5bd95768
|
9c6888a56d01979348e4228742ffaa0278362a5c
|
refs/heads/master
| 2021-08-04T05:58:45.982510
| 2021-07-21T10:07:16
| 2021-07-21T10:07:16
| 17,700,370
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,556
|
rd
|
encoding.Rd
|
\name{encoding}
\alias{is.utf8}
\alias{is.ascii}
\alias{is.locale}
\alias{translate}
\alias{fixEncoding}
\title{Adapt the (Declared) Encoding of a Character Vector}
\description{
Functions for testing and adapting the (declared) encoding
of the components of a vector of mode \code{character}.
}
\usage{
is.utf8(x)
is.ascii(x)
is.locale(x)
translate(x, recursive = FALSE, internal = FALSE)
fixEncoding(x, latin1 = FALSE)
}
\arguments{
\item{x}{a vector (of character).}
\item{recursive}{option to process list components.}
\item{internal}{option to use internal translation.}
\item{latin1}{option to assume \code{"latin1"} if the declared
encoding is \code{"unknown"}.}
}
\details{
\code{is.utf8} tests if the components of a vector of character
are true UTF-8 strings, i.e. contain one or more valid UTF-8
multi-byte sequence(s).
\code{is.locale} tests if the components of a vector of character
are in the encoding of the current locale.
\code{translate} encodes the components of a vector of \code{character}
in the encoding of the current locale. This includes the \code{names}
attribute of vectors of arbitrary mode. If \code{recursive = TRUE}
the components of a \code{list} are processed. If \code{internal = TRUE}
multi-byte sequences that are invalid in the encoding of the current
locale are changed to literal hex numbers (see FIXME).
\code{fixEncoding} sets the declared encoding of the components of
a vector of character to their correct or preferred values. If
\code{latin1 = TRUE} strings that are not valid UTF-8 strings are
declared to be in \code{"latin1"}. On the other hand, strings that
are true UTF-8 strings are declared to be in \code{"UTF-8"} encoding.
}
\value{
The same type of object as \code{x} with the (declared) encoding
possibly changed.
}
\references{FIXME PCRE, RFC 3629}
\author{Christian Buchta}
\note{
Currently \code{translate} uses \code{iconv} and therefore is not
guaranteed to work on all platforms.
}
\seealso{\code{\link{Encoding}} and \code{\link{iconv}}.}
\examples{
## Note that we assume R runs in an UTF-8 locale
text <- c("aa", "a\xe4")
Encoding(text) <- c("unknown", "latin1")
is.utf8(text)
is.ascii(text)
is.locale(text)
## implicit translation
text
##
t1 <- iconv(text, from = "latin1", to = "UTF-8")
Encoding(t1)
## oops
t2 <- iconv(text, from = "latin1", to = "utf-8")
Encoding(t2)
t2
is.locale(t2)
##
t2 <- fixEncoding(t2)
Encoding(t2)
## explicit translation
t3 <- translate(text)
Encoding(t3)
}
\keyword{utilities}
\keyword{character}
|
c131955fe3a234dbbb2631f6ffdc594f34c97607
|
08dd50d111221d8be31c9331812b41006b2e2e16
|
/Actividad.R
|
3ec94cb741f70629648d7cd236647d74b4a93d2f
|
[] |
no_license
|
gerardolml/Piramide-
|
e6f9d5c2eef7b41c7a0a7592af29d2adeaaff581
|
5bc788bbfa6a3306fbe680208558b894677dacb7
|
refs/heads/master
| 2023-01-27T20:39:49.871277
| 2020-12-09T00:56:17
| 2020-12-09T00:56:17
| 319,801,379
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,228
|
r
|
Actividad.R
|
library(readxl)
library(tidyverse)
library(magrittr)
# Tasas: for a set of Mexican states and a year, compute crude, adjustment-
# factor and (indirectly) standardised mortality rates against the national
# ("República Mexicana") population as the standard, and draw a population
# pyramid per state. Returns the rates as a data frame (states as columns).
# Side effects: reads "Indicadores demográficos (1).xlsx" from the working
# directory and prints one pyramid plot per state.
Tasas<-function(estados,anio){
indicadores<-read_xlsx("Indicadores demográficos (1).xlsx")
# Age-band labels matching the column order of the spreadsheet.
# NOTE(review): "17-19 años" overlaps "15-17 años" — presumably "18-19" was
# intended; confirm against the spreadsheet's actual bands.
edad<-c("00-05 años","06-11 años","12-14 años","15-17 años",
"17-19 años","20-24 años","25-29 años","30-49 años",
"50-64 años","64 años y mas")
# National reference: population (columns starting with "P") and deaths
# (columns starting with "D") by age band, transposed to long form.
RepublicaPoblacion<-indicadores %>% filter(AÑO==anio,
ENTIDAD=="República Mexicana") %>%
select(starts_with("P")) %>% t() %>%
as.data.frame()%>%cbind(edad)
colnames(RepublicaPoblacion)=c("Poblacion","Edades")
RepublicaDefuncion<-indicadores %>% filter(AÑO==anio,
ENTIDAD=="República Mexicana") %>%
select(starts_with("D")) %>% t() %>%
as.data.frame()%>%cbind(edad)
colnames(RepublicaDefuncion)=c("Defunciones","Edades")
Republica<-Reduce(merge,list(RepublicaPoblacion,RepublicaDefuncion))
# National age-specific death rate per 1000 — the "standard" rates.
Republica %<>%mutate(TasaEstandar=Defunciones/Poblacion*1000)
Tablas<-list()
Tasas<-data.frame()
# Per state: build the same long-form table (plus sex split, columns "M"/"H"),
# then compute expected deaths under the national rates, the crude rate, the
# adjustment factor (observed/expected) and the standardised rate.
for (i in 1:length(estados)) {
edo<-indicadores %>% filter(AÑO==anio,ENTIDAD==estados[i])
mujeres<-edo %>% select(starts_with("M")) %>%t() %>%
as.data.frame()%>%cbind(edad)
colnames(mujeres)=c("Mujeres","Edades")
hombres<-edo %>% select(starts_with("H")) %>%t() %>%
as.data.frame()%>%cbind(edad)
colnames(hombres)=c("Hombres","Edades")
poblacion<-edo %>% select(starts_with("P")) %>%t() %>%
as.data.frame()%>%cbind(edad)
colnames(poblacion)=c("Poblacion","Edades")
defunciones<-edo %>% select(starts_with("D")) %>%t() %>%
as.data.frame()%>%cbind(edad)
colnames(defunciones)=c("Defunciones","Edades")
Tablas[[estados[i]]]=Reduce(merge,
list(mujeres,hombres,poblacion,defunciones))
# Expected deaths per age band if the state had the national rates.
Tablas[[estados[i]]] %<>% mutate(Defunciones_Esperadas=
Poblacion*Republica$TasaEstandar/1000)
# Crude death rate per 1000 for the state.
Tasabruta<-(sum(Tablas[[estados[i]]]$Defunciones)/sum(
Tablas[[estados[i]]]$Poblacion))*1000
# Standardised mortality ratio: observed / expected deaths.
FactordeAjuste=sum(Tablas[[estados[i]]]$Defunciones)/sum(
Tablas[[estados[i]]]$Defunciones_Esperadas)
# Indirectly standardised rate: national crude rate scaled by the SMR.
TasaEstandarizada=1000*sum(Republica$Defunciones)/sum(Republica$Poblacion)*FactordeAjuste
Tasas %<>% rbind(c(Tasabruta,FactordeAjuste,TasaEstandarizada))
}
colnames(Tasas)=c("Tasa bruta","Factor de Ajuste","Tasa Estandarizada")
rownames(Tasas)=estados
# Reshape the sex split to long form for the pyramid plots.
Piramide<-list()
for (i in 1:length(estados)) {
ClasporSexo<-Tablas[[estados[i]]] %>% select(Edades,Mujeres,Hombres) %>%
pivot_longer(cols = c("Hombres","Mujeres"),
names_to = "Sexo",
values_to = "Poblacion por Sexo")
Piramide[[estados[i]]]=as.data.frame(ClasporSexo)
}
# Draw one population pyramid per state: men plotted as negated counts on
# the left, women on the right, axes flipped so age bands run vertically.
for (i in 1:length(estados)) {
x=Piramide[[estados[i]]]
plt <- ggplot(x, aes(x = `Edades`,
y = `Poblacion por Sexo`,
fill = Sexo))+
# Seccion de HOMBRES (men's side)
geom_col(data = subset(x, Sexo == "Hombres") %>%
# Negate the men's counts so their bars extend left of zero
mutate(`Poblacion por Sexo` = -`Poblacion por Sexo`),
width = 0.5, fill = "blue") +
# Seccion de MUJERES (women's side)
geom_col(data = subset(x, Sexo == "Mujeres"),
width = 0.5, fill = "pink") +
# Flip coordinates: age on the vertical axis
coord_flip() +
# Symmetric tick positions on both sides, labelled with positive numbers.
scale_y_continuous(
breaks = c(seq(-(max(x$`Poblacion por Sexo`)+10000),
-50000, by = max(x$`Poblacion por Sexo`)/5),
seq(0, max(x$`Poblacion por Sexo`)+10000,
by = max(x$`Poblacion por Sexo`)/5)),
labels = c(seq(-(max(x$`Poblacion por Sexo`)+10000), -50000,
by = max(x$`Poblacion por Sexo`)/5) * -1,
seq(0,max(x$`Poblacion por Sexo`)+10000,
by =max(x$`Poblacion por Sexo`)/5)))+
ggtitle(str_c(estados[i]," en el año ",anio))
print(plt)
}
# Transpose so each state becomes a column of the returned data frame.
Tasas %<>%t() %>% as.data.frame()
return(Tasas)
}
# Example invocation for four states in 2017.
estado<-c("Jalisco","México","Sinaloa","Tlaxcala")
Tasas(estado,2017)
|
97ea44c080d5a29007658c1dbef98e75edd12cee
|
304e5b878bbe66d6bc44738ef96123eef9ae052f
|
/plot2.R
|
38c6247d6abdba4807f75bf471aad82d718d5729
|
[] |
no_license
|
P-Chevalier/ExData_Plotting1
|
699836b1a8d12bb307a094d6f5e474d5985a2113
|
6bb7eb2a7e49ecb792d0a3fa069109b551c34d4d
|
refs/heads/master
| 2020-12-02T15:04:52.704018
| 2015-07-10T15:52:10
| 2015-07-10T15:52:10
| 38,649,273
| 0
| 0
| null | 2015-07-06T22:13:21
| 2015-07-06T22:13:21
| null |
UTF-8
|
R
| false
| false
| 1,244
|
r
|
plot2.R
|
# plot2.R
# Usage: source plot2.R
# This script expects the data file household_power_consumption.txt to be in the
# current working directory.
# This script requires the data.table library.
# The data file should downloaded and then unzip'ed from the following location:
# https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# Load table package needed by plot2.R
library("data.table")
# Check to see if household_power_consumption.txt exists in the current working directory
if ((!file.exists("./household_power_consumption.txt"))) {
stop("household_power_consumption.txt file is missing - please download file to continue")
}
# Read in only the 1/2/2007 data
df<-fread("grep ^1\\/2\\/2007 household_power_consumption.txt")
# Read in only the 2/2/2007 data
df2<-fread("grep ^2\\/2\\/2007 household_power_consumption.txt")
# Combine both day's data together
alldf<-rbind(df,df2)
# Combine date and time and format to date_time
date_time<-strptime(paste(alldf$V1, alldf$V2, sep=" "),"%d/%m/%Y %H:%M:%S")
# Create the png file
png("plot2.png", width=480, height=480, units="px")
# Create the plot
plot(date_time, alldf$V3, type="l", xlab=NA, ylab="Global Active Power (kilowatts)")
dev.off()
|
a5599580adbd1dc546d90840c4d639cb6bfb510f
|
4324922f5ff6c667e912f42170a7a71ed448c6f8
|
/R/recommend.R
|
23ad97b98e53887c27f65523322df36855a93bcb
|
[] |
no_license
|
tera-insights/gtStats
|
6db22e2847bc723fb3d30e688921e3b26683c016
|
7b5ebcd74cb32abc9a2803d17dad5077de07af3a
|
refs/heads/master
| 2020-04-04T10:39:07.429581
| 2017-06-06T14:42:55
| 2017-06-06T14:42:55
| 25,887,912
| 1
| 1
| null | 2016-07-01T20:58:39
| 2014-10-28T20:08:22
|
R
|
UTF-8
|
R
| false
| false
| 1,349
|
r
|
recommend.R
|
Recommend <- function(data, ..., outputs = error) {
gby1 <- eval(call("GroupBy", Read(gs_streams), groupAtts = as.symbol("songid"),
substitute(Count(inputs = tsadded, outputs = Count1))))
gby2 <- eval(call("GroupBy", Read(gs_streams), groupAtts = as.symbol("userid"),
substitute(Count(inputs = tsadded, outputs = Count2))))
data <- eval(call("GroupBy", Read(gs_streams), groupAtts = substitute(c(userid, songid)),
substitute(Count(inputs = tsadded, outputs = Count3))))
if (missing(inputs))
inputs <- convert.schema(data$schema)
else
inputs <- substitute(inputs)
inputs <- convert.exprs(inputs)
outputs <- substitute(outputs)
check.atts(outputs)
outputs <- convert.atts(outputs)
if (length(outputs) != 1)
stop("There must be exactly one output specified.")
constructor <- RecommendMake(...)
agg <- Aggregate(data, constructor$GLA, inputs, outputs, states = list(gby1, gby2))
agg
}
RecommendMake <- function(f = 100, threshold = 100,
y1 = 0.007, y2 = 0.007, y3 = 0.001,
l6 = 0.005, l7 = 0.015, l8 = 0.015) {
GLA <- GLA(
gs::Recommend,
y1 = y1,
y2 = y2,
y3 = y3,
l6 = l6,
l7 = l7,
l8 = l8,
f = f,
t = threshold
)
list(GLA = GLA)
}
|
06c118e3ed11cde0c6b4c6bf606fc9789fd9ff56
|
c26ebaab8d53325944046390c71905752ad71a48
|
/R_codes_and_data_sets/BLMM_Peak_Licks.R
|
c1c63dda82867f4c2620135e74de37b4f554aa60
|
[] |
no_license
|
GELopezTolsa/Timing_and_SIB
|
4f131cd7070e5455b78b10b84e0aff413c041046
|
be567365cf3171a6f0e2ed1c40a514cd775061da
|
refs/heads/main
| 2023-05-04T04:19:46.566005
| 2021-05-27T21:18:17
| 2021-05-27T21:18:17
| 356,766,651
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,166
|
r
|
BLMM_Peak_Licks.R
|
rm(list=ls())
library(brms)
setwd("~/Dropbox/Doctorado UNED/PhD Thesis/FI+SID/R_FI_SID")
data <- read.csv("data_peak.csv")
m0 = brm(formula = Peak ~ (1|Subject),
data = data, chains = 4, iter = 2000,
cores = getOption("mc.cores", 2L),
control = list(adapt_delta=0.99, max_treedepth = 12),
seed = 1702, save_all_pars = T)
############# BEST MODEL #################3
m1 = brm(formula = Peak ~ FI + (1|Subject),
data = data, chains = 4, iter = 2000,
cores = getOption("mc.cores", 2L),
control = list(adapt_delta=0.99, max_treedepth = 12),
seed = 1702, save_all_pars = T)
m2 = brm(formula = Peak ~ FI + (1|Subject) + (1|Session),
data = data, chains = 4, iter = 2000,
cores = getOption("mc.cores", 2L),
control = list(adapt_delta=0.99, max_treedepth = 12),
seed = 1702, save_all_pars = T)
post_samples_m1 = posterior_samples(m1)
head(post_samples_m1 %>% round(1))
post_samples_m1
plot(m1)
ppm1<-pp_check(m1,nsamples = 1000)
ppm1
ppm2<-pp_check(m2,nsamples = 1000)
ppm2
mean(post_samples_m1$b_FIB>0)
mean(post_samples_m1$b_FIC>0)
bayes_factor(m1,m0)
|
5cb206f5ae777c36ca03dab663827525093e68e3
|
7ae196f6cf7a7f859eaa3cb75ac4662a323382db
|
/aquarius DA/lab4_boosting1.R
|
2e7a6534f86bc7e255aa7d5c91573eb3343ef5e3
|
[] |
no_license
|
priteshmaheshwari/DataAnalytics2021_Pritesh_Maheshwari
|
dda90b2cda7b5a14609aa449ff493408a3e0d91d
|
7e3d9587cc3bbd8acfe6dbc17ac467ba44b78c58
|
refs/heads/master
| 2023-03-28T20:17:03.759601
| 2021-03-29T02:06:35
| 2021-03-29T02:06:35
| 335,808,943
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 932
|
r
|
lab4_boosting1.R
|
require(ggplot2) # or load package first
data(diamonds)
head(diamonds) # look at the data!
#
ggplot(diamonds, aes(clarity, fill=cut)) + geom_bar()
ggplot(diamonds, aes(clarity)) + geom_bar() + facet_wrap(~ cut)
ggplot(diamonds) + geom_histogram(aes(x=price)) + geom_vline(xintercept=12000)
ggplot(diamonds, aes(clarity)) + geom_freqpoly(aes(group = cut, colour = cut))
diamonds$Expensive <- ifelse(diamonds$price >= 12000,1,0)
head(diamonds)
diamonds$price<-NULL
require(glmnet) # or load package first
x<-model.matrix(~., diamonds[,-ncol(diamonds)])
y<-as.matrix(diamonds$Expensive)
mglmnet<-glmnet(x=x,y=y,family="binomial")
plot(mglmnet)
set.seed(51559)
sample(1:10)
require(rpart)
mTree<-rpart(Expensive~.,data=diamonds)
plot(mTree)
text(mTree)
require(boot)
mean(diamonds$carat)
ds(diamonds$carat)
boot.mean<-function(x,i)
{
mean(x[i])
}
boot(data=diamonds$carat, statistic=boot.mean,R=120)
|
1d7745855d01b8f299a965e789e00e077dab8a26
|
c1109441c484b2526232ece4c63aea9c9e7fa7f4
|
/MaFMethodology/R/prune/HV/15/kruskaloutput.R
|
1bdda3d50be96711b3df1e98cf9859c5fbc01664
|
[] |
no_license
|
fritsche/hhcopreliminaryresults
|
b8779e22ae7464afad6ee9d44654a0e4c829df92
|
f63bca8cc4d55b9ce1eeb6f36fae25b771dc1790
|
refs/heads/master
| 2022-03-27T02:13:44.500907
| 2019-12-09T12:04:10
| 2019-12-09T12:04:10
| 224,172,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 634
|
r
|
kruskaloutput.R
|
Kruskal-Wallis rank sum test
data: ARRAY and categs
Kruskal-Wallis chi-squared = NaN, df = 5, p-value = NA
HHCOR2LPNORM HHCOR2MINMAX HHCOR2SDE HHCORandomLPNORM HHCORandomMINMAX
HHCOR2MINMAX 1 - - - -
HHCOR2SDE 1 1 - - -
HHCORandomLPNORM 1 1 1 - -
HHCORandomMINMAX 1 1 1 1 -
HHCORandomSDE 1 1 1 1 1
|
a93757a447448478ac0594e66f2f6f9bac5d983c
|
9a4d33b3aa79c9684867c8e038659573932cb429
|
/man/predict_knn.Rd
|
18671fe0e8985421f7e3b5fedcd6565f3120ff48
|
[
"MIT"
] |
permissive
|
joshloyal/STAT542
|
795fd29cdfff4628b28dc4b72739a86bbfc647bf
|
4659eb828047f6fbaca386e687cd67ed0c988b16
|
refs/heads/master
| 2021-07-09T05:16:33.612467
| 2017-10-07T00:45:44
| 2017-10-07T00:45:44
| 102,968,018
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 874
|
rd
|
predict_knn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/neighbors.R
\name{predict_knn}
\alias{predict_knn}
\title{K-Nearest Neighbor Predictions}
\usage{
predict_knn(X, Y, x_query, k, dist_func = mydist, ...)
}
\arguments{
\item{X}{dataset of shape [n_samples, n_features]}
\item{Y}{target vector of shape [n_samples]}
\item{x_query}{vector of shape [n_features] to make predictions on.}
\item{k}{number of nearest neighbors to use for predictions.}
\item{dist_func}{function used to calculate the distance between two points.}
\item{...}{These arguments are passed into \code{dist_func}.}
}
\value{
A single prediction for \code{x_query}.
}
\description{
Make predictions on a data point \code{x_query}
based on the k-nearest neighbors in the dataset
\code{X}. Predictions are made by averaging the
\code{Y} values of the k-neighest neighbors.
}
|
3bf558817c38cc83376ba38d11d2ee7083d6f163
|
927e74b280556daae5b90929a1586aec858dbdb7
|
/R/zzz.r
|
4b8b566b0d8061a6d0a2a0591fcde19595e21667
|
[] |
no_license
|
alfcrisci/rWeatherITA
|
37ff6ebcb2842e680d3419abdae3f270a771980d
|
8f4c39438894e2d6f94402c799392c0ce56c34c0
|
refs/heads/master
| 2020-12-24T06:42:21.603561
| 2016-11-28T15:39:39
| 2016-11-28T15:39:39
| 52,808,245
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 279
|
r
|
zzz.r
|
.onAttach <- function(libname, pkgname) {
packageStartupMessage("Welcome to rWeatherITA package.")
}
.onLoad <- function(libname, pkgname) {
if(!require(GSODTools)) {devtools::install_github("environmentalinformatics-marburg/GSODTools","develop")}
library(GSODTools)
}
|
6060dff7d0d33d4b391cfa1a845d7dcad3d612b4
|
79afffae6d108b1a93aea7c72a55cf1fc7247498
|
/man/nnr.rd
|
5dce58c59680dc281fa1b40f0d04b1cf747c45b8
|
[] |
no_license
|
cran/assist
|
efbbad8da52741412f5dc933457774672de90b12
|
866a22f739a0e84d8631044225e3676651c987f2
|
refs/heads/master
| 2023-09-01T13:13:28.031385
| 2023-08-22T07:00:02
| 2023-08-22T07:30:44
| 17,718,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,248
|
rd
|
nnr.rd
|
\name{nnr}
\alias{nnr}
\title{
Nonlinear Non-parametric Regression
}
\description{
Fit a nonlinear nonparametric regression models with spline smoothing based on extended Gauss-Newton/Newton-Raphson and backfitting.
}
\usage{
nnr(formula, func, spar="v", data=list(),
start=list(),verbose=FALSE, control=list())
}
\arguments{
\item{formula}{
a model formula, with the response on the left of a \eqn{\mbox{\textasciitilde}}{~} operator and on the right an expression representing
the mean function with a nonparametric function appearing with a symbol, e.g. f.
}
\item{func}{
a required formula specifying the spline components necessary to estimate the non-parametric function.
On the left of a \eqn{\mbox{\textasciitilde}}{~} operator is the unknow function symbol as well as its arguments, while the right side
is a list of two components, an optional \code{nb} and a required \code{rk}. \code{nb} and \code{rk} are
similar to \code{formula} and \code{rk} in \code{ssr}. A missing \code{nb} denotes an empty null space.
}
\item{spar}{
a character string specifying a method for choosing the smoothing parameter. "v", "m" and "u" represent GCV, GML and
UBR respectively. Default is "v" for GCV.
}
\item{data}{
an optional data frame.
}
\item{start}{
a list of vectors or expressions which input inital values for the unknown functions. If expressions,
the argument(s) inside should be the same as in \code{func}. The length of \code{start} should be the same as
the number of unknown functions. If named, the names of the list should match those in "func". If not named, the order
of the list is taken as that appearing in "func".
}
\item{verbose}{
an optional logical numerical value. If \code{TRUE}, information on
the evolution of the iterative algorithm is printed. Default is \code{FALSE}.
}
\item{control}{
an optional list of control values to be used. See nnr.control for details.
}}
\value{
an object of class \code{nnr} is returned, containing fitted values, fitted function values as well as
other information used to assess the estimate.
}
\details{
A nonlinear nonparametric model is fitted using the algorithms developed in Ke and Wang (2002).
}
\references{
Ke, C. and Wang, Y. (2002). Nonlinear Nonparametric Regression Models. Submitted.
}
\author{Chunlei Ke \email{chunlei_ke@yahoo.com} and Yuedong Wang \email{yuedong@pstat.ucsb.edu}.}
\seealso{
\code{\link{nnr.control}}, \code{\link{ssr}}, \code{\link{print.nnr}}, \code{\link{summary.nnr}}, \code{\link{intervals.nnr}}
}
\examples{
\dontrun{
x<- 1:100/100
y<- exp(sin(2*pi*x))+0.3*rnorm(x)
fit<- nnr(y~exp(f(x)), func=list(f(u)~list(~u, cubic(u))), start=list(0))
## fit a generalized varying coefficient models
data(Arosa)
Arosa$csmonth <- (Arosa$month-0.5)/12
Arosa$csyear <- (Arosa$year-1)/45
ozone.vc.fit <- nnr(thick~f1(csyear)+exp(f2(csyear))*f3(csmonth),
func=list(f1(x)~list(~I(x-.5),cubic(x)), f2(x)~list(~I(x-.5)-1,cubic(x)),
f3(x)~list(~sin(2*pi*x)+cos(2*pi*x)-1,lspline(x,type="sine0"))),
data=Arosa[Arosa$year\%\%2==1,], spar="m", start=list(f1=mean(thick),f2=0,f3=sin(csmonth)),
control=list(backfit=1))
}
}
\keyword{file}
|
d09877291585415a1b92919b1c4deaed8f4743b9
|
cc4232590de5ed93b4606581bb96b38e207e88ea
|
/man/exprSet2cellcycle.Rd
|
041f0528b94b3ad4edd332b0d980ad9353943e87
|
[] |
no_license
|
faker1c/BPscRNAseq
|
47d3a690f1427810738f2049008e7849ed12adc6
|
d1948ba690583be0890cc6ce42ba5f12bd1ffe42
|
refs/heads/master
| 2020-05-04T20:29:07.402323
| 2018-08-22T03:53:52
| 2018-08-22T03:53:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 832
|
rd
|
exprSet2cellcycle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exprSet2cellcycle.R
\name{exprSet2cellcycle}
\alias{exprSet2cellcycle}
\title{Assign cell cycle status based on expression matrix}
\usage{
exprSet2cellcycle(exprSet, geneType = "ensembl", species = "human")
}
\arguments{
\item{exprSet}{The expression matrix(which shoud be normalized,like log2(cpm+1) or log2(tpm+1))}
\item{geneType}{Choose ensembl or symbol,defaults: ensembl}
\item{species}{Choose human or mouse,defaults: human}
}
\value{
assigned A list of (phases,scores,normalized.scores) return from cyclone(scran)
}
\description{
The rownames of expression matrix, should be ensembl IDs or gene symbols
One should set the species as human or mouse.
}
\examples{
exprSet2cellcycle
exprSet2cellcycle(exprSet,geneType='ensembl',species='human')
}
|
ec8108bab1820752d1cc53a0523851eb3159e0c9
|
9571a2c8c77a96dcc90fc54112c97b0926f0e3cf
|
/workshop/R_course/linear.R
|
ec88f4a3816d8707a619a7478683e76cbcff8136
|
[] |
no_license
|
wnarifin/wnarifin.github.io
|
58ce0b6e777cbccd2f7d806c9dabbb5fb8f1ad22
|
141a021293d9eed7fdedde5857dac00ba4dfc656
|
refs/heads/master
| 2023-09-04T12:05:53.696075
| 2023-08-28T06:17:32
| 2023-08-28T06:17:32
| 96,778,033
| 3
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,421
|
r
|
linear.R
|
#--------------------------------------------------------
# A Short Course on Data Analysis Using R Software (2017)
#--------------------------------------------------------
# Wan Nor Arifin
#--------------------------------------------------------
# Linear regression
#--------------------------------------------------------
# Preliminaries
## Load libraries
library(car)
library(psych)
## Load data set
salary = Salaries # data from `car`, Salaries for Professors...
?Salaries
str(salary)
names(salary)
# View the levels of categorical variables
sapply(salary[c("rank", "discipline", "sex")], levels)
# Linear Regression
## Data exploration
### Descriptive statistics
describe(salary[c(3,4,6)]) # var 3, 4, 6 are numbers
summary(salary[c(1,2,5)]) # var 1, 2, 5 are factors
lapply(salary[c(1,2,5)], function(x) summary(x)/length(x)*100) # in percent
# Salary by groups
describeBy(salary$salary, salary$rank)
describeBy(salary$salary, salary$discipline)
describeBy(salary$salary, salary$sex)
#lapply(salary[c(1,2,5)], function(x) describeBy(salary$salary, x)) # one line code
### Plots
multi.hist(salary[c(3,4,6)])
plot(salary)
## Univariable
str(salary)
names(salary)
# - Years since PhD,
linear.u.phd = glm(salary ~ yrs.since.phd, data = salary)
summary(linear.u.phd)
# - Years in service,
linear.u.ser = glm(salary ~ yrs.service, data = salary)
summary(linear.u.ser)
# - Rank,
linear.u.ran = glm(salary ~ rank, data = salary)
summary(linear.u.ran)
# - Discipline,
linear.u.dis = glm(salary ~ discipline, data = salary)
summary(linear.u.dis)
# - Sex,
linear.u.sex = glm(salary ~ sex, data = salary)
summary(linear.u.sex)
# - LR test
linear.u0 = glm(salary ~ 1, data = salary)
summary(linear.u0)
cat(names(salary), sep = " + ")
add1(linear.u0, scope = ~ rank + discipline + yrs.since.phd + yrs.service + sex, test = "LRT")
# - p on add that var = univar
## Multivariable
# - All
linear.m.all = glm(salary ~ rank + discipline + yrs.since.phd + yrs.service + sex, data = salary)
summary(linear.m.all)
drop1(linear.m.all, test = "LRT") # p on rmv that var
# - Stepwise
linear.m.step = step(linear.m.all, direction = "both")
summary(linear.m.step)
linear.m.step$anova
# - Chosen model
linear.m1 = glm(salary ~ rank + discipline + yrs.since.phd + yrs.service, data = salary)
summary(linear.m1)
# - LR test
drop1(linear.m1, test = "LRT") # p on rmv that var
## MC
cbind(summary(linear.m1)$coefficients[,1:2]) # SE
vif(linear.m1) # VIF
## Interaction
add1(linear.m1, scope = ~ . + rank*discipline*yrs.since.phd*yrs.service, test = "LRT")
# - two interactions: discipline:yrs.service; yrs.since.phd:yrs.service
## Revised models
linear.m2 = glm(salary ~ rank + discipline + yrs.since.phd + yrs.service +
yrs.since.phd:yrs.service + discipline:yrs.service, data = salary)
summary(linear.m2) # interractions included
vif(linear.m2) # very large VIF
# - remove yrs.since.phd, yrs.service
linear.m1.1 = glm(salary ~ rank + discipline, data = salary)
summary(linear.m1.1)
# effect of adding them
add1(linear.m1.1, scope = ~ . + yrs.since.phd + yrs.service, test = "LRT")
# - add yrs.since.phd
linear.m1.2 = glm(salary ~ rank + discipline + yrs.since.phd, data = salary)
summary(linear.m1.2)
# - add yrs.service
linear.m1.3 = glm(salary ~ rank + discipline + yrs.service, data = salary)
summary(linear.m1.3)
summary(linear.m1) # too much discrepancy between model w & w/out yrs.since.phd, yrs.service
# - chosen one
linear.m3 = linear.m1.1 # salary ~ rank + discipline
summary(linear.m3)
## Residuals & Influentials
plot(linear.m3) # all defaults 1:4
# - Normality
hist(resid(linear.m3), main = "Residuals", xlab = "Residuals", ylab = "Frequency")
plot(linear.m3, which = 2)
# - Linearity
plot(linear.m3, which = 1) # residuals vs predicted
plot(linear.m3, which = 3)
plot(linear.m3$residuals ~ salary$rank, ylab = "Residuals", xlab = "Rank") # prof. variance is big
plot(linear.m3$residuals ~ salary$discipline, ylab = "Residuals", xlab = "Discipline")
# - Influentials
plot(linear.m3, which = 4) # all D < 1
plot(linear.m3, which = 5) # leverage < 0.5
plot(linear.m3, which = 6)
par( mfrow = c(2, 3) ); plot(linear.m3, which = 1:6)
par( mfrow = c(1, 1) ) # reset
# - May need to handle these influential cases, but beyond the context of this workshop
# - Somehow, ended up with only cat var, basically an ANOVA
summary( aov(linear.m3) )
# - But it depends on your obj. of analysis, predict / compare groups
## Final model
# - Accept linear.m3
summary(linear.m3)
library(rsq) # R^2 for GLM
rsq(linear.m3)
# - salary ~ rank + discipline
final = cbind( salary[c("rank", "discipline", "salary")], predicted_salary = predict(linear.m3) )
final_ranked = final[order(final$rank), ]; head(final_ranked); tail(final_ranked)
# - review back levels/var
levels(salary$rank)
levels(salary$discipline)
# - if rank = "Prof", discipline = "B"
predict(linear.m3, list(rank = "Prof", discipline = "B"), se.fit = T)
head( salary[salary$rank == "Prof" & salary$discipline == "B", c("rank", "discipline", "salary")] )
mean( salary[salary$rank == "Prof" & salary$discipline == "B", "salary"] )
# - if rank = "AsstProf", discipline = "B"
predict(linear.m3, list(rank = "AsstProf", discipline = "B"), se.fit = T)
head( salary[salary$rank == "AsstProf" & salary$discipline == "B", c("rank", "discipline", "salary")] )
mean( salary[salary$rank == "AsstProf" & salary$discipline == "B", "salary"] )
|
f9fdfa7092fd394f83f27b3712327a261d9c54c2
|
172c131b7456c76b300d5c7213528b281617db8d
|
/man/print-methods.Rd
|
2bb7e2ade86a0f9971cc2faf9b3aacc46d5ad2cd
|
[] |
no_license
|
cran/momentfit
|
7b23f8d5ed63e32cc4ba368277a106acc4dc8618
|
b7c29f7d8b6f4c3fa0ea5356409b654ede8c8df7
|
refs/heads/master
| 2023-06-08T23:42:57.900725
| 2023-06-05T14:20:02
| 2023-06-05T14:20:02
| 237,930,018
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,057
|
rd
|
print-methods.Rd
|
\name{print-methods}
\docType{methods}
\alias{print-methods}
\alias{print,confint-method}
\alias{print,mconfint-method}
\alias{print,ANY-method}
\alias{print,momentModel-method}
\alias{print,gmmfit-method}
\alias{print,gelfit-method}
\alias{print,sgmmfit-method}
\alias{print,rlinearModel-method}
\alias{print,rslinearModel-method}
\alias{print,rsnonlinearModel-method}
\alias{print,rfunctionModel-method}
\alias{print,rformulaModel-method}
\alias{print,rnonlinearModel-method}
\alias{print,summaryGmm-method}
\alias{print,summaryGel-method}
\alias{print,summarySysGmm-method}
\alias{print,specTest-method}
\alias{print,momentWeights-method}
\alias{print,sysMomentWeights-method}
\alias{print,sysModel-method}
\alias{print,hypothesisTest-method}
\alias{print,sSpec-method}
\title{ Methods for Function \code{print} in Package \pkg{base}}
\description{
Print method for all \code{"momentModel"}, \code{"gmmfit"},
\code{"summaryGmm"} \code{"hypothesisTest"} and \code{"specTest"} objects.
}
\section{Methods}{
\describe{
\item{\code{signature(x = "ANY")}}{
}
\item{\code{signature(x = "momentModel")}}{
}
\item{\code{signature(x = "sSpec")}}{
}
\item{\code{signature(x = "confint")}}{
}
\item{\code{signature(x = "mconfint")}}{
}
\item{\code{signature(x = "sysModel")}}{
}
\item{\code{signature(x = "sysMomentWeights")}}{
}
\item{\code{signature(x = "gmmfit")}}{
}
\item{\code{signature(x = "gelfit")}}{
}
\item{\code{signature(x = "sgmmfit")}}{
}
\item{\code{signature(x = "summaryGmm")}}{
}
\item{\code{signature(x = "summaryGel")}}{
}
\item{\code{signature(x = "summarySysGmm")}}{
}
\item{\code{signature(x = "specTest")}}{
}
\item{\code{signature(x = "rlinearModel")}}{
}
\item{\code{signature(x = "rformulaModel")}}{
}
\item{\code{signature(x = "rslinearModel")}}{
}
\item{\code{signature(x = "rsnonlinearModel")}}{
}
\item{\code{signature(x = "rnonlinearModel")}}{
}
\item{\code{signature(x = "rfunctionModel")}}{
}
\item{\code{signature(x = "hypothesisTest")}}{
}
\item{\code{signature(x = "momentWeights")}}{
}
}}
\keyword{methods}
|
9c916375e56150ab82e31825820b9788c0a4e4aa
|
b80560f563cfd9a6a5e25c98dd1221e1161ffa0a
|
/R/segmentation.R
|
6b948105d4b0a9c9a44ec9d221e8006cf03d418e
|
[] |
no_license
|
tridentasf/segmentation
|
ed129e68751638051c509d4f72e8a1ab0674e74f
|
127118dc15e973ed6ab52a85c4d3838c6d8e416c
|
refs/heads/master
| 2021-01-15T11:46:33.175777
| 2015-05-14T12:52:33
| 2015-05-14T12:52:33
| 35,567,452
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 147
|
r
|
segmentation.R
|
getSegment <- function(answers) {
#load("data/segment_fit.rda")
pred <- predict(segment_fit, newdata=answers, type="class")
return(pred)
}
|
9f102bfe58f4279ce5b9369d1b8c1a3a015f09a2
|
cd812e9a7d34e00a26ce8bdcf81b5eb2d43a4a66
|
/t=0.R
|
c22af6d3173c27b61eca3f7c0956d8a8321dc451
|
[] |
no_license
|
anniekellner/ch1_landing
|
e0644f8b97578dbae499530e20658691b100f99b
|
f71c76e0e40761cc0b6606bacedf560119b2d1da
|
refs/heads/master
| 2022-12-07T22:50:23.557591
| 2022-12-02T22:31:48
| 2022-12-02T22:31:48
| 140,736,188
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,223
|
r
|
t=0.R
|
#########################################################
### DETERMINING TIME OF ORIGIN FOR TTE MODEL ##########
#########################################################
# Calling t= 0 on average date when SIC is ~ 50% over the shelf and falls shortly thereafter.
# 50% is considered the lower threshold for suitable habitat for polar bears (T Atwood and D. Douglas pers. comm.)
# icemean_50 is a mean SIC of 50% over the entire shelf
library(dplyr)
library(lubridate)
library(tidyselect)
rm(list = ls())
# Format data
ice <- read.csv("C:/Users/akell/OneDrive - Colostate/PhD/Polar_Bears/Data/sbs_daily_icestats_1979_2017_v6.csv")
class(ice$date) # Date imported as factor 1/1/1979
ice$date <- mdy(ice$date, tz = NULL)
ice$month <- month(ice$date)
ice$year <- year(ice$date)
ice <- ice %>%
mutate(ordinal = yday(date)) %>%
select(date, ordinal, year, month, starts_with("sbs_shelf") & ends_with("_50")) %>%
filter(month < 9)
icepct <- ice %>%
group_by(year(date)) %>%
filter(sbs_shelf_icepct_50 > 50) %>%
slice(n())
icemean <- ice %>%
group_by(year(date)) %>%
filter(sbs_shelf_icemean_50 > 85) %>%
slice(n())
# June 1 is when 85% of the shelf is covered by 50% mean ice concentration
|
5737ff02458b434d2aa074faec0230e9a7b7257f
|
0e26112fdff7dd8beacd4a51e38014833a5ebdf7
|
/src/41_fullPT_summaries.R
|
2b113ffd3dc57d4fc249e62e413e95a917ef5c48
|
[] |
no_license
|
jkbest2/tunabayes
|
278fd782d92c60432c01fcf125e48e6ad489a3b6
|
4911af91a7b8d614392b3039dd86026c341a2982
|
refs/heads/master
| 2020-07-15T02:57:17.951095
| 2019-11-14T04:04:12
| 2019-11-14T04:06:57
| 205,463,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 308
|
r
|
41_fullPT_summaries.R
|
source("src/40_postprocess.R")
fullPT_fits <- readRDS("results/fullPT_fits.Rds")
fullPT_summaries <- summarize_posteriors(fullPT_fits)
saveRDS(fullPT_summaries, "results/fullPT_summaries.Rds")
fullPT_diagnostics <- diagnose_fits(fullPT_fits)
saveRDS(fullPT_diagnostics, "results/fullPT_diagnostics.Rds")
|
c08a66fa0367d4f48dc375e00742f765e0424efe
|
ad21d3e027aa8d6b23f4ffa91c811bf686d77a9d
|
/done-r/seouldongMap.r
|
137e999fa4b94165cde103b6041f2937ddbf987c
|
[] |
no_license
|
i-hs/R-project1
|
834462e81815ebeb70830bcb7bf0cac5760dfa90
|
dde28b1e722acf4ce2b5d5587e7141dc9367b9be
|
refs/heads/master
| 2022-11-18T14:10:41.379385
| 2019-12-12T01:54:28
| 2019-12-12T01:54:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,086
|
r
|
seouldongMap.r
|
#서울 지도 시각화
rm(list=ls())
install.packages('ggmap')
install.packages('ggplot2')
install.packages('raster')
install.packages('rgeos')
install.packages('maptools')
install.packages('rgdal')
library(ggmap)
library(ggplot2)
library(raster)
library(rgeos)
library(maptools)
library(rgdal)
setwd ('C:/dev/R-project1') # 프로젝트 폴더 변경
# 시각화할 자료 여기에 :
# P<- read.csv('data/sample.csv,header = TRUE') 현 자료에 SEOUL_ID 과 일치하는 id칼럼을 미리 삽입해야 함.
#map <- shapefile('C:/dev/R-project1/maps/TL_SCCO_SIG.shp')# 지리 정보 자료/201703
map <- shapefile('dongmaps/TL_SCCO_EMD.shp')# 지리 정보 자료/201703 / 읍면동
# https://givitallugot.tistory.com/2
map@polygons[[1]]@Polygons[[1]]@coords %>% head(n=10L) # 10행까지 좌표 확인
map <- spTransform(map, CRSobj = CRS('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs'))
# 여기서 에러
map <- gBuffer(map, byid=TRUE, width=0)
new_map <- fortify(map,region = 'EMD_CD') # fortify 함수로 map을 data-frame으로 변환.
# EMD_CD 칼럼이 id로 변환됨
View(new_map)
seoul_map_ggplot <- ggplot() + geom_polygon(data = new_map,
aes(x = long,
y = lat, group=group),
fill = 'white', color = 'black' )
#11740 ÷ 1337748 =,0.008775942853213,
# id <=11740 인 지역이 서울시 정보.
new_map$id <- as.numeric(new_map$id) # 문자로 입력된 id를 숫자로 변환한 후, 26100000 이하(서울)만 추출한다.
seoul_map <- new_map[new_map$id <= 26100000,]
View(seoul_map)
# P와 seoul_map의 통합. 시각화 자료를 위한 것 :
# P_merge <- merge(seoul_map, P, by='id')
seoul_map_ggplot <- ggplot() + geom_polygon(data = seoul_map,
aes(x = long,
y = lat, group=group),
fill = 'white', color = 'black' ) # 데이터 통합 후 data = P_merge로 변환
seoul_map_ggplot
|
f66f2dc62f9959d065ccd198ebe94426f91e9f37
|
4c92bcc96d9f86da59f05367cf3293c688e0732e
|
/Baby Names Analysis/Baby Names Script.R
|
e3abbd6b13f5ac69ac2bc5f5f873d0f4ff736658
|
[] |
no_license
|
karlgourgue/R-Projects
|
0406693c9d05aac7a4a0cd0ebdae88176afdc03d
|
a3d7bb416662e537992f2e36b18d350c8557568a
|
refs/heads/master
| 2020-05-27T21:21:04.967779
| 2017-03-02T19:43:50
| 2017-03-02T19:43:50
| 83,666,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,026
|
r
|
Baby Names Script.R
|
# Part 1, Loading Files Into R
a = seq(1880,2014)
babynames = NULL
#Files are in a folder with csv files, each labeled "yob1880.txt" -> "yob2014.txt"
for (i in a) { #a loop that loads each file into a df "babynams"
temp = read.csv(paste("yob", toString(i), ".txt", sep=''), header=FALSE)
b = c(rep(i, nrow(temp)))
d = cbind(temp,b)
babynames = rbind(d,babynames)
}
names(babynames) = c("Name", "Gender", "Count", "Year")
#Part 2, The Popularity of "Karl" (my name) Over TIME
myname = babynames[babynames$Name == "Karl" & babynames$Gender == "M",]
plot(myname$Year,myname$Count,xlab="Year",ylab="Count",main="Popularity of Karl over time")
#Part 3, Plotting Number of Unique Girl Names Over Time
onlygirls = babynames[babynames$Gender == "F",]
onlygirlsf = data.frame(table(onlygirls$Year))
names(onlygirlsf) = c("Year","Count")
plot(onlygirlsf$Year,onlygirlsf$Count,xlab="Year",ylab="Number of Unique Names",main="Growth in Unique Names")
#Part 4, Finding Most Popular Name / Gender + Year
toptennames <- function (year,gender) {
ttn = babynames[babynames$Year == year & babynames$Gender == gender,]
ttn[order("Count",decreasing = TRUE)]
head(ttn, n=10)
} #function takes year,gender as input and outputs top 10 names
#Part 5, Fingin Most Gender Neutral Names
#Accomplished by ranking frequency of male + female names, then putting those together to see
#which names rank highest across both
male = babynames[babynames$Year == 2014 & babynames$Gender == "M",]
male = cbind(male, rank(-male$Count))
male = subset(male, select=c("Name", "rank(-male$Count)"))
female = babynames[babynames$Year == 2014 & babynames$Gender == "F",]
female = cbind(female, rank(-female$Count))
female = subset(female, select=c("Name", "rank(-female$Count)"))
neutral = merge(male,female)
names(neutral)[names(neutral)=="rank(-male$Count)"] <- "mrank"
names(neutral)[names(neutral)=="rank(-female$Count)"] <- "frank"
maxrank = pmax(neutral$mrank,neutral$frank)
neutral = cbind(neutral,maxrank)
head(neutral[order(maxrank),])
|
3d630c08ed6c533efedcf6362872785e4a5113e2
|
3c38d8cbe00ffb6d1150682ea1f3c79acfc33d96
|
/timings/7a8a49d0b763c0abed4eae1c1e08bbeb3c2c928d/grattan/tests/testthat/test_lf_inflator.R
|
0393170f9162c649d30e09ded3eee60fe7c489a8
|
[] |
no_license
|
HughParsonage/grattan
|
c0dddf3253fc91511d122870a65e65cc918db910
|
cc3e37e1377ace729f73eb1c93df307a58c9f162
|
refs/heads/master
| 2023-08-28T00:12:35.729050
| 2023-08-25T08:02:25
| 2023-08-25T08:02:25
| 30,398,321
| 26
| 11
| null | 2022-06-26T15:44:27
| 2015-02-06T06:18:13
|
R
|
UTF-8
|
R
| false
| false
| 2,666
|
r
|
test_lf_inflator.R
|
context("lf inflator")
test_that("Error handling", {
expect_error(lf_inflator_fy(from_fy = "2012-13", to_fy = "2099-00", allow.projection = FALSE), regexp = "to_fy are in labour force data")
})
test_that("upper and lower series produce higher and lower forecasts", {
expect_gte(lf_inflator_fy(labour_force = 1,
from_fy = "2012-13",
to_fy = "2018-19",
forecast.series = "upper"),
lf_inflator_fy(labour_force = 1,
from_fy = "2012-13",
to_fy = "2018-19",
forecast.series = "mean"))
expect_lte(lf_inflator_fy(labour_force = 1,
from_fy = "2012-13",
to_fy = "2018-19",
forecast.series = "lower"),
lf_inflator_fy(labour_force = 1,
from_fy = "2012-13",
to_fy = "2018-19",
forecast.series = "mean"))
})
test_that("lf_inflator returns known results", {
expect_equal(lf_inflator(from_date = "1981-01-01", to_date = "1981-02-01"), 1.00124729250057, tol = 0.001)
})
test_that("lf_inflator returns long", {
expect_equal(round(lf_inflator_fy(labour_force = c(1, 2), from_fy = "2010-11", to_fy = "2012-13"), 3),
round(c(1.02691290641353, 2.05382581282705), 3))
expect_equal(lf_inflator(from_date = c("1981-01-01", "1981-02-01"), to_date = c("1981-02-01", "1981-01-01")),
c(1.00124729250057, 0.998754261299966), tol = 0.001)
})
test_that("lf_inflator_fy accepts multiple dates", {
length_of <- length(lf_inflator_fy(labour_force = c(1, 2), from_fy = c("2010-11", "2012-13"), to_fy = c("2012-13", "2013-14")))
expect_equal(length_of, 2)
})
test_that("ABS connection", {
expect_equal(lf_inflator(from_date = c("1981-01-01", "1981-02-01"), to_date = c("1981-02-01", "1981-01-01"), useABSConnection = TRUE),
c(1.00124729250057, 0.998754261299966), tol = 0.001)
expect_equal(lf_inflator(from_date = "1981-01-01", to_date = "1981-02-01"),
lf_inflator(from_date = "1981-01-01", to_date = "1981-02-01", useABSConnection = TRUE),
tol = 0.001)
})
test_that("Custom lf series", {
x <- lf_inflator_fy(1, from_fy = "2015-16", to_fy = "2017-18",
forecast.series = "custom", lf.series = data.table(fy_year = c("2015-16", "2016-17", "2017-18"),
r = c(0, 0, 0.10)))
expect_equal(x, 1.1)
})
|
cbdd5c1a5b9d0c22cc2e75a1ba7145cc0bab4814
|
7714f332a98addb262171d2cf9aeda7f049d455d
|
/scripts/merge_pig_contig_bin_species_clusters.R
|
2e5c7e61e614b049cb8e7e5141012749fdc9658a
|
[] |
no_license
|
GaioTransposon/metapigs_wars
|
a6a56f77a476e0915fa82fc7b6b4b20aa8a53a75
|
115dd80ad8efff023bd0a93740c711320570eebe
|
refs/heads/main
| 2023-05-11T05:39:25.662662
| 2023-04-28T13:18:38
| 2023-04-28T13:18:38
| 316,386,023
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,338
|
r
|
merge_pig_contig_bin_species_clusters.R
|
# /shared/homes/12705859/contig_abundances/no_reps_contigs_PigBinContig.csv 1762877983
# pig,bin,contigName
# 29667,bins.13.fa,k141_100
# 29778,bins.130.fa,k141_100
# head no_reps_contigs_PigBinContig.csv -n 1000 > test1
#
# /shared/homes/12705859/cdhit_work/cd_hit_onMacrel/contigs_all_clusters 5964739
# pig,contig,ORF_number,cluster_100,cluster_90,cluster_95,cluster_98
# 14171,k141_189950,1,Cluster 0,Cluster 0,Cluster 0,Cluster 0
# 14200,k141_40977,1,Cluster 0,Cluster 0,Cluster 0,Cluster 0
# cp contigs_all_clusters test2
#
#
# /shared/homes/s1/pig_microbiome/kraken_on_concat_bins/all_concatenated_220_essential.kraken 3250400
# C 14159_bins.100.fa CAG-914 sp000437895 (taxid 15578) 1230079
# C 14159_bins.101.fa PeH17 sp001940845 (taxid 25959) 1543424
# cp all_concatenated_220_essential.kraken test3
# runs from the HPC
# language: R
#This script requires the following packages:
install.packages("base", repos = "http://cran.us.r-project.org")
install.packages("data.table", repos = "http://cran.us.r-project.org", dependencies = TRUE)
install.packages("dplyr", repos = "http://cran.us.r-project.org")
install.packages("stringr", repos = "http://cran.us.r-project.org")
install.packages("utils", repos = "http://cran.us.r-project.org")
install.packages("splitstackshape", repos = "http://cran.us.r-project.org")
install.packages("readr", repos = "http://cran.us.r-project.org")
#upload all libraries
library(base)
library(readr)
library(data.table)
library(dplyr)
library(stringr)
library(utils)
library(splitstackshape)
macrel.dir="/shared/homes/12705859/cdhit_work/cd_hit_onMacrel"
fileA <- read_csv("/shared/homes/12705859/contig_abundances/no_reps_contigs_PigBinContig.csv", col_types = cols(pig = col_character()))
fileB <- read.csv(file.path(macrel.dir,"contigs_all_clusters"), sep = ",",
row.names = NULL, header = TRUE, stringsAsFactors = FALSE)
fileC <- read.csv("/shared/homes/s1/pig_microbiome/kraken_on_concat_bins/all_concatenated_220_essential.kraken", sep = "\t",
row.names = NULL, header = FALSE, stringsAsFactors = FALSE)
## run on local:
# fileA <- read_csv("Desktop/bins_clustering_parsing_DFs/parsing_metapigs_wars/test1", col_types = cols(pig = col_character()))
#
# fileB <- read.csv("Desktop/bins_clustering_parsing_DFs/parsing_metapigs_wars/test2", sep = ",",
# row.names = NULL, header = TRUE, stringsAsFactors = FALSE)
#
# fileC <- read.csv("Desktop/bins_clustering_parsing_DFs/parsing_metapigs_wars/test3", sep = "\t",
# row.names = NULL, header = FALSE, stringsAsFactors = FALSE)
#
colnames(fileA)[colnames(fileA)=="contigName"] <- "contig"
fileC <- cSplit(fileC, "V2", "_")
fileC <- cSplit(fileC, "V3", "(")
fileC <- fileC %>%
dplyr::select(V2_1, V2_2, V3_1)
colnames(fileC) <- c("pig","bin","species")
head(fileC)
head(fileA)
head(fileB)
head(fileC)
NROW(fileA)
NROW(fileB)
fileAB <- right_join(fileA,fileB)
NROW(fileAB)
NROW(fileC)
fileABC <- right_join(fileC,fileAB)
NROW(fileABC)
head(fileABC)
fileABC <- fileABC %>%
dplyr::select(species, everything()) %>%
dplyr::select(bin, everything()) %>%
dplyr::select(contig, everything()) %>%
dplyr::select(pig, everything())
# save
fwrite(x = fileABC,
file = file.path(macrel.dir,"merge_pig_contig_bin_species_clusters_out"))
|
77b3c7e8c5632d3bb4124f4483241ac416fc45d5
|
688152e35866f78a078a078fb9a679c16979535f
|
/soogeunz/data_gen_sparse_ortho_eigenvectors.R
|
57fb2b8904e81730e4cd3a25da4999d324224a3a
|
[] |
no_license
|
davidevdt/sparsePCAproject
|
b134aea9c2a8a421e98b6f7573a45da5d3178fa1
|
245debf534e82ffe9289a87299520358ba3c16e1
|
refs/heads/master
| 2020-03-21T00:29:26.245232
| 2019-03-25T13:05:01
| 2019-03-25T13:05:01
| 137,896,218
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,571
|
r
|
data_gen_sparse_ortho_eigenvectors.R
|
# data generation through sparse and orthogonal eigenvectors = right singular vectors #
# this is a trick that allows specification of sparse and column-orthogonal matrix #
# (original) PCA always has column-orthogonal W and P which are equal to each other.
# with this trick we can make sparse W and P which fulfill that property.
# this procedure relies on SVD #
# recall that PCA is the same as SVD.
# SVD: X = UDV'
# PCA: X = XWP' = TP'
# from the above equations, we know that:
# V = W = P
# XW = T = UD
# we make use of the X = UDV' decomposition to specify our X
# we need to define U, D, V that meet the properties of SVD:
# U'U = I (left singular vectors)
# V'V = I (right singular vectors)
# D diagonal matrix with singular values
# importantly, we need to find V which is also sparse on top of column-orthogonality!
n <- 100
p <- 30
R <- 3 # number of components
# ** step 1. specify the sparse and orthogonal matrix V **
# (i named it V. it can either be W or P depending on your interpretation.)
# V = W = P
V <- matrix(0,p,R)
V[1:10,1] <- 1
V[11:20,2] <- 1
V[11:30,3] <- 1
V
# some common-dinstinctive processes in here
# not column-orthogonal
t(V) %*% V
# extract a block of the V matrix
# look at the rows that have elements in multiple columns
# in our V matrix, this is the rows 11:20
block <- V[11:20,2:3]
# orthogonalize this block
block_ortho <- qr.Q(qr(block))
# put this block back into the V matrix
V[11:20,2:3] <- block_ortho
t(V) %*% V
# you can observe that the off-diagonals are all zeros now
# this is a function that normalizes each column vector to unit vector
normalize <- function(MATRIX) {apply(MATRIX,2,function(x){x/norm(as.matrix(x),"F")})}
V <- normalize(V)
# V'V = I
t(V) %*% V
# ** step 2. specify the U and D **
# UD = T = XW
# randomly generate U from multivariate normal
# (columns of U are not correlated - remember that PCA components are uncorrelated)
set.seed(11)
U <- MASS::mvrnorm(n = 100, mu = rep(0,R), Sigma = diag(R), empirical=TRUE)
# orthogonalizing the U
# by doing this, we achieve U'U = I which is a property of left singular vectors
# (also means that our components are uncorrelated)
U <- qr.Q(qr(U))
t(U) %*% U
# now we specify the D matrix
# within SVD: this is the diagonal matrix with singular values
# this defines the amount of variance each corresponding principal component has
D <- diag(c(50, 30, 20))
# so now we have them all:
# V: V'V = I and sparse
# U: U'U = I
# D
X <- U %*% D %*% t(V)
svd1 <- svd(X)
round(svd1$v[,1:3] ,6)
# yes
round(prcomp(X)$rotation[,1:3], 6)
|
e2443de7db6fd35a9fe7bdf3dbd1f49643d56fed
|
770629c2b2a09f0c2fda4e80bd1f66a2af8a16bb
|
/R/event.convert.r
|
5f748db55ae16f97a996f9b612310f8b27053a99
|
[] |
no_license
|
cran/event.chart
|
ca731bbe21920da8d1d8e4559c1f188747845782
|
2ee6d0a7fb57ac7d2ac93bc11fa92f2ce92fe1f7
|
refs/heads/master
| 2021-01-20T15:42:38.622206
| 2001-09-22T00:00:00
| 2001-09-22T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,362
|
r
|
event.convert.r
|
# event.convert.r
# convert 2-column coded events to multiple event time for event.chart()
# input: a matrix or dataframe with at least 2 columns
# by default, the first column contains the event time and
# the second column contains the k event codes (e.g. 1=dead, 0=censord)
# ouput: a matrix of k columns, each column contains the time of kth coded event
#
event.convert <- function(data2, event.time = 1, event.code = 2)
{
dim.d <- dim(data2)
len.t <- length(event.time)
if(len.t != length(event.code))
stop("length of event.time and event.code must be the same")
if(any(event.time > dim.d[2]))
stop(paste("Column(s) in event.time cannot be greater than ",
dim.d[2]))
if(any(event.code > dim.d[2]))
stop(paste("Column(s) in event.code cannot be greater than ",
dim.d[2]))
name.data <- names(data2)[event.time]
if(is.null(name.data)) {
name.data <- paste("V", event.time, sep = "")
}
n.level <- rep(NA, len.t)
for(i in (1:len.t)) {
n.level[i] <- length(table(data2[, event.code[i]]))
}
tot.col <- sum(n.level)
data.out <- matrix(NA, dim.d[1], tot.col)
name.col <- rep(NA, tot.col)
n.col <- 1
for(i in (1:len.t)) {
tab.d <- table(data2[, event.code[i]])
if(is.null(class(data2[, event.code[i]])))
level.value <- as.numeric(names(tab.d))
else level.value <- names(tab.d)
for(j in (1:length(tab.d))) {
data.out[, n.col] <- rep(NA, dim.d[1])
check <- data2[, event.code[i]] == level.value[j]
check[is.na(check)] <- F
data.out[, n.col][data2[, event.code[i]] == level.value[
j]] <- data2[, event.time[i]][check]
name.col[n.col] <- paste(name.data[i], ".", names(tab.d
)[j], sep = "")
n.col <- n.col + 1
}
}
dimnames(data.out) <- list(1:dim.d[1], name.col)
return(as.matrix(data.out))
}
|
6a495a36e795bf48441eb7b1321c69fb8968c03f
|
5f179d46db7e5e6c93e8d54b29170e55d57f6513
|
/tests/testthat/test_04_Utility.R
|
900e841d1cbb5e8610b911a20e57c1db6dea6058
|
[] |
no_license
|
BenEngbers/RBaseX
|
727538fab9a10ad72496c2e2eb1d3d6fe81f8be0
|
64a3de8bd3d0e31cb9789a8bf30fa89a50f4729c
|
refs/heads/master
| 2022-11-09T03:55:29.343796
| 2022-11-05T00:25:37
| 2022-11-05T00:25:37
| 243,994,987
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,413
|
r
|
test_04_Utility.R
|
test_that("Sequence result is converted to frame", {
skip_unless_socket_available()
Session <- BasexClient$new("localhost", 1984L, username = "Test", password = "testBaseX")
Query_1 <- Session$Query(paste("declare variable $name external;",
"for $i in 1 to 3 return ( element { $name } { $i }, $i, $i mod 2 = 0)"))
Query_1$queryObject$Bind("$name", "number")
re <- Query_1$queryObject$ExecuteQuery()
res_f <- result2frame(re, 3)
expect_equal(nrow(res_f), 3)
expect_equal(class(res_f), "data.frame")
expect_equal(lapply(res_f, class)[[3]], "logical")
# Cleanup
Query_1$queryObject$Close()
rm(Session)
})
test_that("Array result is converted to frame", {
skip_unless_socket_available()
Session <- BasexClient$new("localhost", 1984L, username = "Test", password = "testBaseX")
Query_1 <- Session$Query(paste("for $i in 1 to 2 return ( [$i, math:pow($i, 2), string(math:pow($i, 3)), $i mod 2 = 0])"))
re_arr <- Query_1$queryObject$ExecuteQuery()
res_f <- result2frame(re_arr)
expect_equal(class(res_f), "data.frame")
expect_equal(lapply(res_f, class)[[4]], "logical")
# Cleanup
Query_1$queryObject$Close()
rm(Session)
})
test_that("Sequence result is converted to tibble", {
skip_unless_socket_available()
Session <- BasexClient$new("localhost", 1984L, username = "Test", password = "testBaseX")
Query_1 <- Session$Query(paste("declare variable $name external;",
"for $i in 1 to 3 return ( element { $name } { $i }, $i, $i mod 2 = 0)"))
Query_1$queryObject$Bind("$name", "number")
re <- Query_1$queryObject$ExecuteQuery()
res_t <- result2tibble(re, 3)
expect_equal(nrow(res_t), 3)
expect_equal(class(res_t)[[1]], "tbl_df")
expect_equal(lapply(res_t, class)[[3]], "logical")
# Cleanup
Query_1$queryObject$Close()
rm(Session)
})
test_that("Array result is converted to tibble", {
skip_unless_socket_available()
Session <- BasexClient$new("localhost", 1984L, username = "Test", password = "testBaseX")
Query_1 <- Session$Query(paste("for $i in 1 to 2 return ( [$i, math:pow($i, 2), string(math:pow($i, 3)), $i mod 2 = 0])"))
re_arr <- Query_1$queryObject$ExecuteQuery()
res_t <- result2tibble(re_arr)
expect_equal(class(res_t)[[1]], "tbl_df")
expect_equal(lapply(res_t, class)[[4]], "logical")
# Cleanup
Query_1$queryObject$Close()
rm(Session)
})
|
666718f5895a2385e1f163354b402a02ff1e07d7
|
27beee90fec3e1279a18ec239db1bb37329773fe
|
/man/reexports.Rd
|
b480e7fe908ca122f9eb78eaf2d681294810d46e
|
[] |
no_license
|
KlausVigo/treeio
|
1fc9256c013081272ba11b799a95bb944814a4be
|
7930e284ad9f5eddaf9bee9c7d7a7117fbee3dde
|
refs/heads/master
| 2021-08-29T08:01:06.835100
| 2017-12-13T13:42:48
| 2017-12-13T13:42:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 879
|
rd
|
reexports.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ape.R, R/method-as-phylo.R, R/reexport.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{read.tree}
\alias{reexports}
\alias{read.nexus}
\alias{reexports}
\alias{rtree}
\alias{reexports}
\alias{write.tree}
\alias{reexports}
\alias{Nnode}
\alias{reexports}
\alias{Ntip}
\alias{reexports}
\alias{as.phylo}
\alias{reexports}
\alias{\%>\%}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links
below to see their documentation.
\describe{
\item{ape}{\code{\link[ape]{read.tree}}, \code{\link[ape]{read.nexus}}, \code{\link[ape]{rtree}}, \code{\link[ape]{write.tree}}, \code{\link[ape]{Nnode}}, \code{\link[ape]{Ntip}}, \code{\link[ape]{as.phylo}}}
\item{magrittr}{\code{\link[magrittr]{\%>\%}}}
}}
|
e65097cf196ace85462fea73c8b5c84b144e2324
|
090d33f72f95302d1488d1388499b53ecc919901
|
/R/summaries/psrf.R
|
8307c43573f1e1b3312ba82d56b6fe08a0521167
|
[] |
no_license
|
wlandau/normalization
|
f598d7418e24b14835323166bf327714b25ff947
|
e34b13f05e86f2ded3fdff90dcedf2a31b21fde1
|
refs/heads/main
| 2021-01-20T20:11:01.737905
| 2016-06-21T01:04:23
| 2016-06-21T01:04:23
| 61,324,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 298
|
r
|
psrf.R
|
# Compute Gelman-Rubin potential scale reduction factors.
# Only keep the important ones above 1.1.
#' @param obj list of fbseq::Chain objects
summary_psrf = function(obj){
p = psrf(obj)
p = p[is.finite(p)]
p = p[p > 1.1]
p = sort(p, decreasing = T)
p = p[!grepl("epsilon", names(p))]
}
|
4234d46a5e67fb71608065d118a8938490a0379f
|
49c6a26bf15df25ed7b3099e214cf50875b106dd
|
/01.categorical_analysis/00.data/Chapter6/MP5.1.R
|
aaac55aecba81aa746f7470852f9d8d6c162d162
|
[] |
no_license
|
yerimlim/2018Spring
|
2331d72153c8520664367698c1996bd13d71572f
|
d05131effd2051f703baa2967521d9708ce0551c
|
refs/heads/master
| 2021-09-10T10:02:47.741598
| 2018-03-24T08:29:18
| 2018-03-24T08:29:18
| 126,101,542
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,522
|
r
|
MP5.1.R
|
#####################################################################
# NAME: Chris Bilder #
# DATE: 5-17-13 #
# PURPOSE: Example 5.1 of Mehta and Patel (1995) #
# #
# NOTES: #
#####################################################################
# Number disease free (w) out of total (n) per explanatory variable pattern
set1 <- data.frame(LI = c(0,0,0,0, 1,1,1,1), gender = c(0,0,1,1,0,0,1,1),
AOP = c(0,1,0,1,0,1,0,1), w = c(3,2,4,1,5,3,5,6), n = c(3,2,4,1,5,5,9,17))
head(set1)
sum(set1$n) # Sample size
sum(set1$w)
# Transform data to one person per row (Bernoulli format)
set1.y1 <- set1[rep(1:nrow(set1), times = set1$w), -c(4:5)]
set1.y1$y <- 1
set1.y0 <- set1[rep(1:nrow(set1), times = set1$n-set1$w), -c(4:5)]
set1.y0$y <- 0
set1.long <- data.frame(rbind(set1.y1, set1.y0), row.names = NULL)
nrow(set1.long) # Sample size
sum(set1.long$y)
head(set1.long)
tail(set1.long)
ftable(formula = y ~ LI + gender + AOP, data = set1.long)
# ftable(x = set1.long) # Same
#########################################################################
# Regular logistic regression
mod.fit <- glm(formula = w/n ~ LI + gender + AOP, data = set1, family = binomial(link = logit),
weights = n, trace = TRUE, epsilon = 1e-8) # Default value of epsilon specified
# summary(mod.fit)
round(summary(mod.fit)$coefficients, 4)
# A strictor convergence criteria shows the non-convergence
mod.fit <- glm(formula = w/n ~ LI + gender + AOP, data = set1, family = binomial(link = logit),
weights = n, trace = TRUE, epsilon = 0.00000000001)
# summary(mod.fit)
round(summary(mod.fit)$coefficients, 4)
# Fitting the model to the Bernoulli data format
mod.fit <- glm(formula = y ~ LI + gender + AOP, data = set1.long, family = binomial(link = logit),
trace = TRUE)
summary(mod.fit)
#########################################################################
# logistiX
library(package = logistiX)
mod.fit.logistiX <- logistiX(x = set1.long[,1:3], y = set1.long[,4], alpha = 0.05)
summary(mod.fit.logistiX) # No intercept, but otherwise very similar to Table 3 in the paper
# The p-value is a little different because they are using a little different testing method
names(mod.fit.logistiX)
mod.fit.logistiX$estout # Estimates from four different methods
mod.fit.logistiX$ciout # CIs for four different methods, TST-Pmid row gives the mid-p correction
mod.fit.logistiX$distout # varnum = 1 matches Table 2 in the paper
mod.fit.logistiX$tobs # Observed values of the sufficient statistics
# Exact distribution for sufficient statistic of beta1
just.for.beta1 <- mod.fit.logistiX$distout$varnum == 1
distL1 <- mod.fit.logistiX$distout[just.for.beta1, ]
distL1$rel.freq <- round(distL1$counts/sum(distL1$counts), 4)
distL1
mod.fit.logistiX$tobs[2] # Sufficient stat for beta1 - t1 = 19 (counts for it are distL1$count[1])
distL1$count[1]/sum(distL1$counts) # One-sided p-value bottom p. 2151
sum(distL1$counts[c(1,6:8)])/sum(distL1$counts) # Two-tail test, matches Table 3
# Exact distribution for sufficient statistic of beta2
just.for.beta2 <- mod.fit.logistiX$distout$varnum == 2
distgender <- mod.fit.logistiX$distout[just.for.beta2, ]
distgender$rel.freq <- round(distgender$counts/sum(distgender$counts), 4)
distgender
mod.fit.logistiX$tobs[3] # Sufficient stat for beta2 - t2 = 16
sum(distgender$count[1:3])/sum(distgender$counts) # One-sided p-value
sum(distgender$counts[c(1:3,8:10)])/sum(distgender$counts) # Two-tail test, matches Table 3
2*sum(distgender$count[1:3])/sum(distgender$counts) # P-value given by summary()
# mid-p - see "TST-Pmid" rows in mod.fit.logistiX$ciout
pmf.gender <- distgender$count/sum(distgender$counts) # PMF
2*(sum(pmf.gender[1:2]) + 0.5*pmf.gender[3])
# Exact CI
confint(object = mod.fit.logistiX, level = 0.95, type = "exact") # Another way to extract CI
x11()
plot(x = mod.fit.logistiX, var = 1) # Exact distribution of sufficient statistic
x11()
barplot(height = distL1$counts/sum(distL1$counts), names.arg = distL1$t.stat) # Same
#########################################################################
# elrm
library(package = elrm)
# set.seed(8718) # This does not help you reproduce the same sample.
# The sampling is performed by a C program called by elrm(). It looks like
# no seed number is passed into this program.
mod.fit.elrm1 <- elrm(formula = w/n ~ LI + gender + AOP, interest = ~ LI, iter = 101000,
dataset = set1, burnIn = 1000, alpha = 0.05)
summary(mod.fit.elrm1)
# Estimate of exact distribution corresponding to beta1's sufficient statistic
mod.fit.elrm1$distribution # Similar to what was obtained by logistiX, but just in a different order
sum(mod.fit.elrm1$distribution$LI[1:3,2]) # Two tail test
mod.fit.elrm1$distribution$LI[3,2] # Left-tail test
plot(mod.fit.elrm1)
options(width = 60)
names(mod.fit.elrm1)
options(width = 115)
mod.fit.elrm1$coeffs
mod.fit.elrm1$obs.suff.stat
sum(set1$y*set1$x1)
# mod.fit.elrm1$distribution$LI[,2]
# mod.fit.elrm1$distribution$LI[,1] == 19
#########################################################################
# Firth
library(package = logistf)
mod.fit.firth <- logistf(formula = y ~ LI + gender + AOP, data = set1.long)
mod.fit.firth
summary(mod.fit.firth)
#
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.