blob_id (string, 40..40 chars) | directory_id (string, 40..40 chars) | path (string, 2..327 chars) | content_id (string, 40..40 chars) | detected_licenses (list, 0..91 items) | license_type (string, 2 classes) | repo_name (string, 5..134 chars) | snapshot_id (string, 40..40 chars) | revision_id (string, 40..40 chars) | branch_name (string, 46 classes) | visit_date (timestamp[us], 2016-08-02 22:44:29 .. 2023-09-06 08:39:28) | revision_date (timestamp[us], 1977-08-08 00:00:00 .. 2023-09-05 12:13:49) | committer_date (timestamp[us], 1977-08-08 00:00:00 .. 2023-09-05 12:13:49) | github_id (int64, 19.4k .. 671M, nullable) | star_events_count (int64, 0 .. 40k) | fork_events_count (int64, 0 .. 32.4k) | gha_license_id (string, 14 classes) | gha_event_created_at (timestamp[us], 2012-06-21 16:39:19 .. 2023-09-14 21:52:42, nullable) | gha_created_at (timestamp[us], 2008-05-25 01:21:32 .. 2023-06-28 13:19:12, nullable) | gha_language (string, 60 classes) | src_encoding (string, 24 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 7 .. 9.18M) | extension (string, 20 classes) | filename (string, 1..141 chars) | content (string, 7 .. 9.18M chars)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dff56ac75624041466cf04915ebeba456747aec9 | 506baf5d25ec38e6452a40b5090bbed9ac0aa7d3 | /man/as.data.frame.prevR.Rd | 7e6da48a975f34e0b0e1eb2baca395503f845e34 | [] | no_license | cran/prevR | 71ea23479220037d8417132a167f35ed763483de | ab9dfc7467c1f80f99985717a259908e049fa5ca | refs/heads/master | 2023-05-27T08:58:21.486960 | 2023-05-15T17:50:03 | 2023-05-15T17:50:03 | 17,698,720 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,734 | rd | as.data.frame.prevR.Rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.data.frame.prevR.r
\name{as.data.frame.prevR}
\alias{as.data.frame.prevR}
\alias{as.data.frame}
\title{Convert an object of class prevR into a data.frame.}
\usage{
\method{as.data.frame}{prevR}(x, ..., N = NULL, R = NULL, clusters.only = FALSE)
}
\arguments{
\item{x}{object of class \code{\linkS4class{prevR}}.}
\item{...}{not used, for compatibility with the generic method
\code{\link[base:as.data.frame]{base::as.data.frame()}}.}
\item{N}{integer or list of integers setting elements of
\code{rings} to extract.}
\item{R}{integer or list of integers setting elements of
\code{rings} to extract.}
\item{clusters.only}{return only the slot \code{clusters} of \code{x}?}
}
\value{
If \code{clusters.only = TRUE}, the function will return only the
slot \code{clusters} of \code{x}.
Otherwise, the slots \code{clusters} and \code{rings} of \code{x} will be
merged into a single data frame. The columns of \code{rings} will be renamed
by adding a suffix like \emph{.N300.RInf}.
\code{N} and \code{R} define the elements of \code{rings} to extract.
If not specified (\code{NULL}), all the elements of \code{rings} will
be included.
}
\description{
This function merges the slots \code{clusters} and \code{rings} of
an object of class \code{\linkS4class{prevR}}.
}
\examples{
str(fdhs)
str(as.data.frame(fdhs))
\dontrun{
r.fdhs <- rings(fdhs, N = c(100, 200, 300))
str(r.fdhs)
str(as.data.frame(r.fdhs, clusters.only = TRUE))
str(as.data.frame(r.fdhs))
str(as.data.frame(r.fdhs, N = 300))
}
}
\seealso{
\code{\link[base:as.data.frame]{base::as.data.frame()}}, \code{\linkS4class{prevR}}.
}
\keyword{manip}
|
2f26e0bd57dc62e2c6a60c3e4a44c72332f7a94a | 94711a87720519f87688309086097b3d367d4688 | /wealthofgenerations/man/fill.data.Rd | eba478bbec190543e209356d6ed2da0768d13ff2 | [] | no_license | HughParsonage/wealthofgenerations | 73251b4c6dc6bbcd159947c3a209c38badac5069 | e3b19be111f91b66a86907f6bcc440e206a650be | refs/heads/master | 2020-05-20T01:35:26.819260 | 2015-05-20T01:09:19 | 2015-05-20T01:09:19 | 35,911,243 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 222 | rd | fill.data.Rd |
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/fill.data.R
\name{fill.data}
\alias{fill.data}
\title{fill.data}
\usage{
fill.data(age.totals, age.labels)
}
\description{
fill.data
}
|
6c3e6e87e7f7d50972718f70983679098be814b7 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/mvdalab/examples/plsFit.Rd.R | 3f7a192dc2e571fb279ee317b60fe01bc9bcc8ba | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,478 | r | plsFit.Rd.R |
library(mvdalab)
### Name: plsFit
### Title: Partial Least Squares Regression
### Aliases: plsFit mvdareg summary.mvdareg summary.mvdareg.default
### ** Examples
### PLS MODEL FIT WITH method = 'bidiagpls' and validation = 'oob', i.e. bootstrapping ###
data(Penta)
## Number of bootstraps set to 300 to demonstrate flexibility
## Use a minimum of 1000 (the default) for results that support bootstrapping
mod1 <- plsFit(log.RAI ~., scale = TRUE, data = Penta[, -1], method = "bidiagpls",
ncomp = 2, validation = "oob", boots = 300)
summary(mod1) #Model summary
### PLS MODEL FIT WITH method = 'bidiagpls' and validation = 'loo', i.e. leave-one-out CV ###
## Not run:
##D mod2 <- plsFit(log.RAI ~., scale = TRUE, data = Penta[, -1], method = "bidiagpls",
##D ncomp = 2, validation = "loo")
##D summary(mod2) #Model summary
## End(Not run)
### PLS MODEL FIT WITH method = 'bidiagpls' and validation = 'none', i.e. no CV is performed ###
## Not run:
##D mod3 <- plsFit(log.RAI ~., scale = TRUE, data = Penta[, -1], method = "bidiagpls",
##D ncomp = 2, validation = "none")
##D summary(mod3) #Model summary
## End(Not run)
### PLS MODEL FIT WITH method = 'wrtpls' and validation = 'none', i.e. WRT-PLS is performed ###
## Not run:
##D mod4 <- plsFit(log.RAI ~., scale = TRUE, data = Penta[, -1],
##D method = "wrtpls", validation = "none")
##D summary(mod4) #Model summary
##D plot.wrtpls(mod4)
## End(Not run)
|
435228470fd663317f14f1ca9830a79fdb38f3d4 | af4fc9030892fe71052f953a4db2bcfac5126c8d | /R/checkloss.R | bff257f711df7c1c6aaa75a406205bd9733311e0 | [] | no_license | halleybrantley/detrendr | a2d9e3e58af8d1ffdb7de356a73d6ed1824b4237 | 17e3e7cc9e4bb287c6b905f951c6255560aa8f69 | refs/heads/master | 2020-05-15T08:42:30.268264 | 2019-05-09T15:16:10 | 2019-05-09T15:16:10 | 182,164,011 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 463 | r | checkloss.R |
# Functions for choosing smoothing parameter
#' Evaluate checkloss function
#'
#' \code{checkloss}
#'
#' @param e numeric matrix of residuals, one column per quantile level
#' @param tau vector of quantile levels, one per column of \code{e}
#' @export
checkloss <- function(e, tau){
if (ncol(e) != length(tau)){
stop("Number of columns in y must be same as length of tau")
}
obj <- e
for (i in 1:length(tau)){
obj[,i] <- obj[,i]*tau[i]
obj[e[,i] < 0,i] <- e[e[,i] < 0,i]*(tau[i]-1)
}
return(obj)
}
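# A minimal usage sketch (inputs are assumed, not from the package):
# evaluate the check loss of a two-column residual matrix at the 0.25 and
# 0.75 quantile levels.
# e <- matrix(rnorm(10), ncol = 2)
# checkloss(e, tau = c(0.25, 0.75))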
|
fa50d2b2d2a34d53ceb96247ddccf11d0f3eaeb6 | a321e65620ad566a9d40faafce6d85e02553f484 | /loan_Project.R | cc77f0d4ea98ef63268505a23597a5b156e59bb6 | [] | no_license | flaatah/loan.Project | 8ba19e8f81e4306c6e7c251cab66a8fbbe6a1900 | a2a0d6f0ff7287b3b34b28c79d954b3d3b1fa33a | refs/heads/main | 2023-07-11T22:21:46.475448 | 2021-08-21T07:20:45 | 2021-08-21T07:20:45 | 377,734,963 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,892 | r | loan_Project.R |
library(ggplot2)
library(caTools)
library(e1071)
#read_data
loan <- read.csv('loan_data.csv')
head(loan)
str(loan)
summary(loan)
#convert columns to factor
loan$inq.last.6mths <- as.factor(loan$inq.last.6mths)
loan$delinq.2yrs <- as.factor(loan$delinq.2yrs)
loan$pub.rec <- as.factor(loan$pub.rec)
loan$not.fully.paid <- as.factor(loan$not.fully.paid)
loan$credit.policy <- as.factor(loan$credit.policy)
str(loan)
#EDA
#histogram of fico, colored by not.fully.paid
ggplot(loan, aes(fico)) + geom_histogram(aes(fill=not.fully.paid), color='black',bins=40,alpha=0.5) + scale_fill_manual(values = c('green','red')) + theme_bw()
#barplot for purpose columns
ggplot(loan, aes(factor(purpose))) + geom_bar(aes(fill = not.fully.paid), position='dodge') + theme_bw() + theme(axis.text.x = element_text(angle = 90, hjust = 1))
#scatterplot of fico score versus int.rate
ggplot(loan,aes(int.rate, fico)) + geom_point() + theme_bw()
ggplot(loan,aes(int.rate,fico)) + geom_point(aes(color=not.fully.paid),alpha=0.3) + theme_bw()
#split data into train and test sets
set.seed(101)
split = sample.split(loan$not.fully.paid, SplitRatio = 0.70)
train = subset(loan, split == TRUE)
test = subset(loan, split == FALSE)
#fit an svm() model on the training set
model <- svm(not.fully.paid ~., data = train)
summary(model)
#predict values for the test set
predicted.values <- predict(model,test[1:13])
table(predicted.values,test$not.fully.paid)
#Using the tune() function to test out different cost and gamma values
tune.results <- tune(svm,train.x=not.fully.paid~., data=train,kernel='radial',
ranges=list(cost=c(1,10), gamma=c(0.1,1)))
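# A minimal follow-up sketch: inspect the tuning results before fixing cost
# and gamma; it is assumed here that the best reported combination matches
# the cost = 10, gamma = 0.1 used in the refit below.
summary(tune.results)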
#refit and predict using the cost and gamma found by tuning
model <- svm(not.fully.paid ~ .,data=train,cost=10,gamma = 0.1)
predicted.values <- predict(model,test[1:13])
table(predicted.values, test$not.fully.paid)
|
81e703ced9c5b0982e174d3421b45bd378c8545d | 13b7f8de84d5aae930468d5364e42d4aa07ab3eb | /man/variableDesc.Rd | 9d3e58874c132f203c28ba71f3c51cd8dca5a25b | [] | no_license | cran/NMMAPSlite | 83ab72149117f5e98ba8fa4b0989da9f05ce38d5 | d25a502710ad262786a400298cbf0ebf15180ce3 | refs/heads/master | 2020-06-08T23:34:47.525892 | 2013-03-22T00:00:00 | 2013-03-22T00:00:00 | 17,717,841 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,440 | rd | variableDesc.Rd |
\name{variables}
\alias{variables}
\docType{data}
\title{U.S. Cities Variable Descriptions}
\description{
Descriptions of pollutant, meteorology, and mortality variables for
U.S. cities (1987--2000).
}
\details{
The following information (and more) can be found by loading the
\code{variables} table (i.e. via \code{getMetaData("variables")}).
Each city data frame contains the following variables:
\describe{
\item{city}{abbreviated city name}
\item{date}{Date}
\item{dow}{Day of week}
\item{agecat}{3 age categories}
\item{accident}{Accidental Deaths}
\item{copd}{Chronic Obstructive Pulmonary Disease}
\item{cvd}{Cardiovascular Deaths}
\item{death}{All cause mortality excluding accident}
\item{inf}{Influenza}
\item{pneinf}{Pneumonia and Influenza}
\item{pneu}{Pneumonia}
\item{resp}{Respiratory Deaths}
\item{tmpd}{Mean temperature}
\item{tmax}{Maximum temperature}
\item{tmin}{Minimum temperature}
\item{tmean}{24 hourly mean temperature}
\item{dptp}{Dew point temperature}
\item{rhum}{Mean relative humidity}
\item{mxrh}{Maximum relative humidity}
\item{mnrh}{Minimum relative humidity}
\item{pm10mean}{PM10 Mean}
\item{pm10n}{No. non-missing}
\item{pm10median}{PM10 Median}
\item{pm10max1}{Maximum Hourly PM10}
\item{pm10max2}{2nd Maximum Hourly PM10}
\item{pm10max3}{3rd Maximum Hourly PM10}
\item{pm10max4}{4th Maximum Hourly PM10}
\item{pm10max5}{5th Maximum Hourly PM10}
\item{pm10trend}{Daily mean of 1-year trends}
\item{pm10mtrend}{Daily median of 1-year trends}
\item{pm10grandmean}{Grand Mean}
\item{pm10tmean}{PM10 Trimmed Mean}
\item{pm10meanmax}{Mean of maximum PM10}
\item{pm25mean}{Mean PM2.5}
\item{pm25n}{No. non-missing}
\item{pm25median}{Median PM2.5}
\item{pm25max1}{Maximum Hourly PM2.5}
\item{pm25max2}{2nd Maximum Hourly PM2.5}
\item{pm25max3}{3rd Maximum Hourly PM2.5}
\item{pm25max4}{4th Maximum Hourly PM2.5}
\item{pm25max5}{5th Maximum Hourly PM2.5}
\item{pm25trend}{Daily mean of 1-year trends}
\item{pm25mtrend}{Daily median of 1-year trends}
\item{pm25grandmean}{Grand Mean}
\item{pm25tmean}{Trimmed Mean PM2.5}
\item{pm25meanmax}{Mean of maximum PM2.5}
\item{o3mean}{Mean O3}
\item{o3n}{No. non-missing}
\item{o3median}{Median O3}
\item{o3h0}{0 hour mean}
\item{o3h1}{1 hour mean}
\item{o3h2}{2 hour mean}
\item{o3h3}{3 hour mean}
\item{o3h4}{4 hour mean}
\item{o3h5}{5 hour mean}
\item{o3h6}{6 hour mean}
\item{o3h7}{7 hour mean}
\item{o3h8}{8 hour mean}
\item{o3h9}{9 hour mean}
\item{o3h10}{10 hour mean}
\item{o3h11}{11 hour mean}
\item{o3h12}{12 hour mean}
\item{o3h13}{13 hour mean}
\item{o3h14}{14 hour mean}
\item{o3h15}{15 hour mean}
\item{o3h16}{16 hour mean}
\item{o3h17}{17 hour mean}
\item{o3h18}{18 hour mean}
\item{o3h19}{19 hour mean}
\item{o3h20}{20 hour mean}
\item{o3h21}{21 hour mean}
\item{o3h22}{22 hour mean}
\item{o3h23}{23 hour mean}
\item{o3max1}{Maximum Hourly O3}
\item{o3max2}{2nd Maximum Hourly O3}
\item{o3max3}{3rd Maximum Hourly O3}
\item{o3max4}{4th Maximum Hourly O3}
\item{o3max5}{5th Maximum Hourly O3}
\item{o3trend}{Daily mean of 1-year trends}
\item{o3mtrend}{Daily median of 1-year trends}
\item{o3grandmean}{Grand Mean}
\item{o3tmean}{Trimmed Mean O3}
\item{o3meanmax}{Mean of maximum O3}
\item{so2mean}{Mean SO2}
\item{so2n}{No. non-missing}
\item{so2median}{Median SO2}
\item{so2h0}{0 hour mean}
\item{so2h1}{1 hour mean}
\item{so2h2}{2 hour mean}
\item{so2h3}{3 hour mean}
\item{so2h4}{4 hour mean}
\item{so2h5}{5 hour mean}
\item{so2h6}{6 hour mean}
\item{so2h7}{7 hour mean}
\item{so2h8}{8 hour mean}
\item{so2h9}{9 hour mean}
\item{so2h10}{10 hour mean}
\item{so2h11}{11 hour mean}
\item{so2h12}{12 hour mean}
\item{so2h13}{13 hour mean}
\item{so2h14}{14 hour mean}
\item{so2h15}{15 hour mean}
\item{so2h16}{16 hour mean}
\item{so2h17}{17 hour mean}
\item{so2h18}{18 hour mean}
\item{so2h19}{19 hour mean}
\item{so2h20}{20 hour mean}
\item{so2h21}{21 hour mean}
\item{so2h22}{22 hour mean}
\item{so2h23}{23 hour mean}
\item{so2max1}{Maximum Hourly SO2}
\item{so2max2}{2nd Maximum Hourly SO2}
\item{so2max3}{3rd Maximum Hourly SO2}
\item{so2max4}{4th Maximum Hourly SO2}
\item{so2max5}{5th Maximum Hourly SO2}
\item{so2trend}{Daily mean of 1-year trends}
\item{so2mtrend}{Daily median of 1-year trends}
\item{so2grandmean}{Grand Mean}
\item{so2tmean}{Trimmed Mean SO2}
\item{so2meanmax}{Mean of maximum SO2}
\item{no2mean}{Mean NO2}
\item{no2n}{No. non-missing}
\item{no2median}{Median NO2}
\item{no2h0}{0 hour mean}
\item{no2h1}{1 hour mean}
\item{no2h2}{2 hour mean}
\item{no2h3}{3 hour mean}
\item{no2h4}{4 hour mean}
\item{no2h5}{5 hour mean}
\item{no2h6}{6 hour mean}
\item{no2h7}{7 hour mean}
\item{no2h8}{8 hour mean}
\item{no2h9}{9 hour mean}
\item{no2h10}{10 hour mean}
\item{no2h11}{11 hour mean}
\item{no2h12}{12 hour mean}
\item{no2h13}{13 hour mean}
\item{no2h14}{14 hour mean}
\item{no2h15}{15 hour mean}
\item{no2h16}{16 hour mean}
\item{no2h17}{17 hour mean}
\item{no2h18}{18 hour mean}
\item{no2h19}{19 hour mean}
\item{no2h20}{20 hour mean}
\item{no2h21}{21 hour mean}
\item{no2h22}{22 hour mean}
\item{no2h23}{23 hour mean}
\item{no2max1}{Maximum Hourly NO2}
\item{no2max2}{2nd Maximum Hourly NO2}
\item{no2max3}{3rd Maximum Hourly NO2}
\item{no2max4}{4th Maximum Hourly NO2}
\item{no2max5}{5th Maximum Hourly NO2}
\item{no2trend}{Daily mean of 1-year trends}
\item{no2mtrend}{Daily median of 1-year trends}
\item{no2grandmean}{Grand Mean}
\item{no2tmean}{Trimmed Mean NO2}
\item{no2meanmax}{Mean of maximum NO2}
\item{comean}{Mean CO}
\item{con}{No. non-missing}
\item{comedian}{Median CO}
\item{coh0}{0 hour mean}
\item{coh1}{1 hour mean}
\item{coh2}{2 hour mean}
\item{coh3}{3 hour mean}
\item{coh4}{4 hour mean}
\item{coh5}{5 hour mean}
\item{coh6}{6 hour mean}
\item{coh7}{7 hour mean}
\item{coh8}{8 hour mean}
\item{coh9}{9 hour mean}
\item{coh10}{10 hour mean}
\item{coh11}{11 hour mean}
\item{coh12}{12 hour mean}
\item{coh13}{13 hour mean}
\item{coh14}{14 hour mean}
\item{coh15}{15 hour mean}
\item{coh16}{16 hour mean}
\item{coh17}{17 hour mean}
\item{coh18}{18 hour mean}
\item{coh19}{19 hour mean}
\item{coh20}{20 hour mean}
\item{coh21}{21 hour mean}
\item{coh22}{22 hour mean}
\item{coh23}{23 hour mean}
\item{comax1}{Maximum Hourly CO}
\item{comax2}{2nd Maximum Hourly CO}
\item{comax3}{3rd Maximum Hourly CO}
\item{comax4}{4th Maximum Hourly CO}
\item{comax5}{5th Maximum Hourly CO}
\item{cotrend}{Daily mean of 1-year trends}
\item{comtrend}{Daily median of 1-year trends}
\item{cograndmean}{Grand Mean}
\item{cotmean}{Trimmed Mean CO}
\item{comeanmax}{Mean of maximum CO}
\item{rmtmpd}{Adjusted 3-day lag temperature}
\item{rmdptp}{Adjusted 3-day lag Dew point temperature}
\item{markaccident}{Exclusions for Accidental Deaths}
\item{markcopd}{Exclusions for COPD}
\item{markcvd}{Exclusions for Cardiovascular Deaths}
\item{markdeath}{Exclusions for death}
\item{markinf}{Exclusions for Influenza}
\item{markpneinf}{Exclusions for Pneumonia and Influenza}
\item{markpneu}{Exclusions for Pneumonia}
\item{markresp}{Exclusions for Respiratory Deaths}
\item{l1pm10tmean}{Lag 1 PM10 trimmed mean}
\item{l1pm25tmean}{Lag 1 PM25 trimmed mean}
\item{l1cotmean}{Lag 1 CO trimmed mean}
\item{l1no2tmean}{Lag 1 NO2 trimmed mean}
\item{l1so2tmean}{Lag 1 SO2 trimmed mean}
\item{l1o3tmean}{Lag 1 O3 trimmed mean}
\item{l2pm10tmean}{Lag 2 PM10 trimmed mean}
\item{l2pm25tmean}{Lag 2 PM25 trimmed mean}
\item{l2cotmean}{Lag 2 CO trimmed mean}
\item{l2no2tmean}{Lag 2 NO2 trimmed mean}
\item{l2so2tmean}{Lag 2 SO2 trimmed mean}
\item{l2o3tmean}{Lag 2 O3 trimmed mean}
}
}
\seealso{
\code{\link{readCity}}, \code{\link{listCities}}
}
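% A minimal usage sketch, assuming the metadata loader mentioned in the
% details section above.
\examples{
\dontrun{
## Load the variable descriptions
variables <- getMetaData("variables")
head(variables)
}
}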
\keyword{datasets}
|
b7230d83f8b56c6c9e2e2304bc225d3befaeba83 | ef3b06420505bb9db574b2e4179a00a1553a1f6d | /summary/server.R | 52cc3f2f8752fa5605e261613662d4ce7613bd61 | [] | no_license | Wei-LinHsiao/political-detector | 62be8484e4b97d4ab075c8397cfbaade1c8c9d5e | 6def7a1ae1149dfba833cffb3625be87e035a6ba | refs/heads/master | 2020-04-11T19:53:10.618991 | 2019-01-31T16:00:07 | 2019-01-31T16:00:07 | 162,051,349 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,485 | r | server.R |
library(rsconnect)
library(shiny)
library(readr)
library(tidyverse)
library(DT)
# Import data
coef <- read.csv("coef.csv", header=FALSE)
comments <- read.csv("comments.csv")
# Add column names to coefficients
colnames(coef) <- c("Phrase", "Coefficient")
# Convert the last column, a UNIX timestamp, to a POSIXct date-time
comments <- comments %>%
mutate(`Timestamp (UTC)` = as.POSIXct(Timestamp, origin="1970-01-01", tz = "UTC")) %>%
mutate(Timestamp = NULL)
shinyServer(function(input, output) {
# Table for comments
output$comments <- DT::renderDataTable(
DT::datatable(comments)
)
# Table for coefficients
output$coef <- DT::renderDataTable(
DT::datatable(coef)
)
# Files for downloading
output$downloadCoef<- downloadHandler(
filename = function() {
"pol_detect_coeff.csv"
},
content = function(file) {
write.csv(coef, file, row.names = FALSE)
})
# Comments
output$downloadCom<- downloadHandler(
filename = function() {
"pol_detect_reddit_comments.csv"
},
content = function(file) {
write.csv(comments, file, row.names = FALSE)
})
# Separate JSON downloads for Democrats and Republicans
output$demJSON <- downloadHandler(
filename = "data_demo_full.json",
content = function(file) {
file.copy("data_demo_full.json", file)
})
output$repubJSON <- downloadHandler(
filename = "data_repub_full.json",
content = function(file) {
file.copy("data_repub_full.json", file)
})
})
|
78decd2745d5c830482b7b7acae35ffb80993ee0 | dc012806c6a46a7ed990408e48f85b263418063e | /man/get_isothermal_model_data.Rd | 1ca8f4badf0fa95bdcc85eaca2ab3f7fed4e9ae5 | [] | no_license | albgarre/bioinactivation | becbc9814fae303dad7f8b0861fd4d88bb3286ec | 668e9d234435a2cf8a669ba0de71cdce25907829 | refs/heads/master | 2022-12-07T12:39:59.241879 | 2022-11-22T12:28:32 | 2022-11-22T12:28:32 | 146,264,447 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 996 | rd | get_isothermal_model_data.Rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isothermal_fit.R
\name{get_isothermal_model_data}
\alias{get_isothermal_model_data}
\title{Isothermal Model Data}
\usage{
get_isothermal_model_data(model_name = "valids")
}
\arguments{
\item{model_name}{Optional string with the key of the model to use.}
}
\value{
If \code{model_name} is missing, a list of the valid model keys.
If \code{model_name} is not a valid key, \code{NULL} is returned.
Otherwise, a list with the parameters of the selected model and the
\code{formula} used for the nonlinear fit.
}
\description{
Provides information on the models implemented for fitting isothermal
data.
These models are valid only for isothermal fitting with the function
\code{\link{fit_isothermal_inactivation}}. To make predictions with the
function \code{\link{predict_inactivation}} or to fit dynamic experiments
with \code{\link{fit_dynamic_inactivation}}, use
\code{\link{get_model_data}}.
}
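% A minimal usage sketch based only on the behaviour stated in the value
% section above; the key passed in the second call is whichever the
% package lists as valid.
\examples{
## List the keys of the implemented isothermal models
valid_models <- get_isothermal_model_data()
## Retrieve the parameters and fitting formula of the first valid model
get_isothermal_model_data(valid_models[[1]])
}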
|
2511fd174a1fc557c5a2a181f66a3458f01de4b4 | c7b21b8da05cd2066ac45bb86adbb3266501384c | /man/compare.model.Rd | fc49cef7dd0b2a0f079535df9af8664030bd7748 | ["MIT"] | permissive | zhangkaicr/doRadiomics | 1d68a14408773b9e30314d65c097a6e9a11c8cb5 | 458d38aacbebf646cb8588be780ef2c6026fde0c | refs/heads/main | 2023-03-22T00:19:13.636809 | 2021-03-09T04:17:20 | 2021-03-09T04:17:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 433 | rd | compare.model.Rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare.R
\name{compare.model}
\alias{compare.model}
\title{Compare two models}
\usage{
compare.model(obj1, obj2, fpath)
}
\arguments{
\item{obj1}{a Radiomics.out or Nomogram.out object}
\item{obj2}{a Radiomics.out or Nomogram.out object}
\item{fpath}{the output pptx file path}
}
\value{
Writes the comparison results to the file given by \code{fpath}.
}
\description{
Compare two models.
}
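% A minimal usage sketch; fit1 and fit2 are hypothetical Radiomics.out or
% Nomogram.out objects produced elsewhere in the package.
\examples{
\dontrun{
compare.model(fit1, fit2, fpath = "model_comparison.pptx")
}
}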
|
f81024f89464676a72786e10ecabe48feb85c201 | dc2d65262c6ba262ded597991ead46d901d520f0 | /work_in_progress/Carlin and Chib gibbs sampler for bayesian model uncertainty in meta analysis.R | 4da060a425b9f5e9103de734b408dfa5e9c7335d | [] | no_license | fanstev1/bayesian-model-uncertainty-in-meta-analysis | 10809cf66dc65d0fb26c197c6a570acea89bc41b | 13bebb6a7029d50c5749e9521cb4725e0da787fc | refs/heads/master | 2020-12-23T01:27:33.063205 | 2016-08-16T15:14:00 | 2016-08-16T15:14:00 | 65,828,048 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,119 | r | Carlin and Chib gibbs sampler for bayesian model uncertainty in meta analysis.R |
library(MCMCpack)
library(mvtnorm)
#library(R2WinBUGS)
# Gibbs sampler for Carlin and Chib's method
# Model 1: Y_i ~ N(delta, sigma_i^2)
# Model 2: Y_i ~ N(beta + S_i, sigma_i^2) and S_i ~ N(0, sigma_s^2)
bayes.mdl.uncertainty.meta.analysis<- function(y, var_y, parm.ini,
niter, nburn, nthin,
nchain= length(parm.ini),
prior.prob.fixed= .5,
prior.parm.mdl1, pseudo.prior.parm.mdl1,
prior.parm.mdl2, pseudo.prior.parm.mdl2,
print.progress= FALSE) {
# this is the main program implementing the Gibbs sampler of Carlin and Chib's method
# parm is list, containing Model index, parm1 (list) and parm2 (list)
# parm1 is a list variable containing all parameters of model 1
# parm2 is a list variable containing all parameters of model 2
mcmc.out.lst<- vector("list", nchain)
iter.idx<- 0
while (iter.idx <= (niter+nburn)*nthin){
for (k in 1:nchain) {
parm<- parm.ini[[k]]
# update Model 1 parameters
parm$parm1$delta<- mdl1_post_delta(parm1= parm$parm1,
mdl= parm$mdl.idx,
y= y, var_y= var_y,
prior.parm= prior.parm.mdl1,
pseudo.prior.parm= pseudo.prior.parm.mdl1)
# update Model 2 parameters
parm$parm2$beta <- mdl2_post_beta(parm2= parm$parm2,
mdl= parm$mdl.idx,
y= y, var_y= var_y,
prior.parm= prior.parm.mdl2,
pseudo.prior.parm= pseudo.prior.parm.mdl2)
parm$parm2$lambda<- mdl2_post_lambda( parm2= parm$parm2,
mdl= parm$mdl.idx,
y= y, var_y= var_y,
prior.parm= prior.parm.mdl2,
pseudo.prior.parm= pseudo.prior.parm.mdl2)
parm$parm2$b <- mdl2_post_b( parm2= parm$parm2,
mdl= parm$mdl.idx,
y= y, var_y= var_y,
prior.parm= prior.parm.mdl2,
pseudo.prior.parm= pseudo.prior.parm.mdl2)
parm$parm2$var_b<- mdl2_post_var_b( parm2= parm$parm2,
mdl= parm$mdl.idx,
y= y, var_y= var_y,
prior.parm= prior.parm.mdl2,
pseudo.prior.parm= pseudo.prior.parm.mdl2)
# update model probability
parm$mdl.idx <- gs_post_mdl(parm= parm, y= y, var_y= var_y, prior.prob.mdl1= prior.prob.fixed,
prior.parm.mdl1= prior.parm.mdl1,
prior.parm.mdl2= prior.parm.mdl2,
pseudo.prior.parm.mdl1= pseudo.prior.parm.mdl1,
pseudo.prior.parm.mdl2= pseudo.prior.parm.mdl2)
parm.ini[[k]]<- parm
if (iter.idx> nburn * nthin & iter.idx %% nthin == 0) {
parm<- unlist(parm)
parm2.s<- as.numeric(parm[which(names(parm) %in% paste("parm2.b", 1:nobs, sep= ""))] * parm["parm2.lambda"])
names(parm2.s)<- paste("parm2.s", 1:nobs, sep= "")
parm2.var_s<- as.numeric(parm[which(names(parm)=="parm2.var_b")] * abs(parm["parm2.lambda"]))
names(parm2.var_s)<- "parm2.var_s"
parm<- c(parm, parm2.s, parm2.var_s)
mcmc.out.lst[[k]]<- rbind(mcmc.out.lst[[k]], parm)
}
}
if (print.progress) print(iter.idx)
iter.idx<- 1+iter.idx
}
for (k in 1:nchain) { mcmc.out.lst[[k]]<- mcmc(mcmc.out.lst[[k]], start= 1+nburn, thin= nthin) }
return(mcmc.out.lst<- mcmc.list(mcmc.out.lst))
#plot( mcmc.out.lst<- mcmc(mcmc.out.lst) )
#table(mcmc.out.lst[,"mdl.idx"])
}
gs_post_mdl<- function(parm, y, var_y, prior.prob.mdl1= .5,
prior.parm.mdl1, pseudo.prior.parm.mdl1,
prior.parm.mdl2, pseudo.prior.parm.mdl2) {
log.marginal.lik.ratio<-
# given model 2
dmvnorm(y, mean= parm$parm2$beta + parm$parm2$lambda * parm$parm2$b, sigma= diag(var_y), log= TRUE) +
# pseudo prior for Model 1 parameters when model 2 is assigned
dnorm(parm$parm1$delta, mean= pseudo.prior.parm.mdl1$delta$mu, sd= sqrt(pseudo.prior.parm.mdl1$delta$var), log= TRUE) +
dnorm(parm$parm2$beta, mean= prior.parm.mdl2$beta$mu, sd= sqrt(prior.parm.mdl2$beta$var), log= TRUE) +
dnorm(parm$parm2$lambda, mean= prior.parm.mdl2$lambda$mu, sd= sqrt(prior.parm.mdl2$lambda$var), log= TRUE) +
sum(dnorm(parm$parm2$b, mean= 0, sd= sqrt(parm$parm2$var_b), log= TRUE) ) +
log(dinvgamma(parm$parm2$var_b, shape= prior.parm.mdl2$var_b$alpha , scale= prior.parm.mdl2$var_b$theta)) +
log(1-prior.prob.mdl1) -
# given model 1
( dmvnorm(y, mean= rep(parm$parm1$delta, length(y)), sigma= diag(var_y), log= TRUE) +
dnorm(parm$parm1$delta, mean= prior.parm.mdl1$delta$mu, sd= sqrt(prior.parm.mdl1$delta$var), log= TRUE) +
# pseudo priors for Model 2 parameters when model 1 is assigned
dnorm(parm$parm2$beta, mean= pseudo.prior.parm.mdl2$beta$mu, sd= sqrt(pseudo.prior.parm.mdl2$beta$var), log= TRUE) +
dnorm(parm$parm2$lambda, mean= pseudo.prior.parm.mdl2$lambda$mu, sd= sqrt(pseudo.prior.parm.mdl2$lambda$var), log= TRUE) +
sum(dnorm(parm$parm2$b, mean= pseudo.prior.parm.mdl2$b$mu, sd= sqrt(pseudo.prior.parm.mdl2$b$var), log= TRUE)) +
log(dinvgamma(parm$parm2$var_b, shape= pseudo.prior.parm.mdl2$var_b$alpha, scale= pseudo.prior.parm.mdl2$var_b$theta)) +
log(prior.prob.mdl1))
prob.mdl1.update<- 1/(1+exp(log.marginal.lik.ratio)) # probability of model 1 (fixed-effect)
return( 1+rbinom(n= 1, size= 1, prob= 1-prob.mdl1.update) )
}
mdl2_post_var_b<- function(parm2, mdl, y, var_y, prior.parm, pseudo.prior.parm){
# parm2 is a list variable containing all parameters of model 2
# prior.parm contains the prior hyperparameters of var_b, used when model 2 is selected
# pseudo.prior.parm contains the pseudo-prior hyperparameters of var_b, used when model 1 is selected
if (mdl==2) {
alpha.update<- prior.parm$var_b$alpha + 0.5 * length(parm2$b)
theta.update<- prior.parm$var_b$theta + 0.5 * sum(parm2$b^2)
}
else {
alpha.update<- pseudo.prior.parm$var_b$alpha
theta.update<- pseudo.prior.parm$var_b$theta
}
return( rinvgamma(1, shape= alpha.update, scale= theta.update) )
}
mdl2_post_b<- function(parm2, mdl, y, var_y, prior.parm, pseudo.prior.parm){
# parm2 is a list variable containing all parameters of model 2
# when model 2 is selected, b is drawn from its full conditional;
# otherwise b is drawn from the pseudo-prior in pseudo.prior.parm
if (mdl==2) {
var.b.update <- ( 1/parm2$var_b + parm2$lambda^2/var_y)^(-1)
mu.b.update <- var.b.update * parm2$lambda * (y - parm2$beta)/var_y
}
else {
var.b.update <- pseudo.prior.parm$b$var
mu.b.update <- pseudo.prior.parm$b$mu
}
return( as.numeric( rmvnorm(n= 1, mean= mu.b.update, sigma= diag(var.b.update)) ) )
}
mdl2_post_lambda<- function(parm2, mdl, y, var_y, prior.parm, pseudo.prior.parm){
# parm2 is a list variable containing all parameters of model 2
# prior.parm contains the prior hyperparameters of lambda, used when model 2 is selected
# pseudo.prior.parm supplies the pseudo-prior of lambda, used when model 1 is selected
if (mdl==2) {
sigma.lambda.update<- ( 1/prior.parm$lambda$var + sum(parm2$b^2/var_y) )^(-1/2)
mu.lambda.update <- sigma.lambda.update^2 * sum(parm2$b * (y - parm2$beta)/var_y)
}
else {
sigma.lambda.update<- sqrt(pseudo.prior.parm$b$var)
mu.lambda.update <- pseudo.prior.parm$b$mu
}
return( rnorm(n= 1, mean= mu.lambda.update, sd= sigma.lambda.update) )
}
mdl2_post_beta<- function(parm2, mdl, y, var_y, prior.parm, pseudo.prior.parm){
# parm2 is a list variable containing all parameters of model 2
# prior.parm contains the prior hyperparameters of beta, used when model 2 is selected
# pseudo.prior.parm supplies the pseudo-prior of beta, used when model 1 is selected
if (mdl==2) {
sigma.beta.update<- ( 1/prior.parm$beta$var + sum(1/var_y) )^(-1/2)
mu.beta.update <- sigma.beta.update^2 * sum((y - parm2$lambda* parm2$b)/var_y)
}
else {
sigma.beta.update<- sqrt(pseudo.prior.parm$beta$var)
mu.beta.update <- pseudo.prior.parm$beta$mu
}
return( rnorm(n= 1, mean= mu.beta.update, sd= sigma.beta.update) )
}
mdl1_post_delta<- function(parm1, mdl, y, var_y, prior.parm, pseudo.prior.parm){
# parm1 is a list variable containing all parameters of model 1
# prior.parm.mdl1 is a list containing all hyperparameters of delta when model= 1
# pseudo.prior.parm.mdl1 is a list containing all hyperparameters of delta when model= 2
if (mdl==1) {
sigma.delta.update<- ( 1/prior.parm$delta$var + sum(1/var_y) )^(-1/2)
mu.delta.update <- sigma.delta.update^2 * sum(y/var_y)
}
else {
sigma.delta.update<- sqrt(pseudo.prior.parm$delta$var)
mu.delta.update <- pseudo.prior.parm$delta$mu
}
return( rnorm(n= 1, mean= mu.delta.update, sd= sigma.delta.update) )
}
gs_post_s.mdl2<- function(parm.mdl2, mdl, y, var_y, prior.parm, pseudo.prior.parm){
# parm.mdl2 is a list variable containing all parameters of model 2
# prior.parm contains the prior hyperparameters of s, used when model 2 is selected
# pseudo.prior.parm supplies the pseudo-prior of s, used when model 1 is selected
if (mdl==2) {
sigma.s.update<- ( (1/parm.mdl2$sigma_s)^2 + 1/var_y)^(-1/2)
mu.s.update<- sigma.s.update^2 * (y-parm.mdl2$beta)/var_y
}
else {
sigma.s.update<- pseudo.prior.parm$s$sigma
mu.s.update<- pseudo.prior.parm$s$mu
}
output<- as.numeric(rmvnorm(n= 1, mean= mu.s.update, sigma= diag(sigma.s.update^2)))
return(output)
}
gs_post_sigma_s.mdl2<- function(parm.mdl2, mdl, y, var_y, prior.parm, pseudo.prior.parm){
# parm.mdl2 is a list variable containing all parameters of model 2
# prior.parm contains the prior hyperparameters of sigma_s, used when model 2 is selected
# pseudo.prior.parm supplies the pseudo-prior of sigma_s, used when model 1 is selected
if (mdl==2) {
alpha.update<- prior.parm$sigma_s$alpha + 0.5*length(parm.mdl2$s)
theta.update<- prior.parm$sigma_s$theta + 0.5*sum(parm.mdl2$s^2)
}
else {
alpha.update<- pseudo.prior.parm$sigma_s$alpha
theta.update<- pseudo.prior.parm$sigma_s$theta
}
output<- sqrt(rinvgamma(1, shape= alpha.update, scale= theta.update ))
return( output )
}
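# ---------------------------------------------------------------------------
# A minimal usage sketch (all values are illustrative assumptions). The
# prior/pseudo-prior lists follow the structure read inside the updating
# functions above: delta/beta/lambda carry $mu and $var, var_b carries
# $alpha and $theta, and the pseudo-prior of b carries vectors $mu and $var.
# Note the sampler reads `nobs` from the global environment.
# y <- c(0.2, -0.1, 0.4, 0.0, 0.3)   # hypothetical study effect estimates
# var_y <- rep(0.05, length(y))      # hypothetical within-study variances
# nobs <- length(y)
# prior1 <- list(delta = list(mu = 0, var = 10))
# prior2 <- list(beta   = list(mu = 0, var = 10),
#                lambda = list(mu = 0, var = 1),
#                var_b  = list(alpha = 1, theta = 1),
#                b      = list(mu = rep(0, nobs), var = rep(1, nobs)))
# ini <- list(list(mdl.idx = 1,
#                  parm1 = list(delta = 0),
#                  parm2 = list(beta = 0, lambda = 0,
#                               b = rep(0, nobs), var_b = 1)))
# fit <- bayes.mdl.uncertainty.meta.analysis(y, var_y, parm.ini = ini,
#                                            niter = 100, nburn = 50, nthin = 1,
#                                            prior.parm.mdl1 = prior1,
#                                            pseudo.prior.parm.mdl1 = prior1,
#                                            prior.parm.mdl2 = prior2,
#                                            pseudo.prior.parm.mdl2 = prior2)
# table(as.matrix(fit[[1]])[, "mdl.idx"])  # posterior model frequencies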
|
87779c557c548f63085471056e2aac6892696f9a | 99dd03c6ab460922e07412d08fcd118f50789826 | /src/prepare_bbc.R | c4307b628966aafd747a8d954d6b8c1b10db1470 | ["MIT"] | permissive | maqin2001/IRIS3 | 51108c5b7ab2a5e42d1ff729c5ffe168faa52053 | b5b1934b2f2056c7fa47728ac05d9504547e6062 | refs/heads/master | 2020-05-30T21:43:48.768769 | 2019-05-24T15:58:56 | 2019-05-24T15:58:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,454 | r | prepare_bbc.R |
####### Read all motif results and convert them to input for BBC ##########
# remove all empty files before this
library(seqinr)
library(tidyverse)
args <- commandArgs(TRUE)
#setwd("D:/Users/flyku/Documents/IRIS3-data/test_meme_new")
#setwd("/var/www/html/iris3_test/data/20190408202315/")
#srcDir <- getwd()
#jobid <-20190408202315
# is_meme <- 0
# motif_len <- 12
srcDir <- args[1]
is_meme <- args[2] # no 0, yes 1
motif_len <- args[3]
setwd(srcDir)
getwd()
workdir <- getwd()
alldir <- list.dirs(path = workdir)
alldir <- grep(".+_bic$",alldir,value=T)
#gene_info <- read.table("file:///D:/Users/flyku/Documents/IRIS3_data_backup/dminda/human_gene_start_info.txt")
species_id <- as.character(read.table("species_main.txt")[1,1])
if(species_id == "Human"){
gene_info <- read.table("/var/www/html/iris3/program/dminda/human_gene_start_info.txt")
} else if (species_id == "Mouse"){
gene_info <- read.table("/var/www/html/iris3/program/dminda/mouse_gene_start_info.txt")
}
sort_dir <- function(dir) {
tmp <- sort(dir)
split <- strsplit(tmp, "_CT_")
split <- as.numeric(sapply(split, function(x) x <- sub("_bic.*", "", x[2])))
return(tmp[order(split)])
}
sort_closure <- function(dir){
tmp <- sort(dir)
split <- strsplit(tmp, "/bic")
split <- as.numeric(sapply(split, function(x) x <- sub("\\D+", "", x[2])))
return(tmp[order(split)])
}
sort_short_closure <- function(dir){
tmp <- sort(dir)
split <- strsplit(tmp, "bic")
split <- as.numeric(sapply(split, function(x) x <- sub("\\D+", "", x[2])))
return(tmp[order(split)])
}
alldir <- sort_dir(alldir)
#convert_motif(all_closure[1])
#filepath<-all_closure[1]
convert_motif <- function(filepath){
this_line <- data.frame()
motif_file <- file(filepath,"r")
line <- readLines(motif_file)
# get pvalue and store it in pval_rank
split_line <- unlist(strsplit(line," "))
pval_value <- split_line[which(split_line == "Pvalue:")+2]
if(length(pval_value)>0){
pval_value <- as.numeric(gsub("\\((.+)\\)","\\1",pval_value))
pval_name <- paste(">",basename(filepath),"-",seq(1:length(pval_value)),sep="")
tmp_pval_df <- data.frame(pval_name,pval_value)
#print(tmp_pval_df)
pval_rank <<-rbind(pval_rank,tmp_pval_df)
df <- line[substr(line,0,1) == ">"]
df <- read.table(text=df,sep = "\t")
colnames(df) <- c("MotifNum","Seq","start","end","Motif","Score","Info")
}
close(motif_file)
return(df)
}
#i=1
#filepath=all_closure[1]
convert_meme <- function(filepath){
this_line <- matrix(0,ncol = 6)
this_line <- data.frame(this_line)
motif_result <- tibble()
line<-0
motif_file <- file(filepath,"r")
line = readLines(motif_file)
close(motif_file)
if (nchar(line[1]) != 57) {
df <- line[substr(line,0,3) == "ENS"|substr(line,0,3) == "ens"]
for (i in 1:length(df)) {
this_line <- strsplit(df[i],"\\s+")[[1]]
if(length(grep(".+[ATCG]",this_line[5])) == 1){
tmp_bind <- t(data.frame(this_line))
if(ncol(tmp_bind) < 6) {
if (nchar(as.character(tmp_bind[5])) < motif_len){
tmp_bind <- cbind(tmp_bind,"A")
tmp <- tmp_bind[4]
tmp_bind[4] <- tmp_bind[5]
tmp_bind[5] <- tmp
} else {
tmp_bind <- cbind(tmp_bind,"A")
}
}
motif_result <- rbind(motif_result,tmp_bind)
}
}
df_info = line[substr(line,0,5) == "MOTIF"]
all_motif_index <- 1
#filepath=paste(filepath,".test",sep = "")
cat("", file=filepath)
#i=1
for (i in 1:length(df_info)) {
this_info <- strsplit(df_info[i],"\\s+")[[1]]
this_consensus <- this_info[2]
this_index <- i
this_motif_length <- this_info[6]
this_num_sites <- as.numeric(this_info[9])
this_pval <- this_info[15]
this_pval <- as.numeric(this_pval)
motif_idx_range <- seq(all_motif_index,all_motif_index + this_num_sites - 1)
all_motif_index <- all_motif_index + this_num_sites
this_motif_align <- motif_result[motif_idx_range,]
this_motif_name <- paste(">Motif-",i,sep = "")
this_motif_align <- cbind(this_motif_align,this_motif_name)
this_motif_align[,4] <- as.numeric(as.character(this_motif_align[,2])) + as.numeric(this_motif_length) - 1
this_seq_idx <- sample(seq(1:this_num_sites))
this_motif_align[,6] <- this_seq_idx
colnames(this_motif_align) <- (c("V1","V2","V3","V4","V5","V6","V7"))
this_motif_align <- this_motif_align[,c(7,6,2,4,5,3,1)]
this_motif_align[, ] <- lapply(this_motif_align[, ], as.character)
cat("*********************************************************\n", file=filepath,append = T)
cat(paste(" Candidate Motif ",this_index,sep=""), file=filepath,append = T)
cat("\n*********************************************************\n\n", file=filepath,append = T)
cat(paste(" Motif length: ",this_motif_length,"\n Motif number: ",this_num_sites,
"\n Motif Pvalue: ",1/this_pval," ",this_pval,"\n\n",sep=""), file=filepath,append = T)
cat(paste("\n------------------- Consensus sequences------------------\n",this_consensus,"\n\n",sep=""), file=filepath,append = T)
cat("------------------- Aligned Motif ------------------\n#Motif Seq start end Motif Score Info\n", file=filepath,append = T)
for (j in 1:nrow(this_motif_align)) {
cat( as.character(this_motif_align[j, ]), file=filepath,append = T,sep = "\t")
cat("\n", file=filepath,append = T)
}
cat("----------------------------------------------------\n\n", file=filepath,append = T)
}
}
}
#i=1
#j=19
#info = "bic1.txt.fa.closures-1"
module_type <- sub(paste(".*_ *(.*?) *_.*",sep=""), "\\1", alldir)
#module_type <- rep("CT",6)
regulon_idx_module <- 0
result_gene_pos <- data.frame()
for (i in 1:length(alldir)) {
combined_seq <- data.frame()
combined_gene <- data.frame()
pval_rank <- data.frame()
all_closure <- list.files(alldir[i],pattern = "*.closures$",full.names = T)
short_all_closure <- list.files(alldir[i],pattern = "*.closures$",full.names = F)
all_closure <- sort_closure(all_closure)
short_all_closure <- sort_short_closure(short_all_closure)
if(length(all_closure) > 0){
for (j in 1:length(all_closure)) {
if(is_meme == 1) {
convert_meme(all_closure[j])
}
matches <- regmatches(short_all_closure[j], gregexpr("[[:digit:]]+", short_all_closure[j]))
bic_idx <- as.numeric(unlist(matches))
#test
#motif_seq <- convert_motif(paste(all_closure[j],".test",sep = ""))[,c(1,5,7)]
motif_seq <- convert_motif(all_closure[j])[,c(1,5,7)]
motif_pos <- convert_motif(all_closure[j])[,c(1,2,3,4,7)]
gene_pos <- merge(motif_pos,gene_info,by.x = "Info",by.y = 'V2')
gene_pos <-transform(gene_pos, min = pmin(start, end), max=pmax(start,end))
gene_pos[,4] <- gene_pos[,7] + gene_pos[,8]
gene_pos[,5] <- gene_pos[,7] + gene_pos[,9]
gene_pos[,10] <- module_type[i]
gene_pos[,11] <- paste(i,bic_idx,sub(">Motif-","",gene_pos[,2]),sep = ",")
if(module_type[i] == "module"){
regulon_idx_module <- regulon_idx_module + 1
gene_pos[,11] <- paste(regulon_idx_module,bic_idx,sub(">Motif-","",gene_pos[,2]),sep = ",")
}
#write.table(gene_pos[,c(6,4,5,1)],paste(alldir[i],"/bic",j,".bed",sep=""),sep = "\t" ,quote=F,row.names = F,col.names = F)
result_gene_pos <- rbind(result_gene_pos,gene_pos[,c(6,4,5,1,10,11)])
motif_seq[,1] <- gsub(">Motif","",motif_seq[,1])
motif_seq[,4] <- as.factor(paste(short_all_closure[j],motif_seq[,1],sep=""))
seq_file <- motif_seq[,c(4,3)]
motif_seq <- motif_seq[,c(4,2)]
colnames(motif_seq) <- c("info","seq")
colnames(seq_file) <- c("info","genes")
combined_seq <- rbind(combined_seq,motif_seq)
combined_gene <- rbind(combined_gene,seq_file)
res <- paste(alldir[i],".bbc.txt",sep="")
#res <- file("filename", "w")
cat("", file=res)
for (info in levels(combined_seq[,1])) {
cat(paste(">",as.character(info),sep=""), file=res,sep="\n",append = T)
if (length(as.character(combined_seq[which(combined_seq[,1]== info),2])) >= 100) {
sequence <- as.character(combined_seq[which(combined_seq[,1]== info),2])[1:99]
} else {
sequence <- as.character(combined_seq[which(combined_seq[,1]== info),2])
}
cat(sequence, file=res,sep="\n",append = T)
}
}
} else {
cat("", file= paste(alldir[i],".bbc.txt",sep=""),sep="\n",append = T)
}
write.table(combined_gene,paste(alldir[i],".motifgene.txt",sep=""),sep = "\t" ,quote=F,row.names = F,col.names = T)
pval_rank <- pval_rank[!duplicated(pval_rank$pval_name),]
#test_pval_rank <- pval_rank[!duplicated(pval_rank$pval_name),]
if(nrow(pval_rank) > 0){
pval_rank[,3] <- seq(1:nrow(pval_rank))
pval_rank <- pval_rank[order((pval_rank$pval_value),decreasing = T),]
pval_idx <- pval_rank[,3]
#write.table(pval_rank,paste(alldir[i],".pval.txt",sep=""),sep = "\t" ,quote=F,row.names = F,col.names = F)
this_fasta <- read.fasta(paste(alldir[i],".bbc.txt",sep=""))
this_fasta <- this_fasta[pval_idx]
write.fasta(this_fasta,names(this_fasta),paste(alldir[i],".bbc.txt",sep=""),nbchar = 12)
}
cat(">end", file=paste(alldir[i],".bbc.txt",sep=""),sep="\n",append = T)
}
write.table(result_gene_pos,paste("motif_position.bed",sep=""),sep = "\t" ,quote=F,row.names = F,col.names = F)
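# A minimal invocation sketch (paths are assumptions): the script is driven
# by the three positional arguments read via commandArgs(TRUE) at the top,
# e.g. from the shell:
#   Rscript prepare_bbc.R /path/to/job_dir 1 12
# where the second argument flags MEME output (0 = no, 1 = yes) and the
# third is the motif length.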
|
5fb276de1d78ccef3fa4fad21bedd90b1824f46a | e205d4542b2f7d13bc3c1a3bba2eae4c16cfc743 | /tests/testthat/test-21-DRYWETAIR.R | 634abb1c6ec9bf0d1ebfeea78f2070163adbae89 | ["MIT"] | permissive | trenchproject/TrenchR | 03afe917e19b5149eae8a76d4a8e12979c2b752f | 7164ca324b67949044827b743c58196483e90360 | refs/heads/main | 2023-08-20T11:54:26.054952 | 2023-08-04T03:52:42 | 2023-08-04T03:52:42 | 78,060,371 | 8 | 8 | NOASSERTION | 2022-09-15T21:36:08 | 2017-01-04T23:09:28 | R | UTF-8 | R | false | false | 973 | r | test-21-DRYWETAIR.R |
context("DRYWETAIR")
expect_similar <- function(input, expected) {
eval(bquote(expect_lt(abs(input - expected), 0.01)))
}
test_that("DRYAIR function works as expected", {
expect_equal(length(DRYAIR(db=30, bp=100*1000, alt=0)), 11)
expect_identical(class(DRYAIR(db=30, bp=100*1000, alt=0)), "list")
expect_similar(DRYAIR(db=30, bp=100*1000, alt=0)[[2]], 1.149212)
})
test_that("VAPRS function works as expected", {
expect_similar(VAPPRS(db=30), 4240.599)
})
test_that("WETAIR function works as expected", {
expect_equal(length(WETAIR(db=30, wb=28, rh=60, bp=100*1000)), 9)
expect_identical(class(WETAIR(db=30, wb=28, rh=60, bp=100*1000)), "list")
expect_similar(WETAIR(db=30, wb=28, rh=60, bp=100*1000)[[5]], 2.961613)
expect_similar(WETAIR(db=30, wb=28, rh=60, dp = 9, bp=100*1000)[[5]], 1.32676)
expect_similar(WETAIR(db=30, wb=28, rh=-0.5, bp=100*1000)[[8]], -999)
expect_similar(WETAIR(db=30, wb=28, rh=-1.1, bp=100*1000)[[5]], 4.2537)
})
|
2a5bdb58be12d064805f1a8d99d9a5ad33912d3e | a20d5b8fc49681b708fb99ae02087f0b34cf89e3 | /R/plot_moving_averages.R | 05cd1c05865d80a41ef4673c3a86b448b36234a6 | [] | no_license | gladelephant/Nature_2021_Breast-Tumors-Maintain-a-Reservoir-of-Subclonal-Diversity-During-Expansion | 3facd8ee70e2a0d2f98514ce81bcd83efacc824c | 67e0f9bb6424cb75e0c500b6934d91024b2312d6 | refs/heads/main | 2023-05-08T07:37:32.585283 | 2021-05-06T21:42:04 | 2021-05-06T21:42:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,454 | r | plot_moving_averages.R |
#' Reproduces the moving average plots
#'
#' @param chromosome Chromosome to be plotted
#' @param genes list of genes to be plotted as a boxplot.
#'
#' @return a ggplot object with the three plots using patchwork
#' @export
#'
#' @examples
#'
plot_moving_average <- function(chromosome,
genes) {
# Methods:
# DNA copy number profiles from the expanded clusters are shown
# by taking the mode of the ith segment from their profiles
# according to the co-clustering identities.
# mode function thanks to https://rpubs.com/Mentors_Ubiqum/using_mode
mode <- function(x) {
ux <- unique(x)
ux[which.max(tabulate(match(x, ux)))]
}
active_chr <- unique(blk_long_gj_jit$chr)[i]
blk_long_gj2 <- blk_long_gj_jit %>%
dplyr::filter(chr == chromosome)
cnt_long_gene_trip_chr <- cnt_long_gene_trip %>%
filter(gene %in% genes) %>%
mutate(gene = fct_relevel(gene, genes))
message("Calculating moving averages")
genes_common <-
blk_long_gj2$gene_id[blk_long_gj2$gene_id %in% rownames(cnt_avg)]
chr <- as.data.frame(cnt_avg[unique(genes_common),])
mp <- map_df(chr, function(x) {
evobiR::SlidingWindow(mean, x, 100, 1)
})
mp_l <-
mp %>% mutate(pos = 1:nrow(mp)) %>% gather(key = "sample", value = "window_exp", -pos)
mp_l <- inner_join(mp_l, cl_info) %>%
group_by(subclones, pos) %>%
summarize(mean_cluster = mean(window_exp))
p0 <- cnt_long_gene_trip_chr %>%
filter(gene %in% genes) %>%
ggplot() +
geom_boxplot(aes(x = subclones,
y = z_score,
fill = subclones)) +
facet_wrap(vars(gene), ncol = 1) +
scale_fill_manual(values = colors_vector$subclones) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 5)) +
ylab("mean expression \n (subclone)") +
xlab("expanded cluster") +
theme(
strip.background = element_blank(),
strip.text = element_text(size = 16),
panel.border = element_rect(
color = "black",
fill = NA,
size = 1
),
axis.text.x = element_text(
size = 16,
angle = 90,
vjust = 0.5
),
axis.text.y = element_text(size = 16),
axis.title = element_text(size = 16),
legend.position = "none"
)
p1 <- ggplot() +
geom_line(
data = blk_long_gj2 %>%
group_by(pos, subclones) %>%
summarise(cn = mode(cn)),
aes(
x = pos,
y = cn,
color = fct_relevel(subclones,
gtools::mixedsort(as.character(
unique(blk_long_gj2$subclones)
)))
),
size = .8
) +
geom_text(
data = blk_long_gj2 %>%
filter(gene %in% genes) %>%
distinct(gene, .keep_all = T),
aes(x = pos,
y = 3.7,
label = gene),
angle = 90 ,
size = 8
) +
theme_cowplot() +
theme(
axis.text.x = element_blank(),
axis.title.x = element_text(size = 20),
axis.title.y = element_text(size = 20),
axis.text.y = element_text(size = 20),
axis.ticks.x = element_blank(),
axis.title = element_text(size = 20),
legend.position = "none"
) +
scale_color_manual(name = "cluster", values = colors_vector$subclones) +
scale_y_continuous(
breaks = function(x)
unique(floor(pretty(seq(
0, (max(x) + 1) * 1.1
))))
) +
# ggtitle(active_chr) +
xlab(paste0("genomic position ", "(", active_chr, ")")) +
ylab("copy number")
p2 <-
mp_l %>%
ungroup %>%
mutate(hdb = fct_relevel(subclones,
gtools::mixedsort(as.character(
unique(cnt_long_gene_trip$subclones)
)))) %>%
ggplot() +
geom_line(aes(x = pos,
y = mean_cluster,
color = subclones),
size = 1.2) +
scale_color_manual(values = colors_vector$subclones) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 4)) +
theme_cowplot() +
theme(
axis.text.x = element_blank(),
axis.text.y = element_text(size = 20, vjust = 0.5),
axis.ticks.x = element_blank(),
axis.title = element_text(size = 20),
legend.position = "none"
) +
xlab("genomic windows (100 genes)") +
ylab("expression \n z-score")
message("plotting.")
p1 / p2 / p0
}
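# A minimal usage sketch: the function reads several objects from the
# calling environment (blk_long_gj_jit, cnt_long_gene_trip, cnt_avg,
# cl_info, colors_vector), so it only runs once those are loaded; the
# chromosome label and gene names below are hypothetical.
# plot_moving_average(chromosome = "chr8", genes = c("MYC", "PTK2"))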
|
88da7c6a8b23a6bc01851c1e9a7d98044b008802 | 27fd2631dd5e44ff6171a271f8bb9c0ccdc1c835 | /LearningR/ch_8_data.R | 9577b8bf68fc043cf030486f2082f492b00875cd | [] | no_license | dvjr22/Learning | 669088cb79812065be5fdd83c90782fc8108078d | af2386c6a5d7ed9f361942a4a77485a8346923bd | refs/heads/master | 2021-05-15T06:53:13.704584 | 2018-02-20T12:52:22 | 2018-02-20T12:52:22 | 113,348,984 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,026 | r | ch_8_data.R |
#Chapter 8
#Topics:
# lattice package
# xyplot()
# bwplot()
# dotplot()
# histogram()
# densityplot()
# Panel Functions
rm(list = ls()) #clear environment
setwd("/home/valdeslab/Learning/LearningR") #GreenMachine
#setwd("/home/diego/Learning/LearningR") #Laptop
#Load packages
library(lattice)
library(grid)
#Multipanel Scatterplots - xyplot()
Env = read.table("RBook/RIKZENV.txt", header = TRUE)
names(Env)
str(Env)
Env$MyTime = Env$Year + Env$dDay3 / 365 #Continuous time in years (year plus day fraction)
#plot salinity vs time, conditional on station
xyplot(SAL ~ MyTime | factor(Station),
type = "l", #Draws a line between the points
strip = function(bg, ...)
strip.default(bg = 'white', ...),
col.line = 1, data = Env)
#For comparison
xyplot(SAL ~ MyTime | factor(Station), data = Env)
xyplot(SAL ~ MyTime | factor(Station),
type = "l", strip = TRUE, col.line = 1,
data = Env)
xyplot(SAL ~ MyTime | factor(Station),
type = "l", strip = FALSE, col.line = 1,
data = Env)
#Multipanel Boxplots - bwplot()
bwplot(SAL ~ factor(Month) | Area,
strip = strip.custom(bg = 'white'),
cex = 0.5, layout = c(2,5),
data = Env, xlab = "Month", ylab = "Salinity",
par.settings = list(
box.rectangle = list(col =1),
box.umbrella = list(col = 1),
plot.symbol = list(cex = .5, col = 1)
))
#Shorter version of above
bwplot(SAL ~ factor(Month) | Area, layout = c(2,5),
data = Env, xlab = "Month", ylab = "Salinity")
#Multipanel Cleveland Dotplots - dotplot()
dotplot(factor(Month) ~ SAL | Station,
subset = Area == "OS", #Select a subsection of data - subset = (Area == "OS") also correct
jitter.x = TRUE, #Adds variations to show multiple observations
col = 1, data = Env, strip = strip.custom(bg = 'white'),
cex = 0.5, ylab = "Month", xlab = "Salinity")
#Multipanel Histograms - histogram()
histogram( ~ SAL | Station, data = Env,
subset = (Area == "OS"),
layout = c(1,4), # 1 col, 4 rows
nint = 30, #Number of bars
xlab = "Salinity", ylab = "Frequencies",
strip = FALSE, strip.left = TRUE #Move strip to side of panels
)
densityplot( ~ SAL | Station, data = Env,
subset = (Area == "OS"),
layout = c(1,4), # 1 col, 4 rows
xlab = "Salinity", ylab = "Frequencies",
strip = FALSE, strip.left = TRUE #Move strip to side of panels
)
#Panel Functions
xyplot(SAL ~ Month | factor(Year), data = Env,
type = c("p"), subset = (Station == "GROO"),
xlim = c(0, 12), ylim = c(0,30), pch = 19,
panel = function (...) {
panel.xyplot(...)
panel.grid(..., h = -1, v = -1) #Add grid, negative values force alignment to data
panel.loess(...) #Add smoothing line
})
#Simpler version
xyplot(SAL ~ Month | Year, data = Env,
subset = (Station == "GROO"), pch = 19,
xlim = c(0, 12), ylim = c(0, 30),
type = c("p", "g", "smooth")) #execute panel.xyplot, panel.grid, and panel.smooth
#Plot and show the outlier
dotplot(factor(Month) ~ SAL | Station, pch = 16,
subset = (Area == "OS"), data = Env,
ylab = "Month", xlab = "Salinity",
panel = function(x, y, ...) {
Q = quantile(x, c(0.25, 0.5, 0.75), na.rm = TRUE)
R = Q[3] - Q[1]
L = Q[2] - 3 * (Q[3] - Q[1])
MyCex = rep(0.4, length(y))
MyCol = rep(1, length(y))
MyCex[x < L] = 1.5
MyCol[x < L] = 2
panel.dotplot(x, y, cex = MyCex, col = MyCol, ...)
})
#8.6.3
Sparrows<-read.table(file="RBook/Sparrows.txt", header=TRUE)
names(Sparrows)
#[1] "Species" "Sex" "Wingcrd" "Tarsus" "Head" "Culmen"
#[7] "Nalospi" "Wt" "Observer" "Age"
library(lattice)
xyplot(Wingcrd ~ Tarsus | Species * Sex,
xlab = "Axis 1", ylab = "Axis 2", data = Sparrows,
xlim = c(-1.1, 1.1), ylim = c(-1.1, 1.1),
panel = function(subscripts, ...){
zi <- Sparrows[subscripts, 3:8]
di <- princomp(zi, cor = TRUE)
Load <- di$loadings[, 1:2]
Scor <- di$scores[, 1:2]
panel.abline(a = 0, b = 0, lty = 2, col = 1)
panel.abline(h = 0, v = 0, lty = 2, col = 1)
for (i in 1:6){
llines(c(0, Load[i, 1]), c(0, Load[i, 2]),
col = 1, lwd = 2)
ltext(Load[i, 1], Load[i, 2],
rownames(Load)[i], cex = 0.7)}
sc.max <- max(abs(Scor))
Scor <- Scor / sc.max
panel.points(Scor[, 1], Scor[, 2], pch = 1,
cex = 0.5, col = 1)
})
S1<-Sparrows[Sparrows$Species=="SESP" & Sparrows$Sex=="Female",3:8]
di <- princomp(S1, cor = TRUE)
Load <- di$loadings[, 1:2]
Scor <- di$scores[, 1:2]
cloud(CHLFa ~ T * SAL | Station, data = Env,
screen = list(z = 105, x = -70),
ylab = "Sal", xlab = "T", zlab = "Chl. a",
ylim = c(26,33),
subset = (Area=="OS"),
scales = list(arrows = FALSE))
Hawaii <- read.table("RBook/waterbirdislandseries.txt", header = TRUE)
library(lattice)
names(Hawaii)
Birds <- as.vector(as.matrix(Hawaii[,2:9]))
Time <- rep(Hawaii$Year, 8)
MyNames <- c("Stilt_Oahu","Stilt_Maui","Stilt_Kauai_Niihau",
"Coot_Oahu","Coot_Maui","Coot_Kauai_Niihau",
"Moorhen_Oahu","Moorhen_Kauai")
ID <- rep(MyNames, each = 48)
xyplot(Birds ~ Time|ID, ylab = "Bird abundance",
layout = c(3, 3), type = "l", col = 1)
ID2 <- factor(ID,
levels=c(
"Stilt_Oahu",
"Stilt_Kauai_Niihau",
"Stilt_Maui",
"Coot_Oahu",
"Coot_Kauai_Niihau",
"Coot_Maui",
"Moorhen_Oahu",
"Moorhen_Kauai"))
xyplot(Birds ~ Time|ID2, ylab = "Bird abundance",
layout = c(3, 3), type = "l", col = 1)
xyplot(Birds ~ Time|ID2, ylab = "Bird abundance",
layout = c(3, 3), type = "l", col = 1,
scales = list(x = list(relation = "same"),
y = list(relation = "free"),
tck=-1))
Species <-rep(c("Stilt","Stilt","Stilt",
"Coot","Coot","Coot",
"Moorhen","Moorhen"),each = 48)
xyplot(Birds ~ Time|Species, ylab = "Bird abundance",
layout = c(2, 2), type = "l", col = 1,
scales = list(x = list(relation = "same"),
y = list(relation = "free")),
groups = ID, lwd=c(1,2,3))
xyplot(Stilt_Oahu + Stilt_Maui + Stilt_Kauai_Niihau ~ Year,
ylab = "Bird abundance", data = Hawaii,
layout = c(2, 2), type = "l", col = 1,
scales = list(x = list(relation = "same"),
y = list(relation = "free")))
Env <- read.table(file ="RBook/RIKZENV.txt", header = TRUE)
library(lattice)
AllAreas <- levels(unique(Env$Area))
for (i in AllAreas ){
Env.i <- Env[Env$Area==i,]
print(dotplot(factor(Month)~SAL | Station, #print() is required to render lattice plots inside a loop
data = Env.i))
win.graph() #opens a new graphics device (Windows-only)
}
|
bb907e2a2a0fbfaf87fcc46f2244a5c02b405797 | 56993b6bc9f7a3de1942de4933a597c6e122e010 | /man/leveneTests.Rd | 9eb82a7e21150943d39b68b346529456380d5c51 | [] | no_license | WeiAkaneDeng/Xvarhet | 694bf59af2fdbb1e6b2956ef1cf993c5915c302f | 8dc6b92131781c72d1940aa702447f21028071f3 | refs/heads/master | 2021-11-19T08:52:23.788899 | 2021-09-30T12:21:36 | 2021-09-30T12:21:36 | 124,282,954 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,331 | rd | leveneTests.Rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LeveneTest.R
\name{leveneTests}
\alias{leveneTests}
\title{Levene's test for variance homogeneity by SNP genotypes}
\usage{
leveneTests(GENO, SEX, PLINK = FALSE, Y, centre = "median", COV = NULL)
}
\arguments{
\item{GENO}{the genotype of a SNP, must be a vector of 0, 1, 2's indicating the number of reference alleles. The length of \code{GENO} should match that of \code{SEX}, \code{Y}, and any covariates.}
\item{SEX}{the genetic sex of individuals in the sample population, must be a vector of 1 and 2 or 0 and 1, depending on whether the PLINK sex code is used. Note that the default sex code is 1 for male and 2 for female in PLINK.}
\item{PLINK}{a logical indicating whether the SEX is coded following PLINK or not.}
\item{Y}{a vector of quantitative traits, such as human height.}
\item{centre}{a character indicating whether the absolute deviation should be calculated with respect to ``median'' or ``mean''.}
\item{COV}{a vector or matrix of covariates that are used to reduce bias due to confounding, such as age.}
}
\value{
a vector of Levene's test p-values according to levels specified by \code{GENO}, the
interaction between \code{GENO} and \code{SEX}, sex-specific Levene's test stratified by
\code{GENO}, and the Fisher's method to combine the sex-specific Levene's test \emph{p}-values.
}
\description{
The function takes as input the genotypes of a SNP (\code{GENO}), the sex (\code{SEX}), and a quantitative
trait (\code{Y}) in a sample population, and possibly additional covariates. It should be noted
that these variables must be of the same length. The function then
returns the variance heterogeneity \emph{p}-values for the model \eqn{Y\sim G},
\eqn{Y \sim G\times S}, the sex-specific results based on model \eqn{Y\sim G}, as well as that
using Fisher's method to combine sex-specific results.
}
\note{
We recommend quantile-normal transformation of \code{Y} to avoid a ‘scale effect’, whereby
variance values tend to be proportional to mean values when stratified by \code{G}.
}
\examples{
N <- 5000
geno <- rbinom(N, 2, 0.3)
sex <- rbinom(N, 1, 0.5)
y <- rnorm(N)
cov <- matrix(rnorm(N*10), ncol=10)
leveneTests(GENO=geno, SEX=sex, Y=y, COV=cov)
}
\author{
Wei Q. Deng \email{deng@utstat.toronto.edu}
}
|
f5a18a8064e36c4fd7db9d7b68c41a3d0e5016e0 | 5d3d1b0916535dad8a83a9dad9e23ed77b982d8e | /man/compareAgreement.Rd | 69cf1acfc8f4eede3e4cd804604114be20192e8d | [] | no_license | cran/agrmt | 3d280f0d45e7dcc141556269548296131f2c43cc | 849caf12caabffb97aba71b2b2a54d2d36d2ec4a | refs/heads/master | 2021-11-25T02:48:56.040559 | 2021-11-17T21:20:02 | 2021-11-17T21:20:02 | 17,694,324 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,336 | rd | compareAgreement.Rd |
\name{compareAgreement}
\alias{compareAgreement}
\title{Compare agreement A with and without simulated coding error}
\description{Calculate agreement in ordered rating scales, and compares this to agreement with simulated coding error.}
\usage{compareAgreement(V, n=500, e=0.01, N=500, pos=FALSE)}
\arguments{
\item{V}{A vector with an entry for each individual}
\item{n}{Number of samples in the simulation of coding errors}
\item{e}{Proportion of samples for which errors are simulated}
\item{N}{Number of replications for calculating mean and standard deviation}
\item{pos}{Vector of possible positions. If FALSE, the values occurring in V are set as the possible values}}
\details{This function calculates agreement for a vector and compares the value with the agreement obtained under simulated coding error. It runs the function \code{\link{agreementError}} N times. The other arguments (n, e, pos) are passed down to the \code{agreementError} function.}
\value{The function returns a list with agreement A without simulated coding errors, the mean of agreement with simulated coding error, and the standard deviation of agreement with simulated coding error.}
\author{Didier Ruedin}
\seealso{\code{\link{agreement}}, \code{\link{agreementError}}}
\examples{
# Sample data:
V <- c(1,1,1,1,2,3,3,3,3,4,4,4,4,4,4)
compareAgreement(V)
}
|
b0793ae88e032adf9a1ce6bd741ea302633c5520 | fbfcb908f975799b43a64c51c9a380701626d488 | /man/workflowTransfer.Rd | 1c475162625ed71afa655661dc9694a186f08319 | [] | no_license | BlasBenito/distantia | d29d099ae8740bfadf2a480f18ffdb4ffdbe5f41 | 0d4469f417a7e7970757ee7943ca0016c181f7ec | refs/heads/master | 2022-02-24T04:30:53.308716 | 2022-02-07T14:32:31 | 2022-02-07T14:32:31 | 187,805,264 | 7 | 4 | null | null | null | null | UTF-8 | R | false | true | 3,784 | rd | workflowTransfer.Rd |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/workflowTransfer.R
\name{workflowTransfer}
\alias{workflowTransfer}
\title{Transfers an attribute (time, age, depth) from one sequence to another}
\usage{
workflowTransfer(
sequences = NULL,
grouping.column = NULL,
time.column = NULL,
exclude.columns = NULL,
method = "manhattan",
transfer.what = NULL,
transfer.from = NULL,
transfer.to = NULL,
mode = "direct",
plot = FALSE
)
}
\arguments{
\item{sequences}{dataframe with multiple sequences identified by a grouping column generated by \code{\link{prepareSequences}}.}
\item{grouping.column}{character string, name of the column in \code{sequences} to be used to identify separate sequences within the file.}
\item{time.column}{character string, name of the column with time/depth/rank data.}
\item{exclude.columns}{character string or character vector with column names in \code{sequences} to be excluded from the analysis.}
\item{method}{character string naming a distance metric. Valid entries are: "manhattan", "euclidean", "chi", and "hellinger". Invalid entries will throw an error.}
\item{transfer.what}{character string, column of \code{sequences} with the attribute to be transferred. If empty or ill-defined, \code{time.column} is used instead if available.}
\item{transfer.from}{character string, group available in \code{grouping.column} identifying the sequence from which to take the attribute values.}
\item{transfer.to}{character string, group available in \code{grouping.column} identifying the sequence to which transfer the attribute values.}
\item{mode}{character string, one of: "direct" (default), "interpolated".}
\item{plot}{boolean, if \code{TRUE}, plots the distance matrix and the least-cost path.}
}
\value{
A dataframe with the sequence \code{transfer.to}, with a column named after \code{transfer.what} with the attribute values.
}
\description{
Transfers an attribute (generally time/age, but others are possible) from one sequence (defined by the argument \code{transfer.from}) to another (defined by the argument \code{transfer.to}) that lacks it. The transfer rests on the assumption that similar samples have similar attributes; this assumption might not hold for noisy multivariate time-series. The attribute can be transferred in two different ways (defined by the \code{mode} argument):
\itemize{
\item \emph{Direct}: transfers the selected attribute between samples with the maximum similarity. This option will likely generate duplicated attribute values in the output.
\item \emph{Interpolated}: obtains new attribute values through weighted interpolation, with weights derived from the distances between samples.
}
}
\examples{
\donttest{
#loading sample dataset
data(pollenGP)
#subset pollenGP to make a shorter dataset
pollenGP <- pollenGP[1:50, ]
#generating a subset of pollenGP
set.seed(10)
pollenX <- pollenGP[sort(sample(1:50, 40)), ]
#we separate the age column
pollenX.age <- pollenX$age
#and remove the age values from pollenX
pollenX$age <- NULL
pollenX$depth <- NULL
#removing some samples from pollenGP
#so pollenX is not a perfect subset of pollenGP
pollenGP <- pollenGP[-sample(1:50, 10), ]
#prepare sequences
GP.X <- prepareSequences(
sequence.A = pollenGP,
sequence.A.name = "GP",
sequence.B = pollenX,
sequence.B.name = "X",
grouping.column = "id",
time.column = "age",
exclude.columns = "depth",
transformation = "none"
)
#transferring age
X.new <- workflowTransfer(
sequences = GP.X,
grouping.column = "id",
time.column = "age",
method = "manhattan",
transfer.what = "age",
transfer.from = "GP",
transfer.to = "X",
mode = "interpolated"
)
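#the same transfer using the direct mode (sketch; reuses the objects above
#and will likely produce duplicated age values, as noted in the description)
X.direct <- workflowTransfer(
  sequences = GP.X,
  grouping.column = "id",
  time.column = "age",
  method = "manhattan",
  transfer.what = "age",
  transfer.from = "GP",
  transfer.to = "X",
  mode = "direct"
  )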
}
}
\author{
Blas Benito <blasbenito@gmail.com>
}
|
6cbe03b29a1510dc8dd80017f5f963fe51c696a9
|
8e797ce01d2078b48818374814d4fdc63d459a88
|
/eyewit/script.R
|
497bc80cef09db9e8a4af75a753a896939e76050
|
[
"MIT"
] |
permissive
|
ccp-eva/eyewit
|
4aa7b0fdd92e94a3f0cc891416c76386b096a7b9
|
04473da9b1aace6aa24b7d0ad796eae31989ade5
|
refs/heads/main
| 2023-08-16T23:45:16.404856
| 2023-08-10T17:22:15
| 2023-08-10T17:22:15
| 254,728,991
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,702
|
r
|
script.R
|
# load eyewit (for package dev load it: devtools::load_all("."))
# library(eyewit)
# install CRAN packages
# install.packages("tidyverse")
library(tidyverse)
# import user interface
source("interface.R")
# read raw data filenames
participants <- list.files(interface$raw_dir)
# take a random sample from raw folder to determine vendor labels, and only read headers
sample <- readr::read_tsv(file.path(interface$raw_dir, sample(participants, 1)), col_types = readr::cols(), n_max = 0)
vendor <- vendor_check(sample)$vendor
types <- vendor_check(sample)$types
# incomplete subjects (i.e., not having 2 pretest & 12 test trials)
incomplete_subjects <- c()
# Loop over all participants
for (subject in participants) {
print(subject)
# remove later
# subject <- participants[1]
# read tsv files
df_raw <- readr::read_tsv(file.path(interface$raw_dir, subject), col_types = types)
# run preflight checks & diagnostics, returns a lean df
df <- preflight(df_raw, interface)
# get start and end index pairs for inter_trial chunks
startend_fam <- get_start_end_pos(df, interface$inter_trial_chunk_patterns[1], "VideoStimulusStart", "VideoStimulusEnd")
startend_preflook <- get_start_end_pos(df, interface$inter_trial_chunk_patterns[2], "VideoStimulusStart", "VideoStimulusEnd")
# test that the subject has consistent start/end indexes and that the trial count matches 16
if (get_trial_count(c(startend_fam, startend_preflook)) != 16) {
incomplete_subjects <- c(incomplete_subjects, subject)
stop("Bad Trial count")
}
# track current trials
current_test_trials <- get_trial_count(c(startend_fam, startend_preflook))
# Allocate Trials and fill-up eventValue
df <- allocate_trials(df, c(startend_fam, startend_preflook), 2)
# track video names
names_fam <- df$eventValue[startend_fam$start] |>
unique() |>
as.character()
names_preflook <- df$eventValue[startend_preflook$start] |>
unique() |>
as.character()
# Insert AOI Columns
df <- tibble::add_column(df, "{interface$aoisets$aoifamphase_obj_r_prox$column_name}" :=
get_aois(df$x, df$y, interface$aoisets$aoifamphase_obj_r_prox, startend_fam), .before = 1)
df <- tibble::add_column(df, "{interface$aoisets$aoifamphase_obj_r_nprox$column_name}" :=
get_aois(df$x, df$y, interface$aoisets$aoifamphase_obj_r_nprox, startend_fam), .after = 1)
df <- tibble::add_column(df, "{interface$aoisets$aoifamphase_obj_l_prox$column_name}" :=
get_aois(df$x, df$y, interface$aoisets$aoifamphase_obj_l_prox, startend_fam), .after = 2)
df <- tibble::add_column(df, "{interface$aoisets$aoifamphase_obj_l_nprox$column_name}" :=
get_aois(df$x, df$y, interface$aoisets$aoifamphase_obj_l_nprox, startend_fam), .after = 3)
df <- tibble::add_column(df, "{interface$aoisets$preflook$column_name}" :=
get_aois(df$x, df$y, interface$aoisets$preflook, startend_preflook), .after = 4)
df <- tibble::add_column(df, "{interface$aoisets$screen$column_name}" :=
get_aois(df$x, df$y, interface$aoisets$screen), .after = 5)
# helper variable
fi_pairs <- fi2rn(df$fi)
# Get inner AOI gaze shift latencies (used in get_looks implicitly for given)
gazeshifts <- get_gazeshift_latency(df, interface$aoisets)
# get detailed information about single fixation indexes (trial-scoped)
# fi_summary_overal <- fi_summary(df, interface$aoisets, show_non_hn_labels = TRUE)
# fi_summary_test_action <- fi_summary(df, interface$aoisets, startend_test_action, TRUE)
# fi_summary_test_outcome <- fi_summary(df, interface$aoisets, startend_test_outcome, TRUE)
##################################################################################################
# Initialize empty subject tibble (the better data.frame)
df_subject <- tibble::tibble(.rows = current_test_trials)
# Build Summary table
# ================================================================================================
# NAME INFORMATIONS
# ------------------------------------------------------------------------------------------------
df_subject$ID <- value_parser_by_key(interface$keys_filename, subject)$id
df_subject$Sex <- value_parser_by_key(interface$keys_filename, subject)$sex
df_subject$Age_Days <- value_parser_by_key(interface$keys_filename, subject)$age_days
df_subject$Exp <- value_parser_by_key(interface$keys_filename, subject, trim_right = 4)$experiment
df_subject$Rec <- value_parser_by_key(interface$keys_filename, subject)$rec
df_subject$ConEye <- value_parser_by_key(interface$keys_fam, names_fam)$con_eye
df_subject$ConProx <- value_parser_by_key(interface$keys_fam, names_fam)$con_proximity
df_subject$Condition <- value_parser_by_key(interface$keys_fam, names_fam)$condition
df_subject$ObjActor <- value_parser_by_key(interface$keys_fam, names_fam)$obj_handling_actor
df_subject$FamObj <- value_parser_by_key(interface$keys_fam, names_fam)$obj_id
df_subject$FamObjPos_Fam <- value_parser_by_key(interface$keys_fam, names_fam)$position_obj
df_subject$TrialRun <- value_parser_by_key(interface$keys_fam, names_fam)$running_trial
df_subject$TrialCon <- c(
rep(1, current_test_trials / 4),
rep(2, current_test_trials / 4),
rep(3, current_test_trials / 4),
rep(4, current_test_trials / 4)
)
# ------------------------------------------------------------------------------------------------
# Looking Times - Preflook
# ------------------------------------------------------------------------------------------------
df_subject$TotalLTScreenPreflook <-
get_looks(df, interface$aoisets$screen, startend_preflook, omit_first_overflow_fi = TRUE)$looking_times
df_subject$PrefLook_LT_Obj_Left <-
get_looks(df, interface$aoisets$preflook, startend_preflook, omit_first_overflow_fi = TRUE)$looking_times$left
df_subject$PrefLook_LT_Obj_Right <-
get_looks(df, interface$aoisets$preflook, startend_preflook, omit_first_overflow_fi = TRUE)$looking_times$right
df_subject$PrefLook_LT_Obj_Total <- df_subject$PrefLook_LT_Obj_Left + df_subject$PrefLook_LT_Obj_Right
df_subject$PrefLook_Obj_Fam <- df_subject$FamObj
df_subject$PrefLook_Obj_Fam_Pos <- get_preflook_pos(names_preflook, strsplit(names_fam, "_"))$fam_pos
df_subject$PrefLook_Obj_Nov_Pos <- dplyr::if_else(
df_subject$PrefLook_Obj_Fam_Pos == "left", "right", "left"
)
df_subject$PrefLook_LT_Obj_Fam <- dplyr::if_else(
df_subject$PrefLook_Obj_Fam_Pos == "left", df_subject$PrefLook_LT_Obj_Left, df_subject$PrefLook_LT_Obj_Right
)
df_subject$PrefLook_LT_Obj_Nov <- dplyr::if_else(
df_subject$PrefLook_Obj_Nov_Pos == "left", df_subject$PrefLook_LT_Obj_Left, df_subject$PrefLook_LT_Obj_Right
)
df_subject$PrefLook_PropScore <- df_subject$PrefLook_LT_Obj_Nov / df_subject$PrefLook_LT_Obj_Total
################################
######## OTHER MEASURES ########
################################
##### MEASURES BASED ON FIRST LOOK (FL) MEASURE
df_subject$PrefLook_FL_Left <- get_looks(
df,
interface$aoisets$preflook,
startend_preflook,
omit_first_overflow_fi = TRUE,
first_look_emergency_cutoff =
round(
median(gazeshifts$aoiPrefLook$left$latencies) +
3 * sd(gazeshifts$aoiPrefLook$left$latencies)
)
)$first_looks_collection$left$durations
# FYI: you can also retrieve the reason why each first look ended:
# df_subject$PrefLook_FL_Left <- get_looks(
# df,
# interface$aoisets$preflook,
# startend_preflook,
# omit_first_overflow_fi = TRUE,
# first_look_emergency_cutoff =
# round(
# median(gazeshifts$aoiPrefLook$left$latencies) +
# 3 * sd(gazeshifts$aoiPrefLook$left$latencies)
# )
# )$first_looks_collection$ending_reasons
df_subject$PrefLook_FL_Right <- get_looks(
df,
interface$aoisets$preflook,
startend_preflook,
omit_first_overflow_fi = TRUE,
first_look_emergency_cutoff =
round(
median(gazeshifts$aoiPrefLook$right$latencies) +
3 * sd(gazeshifts$aoiPrefLook$right$latencies)
)
)$first_looks_collection$right$durations
df_subject$PrefLook_FL_Obj_Nov <- dplyr::if_else(
df_subject$PrefLook_Obj_Nov_Pos == "left", df_subject$PrefLook_FL_Left, df_subject$PrefLook_FL_Right
)
df_subject$PrefLook_FL_Obj_Fam <- dplyr::if_else(
df_subject$PrefLook_Obj_Fam_Pos == "left", df_subject$PrefLook_FL_Left, df_subject$PrefLook_FL_Right
)
df_subject$PrefLook_FL_Screen_Omit <- get_looks(
df,
interface$aoisets$screen,
startend_preflook,
omit_first_overflow_fi = TRUE,
first_look_emergency_cutoff =
round(
median(gazeshifts$aoiScreen$onscreen$latencies) +
3 * sd(gazeshifts$aoiScreen$onscreen$latencies)
)
)$first_looks_collection$onscreen$durations
df_subject$PrefLook_FL_Screen_NoOmit <- get_looks(
df,
interface$aoisets$screen,
startend_preflook,
omit_first_overflow_fi = FALSE,
first_look_emergency_cutoff =
round(
median(gazeshifts$aoiScreen$onscreen$latencies) +
3 * sd(gazeshifts$aoiScreen$onscreen$latencies)
)
)$first_looks_collection$onscreen$durations
# =========================================
# Gap2FLScreen
# returns either a time difference in ms or NA:
# - NA means there was no fixation at all in this trial
# - any number means the gap in ms until the initiation of the first screen fixation
# (this should actually be a separate function, lol)
# init with 0
df_subject$Gap2FLScreen <- 0
for (pfsi in seq.int(df_subject$Gap2FLScreen)) {
# boolean that checks if there is a screen fixation at the first sample for each trial
isAoiScreenFixAtFirstSample <- (df[startend_preflook$start[pfsi] + 1,"gazeType"] == "Fixation")[1] && (df[startend_preflook$start[pfsi] + 1,"aoiScreen"] == "onscreen")[1]
# There are 2 cases,
# (1) isAoiScreenFixAtFirstSample is FALSE that means there is no fixation when the trial starts
# (2) isAoiScreenFixAtFirstSample is TRUE: there is a fixation when the trial starts, which needs to be shifted to the next fixation (similar to the omit logic within get_looks)
# init diff container with zeros
time_diff <- 0
starting_timestamp <- df[startend_preflook$start[pfsi],"timestamp"] |> as.integer()
# init ending timestamp of first fixation
ending_timestamp <- NA
# if there is no initial fixation get the time in ms when the first fixation at screen appears
# This is case (1)
if (!isAoiScreenFixAtFirstSample) {
# filter for fixations being onscreen
temp <- df |>
dplyr::slice(startend_preflook$start[pfsi]:startend_preflook$end[pfsi]) |>
dplyr::filter(gazeType == "Fixation") |>
dplyr::filter(aoiScreen == "onscreen")
# check if there is any fixation in this trial at all, if not assign NA
if (nrow(temp) != 0) {
ending_timestamp <- temp$timestamp[1]
time_diff <- ending_timestamp - starting_timestamp
}
if (nrow(temp) == 0) {
time_diff <- NA
}
}
# if true, jump the next fixation (i.e., omit the first ongoing fixation)
# This is case (2)
if (isAoiScreenFixAtFirstSample) {
first_fi_to_skip <- df$fi[startend_preflook$start[pfsi]]
next_fi_to_use <- first_fi_to_skip + 1
next_fi_to_use_rn <- which(df$fi == next_fi_to_use)[1]
current_trial_start_time <- df$timestamp[startend_preflook$start[pfsi]]
current_next_fi_start_time <- df$timestamp[next_fi_to_use_rn]
time_diff <- current_next_fi_start_time - current_trial_start_time
}
df_subject$Gap2FLScreen[pfsi] <- time_diff
}
############################################################
# Original idea (NoOmit might be problematic!):
df_subject$PrefLook_FL_Screen_starttocutoff <- df_subject$PrefLook_FL_Screen_Omit + df_subject$Gap2FLScreen
############################################################
# TODO #####################################################
############################################################
# think about using Omit vs NoOmit
# df_subject$PrefLook_FL_Screen_starttocutoff <- ifelse(df_subject$Gap2FLScreen == 0, df_subject$PrefLook_FL_Screen_NoOmit, df_subject$PrefLook_FL_Screen_Omit + df_subject$Gap2FLScreen)
############################################################
# init
# iterate over all screen durations rowwise within df_subject
df_subject$PrefLook_LT_Obj_Left_FL <- NA
# iterate over all screen durations rowwise within df_subject
for (i_screen_lt in seq.int(df_subject$PrefLook_FL_Screen_Omit)) {
print(paste0("Index: ", i_screen_lt, " Screen Duration: ", df_subject$PrefLook_FL_Screen_Omit[i_screen_lt]))
df_subject$PrefLook_LT_Obj_Left_FL[i_screen_lt] <- get_looks(
df,
interface$aoisets$preflook,
startend_preflook,
intra_scope_window = c(
"start",
ifelse(
is.na(df_subject$Gap2FLScreen[i_screen_lt]),
"end",
df_subject$PrefLook_FL_Screen_starttocutoff[i_screen_lt]
)
),
omit_first_overflow_fi = TRUE
)$looking_times$left[i_screen_lt] # ... only get the i'th item from get_looks
}
# same for right
df_subject$PrefLook_LT_Obj_Right_FL <- NA
for (i_screen_lt in seq.int(df_subject$PrefLook_FL_Screen_Omit)) {
print(paste0("Index: ", i_screen_lt, " Screen Duration: ", df_subject$PrefLook_FL_Screen_Omit[i_screen_lt]))
df_subject$PrefLook_LT_Obj_Right_FL[i_screen_lt] <- get_looks(
df,
interface$aoisets$preflook,
startend_preflook,
intra_scope_window = c(
"start",
ifelse(
is.na(df_subject$Gap2FLScreen[i_screen_lt]),
"end",
df_subject$PrefLook_FL_Screen_starttocutoff[i_screen_lt]
)
),
omit_first_overflow_fi = TRUE
)$looking_times$right[i_screen_lt] # ... only get the i'th item from get_looks
}
df_subject$PrefLook_LT_Obj_Nov_FL <- dplyr::if_else(
df_subject$PrefLook_Obj_Nov_Pos == "left", df_subject$PrefLook_LT_Obj_Left_FL, df_subject$PrefLook_LT_Obj_Right_FL
)
df_subject$PrefLook_LT_Obj_Fam_FL <- dplyr::if_else(
df_subject$PrefLook_Obj_Fam_Pos == "left", df_subject$PrefLook_LT_Obj_Left_FL, df_subject$PrefLook_LT_Obj_Right_FL
)
df_subject$PrefLook_PropScore_FL <- df_subject$PrefLook_LT_Obj_Nov_FL / (df_subject$PrefLook_LT_Obj_Nov_FL + df_subject$PrefLook_LT_Obj_Fam_FL)
##### MEASURES BASED ON 2 SEC LOOK-AWAY MEASURE
df_subject$PrefLook_2sec_Obj_Left <-
get_looks(
df = df,
aoi_collection = interface$aoisets$preflook,
scope = startend_preflook,
lookaway_stop = 2000,
omit_first_overflow_fi = TRUE)$lookaway_collection$left$durations
df_subject$PrefLook_2sec_Obj_Right <-
get_looks(
df = df,
aoi_collection = interface$aoisets$preflook,
scope = startend_preflook,
lookaway_stop = 2000,
omit_first_overflow_fi = TRUE)$lookaway_collection$right$durations
df_subject$PrefLook_2sec_Obj_Nov <- dplyr::if_else(
df_subject$PrefLook_Obj_Nov_Pos == "left", df_subject$PrefLook_2sec_Obj_Left, df_subject$PrefLook_2sec_Obj_Right
)
df_subject$PrefLook_2sec_Obj_Fam <- dplyr::if_else(
df_subject$PrefLook_Obj_Fam_Pos == "left", df_subject$PrefLook_2sec_Obj_Left, df_subject$PrefLook_2sec_Obj_Right
)
df_subject$PrefLook_2sec_Screen <-
get_looks(
df = df,
aoi_collection = interface$aoisets$screen,
scope = startend_preflook,
lookaway_stop = 2000,
omit_first_overflow_fi = TRUE)$lookaway_collection$onscreen$durations
####################################################
######## PrefLook_2sec_Screen_starttocutoff ########
####################################################
# init with NA
df_subject$PrefLook_2sec_Screen_starttocutoff <- NA
for (pfsi in seq.int(df_subject$PrefLook_2sec_Screen_starttocutoff)) {
# get current start index
current_video_start_index <- startend_preflook$start[pfsi] + 1
starting_timestamp <- df[startend_preflook$start[pfsi] + 1,"timestamp"] |> as.integer()
# init ending timestamp of first fixation
ending_timestamp <- NA
# filter for fixations being onscreen
temp <- df |>
dplyr::slice(startend_preflook$start[pfsi]:startend_preflook$end[pfsi]) |>
dplyr::filter(gazeType == "Fixation") |>
dplyr::filter(aoiScreen == "onscreen")
# only proceed if there is data (i.e., there is at least one fixation onscreen)
if (nrow(temp) != 0) {
# all fixation indexes that are onscreen for current trial
all_fij <- unique(temp$fi)
time_between_fixations <- c()
for (fij in seq.int(all_fij)) {
current_fij <- all_fij[fij]
# check time diff between last fi and video end of current trial is over 2000
if (current_fij == max(all_fij)) {
last_fi_timestamp_start <- temp$timestamp[which(temp$fi == current_fij) |> max()]
trial_end_timestamp <- df$timestamp[startend_preflook$end[pfsi]]
time_between_fixations <- c(time_between_fixations, trial_end_timestamp - last_fi_timestamp_start)
break
}
next_fij <- all_fij[fij + 1]
time_between_fixations <- c(time_between_fixations, temp$timestamp[which(temp$fi == next_fij) |> min()] - temp$timestamp[which(temp$fi == current_fij) |> max()])
}
# check if current trial has time gaps between fixations over 2000
fiAbove2k <- NA
# init
diff <- df$timestamp[startend_preflook$end[pfsi]] - df$timestamp[startend_preflook$start[pfsi] + 1]
# is there any value above 2000?
if (max(time_between_fixations) > 2000) {
# There ARE values above 2000 ...
# ... but we only want the very first one
fiAbove2k <- all_fij[which(time_between_fixations > 2000) |> min()]
# get the timestamp
ending_timestamp <- df$timestamp[(which(df$fi == fiAbove2k) |> max()) + 1]
diff <- ending_timestamp - starting_timestamp
}
# store current trial in df_subject
df_subject$PrefLook_2sec_Screen_starttocutoff[pfsi] <- diff
}
}
# init
# iterate over all screen durations rowwise within df_subject
df_subject$PrefLook_LT_Obj_Left_2sec <- NA
# iterate over all screen durations rowwise within df_subject
for (i_screen_lt in seq.int(df_subject$PrefLook_2sec_Screen)) {
print(paste0("Index: ", i_screen_lt, " Screen Duration: ", df_subject$PrefLook_2sec_Screen[i_screen_lt]))
df_subject$PrefLook_LT_Obj_Left_2sec[i_screen_lt] <- get_looks(
df,
interface$aoisets$preflook,
startend_preflook,
intra_scope_window = c(
"start",
ifelse(
is.na(df_subject$PrefLook_2sec_Screen_starttocutoff[i_screen_lt]),
"end",
df_subject$PrefLook_2sec_Screen_starttocutoff[i_screen_lt]
)
),
omit_first_overflow_fi = TRUE
)$looking_times$left[i_screen_lt] # ... only get the i'th item from get_looks
}
df_subject$PrefLook_LT_Obj_Right_2sec <- NA
# iterate over all screen durations rowwise within df_subject
for (i_screen_lt in seq.int(df_subject$PrefLook_2sec_Screen)) {
print(paste0("Index: ", i_screen_lt, " Screen Duration: ", df_subject$PrefLook_2sec_Screen[i_screen_lt]))
df_subject$PrefLook_LT_Obj_Right_2sec[i_screen_lt] <- get_looks(
df,
interface$aoisets$preflook,
startend_preflook,
intra_scope_window = c(
"start",
ifelse(
is.na(df_subject$PrefLook_2sec_Screen_starttocutoff[i_screen_lt]),
"end",
df_subject$PrefLook_2sec_Screen_starttocutoff[i_screen_lt]
)
),
omit_first_overflow_fi = TRUE
)$looking_times$right[i_screen_lt] # ... only get the i'th item from get_looks
}
df_subject$PrefLook_LT_Obj_Fam_2sec <- dplyr::if_else(
df_subject$PrefLook_Obj_Fam_Pos == "left", df_subject$PrefLook_LT_Obj_Left_2sec, df_subject$PrefLook_LT_Obj_Right_2sec
)
df_subject$PrefLook_LT_Obj_Nov_2sec <- dplyr::if_else(
df_subject$PrefLook_Obj_Nov_Pos == "left", df_subject$PrefLook_LT_Obj_Left_2sec, df_subject$PrefLook_LT_Obj_Right_2sec
)
df_subject$PrefLook_PropScore_2sec <- df_subject$PrefLook_LT_Obj_Nov_2sec / (df_subject$PrefLook_LT_Obj_Nov_2sec + df_subject$PrefLook_LT_Obj_Fam_2sec)
# ================================================================================================
# TIME-COURSE PLOT
# ------------------------------------------------------------------------------------------------
df_time <- timebinning(df, df_subject, startend_preflook, 500)
# Sort like in the word file:
df_time <- df_time |> dplyr::arrange(TrialCon, Condition, BinNumber)
# Remove last bin
last_time_bin <- df_time$BinNumber |> max()
df_time <- df_time |> dplyr::filter(BinNumber != last_time_bin)
# write tables for individual participants
# write.table(df_subject, paste0(interface$output_dir, subject), sep = '\t', row.names = FALSE)
# write.table(df_time, paste0(interface$output_dir, sub("\\.tsv$", "", subject), "_time.tsv"), sep = '\t', row.names = FALSE)
}
# Read in tsv files from pre-processing folder
# tsv_files <- list.files(interface$output_dir, full.names = TRUE)
# # Creating data frame
# overall.data <- tsv_files %>%
# map(read_tsv) %>% # read in all the files individually, using the function read_tsv() from the readr package
# reduce(rbind) # reduce with rbind into one dataframe
|
a7ad2539a3eb9056a20db62cfab978a9f5f0712a
|
f9a852ac2ddadbe7cd3879473b56b4369fb9a53c
|
/man/pma_gettable.Rd
|
1bf54d6c5520c2b5d050ade88e71c140d5ae6143
|
[] |
no_license
|
obarisk/sqltools
|
ab0012489b2a3623240013ee22ca793bbc81910b
|
edd9df3d5114b2040a0491d7604340b31162fa8a
|
refs/heads/master
| 2021-01-19T20:36:36.298885
| 2017-04-23T11:06:25
| 2017-04-23T11:06:25
| 88,523,675
| 0
| 0
| null | 2017-04-17T15:59:43
| 2017-04-17T15:46:51
|
R
|
UTF-8
|
R
| false
| true
| 401
|
rd
|
pma_gettable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phpmyadmin_webapi.R
\name{pma_gettable}
\alias{pma_gettable}
\title{Show tables on a phpMyAdmin server}
\usage{
pma_gettable(dbname, Url, Token)
}
\arguments{
\item{dbname}{database name}
\item{Url}{remote server URL}
\item{Token}{login token}
}
\value{
a character vector of table names
}
\description{
Show tables on a phpMyAdmin server.
}
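\examples{
\dontrun{
# hypothetical usage sketch; the URL and token depend on your own
# phpMyAdmin server and login session
tables <- pma_gettable(dbname = "mydb",
                       Url = "https://example.com/phpmyadmin/",
                       Token = token)
}
}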
|
5f64d42384626e30a97105a9d38f9527b44a73cc
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/stochvol/man/specify_priors.Rd
|
6832244f27c913a31cd515cd53646fc038c4f424
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,540
|
rd
|
specify_priors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{specify_priors}
\alias{specify_priors}
\title{Specify Prior Distributions for SV Models}
\usage{
specify_priors(
mu = sv_normal(mean = 0, sd = 100),
phi = sv_beta(shape1 = 5, shape2 = 1.5),
sigma2 = sv_gamma(shape = 0.5, rate = 0.5),
nu = sv_infinity(),
rho = sv_constant(0),
latent0_variance = "stationary",
beta = sv_multinormal(mean = 0, sd = 10000, dim = 1)
)
}
\arguments{
\item{mu}{one of sv_normal or sv_constant}
\item{phi}{one of sv_beta, sv_normal, or sv_constant. If sv_beta, then the specified beta distribution is the prior for (phi+1)/2}
\item{sigma2}{one of sv_gamma, sv_inverse_gamma, or sv_constant}
\item{nu}{one of sv_infinity, sv_exponential, or sv_constant. If sv_exponential, then the specified exponential distribution is the prior for nu-2}
\item{rho}{one of sv_beta or sv_constant. If sv_beta, then the specified beta distribution is the prior for (rho+1)/2}
\item{latent0_variance}{either the character string \code{"stationary"} or an sv_constant object.
If \code{"stationary"}, then h0 ~ N(\code{mu}, \code{sigma^2/(1-phi^2)}). If an sv_constant object with value \code{v}, then h0 ~ N(\code{mu}, \code{v}).
Here, N(b, B) denotes the normal distribution with mean b and variance B}
\item{beta}{an sv_multinormal object}
}
\description{
This function gives access to a larger set of prior distributions
in case the default choice is unsatisfactory.
}
\seealso{
Other priors:
\code{\link{sv_constant}()}
}
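\examples{
# a sketch of a non-default prior specification; the particular values
# are illustrative, not recommendations
ps <- specify_priors(
  mu = sv_normal(mean = 0, sd = 10),
  phi = sv_beta(shape1 = 20, shape2 = 1.5),
  nu = sv_exponential(rate = 0.1)
)
}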
\concept{priors}
|
37ad8e86fbbe8bfbdbf41cb12c3ee0b833255209
|
5d3121e7e42bfb2cc8ae76062a83df2791a45b95
|
/R/assig.R
|
d04cc9518fc75b0f61327fdf45ef4d5b48f160b5
|
[] |
no_license
|
neslon/dprep
|
3b872a3cbfe3492a27314d4d68c427a949cd538a
|
bedc64837b72919f0a249d716b6cecbb23923ad0
|
refs/heads/master
| 2021-01-11T14:49:55.339753
| 2017-01-27T23:09:36
| 2017-01-27T23:09:36
| 80,226,293
| 0
| 0
| null | 2017-01-27T16:51:27
| 2017-01-27T16:51:26
| null |
UTF-8
|
R
| false
| false
| 459
|
r
|
assig.R
|
assig <-
function(x, points, nparti, n)
{
# Assigns each value of x to one of nparti intervals defined by the cut
# points in 'points'; returns a vector of interval labels 1..nparti.
x1 <- x
if(nparti==1){ x1[1:n] <- 1}
for(i in 1:nparti) {
if(i==1) {
# values below the first cut point fall in interval 1
x1[x < points[i]] <- 1
}
if(i==nparti) {
# values at or above the last cut point fall in the last interval
x1[x >= points[i-1]] <- nparti
}
if((i!=1)&(i!=nparti)) {
# interior intervals: points[i-1] <= x < points[i]
x1[(x >= points[i-1])&(x < points[i])] <- i
}
}
x1
}
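# A minimal usage sketch (hypothetical data):
# assig(c(0.2, 1.5, 3.7, 5.1), points = c(1, 4), nparti = 3, n = 4)
# -> 1 2 2 3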
|
23b9c054c171a9f6ccaa805b69ffcc9573f39354
|
c7fb71ce56826e76c52b3b671c4d1edfe05bcccc
|
/inst/doc/files.R
|
b4666a072addd350a55c5e5f612f17da33549f82
|
[] |
no_license
|
cran/filesstrings
|
da03515790a57bb53899b67a7de3ce70ec3a446b
|
983e3724aaa20682f68d82dfdb0bb3b0f12b9c19
|
refs/heads/master
| 2023-02-07T12:41:00.563629
| 2023-01-25T16:10:02
| 2023-01-25T16:10:02
| 82,947,823
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,315
|
r
|
files.R
|
## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE, comment = "#>")
## ----load---------------------------------------------------------------------
library(filesstrings)
## ---- remove_filename_spaces--------------------------------------------------
file.create(c("file 1.txt", "file 2.txt"))
remove_filename_spaces(pattern = "txt$", replacement = "_")
list.files(pattern = "txt$")
file.remove(list.files(pattern = "txt$")) # clean up
## ----nice_nums setup----------------------------------------------------------
file.names <- c("file999.tif", "file1000.tif")
sort(file.names)
## ----nice_nums----------------------------------------------------------------
nice_nums(file.names)
## ----before_last_dot----------------------------------------------------------
before_last_dot("spreadsheet_92.csv")
## ----add file extension 1-----------------------------------------------------
give_ext("xyz", "csv")
## ----add file extension 2-----------------------------------------------------
give_ext("xyz.csv", "csv")
## ----change file extension----------------------------------------------------
give_ext("abc.csv", "txt") # tack the new extension onto the end
give_ext("abc.csv", "txt", replace = TRUE) # replace the current extension
|
396844ea602392972de87eb70135337b0657e50f
|
42ee7e7f8650ac4378e477ea06407258a8e286c2
|
/scripts/2004_turnout_by_state.r
|
93cf587bfb90b6823ff8103abda6f664512b47b4
|
[] |
no_license
|
stoneyv/presidential_general_elections
|
d4000e90d13dc13eb870733728f1c7b6b10a5020
|
f097810a931e0a8463ce7a2967ced0b0ea88bbf9
|
refs/heads/master
| 2020-06-30T09:49:30.494158
| 2017-01-03T00:14:02
| 2017-01-03T00:14:02
| 74,379,541
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,643
|
r
|
2004_turnout_by_state.r
|
library(data.table)
library(choroplethr)
library(choroplethrMaps)
library(ggplot2)
library(gsheet)
library(dplyr)
# Setting the working directory and then confirming it.
working_dir <- '/home/stoney/Desktop/presidential_elections/scripts/'
setwd(working_dir)
getwd()
#
# United States Election Project
# http://www.electproject.org/
# Dr. Michael P. McDonald
# Associate Professor University of Florida
# Department of Political Science
#
# 2004 is not in a Google spreadsheet, so saved it to data
# and modified the headers
# http://www.electproject.org/2004g
turnout_04 <- fread('../data/electproject-org/2004_electproject_participation.csv')
# Use dplyr and pipes %>% to eliminate rows that
# are not states and set region and values columns
# in preparation for choroplethr library
results <- turnout_04 %>%
filter(State != '' & State != 'United States') %>%
mutate(value=VAP_Highest_Office) %>%
select(region=State, value)
# choroplethr state names are all lower case
results$region <- tolower(results$region)
# Remove % sign and coerce string to numeric type
results$value <- as.numeric(sub('%','', results$value))
# Plot percentage of votes by state from VEP (eligible voters)
# that had a vote counted for the highest office
m0 <- state_choropleth(results,
title = '2004 Eligible Voters Turnout\nHighest Office',
legend = '% VEP',
num_colors = 1 )
# Save the plot to the output folder
ggsave(m0,
width = 11,
height = 8.5,
dpi = 300,
file = "../plots/2004_voter_turnout.png",
type = "cairo-png")
|
a3f161295086f890e59bc13699b1976fb01836e3
|
1d5f8ab25866b9cb4898be799bb700d260ef5b62
|
/man/kernel_gauss_dC.Rd
|
811487b31b740766ced72802fe39b67ea9a1e375
|
[] |
no_license
|
CollinErickson/GauPro
|
20537d576a5a47308840ecbe080dabb2c244b96c
|
c12cfa14b5ac4e1506daec1baec27f75a2253f53
|
refs/heads/master
| 2023-04-16T08:42:18.177784
| 2023-04-12T00:59:27
| 2023-04-12T00:59:27
| 64,254,165
| 14
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 699
|
rd
|
kernel_gauss_dC.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{kernel_gauss_dC}
\alias{kernel_gauss_dC}
\title{Derivative of Gaussian kernel covariance matrix in C}
\usage{
kernel_gauss_dC(x, theta, C_nonug, s2_est, beta_est, lenparams_D, s2_nug)
}
\arguments{
\item{x}{Matrix x}
\item{theta}{Theta vector}
\item{C_nonug}{cov mat without nugget}
\item{s2_est}{whether s2 is being estimated}
\item{beta_est}{Whether theta/beta is being estimated}
\item{lenparams_D}{Number of parameters the derivative is being calculated for}
\item{s2_nug}{s2 times the nug}
}
\value{
Derivative of the covariance matrix
}
\description{
Derivative of Gaussian kernel covariance matrix in C
}
|
86672e157d9ff93181bce42e69f7261e69580398
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Luminescence/examples/plot_DRTResults.Rd.R
|
76bdd4c36675f4d450d7d07e460fbb469906a225
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,425
|
r
|
plot_DRTResults.Rd.R
|
library(Luminescence)
### Name: plot_DRTResults
### Title: Visualise dose recovery test results
### Aliases: plot_DRTResults
### Keywords: dplot
### ** Examples
## read example data set and misapply them for this plot type
data(ExampleData.DeValues, envir = environment())
## plot values
plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
given.dose = 2800, mtext = "Example data")
## plot values with legend
plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
given.dose = 2800,
legend = "Test data set")
## create and plot two subsets with randomised values
x.1 <- ExampleData.DeValues$BT998[7:11,]
x.2 <- ExampleData.DeValues$BT998[7:11,] * c(runif(5, 0.9, 1.1), 1)
plot_DRTResults(values = list(x.1, x.2),
given.dose = 2800)
## some more user-defined plot parameters
plot_DRTResults(values = list(x.1, x.2),
given.dose = 2800,
pch = c(2, 5),
col = c("orange", "blue"),
xlim = c(0, 8),
ylim = c(0.85, 1.15),
xlab = "Sample aliquot")
## plot the data with user-defined statistical measures as legend
plot_DRTResults(values = list(x.1, x.2),
given.dose = 2800,
summary = c("n", "mean.weighted", "sd"))
## plot the data with user-defined statistical measures as sub-header
plot_DRTResults(values = list(x.1, x.2),
given.dose = 2800,
summary = c("n", "mean.weighted", "sd"),
summary.pos = "sub")
## plot the data grouped by preheat temperatures
plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
given.dose = 2800,
preheat = c(200, 200, 200, 240, 240))
## plot two data sets grouped by preheat temperatures
plot_DRTResults(values = list(x.1, x.2),
given.dose = 2800,
preheat = c(200, 200, 200, 240, 240))
## plot the data grouped by preheat temperatures as boxplots
plot_DRTResults(values = ExampleData.DeValues$BT998[7:11,],
given.dose = 2800,
preheat = c(200, 200, 200, 240, 240),
boxplot = TRUE)
|
704ee39b0a93e0f922d350a9b0536a719d656d0a
|
35a87885d11712ddda88f065e07e5581cffd6b43
|
/ecospat/man/ecospat-package.Rd
|
bc8897b8f3caf0556bcf609b0fedb94088df7960
|
[] |
no_license
|
ecospat/ecospat
|
8266a45049a313616f31f964e5cefc49ab76a2ea
|
be2716f41de829558dab53f26dcc8e3509ba187d
|
refs/heads/master
| 2023-08-14T10:04:25.008983
| 2023-06-15T09:56:10
| 2023-06-15T09:56:10
| 21,738,067
| 21
| 13
| null | 2023-07-27T12:58:12
| 2014-07-11T14:01:02
|
R
|
UTF-8
|
R
| false
| false
| 3,173
|
rd
|
ecospat-package.Rd
|
\name{ecospat-package}
\alias{ecospat-package}
\alias{ecospat}
\docType{package}
\title{Spatial Ecology Miscellaneous Methods}
\description{
Collection of methods, utilities and data sets for the support of spatial ecology analyses, with a focus on pre-, core- and post-modelling analyses of species distribution, niche quantification and community assembly. The \code{ecospat} package was written by current and former members and collaborators of the ecospat group of Antoine Guisan, Department of Ecology and Evolution (DEE) & Institute of Earth Surface Dynamics (IDYST), University of Lausanne, Switzerland.
\bold{Pre-modelling:}
\itemize{
\item Spatial autocorrelation: \code{\link{ecospat.mantel.correlogram}}
\item Variable selection: \code{\link{ecospat.npred}}
\item Climate Analogy: \code{\link{ecospat.climan}, \link{ecospat.mess}, \link{ecospat.plot.mess}}
\item Phylogenetic diversity measures: \code{\link{ecospat.calculate.pd}}
\item Biotic Interactions: \code{\link{ecospat.cons_Cscore}, \link{ecospat.Cscore}, \link{ecospat.co_occurrences}}
\item Minimum Dispersal routes: \code{\link{ecospat.mdr}}
\item Niche Quantification: \code{\link{ecospat.grid.clim.dyn}, \link{ecospat.niche.equivalency.test}, \link{ecospat.niche.similarity.test}, \link{ecospat.plot.niche}, \link{ecospat.plot.niche.dyn}, \link{ecospat.plot.contrib}, \link{ecospat.niche.overlap}, \link{ecospat.plot.overlap.test}, \link{ecospat.niche.dyn.index}, \link{ecospat.shift.centroids}, \link{ecospat.niche.dynIndexProjGeo}, \link{ecospat.niche.zProjGeo}, \link{ecospat.margin}}
\item Data Preparation: \code{\link{ecospat.caleval}, \link{ecospat.cor.plot}, \link{ecospat.makeDataFrame}, \link{ecospat.occ.desaggregation}, \link{ecospat.rand.pseudoabsences}, \link{ecospat.rcls.grd}, \link{ecospat.recstrat_prop}, \link{ecospat.recstrat_regl}, \link{ecospat.sample.envar}}
}
\bold{Core Niche Modelling:}
\itemize{
\item Model evaluation: \code{\link{ecospat.cv.glm}, \link{ecospat.permut.glm}, \link{ecospat.cv.gbm}, \link{ecospat.cv.me}, \link{ecospat.cv.rf}, \link{ecospat.boyce}, \link{ecospat.CommunityEval}, \link{ecospat.cohen.kappa}, \link{ecospat.max.kappa}, \link{ecospat.max.tss}, \link{ecospat.meva.table}, \link{ecospat.plot.kappa}, \link{ecospat.plot.tss}, \link{ecospat.adj.D2.glm}, \link{ecospat.CCV.createDataSplitTable}, \link{ecospat.CCV.modeling}, \link{ecospat.CCV.communityEvaluation.bin}, \link{ecospat.CCV.communityEvaluation.prob}}
\item Spatial predictions and projections: \code{\link{ecospat.ESM.Modeling}, \link{ecospat.ESM.EnsembleModeling}, \link{ecospat.ESM.Projection}, \link{ecospat.ESM.EnsembleProjection}, \link{ecospat.SESAM.prr}, \link{ecospat.binary.model}, \link{ecospat.Epred}, \link{ecospat.mpa}}
\item Variable Importance: \code{\link{ecospat.maxentvarimport}, \link{ecospat.ESM.VarContrib}}
}
\bold{Post Modelling:}
\itemize{
\item Variance Partition: \code{\link{ecospat.varpart}}
\item Spatial predictions of species assemblages: \code{\link{ecospat.cons_Cscore}}
\item Range size quantification: \code{\link{ecospat.rangesize}, \link{ecospat.occupied.patch}}
}
}
|
31f9d3a47987021bc5ef965b8fcc3a7c3981517a
|
99766014500982b7584ec5e8217735deae30dff3
|
/ordinary-least-square-regression-modelling.R
|
b5e0a66638ec2a045d1c78e8f95c0baa2cddd133
|
[] |
no_license
|
davidenoma/regression-analysis
|
694dce0d8098fc8d698081cc20db4004b8172eae
|
5f40999b0504015278d350cde427d47dc25ebe28
|
refs/heads/master
| 2022-12-07T19:38:18.493246
| 2020-08-19T20:22:49
| 2020-08-19T20:22:49
| 287,510,993
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,845
|
r
|
ordinary-least-square-regression-modelling.R
|
#Linear regression, we model Y as a function of X
library(help = "datasets")
data("Orange")
head(Orange)
plot(Orange$age, Orange$circumference)
#Model the variation in circumference as a
#a function of age
#H0: there is no link between age and circumference
fit = lm(circumference ~ age, data=Orange)
summary(fit)
## p < 0.05, so we reject H0
library(ggplot2)
ggplot(Orange, aes(x=age, y=circumference)) +
geom_point(color="#2980B9",size = 4) +
geom_smooth(method = lm, color = "#2C3E50")
fit = lm(circumference~ age, data = Orange)
summary(fit)
#Predict the circumference of a tree aged 1500
#We use only the slope here: age = 0 lies outside the observed data range,
#so the intercept has no meaningful interpretation for this prediction
age = 1500
cirm = 0.106 * age
cirm
#Calculating Confidence intervals in R
head(ToothGrowth)
##Confidence interval of mean
n = length(ToothGrowth$len)
s = sd(ToothGrowth$len)
standardError = s / sqrt(n)
zvalue = qnorm(0.975)
zvalue
#Margin of error
moe = zvalue * standardError
xbar = mean(ToothGrowth$len)
xbar + c(-moe,moe)
##T-based confidence interval for mean
##Use in cases n < 30
tval = qt(0.975, df=n-1)
tval
#Close to the Z value
moe = tval * standardError #Margin of error
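xbar + c(-moe,moe) #t-based 95% CI; should closely match t.test() below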
t.test(ToothGrowth$len) #95% ci for mean
t.test(ToothGrowth$len,conf.level = 0.9)
#Confidence interval provides a measure of precision of the
#linear regression coefficient
library(ggplot2)
#A statistical model estimates parameters from a dataset.
fit = lm(circumference ~ age , data = Orange)
summary(fit)
ggplot(Orange, aes(x=age,y=circumference)) +
geom_point(color="#2990B9",size = 4) +
geom_smooth(method=lm, color = "#2C3E50")
#Gray bands give an estimate of the confidence interval of the
#regression line.
new.data = data.frame(age=1500)
predict(fit,newdata = new.data, interval = "conf")
#Predicts the circumference at age 1500, with lower and upper confidence limits.
confint(fit)
#We can force intercept to be zero if we are sure that it is
#not significant
fit0 = lm(circumference ~ age + 0, data = Orange)
summary(fit0)
#Relationship between ANOVA and linear regression
fit1 = lm(Sepal.Length ~ Petal.Length, data=iris)
summary(fit1)
anova(fit1)
#Multiple Linear Regression
#One Response variable and two or more predictors
data(iris)
head(iris)
fit1 = lm(Sepal.Length ~ Sepal.Width + Petal.Length, data = iris)
summary(fit1)
fit2 = lm(Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width, data = iris)
summary(fit2)
#Addition of categorical variables
#Accounting for interaction of predictor variables.
data(iris)
library(ggplot2)
qplot(Sepal.Length, Petal.Length, data = iris)
fitlm = lm(Petal.Length ~ Sepal.Length, data=iris)
summary(fitlm)
summary(iris)
qplot(Sepal.Length, Petal.Length, data=iris, color = Species)
x = lm(Petal.Length ~ Sepal.Length+Species, data = iris)
summary(x)
#Is there a significant variation in petal length across species
fit1 = lm(Petal.Length ~ Sepal.Length* Species, data = iris)
anova(fit1)
#anova shows that Species affects Petal.Length at
#a significant level, i.e. the predictor affects the response
summary(iris$Species)
summary(fit1)
#Check that the conditions for OLS regression are met
fit = lm(Sepal.Length ~ Petal.Length + Petal.Width, data=iris)
summary(fit)
par(mfrow=c(2,2))
plot(fit)
#QQplot shows that the errors follow a normal distribution
library('lmtest')
#Test for Autocorrelated/non-independence of errors
#H0 is that there is no autocorrelation
dwtest(fit)
#We fail to reject the H0
library(car)
#H0: constant error variance, i.e. no heteroscedasticity
ncvTest(fit)
#Identify outliers that have too much influence of model
#influential datapoints
cutoff = 4 /((nrow(iris) - length(fit$coefficients)-2))
par(mfrow=c(1,1))
plot(fit, which = 4,cook.levels = cutoff)
|
45c498ce1e787e99a2900304651db4e274dcea6b
|
e63cee6483260cd96f285939154d897c9d1951d5
|
/run_analysis.R
|
e921e4655e7fd9d19e92f101aa3ddf97bbd693b1
|
[] |
no_license
|
nadirizr/getdata-project
|
a055cd9801d73ab7a230d527ea0748e7abe795e3
|
90b35ab26bf6e5d472befcaac3db7c896d8f1fca
|
refs/heads/master
| 2016-09-05T15:21:11.573141
| 2014-12-20T23:46:41
| 2014-12-20T23:46:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,001
|
r
|
run_analysis.R
|
library(reshape2)
download_and_extract_dataset <- function(web_path, dest_path = ".") {
## 'web_path' is a character vector indicating the URL of the dataset
## file to download, and assumes it a zip file to extract.
## 'dest_path' is an optional character vector indicating where to
## extract the data from the downloaded file (defaults to current
## working directory.
if (!file.exists(dest_path)) {
dir.create(dest_path)
}
dataset_file <- file.path(dest_path, "dataset.zip")
download.file(web_path, destfile = dataset_file, method = "curl")
unzip(dataset_file, exdir = dest_path)
}
load_dataset <- function(directory = "./UCI HAR Dataset",
training_path = "train/X_train.txt",
training_activity_path = "train/y_train.txt",
training_subject_path = "train/subject_train.txt",
test_path = "test/X_test.txt",
test_activity_path = "test/y_test.txt",
test_subject_path = "test/subject_test.txt",
features_path = "features.txt",
activity_path = "activity_labels.txt") {
## 'directory' is a character vector indicating the location of the
## datasets to load.
## 'training_path' is a character vector indicating the relative path
## of the training data sets within 'directory'.
## 'training_activity_path' is a character vector indicating the relative
## path of the training data activity for each observation.
## 'training_subject_path' is a character vector indicating the relative
## path of the training data subject for each observation.
## 'test_path' is a character vector indicating the relative path
## of the test data sets within 'directory'.
## 'test_activity_path' is a character vector indicating the relative
## path of the test data activity for each observation.
## 'test_subject_path' is a character vector indicating the relative
## path of the test data subject for each observation.
## 'features_path' is a character vector indicating the relative path
## of the feature name file within 'directory'.
## 'activity_path' is a character vector indicating the relative path
## of the activity name file within 'directory'.
##
## Returns a data frame with all of the data from all data sets, merged
## into one.
# Read the activity names into a vector of names where the activity
# number is the index and the value is the activity name.
activity_file <- paste(directory, "/", activity_path, sep="")
activity_dataset <- read.table(activity_file, sep="")
# After sorting by first column (just in case), second column is names.
activity_names <- activity_dataset[with(activity_dataset, order(V1)),][,2]
# --- Read the training dataset ---
# Read the training dataset into a data frame.
training_file <- paste(directory, "/", training_path, sep="")
training_dataset <- read.table(training_file, sep="")
# Read the training dataset activity file for each observation.
training_activity_file <- paste(directory, "/", training_activity_path, sep="")
training_activity_dataset <- read.table(training_activity_file, sep="")
# Transform each of the activities to a name.
training_activity_dataset[,1] <- apply(training_activity_dataset, 1,
function(x) activity_names[x])
# Merge the activity column into the training data set.
training_dataset <- cbind(training_activity_dataset, training_dataset)
# Read the training dataset subject file for each observation.
training_subject_file <- paste(directory, "/", training_subject_path, sep="")
training_subject_dataset <- read.table(training_subject_file, sep="")
# Merge the subject column into the training data set.
training_dataset <- cbind(training_subject_dataset, training_dataset)
# --- Read the test dataset ---
# Read the test dataset into a data frame.
test_file <- paste(directory, "/", test_path, sep="")
test_dataset <- read.table(test_file, sep="")
# Read the test dataset activity file for each observation.
test_activity_file <- paste(directory, "/", test_activity_path, sep="")
test_activity_dataset <- read.table(test_activity_file, sep="")
# Transform each of the activities to a name.
test_activity_dataset[,1] <- apply(test_activity_dataset, 1,
function(x) activity_names[x])
# Merge the activity column into the test data set.
test_dataset <- cbind(test_activity_dataset, test_dataset)
# Read the test dataset subject file for each observation.
test_subject_file <- paste(directory, "/", test_subject_path, sep="")
test_subject_dataset <- read.table(test_subject_file, sep="")
# Merge the subject column into the test data set.
test_dataset <- cbind(test_subject_dataset, test_dataset)
# --- Combine the datasets ---
# Combine the two datasets.
combined_dataset <- rbind(training_dataset, test_dataset)
# Add the feature names as the column names.
features_file <- paste(directory, "/", features_path, sep="")
features_table <- read.table(features_file, sep="")
# After sorting by first column (just in case), second column is names.
feature_names <- features_table[with(features_table, order(V1)),][,2]
names(combined_dataset) <- c("Subject", "Activity",
as.character(feature_names))
return(combined_dataset)
}
filter_dataset <- function(dataset,
feature_types=c("^Subject$", "^Activity$",
"mean\\(\\)", "std\\(\\)")) {
## 'dataset' is a data frame produced by a call to 'load_dataset'.
## 'feature_types' is a vector of character vectors where each element
## represents a string which must appear in a column name for it to pass
## the filter and be present in the resulting data frame.
##
## Returns a filtered data frame with column only for those features
## that contain one of the strings in 'feature_types'.
# Find the columns that answer to one of the regular expressions in
# 'feature_types.
dataset_names <- names(dataset)
dataset_cols <- Reduce(c, sapply(feature_types,
function(x) grep(x, dataset_names)))
# Take only those columns.
return(dataset[, dataset_cols])
}
calculate_tidy_average_dataset <- function(dataset) {
## 'dataset' is a data frame produced by a call to 'load_dataset', and
## possibly passed through 'filter_dataset'.
##
## Returns a dataset with the average of the various variables per each
## combination of Subject and Activity.
# First we melt the data with regard to Subject and Activity, thus
# grouping the observations according to that combination of variables.
melted_dataset <- melt(dataset, id.vars=c("Subject", "Activity"))
# Then we cast the data into a data frame where all the measured
# variables are averaged for each combination of ID variables.
casted_dataset <- dcast(melted_dataset,
Subject + Activity ~ variable,
mean)
return(casted_dataset)
}
main <- function(download=TRUE) {
## Runs all of the steps above and writes the resulting dataset to a
## file called 'tidy_dataset.txt'.
## If 'download' is TRUE, downloads and unzips the dataset when running,
## otherwise assumes that the files exist.
if (download) {
web_path <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
print("Downloading dataset file from: ", web_path)
download_and_extract_dataset(web_path)
print("Done.")
}
print("Loading datasets from files...")
dataset <- load_dataset()
print("Filtering datasets for means and standard deviations...")
dataset <- filter_dataset(dataset)
print("Calculating averages of variables per Subject and Activity...")
dataset <- calculate_tidy_average_dataset(dataset)
print("Done.")
print("Writing tidy dataset to file: tidy_dataset.txt")
write.table(dataset, "tidy_dataset.txt", row.names=FALSE)
print("Done.")
}
main()
|
0afb0b540ff46f99856f475197a6e31289fbdbd1
|
1db3390483611dad623d984fc1d18c277af3ed4e
|
/R/hpcc.table.R
|
7e72fff1e281d6901b8277eeefae633fc01eef1b
|
[] |
no_license
|
Saulus/rHpcc
|
7105a535b4b62c736625c74175114ea61e7aa30c
|
5fef5811fe0a63555a66e30c05bb4ffef46ad7ce
|
refs/heads/master
| 2021-01-14T14:10:50.315049
| 2014-11-24T09:17:54
| 2014-11-24T09:17:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 799
|
r
|
hpcc.table.R
|
hpcc.table <-
function (dataframe, format,expression = NULL,
few = NULL, unsorted = FALSE, local =FALSE, keyed = FALSE,
merge = FALSE)
{
out.dataframe <- hpcc.get.name()
if (missing(dataframe)) {
stop("no dataframe")
}
code <- sprintf("%s := TABLE(%s,%s",out.dataframe,dataframe,format)
if(is.not.null(expression)) {
code <- sprintf("%s,%s",code,expression)
if(is.not.null(few) && (few=='FEW' || few=='few' || few == 'MANY' || few=='many')) {
code <- sprintf("%s,%s",code,few)
}
if(unsorted) {
code <- sprintf("%s,UNSORTED",code)
}
}
if(local) {
code <- sprintf("%s,LOCAL",code)
}
if(keyed) {
code <- sprintf("%s,KEYED",code)
}
if(merge) {
code <- sprintf("%s,MERGE",code)
}
code <- sprintf("%s)",code)
hpcc.submit(code)
return(out.dataframe)
}
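# Hypothetical usage sketch (dataset name and record format are
# illustrative; assumes the rHpcc connection helpers are configured):
# out <- hpcc.table("persons", "{STRING state := state, cnt := COUNT(GROUP)}",
#                   expression = "state", local = TRUE)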
|
b96b48620522ce5c61fd0fbea03ae51ada44cc64
|
6393059185fa3456b16d4e69193828686bcc3a22
|
/Code/step2_eventcode/step2_eventcode_NVMS.R
|
6f65e3f06de44eada064343014845b02c25ff3d8
|
[] |
no_license
|
zhukovyuri/xSub_ReplicationCode
|
49c6f092a8dfdd44762e493ed4155dedcc37c371
|
a8c83d60ed1076d5a22f4d3fb3bc28fbbbcd0a81
|
refs/heads/master
| 2021-05-03T10:21:31.921202
| 2019-06-19T00:33:06
| 2019-06-19T00:33:06
| 120,532,879
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,858
|
r
|
step2_eventcode_NVMS.R
|
rm(list=ls())
## Set directory
setwd("~/")
if("XSub"%in%dir()){setwd("~/XSub/Data/")}
if("Dropbox2"%in%dir()){setwd("~/Dropbox2/Dropbox (Zhukov research team)/XSub/Data/")}
# setwd("F:/Dropbox (Zhukov research team)/XSub/Data/")
## Install & load packages (all at once)
list.of.packages <- c("gdata","countrycode","maptools","foreign","plotrix","sp","raster","rgeos","gdata","parallel","foreach","doParallel")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]; if(length(new.packages)){install.packages(new.packages,dependencies=TRUE)}
lapply(list.of.packages, require, character.only = TRUE)
#############################
## Create event-level data
#############################
## NVMS: Indonesia
#############################
## Load custom functions
source("Code/functions.R")
# Load events
load("Input/Events/NVMS/NVMS_GEO.RData")
source("Code/step2_eventcode/step2x_event_types_list.R")
source("Code/step2_eventcode/step2x_eventType_function.R")
head(nvms.raw)
tail(nvms.raw)
colnames(nvms.raw)
dim(nvms.raw) ##241849 by 104
#Create the DATE column
data <- nvms.raw
data$DATE <- as.Date(data$tanggal_Kejadian,"%m/%d/%Y")
data$DATE <- gsub("-", "", data$DATE)
head(data)
tail(data)
# Precision codes
head(data)[,1:20]
data$GEOPRECISION0 <- "settlement"
data$GEOPRECISION0[which((data$Kecamatan1==""|is.na(data$Kecamatan1)))] <- "adm2"
data$TIMEPRECISION0 <- "day"
tail(data)
# By country
cntz <- "IDN"
subdata <- data
head(subdata)
tail(subdata)
## By country
disag <- sort(unique(subdata$ISO3))
j <- 1; disag[j]
# Dates & locations
sub.datez <- as.numeric(as.character(subdata$DATE)) #*10000+as.numeric(as.character(subdata$Month))*100+as.numeric(as.character(subdata$Calendar.Day...End))
sub.lat <- subdata$LAT
sub.long <- subdata$LONG
sub.precis <- subdata$GEOPRECISION0
sub.tprecis <- subdata$TIMEPRECISION0
sub0 <- data.frame(SOURCE=paste0("NVMS_Indonesia"),CONFLICT=countrycode(cntz,"iso3c","country.name"),COWN=countrycode(cntz,origin = "iso3c",destination = "cown"),COWC=countrycode(cntz,origin = "iso3c",destination = "cowc"),ISO3=countrycode(cntz,origin = "iso3c",destination = "iso3c"),DATE=sub.datez,LAT=sub.lat,LONG=sub.long,GEOPRECISION=sub.precis,TIMEPRECISION=sub.tprecis)
head(sub0)
# Actors (based on the article)
sub0$INITIATOR_SIDEA <- 1*(subdata$actor_s1_tp=="5"|subdata$actor_s1_tp=="6"|subdata$actor_s1_tp=="10"|subdata$actor_s1_tp=="14"|subdata$actor_s1_tp=="15"|subdata$actor_s1_tp=="16"|subdata$actor_s1_tp=="19")
sub0$INITIATOR_SIDEB <- 1*(subdata$actor_s1_tp=="3"|subdata$actor_s1_tp=="17")
sub0$INITIATOR_SIDEC <- 1*(subdata$actor_s1_tp=="4")
sub0$INITIATOR_SIDED <- 1*(subdata$actor_s1_tp=="2"|subdata$actor_s1_tp=="7"|subdata$actor_s1_tp=="8"|subdata$actor_s1_tp=="9"|subdata$actor_s1_tp=="11"|subdata$actor_s1_tp=="12"|subdata$actor_s1_tp=="13"|subdata$actor_s1_tp=="18")
sub0$TARGET_SIDEA <- 1*(subdata$actor_s2_tp=="5"|subdata$actor_s2_tp=="6"|subdata$actor_s2_tp=="10"|subdata$actor_s2_tp=="14"|subdata$actor_s2_tp=="15"|subdata$actor_s2_tp=="16"|subdata$actor_s2_tp=="19")
sub0$TARGET_SIDEB <- 1*(subdata$actor_s2_tp=="3"|subdata$actor_s2_tp=="17")
sub0$TARGET_SIDEC <- 1*(subdata$actor_s2_tp=="4")
sub0$TARGET_SIDED <- 1*(subdata$actor_s2_tp=="2"|subdata$actor_s2_tp=="7"|subdata$actor_s2_tp=="8"|subdata$actor_s2_tp=="9"|subdata$actor_s2_tp=="11"|subdata$actor_s2_tp=="12"|subdata$actor_s2_tp=="13"|subdata$actor_s2_tp=="18")
# Dyads
sub0$DYAD_A_A <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDEA
sub0$DYAD_A_B <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDEB
sub0$DYAD_A_C <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDEC
sub0$DYAD_A_D <- sub0$INITIATOR_SIDEA*sub0$TARGET_SIDED
sub0$DYAD_B_A <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDEA
sub0$DYAD_B_B <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDEB
sub0$DYAD_B_C <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDEC
sub0$DYAD_B_D <- sub0$INITIATOR_SIDEB*sub0$TARGET_SIDED
sub0$DYAD_C_A <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDEA
sub0$DYAD_C_B <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDEB
sub0$DYAD_C_C <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDEC
sub0$DYAD_C_D <- sub0$INITIATOR_SIDEC*sub0$TARGET_SIDED
sub0$DYAD_D_A <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDEA
sub0$DYAD_D_B <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDEB
sub0$DYAD_D_C <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDEC
sub0$DYAD_D_D <- sub0$INITIATOR_SIDED*sub0$TARGET_SIDED
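# (each DYAD_X_Y flag above is 1 only when the initiator is side X and the target is side Y)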
# Actions (indiscriminate = violence vs. civilians)
sub0$ACTION_ANY <- 1
sub0$ACTION_IND <- 1*(subdata$ben_kek1%in%c(2,6,9))
sub0$ACTION_DIR <- 1*(subdata$ben_kek1%in%c(7,8,10,11,13,14))
sub0$ACTION_PRT <- 1*(subdata$ben_kek1%in%c(3,5))
# Actor-action
sub0$SIDEA_ANY <- sub0$INITIATOR_SIDEA*sub0$ACTION_ANY
sub0$SIDEA_IND <- sub0$INITIATOR_SIDEA*sub0$ACTION_IND
sub0$SIDEA_DIR <- sub0$INITIATOR_SIDEA*sub0$ACTION_DIR
sub0$SIDEA_PRT <- sub0$INITIATOR_SIDEA*sub0$ACTION_PRT
sub0$SIDEB_ANY <- sub0$INITIATOR_SIDEB*sub0$ACTION_ANY
sub0$SIDEB_IND <- sub0$INITIATOR_SIDEB*sub0$ACTION_IND
sub0$SIDEB_DIR <- sub0$INITIATOR_SIDEB*sub0$ACTION_DIR
sub0$SIDEB_PRT <- sub0$INITIATOR_SIDEB*sub0$ACTION_PRT
sub0$SIDEC_ANY <- sub0$INITIATOR_SIDEC*sub0$ACTION_ANY
sub0$SIDEC_IND <- sub0$INITIATOR_SIDEC*sub0$ACTION_IND
sub0$SIDEC_DIR <- sub0$INITIATOR_SIDEC*sub0$ACTION_DIR
sub0$SIDEC_PRT <- sub0$INITIATOR_SIDEC*sub0$ACTION_PRT
sub0$SIDED_ANY <- sub0$INITIATOR_SIDED*sub0$ACTION_ANY
sub0$SIDED_IND <- sub0$INITIATOR_SIDED*sub0$ACTION_IND
sub0$SIDED_DIR <- sub0$INITIATOR_SIDED*sub0$ACTION_DIR
sub0$SIDED_PRT <- sub0$INITIATOR_SIDED*sub0$ACTION_PRT
events <- sub0
# EventType
source("Code/step2_eventcode/step2x_event_types_list.R")
types.specific
events0 <- as.data.frame(matrix(0,nrow=nrow(events),ncol=length(types.specific)))
names(events0) <- paste0("ACTION_",types.specific)
events0 <- cbind(data.frame(ID_TEMP=1:nrow(events)),events0)
names(events0)
head(subdata)
events0$ACTION_FIREFIGHT <- 1*(subdata$wpn.fire>0)
events0$ACTION_KIDNAP <- 1*(subdata$kidnap_tot>0)
events0$ACTION_PROPERTY <- 1*(apply(subdata[,c("build_dmg_total", "bdg_des")],1,function(x){sum(x, na.rm=T)>0})|subdata$ben_kek1%in%c(10,14))
events0$ACTION_KILLING <- 1*apply(subdata[,c("kil_total","kil_f")],1,function(x){sum(x, na.rm=T)>0})
events0$ACTION_TERROR <- 1*(subdata$ben_kek1%in%c(9))
events0$ACTION_SIEGE <- 1*(subdata$ben_kek1%in%c(4))
events0$ACTION_ROBBERY <- 1*(subdata$ben_kek1%in%c(14))
events0$ACTION_RIOT <- 1*(subdata$ben_kek1%in%c(5))
events0$ACTION_PROTEST <- 1*(subdata$ben_kek1%in%c(3))
events0$ACTION_RAID <- 1*(subdata$ben_kek1%in%c(6:8,11))
events0$ACTION_STORM <- 1*(subdata$ben_kek1%in%c(2))
head(events0)
# Save
save(events,file=paste0("Output/Output_NVMS/Events/NVMS_Events_",countrycode(cntz,origin = "iso3c",destination = "iso3c"),".RData"))
events0 <- events0[,names(events0)[!names(events0)%in%names(events)]]
save(events0,file=paste0("Output/Output_NVMS/Events/EventType/NVMS_EventType_",disag[j],".RData"))
summary(events)
|
fd714b74d4c55fd57a507e0edc06eae5c763ea69
|
cc88729bf3878baf40307b2c14accbf0767369a3
|
/hw07/analysis.R
|
2a024aae6e82ebd99695d400d37f5880654ad6d2
|
[] |
no_license
|
chbrown/dbml
|
aeabf4cbcbe7728a7fe26985b3322d177c94e1b0
|
cfb68b5cd6e8aa94a952cac6f40eeee86231ae73
|
refs/heads/master
| 2021-01-15T11:28:48.164220
| 2012-04-24T18:03:05
| 2012-04-24T18:03:05
| 3,356,282
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,103
|
r
|
analysis.R
|
source('~/.R.rc')
library(ggplot2)
library(scales)
setwd('/Volumes/Zooey/Dropbox/ut/machine-learning/hw07')
# plots are generated with draw(); the function is defined below and all calls to it follow the definition
# in.csv = 'results/qlearn-s40x7-o10-e9-directions-f.tsv'
# results = read.csv(in.csv)
# results = results[1:300,]
draw = function(in.csv, title) {
results = read.csv(in.csv)
p = ggplot(results, aes(x=epoch, y=reward/time, group=module)) +
geom_line(aes(colour=module), alpha=0.4) +
geom_hline(aes(yintercept=0, colour='Obstacle Best')) +
geom_hline(aes(yintercept=1, colour='Finish Best')) +
geom_smooth(aes(colour=module)) +
scale_colour_discrete(name="Modules") +
ylab("Reward / time spent") + xlab("Time")
options = opts(
title=title,
plot.title=theme_text(size=16, face="bold", vjust=1),
axis.title.x=theme_text(face="bold", size=14),
axis.title.y=theme_text(face="bold", size=14, angle=90, vjust=0.5, hjust=0.5),
axis.text.x=theme_text(colour="black", size=12),
axis.text.y=theme_text(colour="black", size=12),
legend.text=theme_text(size=14),
legend.title=theme_text(size=18, hjust=-0.01, face="bold")
)
print(p + options)
out.nodots = gsub('.', '-', in.csv, fixed = TRUE)  # '.' is a regex metacharacter; fixed = TRUE replaces the literal dots
out.pdf = sub('-(t|c)sv$', '.pdf', out.nodots)
ggsave(out.pdf, width=9, height=5)
}
draw('results/qlearn.s50x10.o20.tsv', '50 x 10, 20 obstacles, e=0.9, g=0.8, a=0.01')
draw('results/qlearn.s50x10.o20.e99.tsv', '50 x 10, 20 obstacles, e=0.99, g=0.8, a=0.01')
draw('results/qlearn.s120x3.o10.tsv', '120 x 3, 10 obstacles, e=0.99, g=0.8, a=0.01')
draw('results/qlearn.s120x3.o10-2sight.tsv', '120 x 3, 10 obstacles, e=0.999, g=0.8, a=0.01, 2-away State Space')
draw('results/qlearn.s40x7.o10.e99-directions.tsv', '40 x 7, 10 obstacles, e=0.99, g=0.8, a=0.001, 2-away State Space')
draw('results/qlearn-s80x5-o10-e0.99-a0.001-g0.8-.csv', '80 x 5, 10 obstacles, e=0.99, g=0.8, a=0.001, High penalties')
draw('results/qlearn-s80x5-o10-e0.99-a0.001-g0.8-low.csv', '80 x 5, 10 obstacles, e=0.99, g=0.8, a=0.001, Low penalties')
draw('results/qlearn-s80x5-o6-high-penalty.csv', '80 x 5, 6 obstacles, e=0.9, g=0.8, a=0.01, High penalties')
|
7898f4947011686ee64100d38a872ff799ac161a
|
7fe986936ea322468b33aec707ad00c5b62a4de4
|
/R/summarizeHisse.R
|
6f533f567280e053f8ee0f866934ee6980b27bd6
|
[] |
no_license
|
thej022214/hisse
|
1ab46f175677f6c28ce6c7803639aa4eae48f571
|
b7ce7d0a9f83122f5620c0b07050f31574b96094
|
refs/heads/master
| 2023-08-03T10:54:41.234175
| 2023-07-27T22:03:23
| 2023-07-27T22:03:23
| 38,690,262
| 4
| 9
| null | 2021-11-22T21:38:03
| 2015-07-07T13:43:34
|
R
|
UTF-8
|
R
| false
| false
| 561
|
r
|
summarizeHisse.R
|
#STUB for code to summarize results for users. Also consider output from plot.hisse.states
#
# all.poly <- ls() #list all output hisse objects
# all.poly <- all.poly[grepl("poly", all.poly)]
# all.poly <- all.poly[-which(all.poly=="poly")]
# all.poly <- all.poly[-which(all.poly=="poly.dat")]
# all.poly.list <- list()
# for (i in sequence(length(all.poly))) {
# all.poly.list[[i]] <- eval(parse(text=all.poly[i]))
# names(all.poly.list)[i] <- all.poly[i]
# }
# all.AICc <- unlist(lapply(all.poly.list, "[[", "AICc"))
# all.AICc <- all.AICc-min(all.AICc)
#
|
9aabbf89606b77f7715c7afadbe323afa2e09ba7
|
94014ad2e9085e73eb3b95e97120ecb30859f088
|
/R/compute_control.R
|
1f8586ad308a1c74884c55b5ef9de8880b4d699d
|
[] |
no_license
|
aarzalluz/scfilters
|
e491e42580c78c187dbc007629656b5768617b58
|
6c200fb49309003f147879787d6cc423931e61ff
|
refs/heads/master
| 2021-08-23T21:05:15.837834
| 2016-08-07T17:07:39
| 2016-08-07T17:07:39
| 60,845,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,716
|
r
|
compute_control.R
|
#' Generate a negative control.
#'
#' Computes correlations for all the randomized windows against a window of choice.
#'
#' \code{compute_control} only correlates the set of randomized windows to one of the windows
#' into which the data set was divided. Therefore, it needs to be called once for each window.
#'
#' The correlation vector for each random is generated by iterating the genes in it and
#' correlating them to those in the selected window. As a result, there will be as many
#' correlation values in the vector as genes in the top window. At the same time, the output
#' will have as many elements as randomized versions of the top window have been computed.
#' Consequently, both top window size and number of randomizations impact the computation speed
#' of the process.
#'
#' The correlation method argument is passed on to the \code{cor} function, in the \code{stats}
#' package, and therefore, the same options as this function provides are available. However, it
#' is advisable to use Pearson correlation, since it presents the most advantageous balance of
#' result quality and computational efficiency.
#'
#' @param randomized_windows A list containing as many elements as randomized top windows
#' have been computed.
#'
#' @param dataset A data frame containing the binned data.
#'
#' @param window_number An integer indicating the bin for which the control is being computed.
#'
#' @param cor_method A string indicating the type of correlation to use.
#'
#' @return A list containing the negative control: a vector of correlations corresponding to
#' correlating each randomized window to the chosen window in the data.
#'
#' @importFrom dplyr select
#' @importFrom magrittr %>%
compute_control <- function(randomized_windows, dataset, window_number, cor_method){
# select a window from the actual data and extract only expression values
selected_window <- subset(dataset, dataset$bin == window_number)
selected_window <- select(selected_window, -mean, -CV, -stdev, -bin)
all_correlations <- list()
for (i in seq_len(length(randomized_windows))){
# create empty list to store sub-calculations
window_correlations <- list()
# extract each randomized window
selected_random <- randomized_windows[[i]] %>% as.matrix()
for (j in seq_len(nrow(randomized_windows[[i]]))){
# compute correlation for each gene in the selected random window
# against all genes in the actual data window
window_correlations[[j]] <- cor(selected_random[j,], t(selected_window),
method = cor_method) %>% as.vector()
}
all_correlations[[i]] <- do.call(c, window_correlations)
}
return(all_correlations)
}
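# A hypothetical usage sketch (the object names below are illustrative assumptions, not part of the package):
# binned    <- ...  # data frame of binned expression data with mean, CV, stdev and bin columns
# rand_tops <- ...  # list of randomized top windows (one matrix of genes per element)
# neg_ctrl <- compute_control(randomized_windows = rand_tops, dataset = binned,
#                             window_number = 1, cor_method = "pearson")
# lengths(neg_ctrl)  # one correlation vector per randomized window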
|
1f21f99fe7e53297fe078fa6eac64b0348d4155c
|
2807d9d3515aadba1d63d306ca7106913f6174f1
|
/man/tt.Rd
|
650a8ce9e94d8f01a27c986018af2b997b895b27
|
[
"MIT"
] |
permissive
|
oneilsh/tidytensor
|
09fca1a33aab4001f9805c98824725e333f2e09f
|
14f5b87d2dfae20eb35cfd974adf660a4fd89980
|
refs/heads/master
| 2021-10-12T02:18:35.630630
| 2021-10-07T21:00:59
| 2021-10-07T21:00:59
| 160,564,655
| 5
| 1
|
NOASSERTION
| 2021-10-07T20:50:49
| 2018-12-05T18:58:21
|
R
|
UTF-8
|
R
| false
| true
| 1,421
|
rd
|
tt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/as.tidytensor.R
\name{tt}
\alias{tt}
\title{Convert a vector, matrix, or array to a tidytensor type.}
\usage{
tt(x, ...)
}
\arguments{
\item{x}{input to convert to a tidytensor.}
\item{...}{additional arguments to be passed to or from methods (ignored).}
}
\value{
a new tidytensor.
}
\description{
\code{tt()} is a convenience shorthand for \code{as.tidytensor()}. Given a vector, matrix, or array, returns a tidytensor.
If given a vector, converts to a 1-d array supporting \code{dim()}, matrices are left as matrices,
and in all cases the class 'tidytensor' is added.
}
\details{
Matrices are synonymous with 2-d arrays, so these are left as is. Vectors are converted
to 1-d arrays so that they can support \code{dim()}.
}
\examples{
# From an array (representing e.g. 30 26x26 images (30 sets of 26 rows of 26 pixels))
a <- array(rnorm(30 * 26 * 26), dim = c(30, 26, 26))
t <- tt(a)
ranknames(t) <- c("sample", "row", "pixel")
print(t)
# From a matrix (representing e.g. a 26x26 image (26 rows of 26 pixels)) using \%>\%
library(magrittr)
t <- matrix(rnorm(26 * 26), nrow = 26, ncol = 26) \%>\% tt()
ranknames(t) <- c("row", "pixel")
print(t)
# From a vector (representing e.g. 26 pixel values)
v <- rnorm(26)
t <- tt(rnorm(26))
ranknames(t) <- c("pixel")
print(t)
}
\seealso{
\code{\link{print.tidytensor}}, \code{\link{ranknames}}.
}
|
254fa694c36bdec66a0081877ddb3de25f5908dd
|
a1a21de1a0f0066c1d2ccc7aa15640ed8bdbd86d
|
/scripts/italy_mobility_data.R
|
cd998e93675ec888c69ce6fc50a2620709841bb7
|
[
"MIT"
] |
permissive
|
TheEconomist/covid-19-italy-herd-immunity-GD
|
7db1e81cbef09f19bd3995e62ca767fa71175312
|
a4aacc3057ff15e5eaec060fe55f29cf265ff112
|
refs/heads/main
| 2023-01-03T02:02:56.647574
| 2020-10-28T19:47:13
| 2020-10-28T19:47:13
| 308,123,994
| 8
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,775
|
r
|
italy_mobility_data.R
|
### Italy mobility data:
## Load mobility data (in logic because quite a large file):
if(!exists("mobility")){
library(readr)
mobility <- read_csv("https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv")
mobility$country_region[mobility$country_region == "The Bahamas"] <- "Bahamas"
mobility$country_region[mobility$country_region == "Côte d'Ivoire"] <- "Cote d'Ivoire"
mobility$country_region[mobility$country_region == "Cape Verde"] <- "Cabo Verde"
mobility$country_region[mobility$country_region == "Myanmar (Burma)"] <- "Myanmar"
mobility$country_region[mobility$country_region == "Antigua and Barbuda"] <- "Antigua & Barbuda"
mobility$country_region[mobility$country_region == "United Kingdom"] <- "Britain"
mobility$country_region[mobility$country_region == "Trinidad and Tobago"] <- "Trinidad & Tobago"
mobility <- data.frame(mobility)
mobility$date <- as.Date(mobility$date)
}
# Function to prep mobility data:
prep_mobility <- function(dat, collapse.regions = T){
# Get region identifier:
dat$id <- paste0(dat$country_region, "_", dat$sub_region_1, "_", dat$sub_region_2, "_", dat$metro_area)
if(collapse.regions){
# Take means within regions-dates:
for(i in grep("change_from_baseline", colnames(dat), value = T)){
dat[, i] <- ave(dat[, i], paste0(dat$id, "_", as.numeric(dat$date)), FUN = function(x) mean(x, na.rm = T))
}
# Get unique region-dates:
dat <- dat[!duplicated(paste0(dat$id, "_", as.numeric(dat$date))), ]
}
# Calculate mobility for a few combined categories:
dat$mob_all_except_residential_change_from_baseline <- rowMeans(dat[, c(
"retail_and_recreation_percent_change_from_baseline",
"grocery_and_pharmacy_percent_change_from_baseline",
"parks_percent_change_from_baseline",
"transit_stations_percent_change_from_baseline",
"workplaces_percent_change_from_baseline")], na.rm = T)
dat$mob_transit_work_change_from_baseline <- rowMeans(dat[, c("transit_stations_percent_change_from_baseline",
"workplaces_percent_change_from_baseline")], na.rm = T)
dat$mob_transit_work_grocert_pharmacy_retail_change_from_baseline <- rowMeans(dat[, c(
"retail_and_recreation_percent_change_from_baseline",
"grocery_and_pharmacy_percent_change_from_baseline",
"transit_stations_percent_change_from_baseline",
"workplaces_percent_change_from_baseline")], na.rm = T)
# Sort by date
dat <- dat[order(dat$date), ]
# Calculate 7-day moving average
fun_7dma <- function(x){
unlist(lapply(1:length(x), FUN = function(i){
mean(x[max(c(1,i-3)):min(c(length(x), i+3))], na.rm = T)
}))
}
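  # Sanity check of the centred 7-day window (illustrative): fun_7dma(1:7) returns
  # c(mean(1:4), mean(1:5), mean(1:6), mean(1:7), mean(2:7), mean(3:7), mean(4:7)),
  # i.e. the averaging window is truncated near both ends of the series.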
dat <- data.frame(dat)
for(i in grep("baseline", colnames(dat), value = T)){
dat[, paste0(i, "_7dma")] <- ave(dat[, i], dat$id, FUN = function(x) fun_7dma(x))
}
# Remove if missing:
# for(j in grep("baseline", colnames(mobility))){
# dat[is.na(dat[, j]), grep("baseline", setdiff(colnames(dat), colnames(mobility)), value = T)] <- NA
#}
# Plot for level below national:
dat$level <- "National"
dat$level[!is.na(dat$sub_region_1) & is.na(dat$sub_region_2)] <- "Sub-national"
dat$level[!is.na(dat$sub_region_2)] <- "Sub-sub-national"
return(dat)}
# Restrict to a few countries:
dat <- prep_mobility(mobility[mobility$country_region %in% c("Italy", "Britain", "Spain", "France", "Germany", "Sweden", "Denmark"), ])
# Note: here we are collapsing areas within regions through a simple average - below we take the proper average by population, which depends on identifying the superregions by iso code.
# Plot these countries:
library(ggplot2)
ggplot(dat[dat$level == "Sub-national", ], aes(x=date, y=mob_transit_work_grocert_pharmacy_retail_change_from_baseline_7dma, col = sub_region_1))+geom_line()+facet_grid(.~country_region)+theme_minimal()+theme(legend.position = "none")+ylab("Mobility, change from baseline\n(excepting parks and residencies)")+xlab("")
ggsave("plots/mobility_in_italy_and_comparative_countries.png", width = 6, height = 4)
write.csv(dat, "output-data/mobility_in_italy_and_comparative_countries.csv")
# Restrict to a Lombardy:
lom <- prep_mobility(mobility[mobility$sub_region_1 == "Lombardy", ],
collapse.regions = FALSE)
# Calculate 7-day moving average by iso:
fun_7dma <- function(x){
unlist(lapply(1:length(x), FUN = function(i){
mean(x[max(c(1,i-3)):min(c(length(x), i+3))], na.rm = T)
}))
}
lom[, "mob_transit_work_grocert_pharmacy_retail_change_from_baseline_7dma"] <- ave(lom[, "mob_transit_work_grocert_pharmacy_retail_change_from_baseline"], lom$iso_3166_2_code, FUN = function(x) fun_7dma(x))
lom <- lom[!is.na(lom$iso_3166_2_code), ]
# Dropping Sondrio province, which unfortunately had extensive missing data problems:
lom <- lom[lom$iso_3166_2_code != "IT-SO", ]
ggplot(lom, aes(x=date, y=mob_transit_work_grocert_pharmacy_retail_change_from_baseline_7dma, col = iso_3166_2_code))+geom_line()+theme_minimal()+theme(legend.position = "bottom", legend.title = element_blank())+xlab("Mobility")+ylab("Mobility compared to baseline")+geom_line(data = lom[lom$iso_3166_2_code == "IT-BG", ], size =2, alpha= 0.5)
ggsave("plots/mobility_in_lombardy.png", width = 6, height = 4)
write.csv(lom, "output-data/mobility_in_lombardy.csv")
# Calculate average for Italian regions except Lombardy (iso restriction selects region averages):
it <- prep_mobility(mobility[mobility$country_region == "Italy" & mobility$iso_3166_2_code %in%
c("IT-21", "IT-23", "IT-25", "IT-32", "IT-34", "IT-36", "IT-42", "IT-45",
"IT-52", "IT-55", "IT-57", "IT-62", "IT-65", "IT-67", "IT-72", "IT-75",
"IT-77", "IT-78", "IT-82", "IT-88"), ], collapse.regions = F)
# Check to make sure this is all 15 regions and 5 autonomous regions:
if(length(unique(it$sub_region_1)) != 20){
stop("missing regions")
}
# Plot by provinces:
ggplot(it[, ], aes(x=date, y=mob_transit_work_grocert_pharmacy_retail_change_from_baseline_7dma, col = sub_region_1))+geom_line()+geom_line(data = it[it$iso_3166_2_code == "IT-25", ], size = 2, alpha = 0.2)+theme_minimal()+theme(legend.position = "bottom", legend.title = element_blank())+xlab("Mobility")+ylab("Mobility compared to baseline\n(Lombardy highlighted)")
ggsave("plots/mobility_in_italy.png", width = 10, height = 5)
# Add population in millions:
it$pop <- NA
it$pop[it$sub_region_1 == "Lombardy"] <- 10.06
it$pop[it$sub_region_1 == "Lazio"] <- 5.88
it$pop[it$sub_region_1 == "Campania"] <- 5.80
it$pop[it$sub_region_1 == "Sicily"] <- 5.00
it$pop[it$sub_region_1 == "Veneto"] <- 4.90
it$pop[it$sub_region_1 == "Emilia-Romagna"] <- 4.46
it$pop[it$sub_region_1 == "Piedmont"] <- 4.36
it$pop[it$sub_region_1 == "Apulia"] <- 4.03
it$pop[it$sub_region_1 == "Tuscany"] <- 3.73
it$pop[it$sub_region_1 == "Calabria"] <- 1.95
it$pop[it$sub_region_1 == "Sardinia"] <- 1.64
it$pop[it$sub_region_1 == "Liguria"] <- 1.55
it$pop[it$sub_region_1 == "Marche"] <- 1.53
it$pop[it$sub_region_1 == "Abruzzo"] <- 1.31
it$pop[it$sub_region_1 == "Friuli-Venezia Giulia"] <- 1.21
it$pop[it$sub_region_1 == "Trentino-South Tyrol"] <- 1.07
it$pop[it$sub_region_1 == "Umbria"] <- 0.88
it$pop[it$sub_region_1 == "Basilicata"] <- 0.56
it$pop[it$sub_region_1 == "Molise"] <- 0.31
it$pop[it$sub_region_1 == "Aosta"] <- 0.13
it_except_lom <- it[it$sub_region_1 != "Lombardy" & !is.na(it$sub_region_1), ]
total_pop_except_lom <- sum(it_except_lom[it_except_lom$date == as.Date("2020-06-01"), "pop"])
# Calculate population-weighted average:
for(i in grep("change_from_baseline", colnames(it_except_lom), value = T)){
# it_except_lom[, paste0(i, "_raw_mean")] <- ave(it_except_lom[, i], it_except_lom$date, FUN = mean)
it_except_lom[, i] <- ave(it_except_lom[, i]*it_except_lom$pop/total_pop_except_lom, it_except_lom$date, FUN = sum)
}
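# The ave(..., FUN = sum) above computes sum_r(x_r * pop_r / total_pop) within each date,
# i.e. the population-weighted mean of the regional series (weights are the regional populations).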
it_except_lom$sub_region_1 <- it_except_lom$iso_3166_2_code <- "Italy, excepting Lombardy"
it_except_lom <- it_except_lom[!duplicated(it_except_lom$date), ]
ggplot(it_except_lom, aes(x=date, col = sub_region_1))+geom_line(aes(y=mob_transit_work_grocert_pharmacy_retail_change_from_baseline_7dma, ))+theme_minimal()+theme(legend.position = "bottom", legend.title = element_blank())+xlab("Mobility")+ylab("Mobility compared to baseline")
# Merge this into lombardy dataset:
lom_plus <- rbind(lom, it_except_lom[, colnames(lom)])
lom_plus$name <- lom_plus$iso_3166_2_code
lom_plus$name[lom_plus$iso_3166_2_code == "IT-BG"] <- "Bergamo"
write.csv(lom_plus, "output-data/mobility_in_lombardy_plus_elsewhere_in_italy.csv")
library(readr)
lom_plus <- read_csv("output-data/mobility_in_lombardy_plus_elsewhere_in_italy.csv")
ggplot(lom_plus, aes(x=date, y=mob_transit_work_grocert_pharmacy_retail_change_from_baseline_7dma,
group = name,
col = "Lombardian provinces"))+
geom_line()+
geom_line(data = lom_plus[lom_plus$name == "Bergamo", ], aes(col = "Bergamo"), size = 2, alpha = 0.5)+
geom_line(data = lom_plus[lom_plus$name == "Italy, excepting Lombardy", ], aes(col = "Italy, population-weighted average outside Lombardy"), size = 2, alpha = 0.5)+
theme_minimal()+
theme(legend.position = "bottom", legend.title = element_blank())+
ylab("Mobility compared to baseline")+
ggtitle("People stopped moving around in all parts of Lombardy, regardless of outbreak severity")
ggsave("plots/mobility_in_lombardy_plus_elsewhere_in_italy.png", width = 6, height = 4)
# Number quoted in text:
mean(it$mob_transit_work_grocert_pharmacy_retail_change_from_baseline_7dma[it$iso_3166_2_code == "IT-25" & it$date >= as.Date("2020-07-01") & it$date < as.Date("2020-08-01")], na.rm = T) # decline in Lombardy
|
45a8721538ce13c6c0e9a56c1b5c72c9390aa81e
|
785a0b85d6666e3ec45e76bbc79075473f29467c
|
/man/kdisteuclid.Rd
|
6847445705b62c02657b335821d882fe75ee34d0
|
[] |
no_license
|
ashkanfa/ade4
|
0a30a0efff3007cbf2e2a33ba704cc1335231d92
|
53a16c2bb281c6f367a03cf4ac472c9ada030c62
|
refs/heads/master
| 2021-01-01T07:36:49.682809
| 2020-02-03T12:13:31
| 2020-02-03T12:13:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,125
|
rd
|
kdisteuclid.Rd
|
\name{kdisteuclid}
\alias{kdisteuclid}
\title{a way to obtain Euclidean distance matrices}
\description{
Converts a distance matrix that is not Euclidean into a Euclidean one, either by adding a constant to the distances (Cailliez), by adding a constant to the squared distances (Lingoes), or by retaining only the non-negative eigenvalues of the associated principal coordinates analysis (quasi).
}
\usage{
kdisteuclid(obj, method = c("lingoes", "cailliez", "quasi"))
}
\arguments{
\item{obj}{an object of class \code{kdist}}
\item{method}{a method to convert a distance matrix in a Euclidean one}
}
\value{
returns an object of class \code{kdist} with all distances Euclidean.
}
\references{
Gower, J.C. and Legendre, P. (1986) Metric and Euclidean properties of dissimilarity coefficients. \emph{Journal of Classification}, \bold{3}, 5--48.
Cailliez, F. (1983) The analytical solution of the additive constant problem. \emph{Psychometrika}, \bold{48}, 305--310.
Lingoes, J.C. (1971) Some boundary conditions for a monotone analysis of symmetric matrices. \emph{Psychometrika}, \bold{36}, 195--203.
Legendre, P. and Anderson, M.J. (1999) Distance-based redundancy analysis: testing multispecies responses in multifactorial ecological experiments. \emph{Ecological Monographs}, \bold{69}, 1--24.
Legendre, P., and L. Legendre. (1998) Numerical ecology, 2nd English edition edition. Elsevier Science BV, Amsterdam.
}
\author{
Daniel Chessel \cr
Stéphane Dray \email{stephane.dray@univ-lyon1.fr}
}
\note{according to the program DistPCoa of P. Legendre and M.J. Anderson\cr
\url{http://www.fas.umontreal.ca/BIOL/Casgrain/en/labo/distpcoa.html}
}
\examples{
w <- c(0.8, 0.8, 0.377350269, 0.8, 0.377350269, 0.377350269) # see ref.
w <- kdist(w)
w1 <- c(kdisteuclid(kdist(w), "lingoes"), kdisteuclid(kdist(w), "cailliez"),
kdisteuclid(kdist(w), "quasi"))
print(w, print = TRUE)
print(w1, print = TRUE)
data(eurodist)
par(mfrow = c(1, 3))
eu1 <- kdist(eurodist) # eurodist is an object of class 'dist'; eu1 is of class 'kdist'
plot(data.frame(unclass(c(eu1, kdisteuclid(eu1, "quasi")))), asp = 1)
title(main = "Quasi")
abline(0,1)
plot(data.frame(unclass(c(eu1, kdisteuclid(eu1, "lingoes")))), asp = 1)
title(main = "Lingoes")
abline(0,1)
plot(data.frame(unclass(c(eu1, kdisteuclid(eu1, "cailliez")))), asp = 1)
title(main = "Cailliez")
abline(0,1)
}
\keyword{multivariate}
\keyword{utilities}
|
59a2bb5646c5c2e02c3a33e9d3a7351d1c65bb3c
|
bf378a66012b6470250c2c9b1e8aa9fd33c67da9
|
/R20190805_15.R
|
faee8a854bdcaa7f015d0d69ec85271b2394f08d
|
[] |
no_license
|
meenzoon/R-programming
|
baa30902c9ca232b00f62c988c13e59de97109b8
|
9065fb9f7168b5487dc314a1a82c4456fd981ee2
|
refs/heads/master
| 2022-01-21T02:11:42.853224
| 2019-08-12T08:51:24
| 2019-08-12T08:51:24
| 198,178,623
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,051
|
r
|
R20190805_15.R
|
# Class day 15 - 2019-08-05 (Monday)
library(dplyr)
library(ggplot2)
library(readxl)
library(xlsx)
write.xlsx(welfare, file = "c:/study/data2/welfare.xlsx")  # called for its side effect only; no assignment needed
welfare <- read_excel("c:/study/data1/welfare.xlsx", col_names = T)
View(welfare)
##########
### Project 3-2 - Monthly income differences by age bracket (20s, 30s, 40s, 50s, 60s, 70s)
# < Step 1 > Review and preprocess the variables (age bracket, income)
# 1-1. Check the age-bracket variable -> derive it from age
# ageg2 - six brackets (20s, 30s, 40s, 50s, 60s, 70s); any other value becomes NA
welfare$ageg2 <- ifelse(welfare$age >= 20 & welfare$age < 30, "20대",
ifelse(welfare$age >= 30 & welfare$age < 40, "30대",
ifelse(welfare$age >= 40 & welfare$age < 50, "40대",
ifelse(welfare$age >= 50 & welfare$age < 60, "50대",
ifelse(welfare$age >= 60 & welfare$age < 70, "60대",
ifelse(welfare$age >= 70 & welfare$age < 80, "70대", NA))))))
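# A more compact equivalent using cut() (alternative formulation, not in the original;
# note that cut() returns a factor rather than a character vector):
# welfare$ageg2 <- cut(welfare$age, breaks = seq(20, 80, by = 10), right = FALSE,
#                      labels = c("20대", "30대", "40대", "50대", "60대", "70대"))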
# 1-2. Check the income variable - already checked in Project 1
# < Step 2 > Analysis table (summary statistics)
# 2. Difference in mean income across age brackets
ageg2_income <- welfare %>%
filter(!is.na(income) & !is.na(ageg2)) %>%
group_by(ageg2) %>%
summarise(mean_income = mean(income))
ageg2_income
# < Step 3 > Visualization - bar chart
ggplot(data = ageg2_income, aes(x = ageg2, y = mean_income)) + geom_col()
# < Step 4 > Findings
# Findings: average monthly income is 1.89m KRW for the 20s, 2.87m for the 30s, a peak of 3.45m for the 40s, 3.19m for the 50s and 1.98m for the 60s; the 70s earn the least at 0.76m, less than half of the 20s' average.
##########
### Project 4 - Income differences by age bracket and sex
# < Step 1 > Review and preprocess the variables (age bracket, sex, income)
# 1-1. Age-bracket variable - already checked in Project 3
# 1-2. Sex variable - already checked in Project 1
# 1-3. Income variable - already checked in Project 1
# < Step 2 > Analysis table (summary statistics)
# Group by age bracket and sex and compare mean income
ageg_sex_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(ageg, sex) %>%
summarise(mean_income = mean(income))
ageg_sex_income
# < 3단계 > 시각화
# Stacked bar chart
ggplot(data = ageg_sex_income, aes(x = ageg, y = mean_income, fill = sex)) + geom_col() +
scale_x_discrete(limits = c("young", "middle", "old"))
# Separate (dodged) bars for each sex
ggplot(data = ageg_sex_income, aes(x = ageg, y = mean_income, fill = sex)) + geom_col(position = "dodge") +
scale_x_discrete(limits = c("young", "middle", "old"))
# < Step 4 > Findings
# Findings: among the young, men average 2.06m KRW and women 1.78m, a modest gap; in middle age men reach 4.00m versus 2.20m for women, nearly a twofold gap; in old age men fall to 2.03m and women to 0.84m, so the averages roughly halve while the male-female gap grows beyond twofold.
##########
### Project 5 - Income differences by age and sex
# < Step 1 > Review and preprocess the variables (age, sex, income)
# 1-1. Age variable - already checked in Project 2
# 1-2. Sex variable - already checked in Project 1
# 1-3. Income variable - already checked in Project 1
# < Step 2 > Analysis table (summary statistics)
# 2. Group by age and sex and compare mean income
age_sex_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(age, sex) %>%
summarise(mean_income = mean(income))
View(age_sex_income)
# < Step 3 > Visualization - line chart
ggplot(data = age_sex_income, aes(x = age, y = mean_income, col = sex)) + geom_line() + geom_point()
# < Step 4 > Findings
# Findings: men's income rises steadily until the late 50s and then drops sharply, whereas women's rises until the early 30s, declines gently until the early 50s, and then falls steeply; by age 80 men and women earn about the same.
##########
### Project 6 - Income differences by occupation (top 10, bottom 10)
# < Step 1 > Review and preprocess the variables (occupation code, income)
# 1-1. Check the occupation-code variable and derive an occupation variable from it
# Load the occupation code table as a data frame
list_job <- read_excel("c:/study/data1/Koweps_Codebook.xlsx", col_names = T, sheet = 2)
View(list_job)
# Left-join welfare with list_job to create the derived variable job
welfare <- left_join(welfare, list_job, by = "code_job")
# 1-2. Income variable - already checked in Project 1
# < Step 2 > Analysis table (summary statistics)
# 2-1. Mean income by occupation
job_income <- welfare %>%
filter(!is.na(income) & !is.na(job)) %>%
group_by(job) %>%
summarise(mean_income = mean(income))
View(job_income)
# 2-2. Top 10
job_income_top10 <- job_income %>%
arrange(-mean_income) %>% head(10)
job_income_top10
# 2-3. Bottom 10
job_income_bottom10 <- job_income %>%
arrange(mean_income) %>% head(10)
job_income_bottom10
# < Step 3 > Visualization - horizontal bar charts
# 3-1. Top 10
ggplot(data = job_income_top10, aes(x = reorder(job, mean_income), y = mean_income)) +
geom_col() +
coord_flip()
# 3-2. Bottom 10
ggplot(data = job_income_bottom10, aes(x = reorder(job, -mean_income), y = mean_income)) +
geom_col() +
coord_flip()
# < Step 4 > Findings
# 4-1. Top-10 findings: '보험 및 금융 관리자' (insurance and finance managers) earn the most at 8.22m KRW, followed by legislators, senior officials and public-body executives, HR and management specialists, research/education/legal managers, plate and sheet-metal workers, and medical practitioners.
# 4-2. Bottom-10 findings: '기타 서비스관련 단순 종사원' (other elementary service workers) earn the least at 0.80m KRW, followed by cleaners and sanitation workers, housekeeping and childcare helpers, health and welfare service workers, livestock workers, elementary food-related workers and elementary sales workers.
# 4-3. Overall: the best-paid occupation, insurance and finance managers at 8.22m KRW, earns nearly ten times the 0.80m of the worst-paid, other elementary service workers.
|
79d56ae46d2547ad584f4c023152eaaf9bc49ff6
|
2e40356ee4211f06d867cb38a3e5adce0b971943
|
/pedaco de codigo sobre webdriver e phantom.R
|
34773cdf0f1ad25cfe2329fb080689f3a5d030f4
|
[] |
no_license
|
DATAUNIRIO/SER_III_WebScraping
|
02bac8494d9c038d68f5b8065bca9a1bb821d4fb
|
86f1b97049ed083e48d2d5bb5e8a6da00526ee6a
|
refs/heads/master
| 2021-06-11T11:33:51.695437
| 2021-03-10T17:51:18
| 2021-03-10T17:51:18
| 134,454,007
| 0
| 0
| null | 2018-05-22T17:50:57
| 2018-05-22T17:50:57
| null |
UTF-8
|
R
| false
| false
| 676
|
r
|
pedaco de codigo sobre webdriver e phantom.R
|
#install.packages("webdriver")
library(webdriver)
#?webdriver::install_phantomjs
#install_phantomjs(version = "2.1.1",baseURL = "https://github.com/wch/webshot/releases/download/v0.3.1/")
pjs <- run_phantomjs()
pjs
ses <- Session$new(port = pjs$port)
ses
ses$go("http://www.unirio.br/")
ses$getUrl()
ses$getTitle()
ses$takeScreenshot()
titulo <- ses$findElement(".tileHeadline")
titulo$getName()
titulo$getText()
texto <- ses$findElement(".tileBody")
texto$getText()
texto2 <- ses$findElement(".description")
texto2$getText()
texto2$click()
texto2$takeScreenshot()
titulo2 <- ses$findElement(".url")
titulo2$getName()
titulo2$getText()
titulo2 <- ses$findElements(".url")  # findElements() needs a CSS selector; ".url" reused here as an illustrative choice
|
36e2ca1065067c0a79549b010f04713ea4e322d0
|
7afbb148ec11b3105aaead6bdd900f847e49eb18
|
/man/step_impute_mean.Rd
|
ddf5a866f3d805ccd6149ae65e234e9f63b78330
|
[
"MIT"
] |
permissive
|
tidymodels/recipes
|
88135cc131b4ff538a670d956cf6622fa8440639
|
eb12d1818397ad8780fdfd13ea14d0839fbb44bd
|
refs/heads/main
| 2023-08-15T18:12:46.038289
| 2023-08-11T12:32:05
| 2023-08-11T12:32:05
| 76,614,863
| 383
| 123
|
NOASSERTION
| 2023-08-26T13:43:51
| 2016-12-16T02:40:24
|
R
|
UTF-8
|
R
| false
| true
| 4,246
|
rd
|
step_impute_mean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/impute_mean.R
\name{step_impute_mean}
\alias{step_impute_mean}
\alias{step_meanimpute}
\title{Impute numeric data using the mean}
\usage{
step_impute_mean(
recipe,
...,
role = NA,
trained = FALSE,
means = NULL,
trim = 0,
skip = FALSE,
id = rand_id("impute_mean")
)
step_meanimpute(
recipe,
...,
role = NA,
trained = FALSE,
means = NULL,
trim = 0,
skip = FALSE,
id = rand_id("impute_mean")
)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the
sequence of operations for this recipe.}
\item{...}{One or more selector functions to choose variables
for this step. See \code{\link[=selections]{selections()}} for more details.}
\item{role}{Not used by this step since no new variables are
created.}
\item{trained}{A logical to indicate if the quantities for
preprocessing have been estimated.}
\item{means}{A named numeric vector of means. This is \code{NULL} until computed
by \code{\link[=prep]{prep()}}. Note that, if the original data are integers, the mean
will be converted to an integer to maintain the same data type.}
\item{trim}{The fraction (0 to 0.5) of observations to be trimmed from each
end of the variables before the mean is computed. Values of trim outside
that range are taken as the nearest endpoint.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[=bake]{bake()}}? While all operations are baked
when \code{\link[=prep]{prep()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations.}
\item{id}{A character string that is unique to this step to identify it.}
}
\value{
An updated version of \code{recipe} with the new step added to the
sequence of any existing operations.
}
\description{
\code{step_impute_mean()} creates a \emph{specification} of a recipe step that will
substitute missing values of numeric variables by the training set mean of
those variables.
}
\details{
\code{step_impute_mean} estimates the variable means from the data used
in the \code{training} argument of \code{prep.recipe}. \code{bake.recipe} then applies the
new values to new data sets using these averages.
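For example, with \code{trim = 0.1} the lowest and highest 10\% of the values
are excluded before the mean is computed (i.e. a 10\% trimmed mean).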
As of \code{recipes} 0.1.16, this function name changed from \code{step_meanimpute()}
to \code{step_impute_mean()}.
}
\section{Tidying}{
When you \code{\link[=tidy.recipe]{tidy()}} this step, a tibble with columns
\code{terms} (the selectors or variables selected) and \code{model} (the mean
value) is returned.
}
\section{Tuning Parameters}{
This step has 1 tuning parameters:
\itemize{
\item \code{trim}: Amount of Trimming (type: double, default: 0)
}
}
\section{Case weights}{
This step performs an unsupervised operation that can utilize case weights.
As a result, case weights are only used with frequency weights. For more
information, see the documentation in \link{case_weights} and the examples on
\code{tidymodels.org}.
}
\examples{
\dontshow{if (rlang::is_installed("modeldata")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
data("credit_data", package = "modeldata")
## missing data per column
vapply(credit_data, function(x) mean(is.na(x)), c(num = 0))
set.seed(342)
in_training <- sample(1:nrow(credit_data), 2000)
credit_tr <- credit_data[in_training, ]
credit_te <- credit_data[-in_training, ]
missing_examples <- c(14, 394, 565)
rec <- recipe(Price ~ ., data = credit_tr)
impute_rec <- rec \%>\%
step_impute_mean(Income, Assets, Debt)
imp_models <- prep(impute_rec, training = credit_tr)
imputed_te <- bake(imp_models, new_data = credit_te, everything())
credit_te[missing_examples, ]
imputed_te[missing_examples, names(credit_te)]
tidy(impute_rec, number = 1)
tidy(imp_models, number = 1)
\dontshow{\}) # examplesIf}
}
\seealso{
Other imputation steps:
\code{\link{step_impute_bag}()},
\code{\link{step_impute_knn}()},
\code{\link{step_impute_linear}()},
\code{\link{step_impute_lower}()},
\code{\link{step_impute_median}()},
\code{\link{step_impute_mode}()},
\code{\link{step_impute_roll}()}
}
\concept{imputation steps}
|
6734b9c7deb2ddffd47c0a49d0f41719ed6e2385
|
e6883422461e927a8ec20a096667ee6136c383e0
|
/src/Tools/SLiMRunGen/build/slim_job.R
|
02c51ddf10c87f3e7cd1fb90cd8f67e3f5dd5ad4
|
[
"CC0-1.0"
] |
permissive
|
sknief/PolygenicSLiMBook
|
02c5be9007fde8cafd67520f07917240df28d89a
|
be60cda5e0ac11fe6749a62b9fd67ed92039485e
|
refs/heads/main
| 2023-06-26T04:23:59.536363
| 2021-07-26T13:49:10
| 2021-07-26T13:49:10
| 389,590,139
| 0
| 0
|
CC0-1.0
| 2021-07-26T10:11:44
| 2021-07-26T10:11:43
| null |
UTF-8
|
R
| false
| false
| 601
|
r
|
slim_job.R
|
# Code generated by SLiM Runner: https://github.com/nobrien97/PolygenicSLiMBook/tree/main/src/Tools/SLiMRunGen
USER <- Sys.getenv('USER')
library(foreach)
library(doParallel)
library(future)
cl <- makeCluster(future::availableCores())
registerDoParallel(cl)
seeds <- read.csv("seeds.csv", header = T)
combos <- read.csv("combos.csv", header = T)
foreach(i=1:nrow(combos)) %:%
foreach(j=seeds$Seed) %dopar% {
	slim_out <- system(sprintf("/home/$USER/SLiM/slim -s %s -d s=%s -d mig_rate=%s ~/Desktop/slimrun.slim", as.character(j), combos[i,]$s, combos[i,]$mig_rate), intern=T)
}
stopCluster(cl)
|
8ab6e04b9aec12d303d2246687af6b7623ca1fc5
|
ae6c3f262f9577aa3fc0cf1f0e926dc51bd26232
|
/ui.R
|
24bd530fd80d1acd3bae6141b71660d9d8ce81d0
|
[] |
no_license
|
ksidagam/DevelopingDataProducts_ShinyProject
|
5f23237a553fc426268c99093f7d5cb5abd2fc89
|
f0bea7b9dec8a32cdd3767acad22106c49fd9d53
|
refs/heads/master
| 2016-09-05T14:10:09.439909
| 2015-07-26T13:09:27
| 2015-07-26T13:09:27
| 39,725,721
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,335
|
r
|
ui.R
|
shinyUI(pageWithSidebar(
headerPanel("Equipment Performance Calculation"),
sidebarPanel(
textInput(inputId="text1", label = "Actual Throughput"),
textInput(inputId="text2", label = "Optimum Throughput"),
p('Actual Throughput'), textOutput('text1'),
p('Optimum Throughput'), textOutput('text2'),
p('Equipment Performance'), textOutput('text3'),
strong('Equipment Performance Rate (%)'), textOutput('text4'),
textOutput('text5')),
mainPanel(
h3("Summary"),
p("This App should be useful to find the Performance rate of the Equipment in Manufacturing Plant"),
br(),
p("For Example let us take an example of Manufacturing Plant,"),
p("Plant Operator what to find's out what is the Performance rate of the Equipment in Manufacturing Plant "),
p("The performance rate is derived from production data. Performance rate compares the speed at which the unit was operated to the rated design speed."),
br(),
strong("Actual Throughput (or) Equipment Operating Time = Equipment Loading Time - Equipment Unplanned Downtime"),
br(),
br(),
strong("Optimum Throughput (or) Equipment Net Operating Time = Equipment Operating Time - Equipment Total Speed Loss"),
br(),
br(),
strong("Performance Rate = Actual Throughput/Optimum Throughput *100")
)))
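# Worked example of the performance-rate formula above (hypothetical numbers, for illustration only):
# Actual Throughput = 420, Optimum Throughput = 480
# Performance Rate = 420 / 480 * 100 = 87.5%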
|
32c3e382aa1d8e312f26b536eb9d42cf6f0e0f59
|
8b5a54ceb71941ac62d5fbf3cc0fdb409e68640d
|
/plot 1.R
|
250b16350d708f218408c67ffba6b90dc2ca51fb
|
[] |
no_license
|
wx5157/ExData_Plotting1
|
b5789972a56bc713ab1646b73698fa5879fc1736
|
7c558feb7055a73851856b346c75965a15f52a7b
|
refs/heads/master
| 2021-01-10T22:54:31.054624
| 2016-09-25T02:38:54
| 2016-09-25T02:38:54
| 69,136,264
| 0
| 0
| null | 2016-09-25T01:02:10
| 2016-09-25T01:02:10
| null |
UTF-8
|
R
| false
| false
| 844
|
r
|
plot 1.R
|
setwd("C:\\Users\\wx5157\\Desktop\\Data Science Training\\Coursera\\Course 4 EDA")
## Read in the data only for the two days of interest (2007-02-01 and 2007-02-02; skip/nrows locate those 2880 minute-level rows) ##
data=read.table("household_power_consumption.txt", header=F, sep=";", stringsAsFactors = F, skip=66637, nrows=2880)
head(data)
str(data)
names=read.table("household_power_consumption.txt", header=F, sep=";", stringsAsFactors = F, nrows=1)
colnames(data) = names[1,]
## Convert the date & time column into 1 datetime column
data$datetime = paste(data$Date, data$Time, sep = "-")
data$datetime = strptime(data$datetime, "%d/%m/%Y-%H:%M:%S")
data$Date=as.Date(data$Date, "%d/%m/%Y")
data$datetime2 = as.Date(data$datetime)
##Plot 1 ##
png(file="plot 1.png")
hist(data$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (Kilowatt)")
dev.off()
|
67540da30970f52e0dd6e920d71e0d7fa17ffbc3
|
adff17e0331625bb92dd2447070e4bc3a2ea8b3d
|
/cachematrix.R
|
1cd0b2142ec878dbacfcdd503796d04c4e45d5bf
|
[] |
no_license
|
maurizioTEST/ProgrammingAssignment2
|
d251ee9ba74a80ff2184a0f25ba5117728ca6a60
|
946f2aedbc40989fa50619224bcc05a748448ef2
|
refs/heads/master
| 2021-01-16T22:12:29.610300
| 2014-07-19T07:51:26
| 2014-07-19T07:51:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,029
|
r
|
cachematrix.R
|
#In the following we define two functions dealing with matrices.
#The goal is to avoid repeatedly computing the inverse of a
#given invertible matrix.
#The function makeCacheMatrix stores a possibly empty matrix X in the function
#environment and returns a list of 4 functions to set and get the matrix and its inverse
makeCacheMatrix <- function(X = matrix()) {
invX <- NULL
set <- function(Y) {
X <<- Y
invX <<- NULL
}
get <- function() X
setinverse <- function(inv) invX <<- inv
getinverse <- function() invX
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
#The function cacheSolve computes the inverse of a matrix as long as the
#inverse is not already in the cache. If a cached inverse exists, the function
#prints the message "getting cached data" and returns it without recomputing.
cacheSolve <- function(X, ...) {
invX <- X$getinverse()
if(!is.null(invX)) {
message("getting cached data")
return(invX)
}
data <- X$get()
invX <- solve(data, ...)
X$setinverse(invX)
invX
}
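# Usage sketch (a hypothetical 2x2 example, not part of the original assignment):
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2))
cacheSolve(m)  # first call computes solve() and stores the inverse in the cache
cacheSolve(m)  # second call prints "getting cached data" and returns the cached inverse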
|
0f7619ecc174efae5d87047e8d87557062e23790
|
5932d92b0dc72bbdc201ec074d401cd4803c177f
|
/Stock prediction.R
|
0b684f86761f0dead34e438179d731cef64ffacc
|
[] |
no_license
|
sidchakravarty79/Stock-movement
|
9fff6540ff59da670d6cc3b9a3c3b6553cf12b3f
|
06d8d0729c9c42cac40962c8a2ed53d0b0e56879
|
refs/heads/master
| 2020-04-06T04:11:48.730996
| 2018-10-18T19:51:52
| 2018-10-18T19:51:52
| 83,007,646
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,218
|
r
|
Stock prediction.R
|
library("ggplot2")
library("gridExtra")
library("dplyr")
library("e1071")
library("FBN")
library("caret")
library("fpc")
library("mvoutlier")
library("som")
library("DMwR")
library("dtt")
library("plotly")
################################################
# The code above will load any missing packages required
# Delete any predictions.csv file in the working folder
if (file.exists("predictions.csv") == T) {file.remove("predictions.csv")}
#Save stock_returns_base150.csv file in the working directory
train<-read.csv("stock_returns_base150.csv",header = T,nrows = 50,stringsAsFactors = T)
test<-read.csv("stock_returns_base150.csv",header = T,skip = 50,nrows = 50,stringsAsFactors = T)
colnames(test)<-colnames(train)
# Pairwise correlations and scatterplot matrix
plot(train[,-1])
cor.test<-corr.test(train[,-1])
pairs.panels(train[,-1],show.points =FALSE,scale = T,ellipses = F,lm=T,pch=".", cor=T,
cex.cor = 0.9)
# LM Model 1 using S2 through S10 as indepdent variables
lm_model1<-lm(S1~.,data=train[,-1])
summary(lm_model1)
modelRMSE1 <- rmse(lm_model1$residuals)
plot(lm_model1)
# LM Model 2 using S2 and S8 as indepdent variables
lm_model2<-lm(S1~S2+S8,data=train[,-1])
summary(lm_model2)
modelRMSE2 <- rmse(lm_model2$residuals)
# LM Model 3 using S2,S3,S7 and S8 as indepdent variables
lm_model3<-lm(S1~S2+S3+S7+S8,data=train[,-1])
summary(lm_model3)
modelRMSE3 <- rmse(lm_model3$residuals)
# LM Model 4 using S2,S3,S6,S7 and S8 as indepdent variables
lm_model4<-lm(S1~S2+S3+S6+S7+S8,data=train[,-1])
summary(lm_model4)
modelRMSE4 <- rmse(lm_model4$residuals)
# ANOVA comparison
compare1_2<-anova(lm_model1,lm_model2)
compare2_3<-anova(lm_model2,lm_model3)
compare3_4<-anova(lm_model3,lm_model4)
# Predict S1 based on train data
predict_lm_train<-predict(lm_model3,train[,-1],se.fit = T,residuals=T)
R2(predict_lm_train$fit,train$S1)
RMSE(predict_lm_train$fit,train$S1)
# Plot model vs acual S1 values
dates <- as.Date(train$date,'%m/%d/%Y')
ggplot(train,aes(dates,train$S1)) + geom_path(colour="red") +
geom_path(aes(dates,predict_lm_train$fit),colour="blue") +
scale_y_continuous(name = "S1") + scale_x_date(name = "Date",date_breaks = "1.5 weeks")
# Predict S1 from test data using LM model
predict_lm_test<-predict(lm_model3,test,se.fit = T,residuals=T,na.action = na.omit)
# RBF kernel SVM model tuning grid 1
tune_model<-tune.svm(S1~S2+S3+S7+S8,data = train[,-1],type="eps",kernel="radial",
cost= seq(1,1000,by=5),gamma =seq(0.1,20,by=0.5),
tunecontrol= tune.control(sampling ="cross",cross = 10))
tune.model<-as.data.frame(tune_model$performances)
plot_ly(x=tune.model$gamma,y=tune.model$cost,z=tune.model$error,type="contour") %>%
layout(xaxis=list(title="Gamma"),yaxis=list(title="Cost"),legend=list(title="error"))
train_svm<-svm(S1~S2+S3+S7+S8,data = train[,-1],type= "eps",
kernel="radial", cost=tune_model$best.parameters$cost,
gamma = tune_model$best.parameters$gamma,probability=T)
train_p<-predict(train_svm,train[,-1],probability = T)
ggplot(train,aes(dates,train$S1)) + geom_path(col="red") +
geom_path(aes(dates,train_p),col="blue") +
scale_y_continuous(name = "S1 predict") + scale_x_date(name = "Date")
RMSE_model<-RMSE(train_p,train$S1)
R2_model<-R2(train_p,train$S1)
# RBF kernel SVM model tuning grid 2
tune_model2<-tune.svm(S1~S2+S3+S7+S8,data = train[,-1],type="eps",kernel="radial",
cost= seq(1,10,by=0.5),gamma =seq(0.01,1,by=0.01),
tunecontrol= tune.control(sampling ="cross",cross = 10))
tune.model2<-as.data.frame(tune_model2$performances)
plot_ly(x=tune.model2$gamma,y=tune.model2$cost,z=tune.model2$error,type="contour") %>%
layout(xaxis=list(title="Gamma"),yaxis=list(title="Cost"),legend=list(title="error"))
train_svm2<-svm(S1~S2+S3+S7+S8,data = train[,-1],type= "eps", kernel="radial",
cost=tune_model2$best.parameters$cost,gamma = tune_model2$best.parameters$gamma,
probability=T)
train_p2<-predict(train_svm2,train[,-1],probability = T)
ggplot(train,aes(dates,train$S1)) + geom_path(col="red") +
geom_path(aes(dates,train_p2),col="blue") +
scale_y_continuous(name = "S1 predict") + scale_x_date(name = "Date")
RMSE_model2<-RMSE(train_p2,train$S1)
R2_model2<-R2(train_p2,train$S1)
# RBF kernel SVM model tuning grid 3
tune_model3<-tune.svm(S1~S2+S3+S7+S8,data = train[,-1],type="eps",kernel="radial",
gamma= seq(0.001,0.5,by=0.001),cost =seq(0.1,7,by=0.5),
tunecontrol= tune.control(sampling ="cross",cross = 10))
tune.model3<-as.data.frame(tune_model3$performances)
plot_ly(x=tune.model3$gamma,y=tune.model3$cost,z=tune.model3$error,type="contour") %>%
layout(xaxis=list(title="Gamma"),yaxis=list(title="Cost"),legend=list(title="error"))
train_svm3<-svm(S1~S2+S3+S7+S8,data = train[,-1],type= "eps",
kernel="radial", cost=4.6,gamma = 0.004,probability=T)
train_p3<-predict(train_svm3,train[,-1],probability = T)
ggplot(train,aes(dates,train$S1)) + geom_path(col="red") +
geom_path(aes(dates,train_p3),col="blue") +
scale_y_continuous(name = "S1 predict") + scale_x_date(name = "Date")
RMSE_model3<-RMSE(train_p3,train$S1)
R2_model3<-R2(train_p3,train$S1)
# Summary of SVM results
rmse<-rbind(RMSE_model,RMSE_model2,RMSE_model3)
r2<-rbind(R2_model,R2_model2,R2_model3)
summary<-cbind.data.frame(rmse,r2)
# Predict S1 from test data using SVM model 2
predict_svm<-predict(train_svm2,test[,-1])
testdate <- as.Date(test$date,'%m/%d/%Y')
train$S1<-as.data.frame(train$S1)
# Write S1 output to predictions.csv
# Linear model plot
ggplot(test,aes(testdate,predict_lm_test$fit)) + geom_path(colour="red") +
geom_path(aes(dates,train$S1),colour="blue") +
scale_y_continuous(name = "S1 predict") + scale_x_date(name = "Date")
# SVM model plot
ggplot(test,aes(testdate,predict_svm)) + geom_path(colour="red") +
geom_path(aes(dates,train$S1),colour="blue") +
scale_y_continuous(name = "S1") + scale_x_date(name = "Date",date_breaks= "2 weeks")
svm_output<-cbind(as.data.frame(test$date),predict_svm)
colnames(svm_output)<-c("Date","Value")
write.csv(svm_output,file="predictions.csv",append = F)
#### END######
|
d63e9b1975cfbe061a4cb3d97824b5080663871a
|
853ae909d834e08db2fb744222638effcecb8bca
|
/Solution Linear Regression (1).R
|
f385b7ceffab5e696c62e376144bed2ddaadae1b
|
[] |
no_license
|
sanchita21/Linear-Regression-with-R-Assignment
|
a32f23193ed1e132fc8787885736ce8b5fc4f81b
|
01cabad2c7dbdf5bd8c6ec1bd7b33bf711252d3b
|
refs/heads/master
| 2021-01-02T17:52:02.321357
| 2020-02-11T10:16:50
| 2020-02-11T10:16:50
| 239,731,513
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,836
|
r
|
Solution Linear Regression (1).R
|
#Boston Pricing case study
setwd("D:\\")
##Loading Data
prices<-read.csv("boston_prices.csv",header=TRUE,stringsAsFactors=FALSE)
##Checking Data Characteristics
dim(prices)
str(prices)
head(prices)
names(prices)
#summary statistics
summary(prices)
#Missing values treatment
colSums(is.na(prices)) #MEDV has a lot of missing values
summary((prices$MEDV))
prices$MEDV[is.na(prices$MEDV)]<-mean(prices$MEDV,na.rm=TRUE)
#Outlier plots
par(mfrow=c(2,7)) #This allows you to plot 14 charts on a single page; It is optional.
list<-names(prices) #Store the names of the dataset in a list format
list<-list[-4]
for(i in 1:length(list)) #Plot the boxplots of all variables and shortlist which ones need outlier treatment.
{
boxplot(prices[,list[i]],main=list[i])
}
#Restore the par parameters to normal
dev.off()
#In this solution, We have replaced the outlier values by the median values
#You can decide to replace by max or mean values based on business objectives
#Outlier treatment
for(i in 1:length(list)) ##For loop to replace all the outlier values with the mean value ; if you want you can replace with median value as well.
{
x<-boxplot(prices[,list[i]])
out<-x$out
index<-which(prices[,list[i]] %in% x$out)
prices[index,list[i]]<-mean(prices[,list[i]])
rm(x)
rm(out)
}
#Exploratory analysis
library(ggplot2)
#Study the histogram of the DV and the transformed histogram
hist(prices$MEDV)
#hist(prices$log_MEDV) # once you have created the log transformations below
#You can look at the correlation between each IDV and the DV
#An eg :
ggplot(prices,aes(x=MEDV,y=LSTAT)) +geom_point()
ggplot(prices,aes(x=MEDV,y=DIS)) +geom_point()
ggplot(prices,aes(x=MEDV,y=AGE)) +geom_point()
#Inorder to quicken the process, lets write a function :
#Below is a function that gives you the correlation values between all IDV's and the DV
#Simply taking a look at the output of this function, you can quickly shortlist
#Which all IDV's are correlated to the DV
#Function to get the list of correlations between : DV and the IDV's
list1<-list[-13]
for(i in 1:length(list1))
{
x<-cor(prices$MEDV,prices[list1[i]])
print(x)
}
#Significant variables are : B LSTAT AGE X.rooms.dwelling nitric.oxides.concentration INDUS
#You can also try to use data transformations
#Log transformations
#Create the log transformation for all variables
prices$log_CRIM<-log(prices$CRIM)
prices$log_ZN<-log(prices$ZN)
prices$log_NOX<-log(prices$nitric.oxides.concentration)
prices$log_RM<-log(prices$X.rooms.dwelling)
prices$log_AGE<-log(prices$AGE)
prices$log_DIS<-log(prices$DIS)
prices$log_RAD<-log(prices$RAD)
prices$log_TAX<-log(prices$TAX)
prices$log_PTRATIO<-log(prices$PTRATIO)
prices$log_B<-log(prices$B)
prices$log_LSTAT<-log(prices$LSTAT)
prices$log_MEDV<-log(prices$MEDV) #DV
prices$log_INDUS<-log(prices$INDUS)
#Refer to the profiling excel sheet to see all the correlations documented
#Function to get the list of correlations between : log_DV and log of IDV's
list_log<-names(prices)[c(15:25,27)]
for(i in 1:length(list_log))
{
xlog<-cor(prices$log_MEDV,prices[list_log[i]])
print(xlog)
}
#Function to get the list of correlations between : log_DV and IDV's
list_log_DV<-names(prices)[1:13]
list_log_DV<-list_log_DV[-4]
for(i in 1:length(list_log_DV))
{
xlogdv<-cor(prices$log_MEDV,prices[list_log_DV[i]])
print(xlogdv)
}
sampling<-sort(sample(nrow(prices), nrow(prices)*.7))
#Select training sample
train<-prices[sampling,]
test<-prices[-sampling,]
##Building SimpLe Linear Regression Model
#Metrics :
#Rsquare
#Coefficients
#P values : Significance levels of the IDV's
#Residuals distribution
#Factor variables as IDV's
#All good models
Reg<-lm(log_MEDV~CRIM+INDUS+RAD+TAX+B+
Charles.River.dummy.variable+
DIS+ZN+PTRATIO+LSTAT+AGE+X.rooms.dwelling+nitric.oxides.concentration,data=train)
summary(Reg)
#Getting the formula
formula(Reg)
#Remove insignificant variables :
Reg1<-lm(log_MEDV~
Charles.River.dummy.variable+
DIS+PTRATIO+LSTAT+AGE+X.rooms.dwelling+nitric.oxides.concentration,data=train)
summary(Reg1)
#Reg2 : remove insignificant values
Reg2 <- lm(log_MEDV ~CRIM+INDUS+RAD+TAX+B+
Charles.River.dummy.variable+
DIS+ZN+PTRATIO+LSTAT+X.rooms.dwelling+nitric.oxides.concentration, data=train)
summary(Reg2)
#Reg3 _ remove insignificant values
Reg3 <- lm(log_MEDV ~CRIM+RAD+
Charles.River.dummy.variable+
DIS+ZN+PTRATIO+LSTAT+nitric.oxides.concentration, data=train)
summary(Reg3)
#Some other combination
Reg4<-lm(log_MEDV~INDUS +ZN + X.rooms.dwelling + LSTAT+CRIM + Charles.River.dummy.variable,data=train)
summary(Reg4)
#The best model happens to be : Reg3
##Getting predicted values
predicted<-predict(Reg3)
plot(predicted)
length(predicted)
##Finding Residuals
residuals<-resid(Reg3)
plot(residuals)
length(residuals)
##Plotting Residuals vs Predicted Values
##Checking Heteroskedastcity
##There should be no trend between predicted values and residual values
plot(predicted,residuals,abline(0,0))
#You can notice that there seems to be an inverse pattern for some points
#So this model may not be the preferred model.
#atttching predicted values to test data
predicted<-predict(Reg3,newdata=test)
length(predicted)
test$p<-predicted
#Calculating error in the test dataset - (actual - predicted)/actual values
test$error<-(test$log_MEDV-test$p)/test$log_MEDV
mean(test$error)*100 #you get to know the average error in the given dataset
##Plotting actual vs predicted values
plot(test$p,col="blue",type="l")
lines(test$log_MEDV,col="red",type="l")
#checking for Correlation between variables
library(car)
vif(Reg3)
#You can drop variables if they have a vif>10 ; means high correlation between variables
|
6b125700f1e78819f6bceaa9ee41a8c8524341fb
|
aec6400c6573bf8f25c23719103daba0bf83a17b
|
/scripts/Sources.R
|
b00fcddeeeb9bd748342195de9502342dea91d02
|
[] |
no_license
|
alexsb1/timeline
|
c60d5054eca67d5339d8cbbd7e9b1a7f53ef30be
|
a4f0a01c525ab2c5145bd7d9d64d2423f900a107
|
refs/heads/master
| 2021-11-30T00:20:58.488688
| 2021-11-25T13:32:18
| 2021-11-25T13:32:18
| 232,618,413
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,621
|
r
|
Sources.R
|
# Alex Searle-Barnes
#
# https://github.com/alexsb1/timeline
#
# This file aggregates a list of sources used in this Timeline project
#
library(tidyverse)
# Load Individual plots to import the dataframes used.
# ATTENTION - take care to avoid this overwriting newer dataframes with the same name.
source("scripts/IndividualPlots.R")
#It is necessary to manually update these values - otherwise they remain interactive (and return NA) on shinyapps.io!
paste("Shiny upload date", now())
paste("Git commit version", system("git rev-parse --short HEAD", intern = TRUE))
paste("Shiny bundleID", deployments(appPath = ".")[8] %>% as.numeric(.))
paste("Shiny upload", deployments(appPath = ".")[10] %>% as.numeric(.))
uploadDate <- NULL
uploadDate <- paste("Shiny upload date 2020-08-26 17:56:47")%>% append(uploadDate, .)
uploadDate <- paste("Git commit version 58944a5") %>% append(uploadDate, .)
uploadDate <- paste("Shiny bundleID 3556124") %>% append(uploadDate, .)
uploadDate <- paste("Shiny upload 1598368987.80569") %>% append(uploadDate, .)
manualRefs <- c("LR04 stack, Lisiecki, L. E., and M. E. Raymo (2005), A Pliocene-Pleistocene stack of 57 globally distributed benthic d18O records, Paleoceanography,20, PA1003, doi:10.1029/2004PA001071.",
"https://www.visualcapitalist.com/history-of-pandemics-deadliest; date accessed 21 July 2020",
"http://www.climatedata.info; date accessed 21 July 2020"
)
#Not yet included
# CO2_ppm_800000 #no reference column
# eonPlot #no reference column
# epochPlot #no reference column
# eraPlot #no reference column
# geoTimeScale #no reference column
# historicEvents #no reference column
# LR04 #no reference column
# pandemics #no reference column
# phanerozoicCO2 #no reference column
# supercontinents #no reference column
# worldPop #no reference column
# construct a single reference list
referenceList <-NULL
referenceList <- bondEvents$Reference %>% append(referenceList, .)
referenceList <- climateEvents$reference %>% append(referenceList, .)
referenceList <- historicTimePeriods$Reference %>% append(referenceList, .)
referenceList <- meteorites$Reference %>% append(referenceList, .)
referenceList <- milankovitch$Source %>% append(referenceList, .)
referenceList <- monarchs$reference %>% append(referenceList, .)
referenceList <- tempAnom$reference %>% append(referenceList, .)
referenceList <- volcanoes$Reference %>% append(referenceList, .)
referenceList <- manualRefs %>% append(referenceList, .)
referenceList <- uploadDate %>% append(referenceList, .)
referenceList <- referenceList %>% unique(.) %>% as.list(.)
|
50b24f3da623db4bdd815b5697e4302bf55ad0de
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MXM/examples/big.fbed.reg.Rd.R
|
4044483ccc71cfbebd213f44e2fb42b376444ecd
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 629
|
r
|
big.fbed.reg.Rd.R
|
library(MXM)
### Name: Forward Backward Early Dropping selection regression for big data
### Title: Forward Backward Early Dropping selection regression for big
### data
### Aliases: big.fbed.reg
### ** Examples
## Not run:
##D #simulate a dataset with continuous data
##D x <- matrix( runif(10^6 * 50, 1, 100), ncol = 50 )
##D require(bigmemory)
##D dataset <- bigmemory::as.big.matrix(x)
##D #define a simulated class variable
##D target <- rt(10^6, 10)  # sample size must match nrow(dataset)
##D a1 <- big.fbed.reg(target, dataset, test = "testIndFisher")
##D y <- rpois(10^6, 10)
##D a2 <- big.fbed.reg(y, dataset, test = "testIndPois")
## End(Not run)
|
0f7076adb4f548b5a4e60dd289675dc319832b63
|
03938cbf362b1fe9b2e398f501d214d6b2fbfec2
|
/dimensionality_reduction.R
|
faae07b15f56605875a48a472a2a27f79a385f39
|
[] |
no_license
|
amr15/examples
|
616a2628f20592bc3bb7297c3243714b9f33d409
|
85a5b459e169793de14d79e07ed07959448f7f4a
|
refs/heads/master
| 2020-05-18T15:34:05.395490
| 2019-03-25T18:51:20
| 2019-03-25T18:51:20
| 184,502,189
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,139
|
r
|
dimensionality_reduction.R
|
# install.packages('stylo')  # uncomment to install on first use
library('stylo')
library(sparsesvd)
library(Rtsne)
#' Performs sparse single value decomposition on the scaled UMI values (dimensions of genes x cells))
#' Default rank of matrix is set to 20
#' Coordinates from running TSNE on the 'v' matrix are saved back into the original Seurat object
#' Right singular vectors saved as new "PCs"
#' Input is a seurat object; Output is original seruat object with modified TNSE coordinates and "PCs"
svd <- function(tissue){ # note: this masks base::svd while defined
decomp <-sparsesvd(as(tissue@scale.data,'sparseMatrix'), rank=20L)
tissue@dr$pca@cell.embeddings <- decomp$v
projection <-Rtsne(decomp$v)
tissue@dr$tsne@cell.embeddings <- projection$Y
colnames(tissue@dr$tsne@cell.embeddings)<-c('tSNE_1', 'tSNE_2')
rownames(tissue@dr$tsne@cell.embeddings)<-colnames(tissue@scale.data)
colnames(tissue@dr$pca@cell.embeddings) <- paste0('PC', 1:20)
rownames(tissue@dr$pca@cell.embeddings)<-colnames(tissue@scale.data)
return(tissue) }
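# Example call (a sketch; assumes `pbmc` is a Seurat v2-style object whose
# @scale.data, @dr$pca and @dr$tsne slots are already populated, as the
# function above expects):
# pbmc <- svd(pbmc)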
|
ccd36b8d5d711a7b63bf81a5c542aa59a0c9cf66
|
70c312bc06e3d9eab59eb26cca2118838aefcdda
|
/01-SODA-download.R
|
3ffdd1b85648b4f84e55babb7193432ea98f7d46
|
[] |
no_license
|
roliveros-ramos/envData
|
4d1ec9f85265558c108e1f654e2728025fdf0fed
|
796a09205b2bafb33d83bffb6323bb388d1fde81
|
refs/heads/master
| 2022-05-16T16:43:03.709174
| 2022-05-02T16:57:28
| 2022-05-02T16:57:28
| 96,636,850
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 730
|
r
|
01-SODA-download.R
|
library(kali)
library(ncdf4)
library(osmose.fishmip)
source("auxiliar_functions.R")
years = 1980:2015
rawDir = "raw/soda"
outputDir = "input/soda"
for(year in years) {
# add try to all functions!
DateStamp("Processing year", year)
file = download_soda(year=year, output=rawDir)
process_soda(file, output=outputDir)
file.remove(file)
cat("File downloaded on", date(), file = file)  # leave a small marker file in place of the raw download
}
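# A sketch of the "add try" TODO above: wrapping each year in tryCatch()
# so a single failed download does not abort the loop (the same helpers,
# download_soda() and process_soda(), are assumed):
# for(year in years) {
#   DateStamp("Processing year", year)
#   tryCatch({
#     file = download_soda(year = year, output = rawDir)
#     process_soda(file, output = outputDir)
#     file.remove(file)
#   }, error = function(e) message("Year ", year, " failed: ", conditionMessage(e)))
# }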
# vars = c("sst", "sbt", "sss", "mlt")
# files = dir(path=outputDir)
#
# for(varid in vars) {
#
# DateStamp("Processing", varid)
# output = gsub(x=files[1], patt="2D_[0-9]*", rep=varid)
# out1 = nc_rcat(filenames = file.path(inputDir, files), varid=varid,
# output=file.path("input", output))
#
# }
|
5dadb238c85029d0611b26a66d14dac1772c4d22
|
a5bd747b4d1ac2800d355b61adee8765d1cc663f
|
/R/combineParameters.R
|
285b9942bd4e0d00ad6a5058303f4bb760c07cef
|
[] |
no_license
|
bonata/HMSC
|
d97cdf0a597565cbe9cf08ed7fe93c35f620a320
|
567f2f7cd118f6c405a833e56b9b5f64a9e2abf2
|
refs/heads/master
| 2020-04-03T13:56:05.314479
| 2018-09-30T16:17:52
| 2018-09-30T16:17:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,102
|
r
|
combineParameters.R
|
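# Rescales fitted parameters back to the original data scale: the trait
# (TrScalePar) and covariate (XScalePar) centering/scaling is undone for
# Gamma, Beta and iV (adjusting the intercept terms where present), iV is
# inverted to V, sigma = 1/iSigma, and the full parameter list is returned.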
combineParameters = function(Beta,Gamma,iV,rho,iSigma,Eta,Lambda,Alpha,Psi,Delta, rhopw){
for(p in 1:self$nt){
m = self$TrScalePar[1,p]
s = self$TrScalePar[2,p]
if(m!=0 || s!=1){
Gamma[,p] = Gamma[,p]/s
if(!is.null(self$TrInterceptInd)){
Gamma[,self$TrInterceptInd] = Gamma[,self$TrInterceptInd] - m*Gamma[,p]
}
}
}
for(k in 1:self$nc){
m = self$XScalePar[1,k]
s = self$XScalePar[2,k]
if(m!=0 || s!=1){
Beta[k,] = Beta[k,]/s
Gamma[k,] = Gamma[k,]/s
if(!is.null(self$XInterceptInd)){
Beta[self$XInterceptInd,] = Beta[self$XInterceptInd,] - m*Beta[k,]
Gamma[self$XInterceptInd,] = Gamma[self$XInterceptInd,] - m*Gamma[k,]
}
iV[k,] = iV[k,]*s
iV[,k] = iV[,k]*s
}
}
V = chol2inv(chol(iV))
sigma = 1/iSigma
par = list(Beta=Beta, Gamma=Gamma, V=V, rho=rhopw[rho,1], sigma=sigma, Eta=Eta, Lambda=Lambda, Alpha=Alpha, Psi=Psi, Delta=Delta)
return(par)
}
Hmsc$set("private", "combineParameters", combineParameters, overwrite=TRUE)
|
c5aa520dc557bfd4e584357e6f73e2ff12b8dcb8
|
f64c5492492cf58093ff3d72ecf75a4de143114f
|
/rprojects-master/train data partition.R
|
0750c1ed773304dbbbe7b2a34b5822c7f21fb064
|
[] |
no_license
|
gauravmadarkal/R-Projects
|
1e1beb45eee43aa09267a8e22fff91ce3958500b
|
3e3e39e309a9b5fe5685da148242736fa4e8f2c7
|
refs/heads/master
| 2021-02-10T12:22:59.496974
| 2020-03-02T14:02:59
| 2020-03-02T14:02:59
| 244,381,544
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,420
|
r
|
train data partition.R
|
library(caret)
reviews_data=read.csv("googleplaystore_user_reviews.csv",stringsAsFactors = FALSE)
length(which(!complete.cases(reviews_data)))
refined_reviews_data=na.omit(reviews_data)
#refined_reviews_data = refined_reviews_data[1:10000,]
set.seed(4)
indexes = createDataPartition(refined_reviews_data$Sentiment,times = 1,p = 0.7,list = FALSE)
train1_data= refined_reviews_data[indexes,]
test1_data = refined_reviews_data[-indexes,]
indexes2 = createDataPartition(train1_data$Sentiment,times = 1,p = 0.7,list = FALSE)
train2_data= train1_data[indexes2,]
test2_data = train1_data[-indexes2,]
indexes3 = createDataPartition(train2_data$Sentiment,times = 1,p = 0.75,list = FALSE)
train3_data= train2_data[indexes3,]
test3_data = train2_data[-indexes3,]
indexes4 = createDataPartition(train3_data$Sentiment,times = 1,p = 0.75,list = FALSE)
train4_data= train3_data[indexes4,]
test4_data = train3_data[-indexes4,]
indexes5 = createDataPartition(train4_data$Sentiment,times = 1,p = 0.75,list = FALSE)
train5_data= train4_data[indexes5,]
test5_data = train4_data[-indexes5,]
indexes6 = createDataPartition(train5_data$Sentiment,times = 1,p = 0.75,list = FALSE)
train_data= train5_data[indexes6,]
test_data = train5_data[-indexes6,]
library(quanteda)
train_data.tokens=tokens(train_data$Translated_Review,what="word",remove_numbers=TRUE,remove_punct=TRUE,remove_symbols=TRUE,remove_hyphens=TRUE)
train_data.tokens=tokens_tolower(train_data.tokens)
#View(train_data.tokens)
train_data.tokens=tokens_select(train_data.tokens,stopwords(),selection = "remove")
train_data.tokens=tokens_wordstem(train_data.tokens,language = "english")
train_data.dfm=dfm(train_data.tokens,tolower = FALSE,remove=stopwords())
train_token_matrix=as.matrix(train_data.dfm)
library(caret)
train_tokens.df= cbind(Label=train_data$Sentiment,as.data.frame(train_token_matrix))
names(train_tokens.df) <- make.names(names(train_tokens.df))
#set.seed(48743)
cv.folds = createMultiFolds(train_data$Sentiment,k = 10,times = 3)
cv.cntrl = trainControl(method = "repeatedcv", number = 10,repeats = 3, index = cv.folds)
#install.packages("doSNOW")
library(doSNOW)
start.time = Sys.time()
cl = makeCluster(2 ,type = "SOCK")
registerDoSNOW(cl)
rpart.cv.1 = train(Label ~ .,data = train_tokens.df,method = "rpart",
trControl = cv.cntrl, tuneLength = 7)
stopCluster(cl)
totaltime = Sys.time() - start.time
totaltime
rpart.cv.1
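# The repeated createDataPartition() calls above can be collapsed into a
# loop. A minimal sketch (nested_partition is a hypothetical helper, not
# part of the original script); note it keeps only the final training
# subset and discards the intermediate test sets:
nested_partition <- function(data, props) {
  for (p in props) {
    idx <- createDataPartition(data$Sentiment, times = 1, p = p, list = FALSE)
    data <- data[idx, ]
  }
  data
}
train_data2 <- nested_partition(refined_reviews_data, c(0.7, 0.7, 0.75, 0.75, 0.75, 0.75))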
|
7ea93106cb8d5e8e0a25685c791ac3965c9bdd61
|
54261dc38541f8c291261318f2bd1a0d5e06b16f
|
/R/trecase.sex.A.R
|
d79690807f1bb9d831030208135442a889f79bdd
|
[] |
no_license
|
cran/rxSeq
|
150592bf7a71d841c959367487e00a50d062266d
|
d46a886029c778bbdb902e778fddf2bee7eb35c2
|
refs/heads/master
| 2020-06-15T09:00:52.824343
| 2016-12-01T11:45:14
| 2016-12-01T11:45:14
| 75,307,291
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,833
|
r
|
trecase.sex.A.R
|
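# Fits the joint model by alternating optimization: optim() updates the
# regression parameters while 1-D optimize() calls update the dispersion
# parameters iphi and theta, until the log-likelihood improves by less
# than `eps` or `maxiter` iterations are reached. On numerical failure
# the fit restarts from a random perturbation of `start`, up to
# tech.ctrl$maxtrial attempts; NULL is returned if every attempt fails.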
`trecase.sex.A` =
function(yi, ind.lst, X, sex, ni, ni0, xs, start, iphi=1, theta=1, maxiter=100, eps=1E-3, tech.ctrl){
triali = 0
par0 = start
repeat{
triali = triali + 1
tag = tryCatch({
iphi_i = iphi
theta_i = theta
log.lik0 = ll.jRCI.sex.A(par0, yi=yi, ind.lst=ind.lst, X=X,
ni=ni, ni0=ni0, xs=xs, iphi=iphi_i, theta=theta_i)
for(i in 1:maxiter){
out = optim(par0, ll.jRCI.sex.A, yi=yi, ind.lst=ind.lst, X=X,
ni=ni, ni0=ni0, xs=xs, iphi=iphi_i, theta=theta_i)
log.lik1 = ll.jRCI.sex.A(par0, yi=yi, ind.lst=ind.lst, X=X,
ni=ni, ni0=ni0, xs=xs, iphi=iphi_i, theta=theta_i)
older = c(rep(par0[1:3], each=2), par0[(4:5)], 0, par0[6], 0, log.lik1, iphi_i, theta_i)
par0 = out$par
iphi_i = optimize(f=ll.tRCI.iphi.A, c(tech.ctrl$iphi_l, tech.ctrl$iphi_u),
yi=yi, ind.lst=ind.lst, X=X, twosex=TRUE, betas=older[c(1:8, 10)])$minimum
theta_i = optimize(f=ll.aRC.theta.A, c(tech.ctrl$theta_l, tech.ctrl$theta_u),
bs1=older[c(3, 5)], bs2=older[c(4, 6)], ni=ni, ni0=ni0, twosex=TRUE, sex=sex, xs=xs)$minimum
log.lik1 = ll.jRCI.sex.A(par0, yi=yi, ind.lst=ind.lst, X=X,
ni=ni, ni0=ni0, xs=xs, iphi=iphi_i, theta=theta_i)
newer = c(rep(par0[1:3], each=2), par0[(4:5)], 0, par0[6], 0, log.lik1, iphi_i, theta_i)
if((log.lik0 - log.lik1)<eps)break;
log.lik0 = log.lik1
}
if(log.lik0 < log.lik1){
ret = older
}else{
ret = newer
}
0
}, error=function(e) {
1
})
if((tag == 0) | (triali >= tech.ctrl$maxtrial))break;
par0 = rnorm(length(par0), start, 1)
}
if(tag == 1){
ret = NULL
}
return(ret)
}
|
1a319f099f7847b8341e460a3b62f32b372eaaa2
|
6c11f430941d2a0c7cc6a33d843ffa6a95e67068
|
/man/grattan_yellow.Rd
|
8cbf903b82931a0beb360d48f5c7e59d642751a5
|
[] |
no_license
|
jonathananolan/grattantheme
|
183bc038cb55e6b3434159315e64f8e521775fdf
|
7184bf045f95c0d536642c973b834da8c7a4ac51
|
refs/heads/master
| 2020-04-25T07:55:09.692320
| 2019-02-26T00:03:58
| 2019-02-26T00:03:58
| 172,628,526
| 1
| 0
| null | 2019-02-26T03:10:27
| 2019-02-26T03:10:26
| null |
UTF-8
|
R
| false
| true
| 347
|
rd
|
grattan_yellow.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/colours.R
\docType{data}
\name{grattan_yellow}
\alias{grattan_yellow}
\title{Hex code for the colour: Grattan yellow (butternut pumpkin soup)}
\format{An object of class \code{character} of length 1.}
\usage{
grattan_yellow
}
\description{
#FFC35A
}
\keyword{datasets}
|
7354c8ed7c3971eb51a4b092e25d499080af45c7
|
7dae170955465766bf4bb3e1c0f23868c12043b4
|
/Behaviour/RuralUrban-Tests.R
|
640292dcab76d552fab860a1ee327646c634f504
|
[] |
no_license
|
JJFosterLab/light-pollution
|
1d938a4ec59ee40222e8c9b4848ae46e7917af66
|
2dadc3ab3034251e190b04fd9b3cce4cc618669a
|
refs/heads/master
| 2023-06-20T04:32:00.963969
| 2021-07-23T09:35:48
| 2021-07-23T09:35:48
| 337,023,000
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,367
|
r
|
RuralUrban-Tests.R
|
rm(list = ls())
graphics.off()
#################################################################
# Useful Functions #
#################################################################
source(paste0(Sys.getenv('HOME'),'/Dropbox/My Papers/Light Pollution/LP_PlotFunctions', '.R'))
#####################################################################
# Install and Load Packages #
#####################################################################
Instalload(c('circular', 'beeswarm', 'onewaytests', 'muStat'))
# Heading data that will be used in the paper
lp.data <- read.table(file = paste0(Sys.getenv('HOME'),'/Dropbox/My Papers/Light Pollution/LPdataOrganised20200505', '.txt'))
head(lp.data)
lp.data$Beetle <- as.factor(lp.data$Beetle)
lp.data$Condition <- as.factor(lp.data$Condition)
#reorganise conditions
lp.data$Condition <- relevel(lp.data$Condition, 'EyesCoveredWallsUrban')
lp.data$Condition <- factor(lp.data$Condition, levels(lp.data$Condition)[c(1,2:3,10:11,5:6,14,4,8:9,7,12,13)])
cbind(levels(lp.data$Condition))#is this what you want?
# Mean vector length data that will be used in the paper
lp.data.rho <- read.table(paste0(Sys.getenv('HOME'),'/Dropbox/My Papers/Light Pollution/LPdataRho20200505', '.txt') , header = T)#, sep = '\t')
head(lp.data.rho)
lp.data.rho$Beetle <- as.factor(lp.data.rho$Beetle)
lp.data.rho$Condition <- as.factor(lp.data.rho$Condition)
#reorganise conditions
lp.data.rho$Condition <- relevel(lp.data.rho$Condition, 'EyesCoveredWallsUrban')
lp.data.rho$Condition <- factor(lp.data.rho$Condition, levels(lp.data.rho$Condition)[c(1,2:3,10:11,5:6,14,4,8:9,7,12,13)])
cbind(levels(lp.data.rho$Condition))#is this what you want?
# Mean vector length data that will be used in the paper
lp.data.mu <- read.table(paste0(Sys.getenv('HOME'),'/Dropbox/My Papers/Light Pollution/LPdataRho20200528', '.txt') , header = T)#, sep = '\t')
head(lp.data.mu)
lp.data.mu$Beetle <- as.factor(lp.data.mu$Beetle)
lp.data.mu$Condition <- as.factor(lp.data.mu$Condition)
#reorganise conditions
lp.data.mu$Condition <- relevel(lp.data.mu$Condition, 'EyesCoveredWallsUrban')
lp.data.mu$Condition <- factor(lp.data.mu$Condition, levels(lp.data.mu$Condition)[c(1,2:3,10:11,5:6,14,4,8:9,7,12,13)])
cbind(levels(lp.data.mu$Condition))#is this what you want?
#####################################################################
# Focus on Urban verus Rural #
#####################################################################
#Choose data that appears in Figure 1
UrbanRural <- subset(lp.data.rho, !grepl('Walls', Condition)&
(grepl('Moon', Condition) |
grepl('Stars', Condition) |
grepl('Overcast', Condition)) )
UrbanRural$sky <- with(UrbanRural, gsub('Rural','',Condition))
UrbanRural$sky <- with(UrbanRural, gsub('Urban','',sky))
UrbanRural$Condition <- factor(UrbanRural$Condition)
#####################################################################
# Location Tests #
#####################################################################
kt.UrbanRural <- kruskal.test(rho~Condition, data = UrbanRural)
# Kruskal-Wallis chi-squared = 20.701, df = 5, p-value = 0.0009224
# Post-hoc tests #
#remember condition order is important here
cbind(levels(UrbanRural$Condition))
# compare to reference #
# Rural
# MoonRural - OvercastRural
dt.Moon.Cloud.Rural <- subset(UrbanRural,
Condition == 'MoonRural' |
Condition == 'OvercastRural')
dt.Moon.Cloud.Rural$Condition <- factor(
dt.Moon.Cloud.Rural$Condition,
levels = c('MoonRural','OvercastRural'))
#MoonRural is alternative
wc.Moon.Cloud.Rural <- wilcox.test(rho ~ Condition,
data = dt.Moon.Cloud.Rural,
alternative = 'greater')
# print(wc.Moon.Cloud.Rural)
# W = 79, p-value = 0.0144
# StarsRural - OvercastRural
dt.Stars.Cloud.Rural <- subset(UrbanRural,
Condition == 'StarsRural' |
Condition == 'OvercastRural')
dt.Stars.Cloud.Rural$Condition <- factor(
dt.Stars.Cloud.Rural$Condition,
levels = c('StarsRural','OvercastRural'))
#StarsRural is alternative
wc.Stars.Cloud.Rural <- wilcox.test(rho ~ Condition,
data = dt.Stars.Cloud.Rural,
alternative = 'greater')
# print(wc.Stars.Cloud.Rural)
# W = 156, p-value = 0.006365
# Urban
# MoonUrban - OvercastUrban
dt.Moon.Cloud.Urban <- subset(UrbanRural,
Condition == 'MoonUrban' |
Condition == 'OvercastUrban')
dt.Moon.Cloud.Urban$Condition <- factor(dt.Moon.Cloud.Urban$Condition,
levels = c('MoonUrban','OvercastUrban'))
wc.Moon.Cloud.Urban <- wilcox.test(rho ~ Condition,
data = dt.Moon.Cloud.Urban,
alternative = 'greater')
# print(wc.Moon.Cloud.Urban)
# W = 63, p-value = 0.1763
# StarsUrban - OvercastUrban
dt.Stars.Cloud.Urban <- subset(UrbanRural,
Condition == 'StarsUrban' |
Condition == 'OvercastUrban')
dt.Stars.Cloud.Urban$Condition <- factor(dt.Stars.Cloud.Urban$Condition,
levels = c('StarsUrban', 'OvercastUrban'))
wc.Stars.Cloud.Urban <- wilcox.test(rho ~ Condition,
data = dt.Stars.Cloud.Urban,
alternative = 'greater')
# print(wc.Stars.Cloud.Urban)
# W = 78, p-value = 0.8359
# MoonRural - MoonUrban
dt.Moon.Rural.Urban <- subset(UrbanRural, Condition == 'MoonRural' | Condition == 'MoonUrban')
dt.Moon.Rural.Urban$Condition <- factor(dt.Moon.Rural.Urban$Condition,
levels = c('MoonUrban','MoonRural'))
wc.Moon.Rural.Urban <- wilcox.test(rho ~ Condition,
data = dt.Moon.Rural.Urban,
alternative = 'greater')
# print(wc.Moon.Rural.Urban)
# W = 34, p-value = 0.1237
# StarsRural - StarsUrban
dt.Stars.Rural.Urban <- subset(UrbanRural,
Condition == 'StarsRural' |
Condition == 'StarsUrban')
dt.Stars.Rural.Urban$Condition <- factor(dt.Stars.Rural.Urban$Condition,
levels = c('StarsUrban','StarsRural'))
wc.Stars.Rural.Urban <- wilcox.test(rho ~ Condition,
data = dt.Stars.Rural.Urban,
alternative = 'greater')
# print(wc.Stars.Rural.Urban)
# W = 136, p-value = 0.04296
# OvercastRural - OvercastUrban
dt.Cloud.Rural.Urban <- subset(UrbanRural,
Condition == 'OvercastRural' |
Condition == 'OvercastUrban')
dt.Cloud.Rural.Urban$Condition <- factor(dt.Cloud.Rural.Urban$Condition,
levels = c('OvercastUrban','OvercastRural'))
wc.Cloud.Rural.Urban <- wilcox.test(rho ~ Condition,
data = dt.Cloud.Rural.Urban,
alternative = 'greater')
# print(wc.Cloud.Rural.Urban)
# W = 11, p-value = 0.001045
# MoonUrban - OvercastRural
dt.Moon.Cloud.Urban.Rural <- subset(UrbanRural,
Condition == 'MoonUrban' |
Condition == 'OvercastRural')
dt.Moon.Cloud.Urban.Rural$Condition <-
factor(dt.Moon.Cloud.Urban.Rural$Condition,
levels = c('MoonUrban', 'OvercastRural'))
wc.Moon.Cloud.Urban.Rural <- wilcox.test(rho ~ Condition,
data = dt.Moon.Cloud.Urban.Rural,
alternative = 'greater')
# print(wc.Moon.Cloud.Urban.Rural)
# W = 91, p-value = 0.000525
# StarsUrban - OvercastRural
dt.Stars.Cloud.Urban.Rural <- subset(UrbanRural,
Condition == 'StarsUrban' |
Condition == 'OvercastRural')
dt.Stars.Cloud.Urban.Rural$Condition <-
factor(dt.Stars.Cloud.Urban.Rural$Condition,
levels = c('StarsUrban', 'OvercastRural'))
wc.Stars.Cloud.Urban.Rural <- wilcox.test(rho ~ Condition,
data = dt.Stars.Cloud.Urban.Rural,
alternative = 'greater')
# print(wc.Stars.Cloud.Urban.Rural)
# W = 162, p-value = 0.002643
# Exploratory: Moon vs Stars (Rural); not included in the adjusted p-values below
# MoonRural - StarsRural
dt.Moon.Stars.Rural <- subset(UrbanRural,
Condition == 'MoonRural' |
Condition == 'StarsRural')
dt.Moon.Stars.Rural$Condition <- factor(
dt.Moon.Stars.Rural$Condition,
levels = c('MoonRural','StarsRural'))
#MoonRural is alternative
wc.Moon.Stars.Rural <- wilcox.test(rho ~ Condition,
data = dt.Moon.Stars.Rural,
alternative = 'greater')
#W = 139, p-value = 0.04529
UrbanRural.wc.p <- c(wc.Moon.Cloud.Rural$p.value,
wc.Stars.Cloud.Rural$p.value,
wc.Moon.Cloud.Urban$p.value,
wc.Stars.Cloud.Urban$p.value,
wc.Moon.Rural.Urban$p.value,
wc.Stars.Rural.Urban$p.value,
wc.Cloud.Rural.Urban$p.value,
wc.Moon.Cloud.Urban.Rural$p.value,
wc.Stars.Cloud.Urban.Rural$p.value)
names(UrbanRural.wc.p) <- c('wc.Moon.Cloud.Rural',
'wc.Stars.Cloud.Rural',
'wc.Moon.Cloud.Urban',
'wc.Stars.Cloud.Urban',
'wc.Moon.Rural.Urban',
'wc.Stars.Rural.Urban',
'wc.Cloud.Rural.Urban',
'wc.Moon.Cloud.Urban.Rural',
'wc.Stars.Cloud.Urban.Rural')
cbind(round(p.adjust(UrbanRural.wc.p, 'BH'),5))
# wc.Moon.Cloud.Rural 0.03361 #Moon contributes
# wc.Stars.Cloud.Rural 0.02228 #Stars contribute
# wc.Moon.Cloud.Urban 0.20573 #
# wc.Stars.Cloud.Urban 0.83591 #
# wc.Moon.Rural.Urban 0.17322 #
# wc.Stars.Rural.Urban 0.07517 #Urban contributes
# wc.Cloud.Rural.Urban 0.00731 #Urban contributes
# wc.Moon.Cloud.Urban.Rural 0.00470 #Urban contributes
# wc.Stars.Cloud.Urban.Rural 0.00793 #Urban contributes
UrbanRural.prent <- with(UrbanRural, prentice.test(
y = rho,
groups = Indirect,
blocks = sky,
alternative = 'greater'))#asking specifically whether the light-polluted conditions were more oriented
# statistic: chi-square = 8.2491, df = 1, p-value = 0.002039
median(subset(UrbanRural, Indirect == F)$rho)#no light pollution
# 0.8673096
median(subset(UrbanRural, Indirect == T)$rho)#light pollution
# 0.9534928
#On average, orientation precision was higher in the presence of light pollution
bf.test(rho ~ Condition, data = UrbanRural)
# statistic : 7.489311
# num df : 5
# denom df : 39.36693
# p.value : 5.10413e-05
#certainly some differences in variance, let's ignore these for now
# Rural
# MoonRural - StarsRural
# dt.Moon.Stars.Rural <- subset(UrbanRural,
# Condition == 'MoonRural' |
# Condition == 'StarsRural')
# dt.Moon.Stars.Rural$Condition <- factor(
# dt.Moon.Stars.Rural$Condition,
# levels = c('MoonRural','StarsRural'))
# #MoonRural is alternative
bf.Moon.Stars.Rural <- bf.test(rho ~ Condition,
data = dt.Moon.Stars.Rural)
#####################################################################
# Heading Choice #
#####################################################################
MMRayleigh.test <- function(mang, mvec, method = 'simulation'){
mrstar <- function(ang,vec){
nn <- length(ang)#number of vectors
mv.rank <- rank(vec)#rank of each vector
ma.rad <- ang*pi/180#angles in radians
x.rank <- sum(mv.rank*cos(ma.rad))#x projection of angles by rank
y.rank <- sum(mv.rank*sin(ma.rad))#y projection of angles by rank
MM.R <- sqrt(x.rank^2+y.rank^2)#Resultant vector
MM.mv <- MM.R/nn#Normalised resultant vector (not used)
MM.rstar <- MM.R/(nn^(3/2))#Transformation used as test statistic
return(MM.rstar)
}
nn <- length(mang)#number of vectors
mv.rank <- rank(mvec)#rank of each vector
ma.rad <- mang*pi/180#angles in radians
x.rank <- sum(mv.rank*cos(ma.rad))#x projection of angles by rank
y.rank <- sum(mv.rank*sin(ma.rad))#y projection of angles by rank
MM.R <- sqrt(x.rank^2+y.rank^2)#Resultant vector
MM.mv <- MM.R/nn#Normalised resultant vector (not used)
MM.rstar <- MM.R/(nn^(3/2))#Transformation used as test statistic
MM.ma <- atan2(y.rank,x.rank)*180/pi#Resultant angle
sig.sq <- nn*(nn+1)*(2*nn+1)/12#estimated variance of rstar distribution
#The p-value calculation is the slow step.
#Inspired by
#https://github.com/ruthcfong/Family-of-Rayleigh-Statistics
#The Matlab version takes a permutation approach, which is quite
#conservative and focusses on whether vectors are longer near the
#population mean. To calculate answers closer to Moore's original,
#I use a semi-empirical simulation method, reassigning the vector
#lengths to angles drawn uniformly on the circle.
num.samples = 1e6
perm.rstar <- rep(NA, num.samples)
for(i in 1:num.samples){
  if(method == 'permutation'){
    perm.rstar[i] <- mrstar(mang, sample(mvec))
  }else{
    perm.rstar[i] <- mrstar(runif(nn)*360 - .Machine$double.eps, mvec)
  }
}
# #faster as a table?
# permi <- sort(rep(1:num.samples,nn))
# dtf <- data.frame(i = permi,
# aa = runif(nn* num.samples)*360-.Machine$double.eps,
# mv = rep(mvec,num.samples))
p.val <- sum(perm.rstar>MM.rstar)/sum(!is.na(perm.rstar))
# p.val <- pchisq(MM.rstar^2/sig.sq, nn)#probability of > rstar
result <- list(`α*` = MM.ma, R = MM.R, `R*` = MM.rstar, n = nn, p.value = p.val)
return(result)
}#MMRayleigh.test
mycirc <- function(angles, clock){
if(missing(clock)){clock <- T}
if(clock){
return(
as.circular(angles,
units = 'degrees',
type = 'angles', #don't set this to directions, apparently very different
modulo = '2pi',
zero = pi/2,
rotation = 'clock',
template = 'none')
)
}else{
as.circular(angles,
units = 'degrees',
type = 'angles', #don't set this to directions, apparently very different
modulo = '2pi',
zero = pi/2,
rotation = 'counter',
template = 'none')
}#if(clock)
}################### END OF FUNCTION ###########################
for(cnd in levels(UrbanRural$Condition)){
aaa <- subset(lp.data.mu, Condition == cnd)$mu
rrr <- subset(lp.data.rho, Condition == cnd)$rho
print(cnd)
rslt <- MMRayleigh.test(aaa,rrr)
print(data.frame(rslt))
print(rao.spacing.test(mycirc(aaa)))
}
# [1] "MoonRural"
# α. R R. n p.value
# 1 32.71862 18.98214 0.6002679 10 0.411248
# Rao's Spacing Test of Uniformity
# Test Statistic = 157.9346
# P-value > 0.10
# [1] "MoonUrban"
# α. R R. n p.value
# 1 73.5945 30.09391 0.951653 10 0.091417
# Rao's Spacing Test of Uniformity
# Test Statistic = 191.7851
# 0.01 < P-value < 0.05
# [1] "StarsRural"
# α. R R. n p.value
# 1 79.21921 141.9599 1.58716 20 0.000285
# Rao's Spacing Test of Uniformity
# Test Statistic = 180.558
# 0.001 < P-value < 0.01
# [1] "StarsUrban"
# α. R R. n p.value
# 1 -172.1731 70.43802 0.787521 20 0.180238
# Rao's Spacing Test of Uniformity
# Test Statistic = 131.3955
# P-value > 0.10
# [1] "OvercastRural"
# α. R R. n p.value
# 1 -152.6841 15.39424 0.4868087 10 0.562372
# Rao's Spacing Test of Uniformity
# Test Statistic = 185.6164
# 0.01 < P-value < 0.05
# [1] "OvercastUrban"
# α. R R. n p.value
# 1 -99.87 22.50903 0.711798 10 0.280059
# Rao's Spacing Test of Uniformity
# Test Statistic = 153.9682
# P-value > 0.10
#Pairwise comparisons
#Moon
watson.wheeler.test(mycirc(mu)~ Indirect,
subset(lp.data.mu,
grepl('Moon', Condition) &
!grepl('Walls', Condition)
)
)
# Watson-Wheeler test for homogeneity of angles
# data: mycirc(mu) by Indirect
# W = 1.5754, df = 2, p-value = 0.4549
#Stars
watson.wheeler.test(mycirc(mu)~Indirect,
subset(lp.data.mu,
grepl('Star', Condition) &
!grepl('Walls', Condition)
)
)
# Watson-Wheeler test for homogeneity of angles
# data: mycirc(mu) by Indirect
# W = 10.724, df = 2, p-value = 0.004691
#Clouds
watson.wheeler.test(mycirc(mu)~Indirect,
subset(lp.data.mu,
grepl('Overcast', Condition) &
!grepl('Walls', Condition)
)
)
# Watson-Wheeler test for homogeneity of angles
# data: mycirc(mu) by Indirect
# W = 2.0706, df = 2, p-value = 0.3551
#####################################################################
# Plot Data #
#####################################################################
cbind(levels(UrbanRural$Condition))
#Marie's suggestion for ordering
UrbanRural$Condition <- factor(UrbanRural$Condition,
levels = c('MoonRural','StarsRural','OvercastRural',
'MoonUrban','StarsUrban','OvercastUrban') )
# Open plot
dev.new(width =5); par(mai = c(0,0.8, 0, 0), lend = 'butt')
#plot on the appropriate scale transformed
boxplot(rho~Condition, data = UrbanRural,
cex = 0.5, outline = F, border = rgb(0,0,0,0.5),
pars = list(boxwex = 0.3, staplewex = 0.5, outwex = 0.5),
axes = F, ylim =(c(0,1)),# xlim = c(0.5, 4.5),
ylab = 'Mean Vector Length', xlab = '')
polygon(c(0,2+rep(length(levels(UrbanRural$Condition)),2),0), c(0,0, sqrt(-log(0.05)/10),sqrt(-log(0.05)/10)), col = rgb(1,0,0,0.05), border = NA)
legend('bottom', inset = sqrt(-log(0.05)/10), legend = ' Rayleigh test \n p<0.05 \n \n p>0.05 ', cex = 0.5, bty = 'n')
beeswarm(rho~Condition, data = UrbanRural,
pch = 21, cex = 1.8/2, #method = 'center',
pwcol = c('gray20', 'orange4')[UrbanRural$Direct+1],
pwbg = c('gray', 'orange')[UrbanRural$Indirect+1],
add = T)
#
axis(2)#
mtext(levels(UrbanRural$Condition), side = 1, at = 1:length(levels(UrbanRural$Condition))-0.5, line = -0.25-24*with(UrbanRural, aggregate(rho, by = list(Condition = Condition), function(x) quantile(x, 0.75, na.rm=T)))$x, las = 2, cex = 0.75 )
abline(h =c(0,1), lwd = 0.25)
segments(3.35,0,3.35,1, lwd = 0.25, lty = 2)
PDFsave(paste0(Sys.getenv('HOME'),'/Dropbox/My Papers/Light Pollution/'), Experiment = 'LP', PlotName = paste0("Beeswarm_", 'UrbanRural'))
#####################################################################
# Add significant differences #
#####################################################################
cbind(names(UrbanRural.wc.p[p.adjust(UrbanRural.wc.p, 'BH')<=0.05] ))
# [1,] "wc.Moon.Cloud.Rural"
# [2,] "wc.Stars.Cloud.Rural"
# [3,] "wc.Cloud.Rural.Urban"
# So from MoonRural to CloudRural
# from StarsRural to CloudRural
# and from CloudRural to CloudUrban
lines(c(1,3), rep(0.4-0.03*0,2), col = 'darkred', lwd = 2)
text(1.5,0.4-0.03*0.75, '*', cex = 1.5)
lines(c(2,3), rep(0.4-0.03*1,2), col = 'darkred', lwd = 2)
text(2.5,0.4-0.03*1.75, '*', cex = 1.5)
lines(c(3,6), rep(0.4-0.03*2,2), col = 'darkred', lwd = 2)
text(4.5,0.4-0.03*2.75, '**', cex = 1.5)
PDFsave(paste0(Sys.getenv('HOME'),'/Dropbox/My Papers/Light Pollution/'), Experiment = 'LP', PlotName = paste0("Hypotheses_", 'UrbanRural'))
|
4ccc9400feb82a31b2c160e34b2ba0c8653779b7
|
f7e8523b0b6bfaf2fe3c0cec6c07337da847d7e3
|
/man/get_p_value.Rd
|
37d13414f6da69db6c389af6815210dbbc32d1e2
|
[] |
no_license
|
kennethban/infer
|
e48019b7e20523ddfd5b8843cb4eabaa5f3fbd43
|
0f1cde327e3f2b36ec3235ed46a2c15cda3ebf16
|
refs/heads/master
| 2023-04-27T15:05:31.868492
| 2023-04-16T14:07:37
| 2023-04-16T14:07:37
| 233,613,346
| 0
| 0
| null | 2020-01-13T14:22:55
| 2020-01-13T14:22:55
| null |
UTF-8
|
R
| false
| true
| 1,822
|
rd
|
get_p_value.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_p_value.R
\name{get_p_value}
\alias{get_p_value}
\alias{get_pvalue}
\title{Compute p-value}
\usage{
get_p_value(x, obs_stat, direction)
get_pvalue(x, obs_stat, direction)
}
\arguments{
\item{x}{Data frame of calculated statistics as returned by \code{\link[=generate]{generate()}}}
\item{obs_stat}{A numeric value or a 1x1 data frame containing the observed
statistic; the p-value is the proportion of the null distribution as extreme
or more extreme than this.}
\item{direction}{A character string. Options are \code{"less"}, \code{"greater"}, or
\code{"two_sided"}. Can also use \code{"left"}, \code{"right"}, or \code{"both"}.}
}
\value{
A 1x1 \link[tibble:tibble]{tibble} with value between 0 and 1.
}
\description{
\Sexpr[results=rd, stage=render]{lifecycle::badge("stable")}
Compute a p-value from a null distribution and an observed statistic.
Currently, only simulation-based methods are supported.
Learn more in \code{vignette("infer")}.
}
\section{Aliases}{
\code{get_pvalue()} is an alias of \code{get_p_value()}.
\code{p_value} is a deprecated alias of \code{get_p_value()}.
}
\examples{
# find the point estimate---mean number of hours worked per week
point_estimate <- gss \%>\%
specify(response = hours) \%>\%
calculate(stat = "mean") \%>\%
dplyr::pull()
# starting with the gss dataset
gss \%>\%
# ...we're interested in the number of hours worked per week
specify(response = hours) \%>\%
# hypothesizing that the mean is 40
hypothesize(null = "point", mu = 40) \%>\%
# generating data points for a null distribution
generate(reps = 10000, type = "bootstrap") \%>\%
# finding the null distribution
calculate(stat = "mean") \%>\%
get_p_value(obs_stat = point_estimate, direction = "two_sided")
# More in-depth explanation of how to use the infer package
vignette("infer")
}
|
455aa12ac5c9c7bf647b0978d1488081b5474975
|
12c2739ecf1a424fc959dbca3f7b9f5f1794e512
|
/bathtub/rglAnimation.R
|
728224a3085098721a85f99828f681c82600a61c
|
[] |
no_license
|
majazaloznik/DataViz-Oxford-2015
|
c0bff5826c3f5edc29785a7bca6978f44119666c
|
8a50c0aa32a6ab8ab12d3f59a9375f1721a13e87
|
refs/heads/master
| 2021-01-01T06:34:03.940178
| 2015-03-12T19:36:44
| 2015-03-12T19:36:44
| 32,096,538
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,586
|
r
|
rglAnimation.R
|
##################################
## test of creating movies with rgl rotations!
## see resulting pdf in pics/animation.pdf
## (.tex file is there as well)
##################################
library(rgl)
library(reshape2)
library(latticeExtra)
# read data
rates_14_all <- read.csv("data/rates_14_all.csv")
# inputs
gender <- "total"
minage <- 0
maxage <- 80
minyear <- 1751
maxyear <- 2012
log = TRUE
# prep data
ss <- subset(rates_14_all, subset=
sex==gender &
age >= minage &
age <= maxage &
year >= minyear &
year <= maxyear)
ss <- ss[,c(2,3,7)]
xlabs <- unique(ss[,1])
ylabs <- unique(ss[,2])
ssc <- dcast(ss, age ~ year, value.var="death_rate")
ssc <- ssc[,-1]
if (log == TRUE){heights <- as.matrix(log(ssc))} else{
heights <- as.matrix(ssc)}
# plot chart
rgl.clear()
rgl.clear("lights")
rgl.light(theta = 45, phi = 45, viewpoint.rel=TRUE)
rgl.surface( ylabs, xlabs, as.matrix(ssc),
specular = "#FFFFFF",
zlim=range(log(ssc)),
ambient = "#222222",
color="yellow")
aspect3d(1,1,1)
axes3d(edges = "bbox", labels = TRUE, tick = TRUE, nticks = 5,
box=FALSE, expand = 1.03, xlabs=xlabs)
for(i in seq(0, 3 * 360, by = 1)) {
rgl.viewpoint(theta = i, phi = 0)
}
## Record rotating rgl object as "movie"
M <- par3d("userMatrix")
M1 <- rotate3d(M, .9*pi/2, 1, 0, 0)
M2 <- rotate3d(M1, pi/2, 0, 0, 1)
movie3d(par3dinterp(userMatrix=list(M, M1, M2, M1, M),method="linear"),
duration=4, convert=F,clean=F, dir="pics")
|
0648a87bc8c0f8048bab9cd146e621b5de3a0085
|
2a40dee36671ca3eafda8105d376ff954df0a237
|
/trainee forum outline.R
|
8f2abcf229a6f85382d53e6a6b29ac9ed3ad0943
|
[] |
no_license
|
dr-romster/Rtutorial
|
cbcc0ad4b0721cf3a444c4ad2a938bfb18d9af01
|
be78a35c270a4a4366c418385389b57d0b024c01
|
refs/heads/master
| 2022-07-28T14:38:38.941637
| 2022-07-14T08:55:03
| 2022-07-14T08:55:03
| 158,443,477
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,988
|
r
|
trainee forum outline.R
|
# What is data science?
# Measurement / Data collation / curation / cleaning / sharing
# Statistics / inference / prediction (ML/AI)
# Visualisation / Dissemination
# Can be applied to all natural and life sciences
#
# The old model:
# Research
# Generate a hypothesis
# Devise a (controlled) experiment
# Measure / collect data
# Data analysis and interpretation
# Rinse and repeat
# Publication
#
#
# So what has changed?
# Data availability
# Routinely collected and stored
# Automated
# Easier to generate due to electronic healthcare records
# More people generating data (Research / QI)
# Public databases with healthcare / socioeconomic / biological
#
# Accesibility
# Not just the data but the METHODS and TOOLS
# Recent revolution in how programme languages are designed
# Programming languages no longer the domain of statisticians and
# software engineers
# Domain experts (like you) can use these tools on their home computers
# even where data is large you can use cloud computing methods
# Trusted research environments (TREs)
# Sandboxed environments where privileged (potentially identifiable) data
# can be accessed and queries run to address specific problems or questions
# Data cannot leave without approval
# The need for a narrative
# Data no longer just presented to super-specialist audiences
# (conferences and journals) but to colleagues (clinical and
# non-clinical) and beyond
# The narrative can be used to
# Disseminate new knowledge
# Highlight problems
# Change behaviour
# Change policy
#
# Data science offers automated workflows
# that generate: stats / presentations / report / blog posts / dashboards
# thesis
#
# Yeah but I know / like / am comfortable with MS Excel
# Excel maxes out after (18k, 2000) 1 million rows (Office 365)
# Its great for small projects using single tables
# The real power of excel is in its macros and more advanced commands
# -> computer grinds to a halt
# No reproducibility
# You can't track your clicks and the steps you made to clean /
# organise your data from raw -> usable
# New data needs to undergo the same process
# These steps are best done in computer program
# Easier to address edge-cases consistently
# Transparent methodology allows others to interrogate and correct methods
# Iteration improves the efficiency over time
#
# Back to business: YEAH BUT I JUST WANT TO GIVE AN ANAESTHETIC!
# Data changes behaviour and practice
# NAP-4
# NELA
# National hip fracture databases (spinal vs GA)
# Health services research
# Pulse oximetry values in non-white individuals (MIMIC and eICU)
#
# Data science allows us to ask questions that would only otherwise be
# available to specialised researchers
# COVID H and L phenotypes
# Intra-operative opiates
# Tracking COVID genotypes locally
# Gives you the tools to question / support the assertions of others
# (especially if they are an opinion / perspective that relies on their
# eminence)
# It's no longer good enough to say
# "let's try this, it's unlikely to do any harm"
# LOVIT trial
# Where do I go next?
# Find a question that interests you
# Learn a language
# Is this data already available? (locally, public repository)
# Work out how to clean and organise the data
# Come and have a chat
# Much quicker than a biological experiment based study
# Probably more relevant to clinical care
# How deep does this rabbit hole go?
# As far as you like
# Cambridge center for AI in healthcare
# Big Data Institute, University of Oxford
# UCL Institute for Health Informatics
# HDR UK
# What do I get out of it?
#
# Problem solving
# Using a different part of my brain that doesn't involve
# appeasing a bit of paperwork / protocol /
# broken NHS software that was never fit for purpose
# Genuine "Eureka!" moments
# Collaboration
# High turnover of questions
# Contributing to some important questions in my field
# Less exhausted by clinical workload
# Think about how to make changes - what bit of data
# do I need to provide the narrative that will support changes?
# So in summary
# Data is NOT the new gold
# But it is now more accessible than experimental research (which it does not replace)
# It is not a life sentence
# Explore for yourself how far you can get down the rabbit-hole
|
2f7f39c431589614b03edb293bde112f2d04bc25
|
9589b7aa8da3aba429323054df235285e4bdccac
|
/R/SQL_Interface.R
|
829f359a296a604a2b8abab3891fe2dc5467fe64
|
[] |
no_license
|
Planeshifter/newscrapeR
|
f9b85d78bc6d3f1455b878229e42fbd08242fcc2
|
ac580bda187af423c55477fb04d8203c76aaa860
|
refs/heads/master
| 2020-04-05T23:40:24.732554
| 2013-07-01T13:07:44
| 2013-07-01T13:07:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,644
|
r
|
SQL_Interface.R
|
fetch_SQL = function(db_name, keywords = character(), sources = vector(),
from = NULL, to = NULL)
{
require(RSQLite)
if (!is.null(from)) from <- as.numeric(as.POSIXlt(from))
if (!is.null(to)) to <- as.numeric(as.POSIXlt(to)) + 1000*60*60*3 # NB: adds 1000*60*60*3 seconds (~125 days); if a 3-hour buffer was intended this should be 60*60*3
driver <- dbDriver("SQLite")
con <- dbConnect(driver, db_name)
on.exit(dbDisconnect(con)) # close the connection when the function exits
query_string <- "SELECT * FROM Article WHERE "
if (length(keywords)>0)
cond1 <- paste(paste("content LIKE '%",keywords,"%'",sep=""),collapse=" AND ")
else cond1 <- ""
if (length(sources)>0)
{
if(nchar(cond1)>1) cond1 <- paste(cond1," AND ",collapse=" ")
cond2 <- paste(paste("published_in LIKE '%",sources,"%'",sep=""),collapse=" OR ")
}
else cond2 <- ""
if (!is.null(from))
{
if (nchar(cond2)>1) cond2 <- paste(cond2," AND ",collapse=" ")
if (nchar(cond1)>1&&!nchar(cond2)>1) cond1 <- paste(cond1," AND ",collapse=" ")
cond3 <- paste("load_date >=", from)
}
else cond3 <- ""
if (!is.null(to))
{
if (nchar(cond3)>1) cond3 <- paste(cond3," AND ",collapse=" ")
else
{
if (nchar(cond2)>1) cond2 <- paste(cond2," AND ",collapse=" ")
if (nchar(cond1)>1&&!nchar(cond2)>1) cond1 <- paste(cond1," AND ",collapse=" ")
}
cond4 <- paste("load_date <=", to)
}
else cond4 <- ""
print(cond3) # debug output
print(cond4) # debug output
query_string <- paste(query_string, cond1, cond2, cond3, cond4, sep="")
print(query_string) # debug output
res <- dbGetQuery(conn=con,query_string)
Encoding(res$content) <- "UTF-8"
Encoding(res$title) <- "UTF-8"
res$load_date <- as.Date(unix2POSIXct(res$load_date))
return(res)
}
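# Note: the query above is built by string concatenation, so keyword and
# source values are interpolated directly into the SQL. A safer sketch
# using DBI's bound parameters (`keyword` here is a hypothetical single
# search term, for illustration only):
# con <- dbConnect(RSQLite::SQLite(), db_name)
# res <- dbGetQuery(con, "SELECT * FROM Article WHERE content LIKE ?",
#                   params = list(paste0("%", keyword, "%")))
# dbDisconnect(con)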
|
a7e68b155f92581de8148de366b99f6bb9b88fd7
|
205a269537cc4bfbc526da048db8d185e1e678c9
|
/man/geom_density_interactive.Rd
|
85112406d60d58d05f4d2f513b43e26617220662
|
[] |
no_license
|
davidgohel/ggiraph
|
bca2fc5c61ef7cbeecc0a0d067f7479822117ab0
|
b3ce2998b57d8c8b63055499925fd9fe99f4d1a7
|
refs/heads/master
| 2023-09-03T00:06:10.817100
| 2023-08-30T14:35:40
| 2023-08-30T14:35:40
| 40,061,589
| 735
| 86
| null | 2023-09-03T09:50:54
| 2015-08-01T22:17:06
|
R
|
UTF-8
|
R
| false
| true
| 1,981
|
rd
|
geom_density_interactive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom_density_interactive.R
\name{geom_density_interactive}
\alias{geom_density_interactive}
\title{Create interactive smoothed density estimates}
\usage{
geom_density_interactive(...)
}
\arguments{
\item{...}{arguments passed to base function,
plus any of the \link{interactive_parameters}.}
}
\description{
The geometry is based on \code{\link[=geom_density]{geom_density()}}.
See the documentation for those functions for more details.
}
\section{Details for interactive geom functions}{
The interactive parameters can be supplied with two ways:
\itemize{
\item As aesthetics with the mapping argument (via \code{\link[=aes]{aes()}}).
In this way they can be mapped to data columns and apply to a set of geometries.
\item As plain arguments into the geom_*_interactive function.
In this way they can be set to a scalar value.
}
}
\examples{
# add interactive bar -------
library(ggplot2)
library(ggiraph)
p <- ggplot(diamonds, aes(carat)) +
geom_density_interactive(tooltip="density", data_id="density")
x <- girafe(ggobj = p)
x <- girafe_options(x = x,
opts_hover(css = "stroke:orange;stroke-width:3px;") )
if( interactive() ) print(x)
p <- ggplot(diamonds, aes(depth, fill = cut, colour = cut)) +
geom_density_interactive(aes(tooltip=cut, data_id=cut), alpha = 0.1) +
xlim(55, 70)
x <- girafe(ggobj = p)
x <- girafe_options(x = x,
opts_hover(css = "stroke:yellow;stroke-width:3px;fill-opacity:0.8;") )
if( interactive() ) print(x)
p <- ggplot(diamonds, aes(carat, fill = cut)) +
geom_density_interactive(aes(tooltip=cut, data_id=cut), position = "stack")
x <- girafe(ggobj = p)
if( interactive() ) print(x)
p <- ggplot(diamonds, aes(carat, stat(count), fill = cut)) +
geom_density_interactive(aes(tooltip=cut, data_id=cut), position = "fill")
x <- girafe(ggobj = p)
if( interactive() ) print(x)
}
\seealso{
\code{\link[=girafe]{girafe()}}
}
|
0bf402d100aee3df1dec45e87f964ba24a4ba331
|
a01cefedd32f761da75056e6fd9bad74654cde27
|
/exp-cnv-match.R
|
961b215a45a2b34cc928a1e3c5c35c01f1cc6143
|
[] |
no_license
|
ddiannae/cnv-correction
|
8b9862f755c4fb5f056655281e57c61920fd512f
|
3bef355670277412ad8cbed0ea15c53da7f93335
|
refs/heads/master
| 2020-04-11T09:09:51.986831
| 2018-12-13T20:51:31
| 2018-12-13T20:51:31
| 161,668,270
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 923
|
r
|
exp-cnv-match.R
|
setwd("/mnt/ddisk/transpipeline-data/breast-data-cnvs")
cnvs <- read.table("cnv-matrix.tsv", header = T, check.names = F, row.names = 1)
exp <- read.table("exp-matrix.tsv", header = T, check.names = F, row.names = 1)
cases.intersect <- intersect(colnames(cnvs), colnames(exp))
genes.intersect <- intersect(rownames(cnvs), rownames(exp))
cnvs.filtered <- cnvs[genes.intersect, cases.intersect]
exp.filtered <- exp[genes.intersect, cases.intersect]
cnvs.filtered["Gene.Symbol"] <- rownames(cnvs.filtered)
exp.filtered["Gene.Symbol"] <- rownames(exp.filtered)
# move the Gene.Symbol column (currently last) to the front;
# note 1:n-1 parses as (1:n)-1, so the intended range needs parentheses
cnvs.filtered <- cnvs.filtered[, c(ncol(cnvs.filtered), 1:(ncol(cnvs.filtered)-1))]
exp.filtered <- exp.filtered[, c(ncol(exp.filtered), 1:(ncol(exp.filtered)-1))]
write.table(cnvs.filtered, file = "cnv-filtered-matrix.tsv", sep="\t", quote=FALSE, row.names=FALSE)
write.table(exp.filtered, file = "exp-filtered-matrix.tsv", sep="\t", quote=FALSE, row.names=FALSE)
|
4260646748add17abb36cece03a233d7140e1f5b
|
f2d2cc9a2514d98a205e47393f50a808876bda3e
|
/2019-11-14/task1.R
|
5755ce027c957081568de02c8808a27465966e0a
|
[] |
no_license
|
georgistoilov8/R-language
|
0566a25d89f9b3d4a0cba626e6063c15845ad9e2
|
349475c2183fcfeb0882b09d716061c78640ce20
|
refs/heads/master
| 2020-08-27T03:35:44.973663
| 2020-01-16T12:31:54
| 2020-01-16T12:31:54
| 217,233,475
| 17
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 580
|
r
|
task1.R
|
# The larger the degrees of freedom (df), the more likely the data are
# to be normally distributed!
x = rt(100, df = 1)
qqnorm(x)
qqline(x)
shapiro.test(x)
# Shapiro-Wilk normality test
# data:  x
# W = 0.27683, p-value < 2.2e-16
y = rt(100, df = 10)
qqnorm(y)
qqline(y)
shapiro.test(y)
# Shapiro-Wilk normality test
# data:  y
# W = 0.98364, p-value = 0.2522
z = rt(100, df = 100)
qqnorm(z)
qqline(z)
shapiro.test(z)
# Shapiro-Wilk normality test
# data:  z
# W = 0.98614, p-value = 0.3822
|
d157c8ff3d9e9498037a14e3a981eccbaf4fbc29
|
2423cde21514581a9fdcb97b4d94d0f2641d1ba4
|
/R/WrangleDataframes.R
|
c9a628b298f418c4baf7df160feb1b7bddd678ad
|
[] |
no_license
|
jakeyeung/JFuncs
|
0d06af9181b98b94096d906c2d998fa9a9b6b2b3
|
9c783f5576990315bf3f844e2994950bd30580f1
|
refs/heads/master
| 2022-10-29T21:35:25.286060
| 2022-10-22T08:23:37
| 2022-10-22T08:23:37
| 164,300,954
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 603
|
r
|
WrangleDataframes.R
|
# Jake Yeung
# Date of Creation: 2020-11-05
# File: ~/projects/JFuncs/R/WrangleDataframes.R
#
cbind.fill.lst <- function(mats.lst, all.rnames, fill = 0){
mats.lst.filled <- lapply(mats.lst, function(mat.tmp){
missing.rnames <- all.rnames[!all.rnames %in% rownames(mat.tmp)]
mat.tmp.to.fill <- matrix(data = fill, nrow = length(missing.rnames), ncol = ncol(mat.tmp), dimnames = list(missing.rnames, colnames(mat.tmp)))
mat.tmp.bind <- rbind(mat.tmp, mat.tmp.to.fill)
mat.tmp.bind <- mat.tmp.bind[all.rnames, ]
return(mat.tmp.bind)
})
return(do.call(cbind, mats.lst.filled))
}
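# Minimal usage sketch (m1/m2 and the row names are illustrative only):
# two matrices that share only some row names are zero-filled onto a
# common row set and then column-bound.
m1 <- matrix(1:4, nrow = 2, dimnames = list(c("a", "b"), c("s1", "s2")))
m2 <- matrix(5:8, nrow = 2, dimnames = list(c("b", "c"), c("s3", "s4")))
cbind.fill.lst(list(m1, m2), all.rnames = c("a", "b", "c"))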
|
6a2cf2d727b607a70cf229c79d0204c7c8dea09f
|
930fe20f4c7d322b717c91e1a6eb3aa6fa6b4236
|
/man/get_templates.Rd
|
e326bdc627eae94d106a9aa919f423328cdfe83c
|
[] |
no_license
|
NanaAkwasiAbayieBoateng/meme
|
d5cf1b0cd4bff579d340578b6451078f4932a5c0
|
47233dc80d0f27a41b755eea134febcd09a97955
|
refs/heads/master
| 2021-05-31T15:38:38.807287
| 2015-04-25T20:34:57
| 2015-04-25T20:34:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,914
|
rd
|
get_templates.Rd
|
\name{get_templates}
\alias{get_templates}
\alias{plot.meme_template}
\title{Get meme templates}
\description{Get a list of meme templates}
\usage{
get_templates(site = "memecaptain", type = NULL, query = NULL, ...)
}
\arguments{
\item{site}{The site used to generate the meme. This is set by default if \code{template} is an object of class \dQuote{meme_template}. One of \dQuote{imgflip}, \dQuote{memegenerator}, or \dQuote{memecaptain} (the default).}
\item{type}{If \code{site} is \dQuote{memegenerator}, optionally one of \dQuote{new}, \dQuote{popular} (the implicit default), \dQuote{trending}, \dQuote{related}, or \dQuote{search} to return a different subset of template images. For \dQuote{related} and \dQuote{search}, \code{query} should specify an image name or search term, respectively.}
\item{query}{When \code{site} is \dQuote{memegenerator} and \code{type} is \dQuote{related} or \dQuote{search}, \code{query} should specify an image name or search term, respectively.}
\item{...}{Additional arguments to \code{curlPerform}.}
}
\details{This function retrieves a list of template images from the specified site, which can then be passed to \code{\link{create_meme}} for generating a meme image. The resulting list of objects are of class \dQuote{meme_template}, which has an associated S3 \code{print} method that will display the template image as a margin-free JPEG plot in the current graphics device.}
\value{A list of objects of class \dQuote{meme_template}.}
\references{
\href{http://version1.api.memegenerator.net/}{memegenerator API}
\href{https://api.imgflip.com/}{imgflip API}
\href{http://memecaptain.com/}{memecaptain}
}
\author{Thomas J. Leeper}
%\note{}
\seealso{\code{\link{create_meme}}}
\examples{
\dontrun{
# use imgflip
t1 <- get_templates("imgflip")
# use memegenerator
t2 <- get_templates("memegenerator")
# use memecaptain
t3 <- get_templates("memecaptain")
}
}
|
cf5efdf02f24d7d919589e2115c43b44696ed37e
|
e74b3fe11ff2a4448e898a06ae24cc6fe7e0c784
|
/README.rd
|
051412d66cf924e26635fbb8ab17d5fc0f0e682d
|
[] |
no_license
|
Angiefl/Portfolio-1
|
b9444a27fa3d4229aa5535a75a7fe2e0ab863b26
|
f8a0344d9b5096dc6f41e62829b90432439f5e48
|
refs/heads/master
| 2021-01-25T08:29:34.565215
| 2015-06-24T21:47:04
| 2015-06-24T21:47:04
| 38,011,408
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37
|
rd
|
README.rd
|
substring and Caesar Cipher programs
|
aa594c2160727050254f19b78582b702998ef76b
|
89b7b46088f31ebff265cd6549f42d886ea1ec81
|
/app/c/R_code_official/KS.R
|
4d7745347a60e7fd9d479d5ab5a2e468c3e24816
|
[
"MIT"
] |
permissive
|
dblyon/agotool
|
5124ee3c64f2bb3b47fcacc715d22c231ed8016e
|
b4eb705a00b710c596fcd723b24b7596d5eca751
|
refs/heads/master
| 2023-08-28T12:28:19.546847
| 2023-08-06T09:36:53
| 2023-08-06T09:36:53
| 38,004,274
| 8
| 1
|
MIT
| 2023-05-01T20:18:46
| 2015-06-24T18:45:23
|
HTML
|
UTF-8
|
R
| false
| false
| 3,982
|
r
|
KS.R
|
# Source of stats::ks.test, as printed at the R console via `ks.test`:
function (x, y, ..., alternative = c("two.sided", "less", "greater"),
    exact = NULL)
{
alternative <- match.arg(alternative)
DNAME <- deparse(substitute(x))
x <- x[!is.na(x)]
n <- length(x)
if (n < 1L)
stop("not enough 'x' data")
PVAL <- NULL
if (is.numeric(y)) {
DNAME <- paste(DNAME, "and", deparse(substitute(y)))
y <- y[!is.na(y)]
n.x <- as.double(n)
n.y <- length(y)
if (n.y < 1L)
stop("not enough 'y' data")
if (is.null(exact))
exact <- (n.x * n.y < 10000)
METHOD <- "Two-sample Kolmogorov-Smirnov test"
TIES <- FALSE
n <- n.x * n.y/(n.x + n.y)
w <- c(x, y)
z <- cumsum(ifelse(order(w) <= n.x, 1/n.x, -1/n.y))
if (length(unique(w)) < (n.x + n.y)) {
if (exact) {
warning("cannot compute exact p-value with ties")
exact <- FALSE
}
else warning("p-value will be approximate in the presence of ties")
z <- z[c(which(diff(sort(w)) != 0), n.x + n.y)]
TIES <- TRUE
}
STATISTIC <- switch(alternative, two.sided = max(abs(z)), greater = max(z), less = -min(z))
nm_alternative <- switch(alternative, two.sided = "two-sided", less = "the CDF of x lies below that of y", greater = "the CDF of x lies above that of y")
if (exact && (alternative == "two.sided") && !TIES)
PVAL <- 1 - .Call(C_pSmirnov2x, STATISTIC, n.x, n.y)
}
else {
if (is.character(y))
y <- get(y, mode = "function", envir = parent.frame())
if (!is.function(y))
stop("'y' must be numeric or a function or a string naming a valid function")
METHOD <- "One-sample Kolmogorov-Smirnov test"
TIES <- FALSE
if (length(unique(x)) < n) {
warning("ties should not be present for the Kolmogorov-Smirnov test")
TIES <- TRUE
}
if (is.null(exact))
exact <- (n < 100) && !TIES
x <- y(sort(x), ...) - (0:(n - 1))/n
STATISTIC <- switch(alternative, two.sided = max(c(x, 1/n - x)), greater = max(1/n - x), less = max(x))
if (exact) {
PVAL <- 1 - if (alternative == "two.sided")
.Call(C_pKolmogorov2x, STATISTIC, n)
else {
pkolmogorov1x <- function(x, n) {
if (x <= 0)
return(0)
if (x >= 1)
return(1)
j <- seq.int(from = 0, to = floor(n * (1 -
x)))
1 - x * sum(exp(lchoose(n, j) + (n - j) * log(1 -
x - j/n) + (j - 1) * log(x + j/n)))
}
pkolmogorov1x(STATISTIC, n)
}
}
nm_alternative <- switch(alternative, two.sided = "two-sided", less = "the CDF of x lies below the null hypothesis",
greater = "the CDF of x lies above the null hypothesis")
}
names(STATISTIC) <- switch(alternative, two.sided = "D", greater = "D^+", less = "D^-")
if (is.null(PVAL)) {
pkstwo <- function(x, tol = 1e-06) {
if (is.numeric(x))
x <- as.double(x)
else stop("argument 'x' must be numeric")
p <- rep(0, length(x))
p[is.na(x)] <- NA
IND <- which(!is.na(x) & (x > 0))
if (length(IND))
p[IND] <- .Call(C_pKS2, p = x[IND], tol)
p
}
PVAL <- if (alternative == "two.sided")
1 - pkstwo(sqrt(n) * STATISTIC)
else exp(-2 * n * STATISTIC^2)
}
PVAL <- min(1, max(0, PVAL))
RVAL <- list(statistic = STATISTIC, p.value = PVAL, alternative = nm_alternative,
method = METHOD, data.name = DNAME)
class(RVAL) <- "htest"
return(RVAL)
}
### 3 different C functions are available:
# - C_pSmirnov2x    --> this one should be used (exact two-sample p-value)
# - C_pKolmogorov2x     (exact one-sample two-sided p-value)
# - C_pKS2              (asymptotic p-value)
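# Example exercising the exact two-sample branch noted above
# (n.x * n.y = 900 < 10000, continuous data so no ties):
set.seed(1)
ks.test(rnorm(30), runif(30))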
|
64d7c5b13b119db5cf804aefbe26b981e07679a3
|
c8e9e754e3a751ea785aaaf6a929d02fa106dbcc
|
/tests/testthat/test-fcut.R
|
2aa6b75401c25be1d748e1f25db7c0e0f0555659
|
[] |
no_license
|
beerda/lfl
|
3a6849da19165990bcac9c57cf136b62d8ccc951
|
9b8028447ab53e0f91553cd827f7a15783593c6b
|
refs/heads/master
| 2023-02-24T10:09:01.496322
| 2023-02-15T06:57:08
| 2023-02-15T06:57:08
| 99,807,073
| 5
| 1
| null | 2020-02-26T07:49:58
| 2017-08-09T12:42:17
|
R
|
UTF-8
|
R
| false
| false
| 11,015
|
r
|
test-fcut.R
|
test_that('fcut for factor', {
x <- factor(c('a', 'b', 'a', 'c', 'c', 'b', 'c'))
res <- fcut(x)
expect_true(is.matrix(res))
expect_equal(ncol(res), 3)
expect_equal(nrow(res), 7)
expect_equal(colnames(res), c('x=a', 'x=b', 'x=c'))
expect_true(inherits(res, 'fsets'))
expect_equivalent(vars(res), rep('x', 3))
expect_equal(names(vars(res)), NULL)
expect_equal(specs(res), matrix(rep(0, 3*3),
nrow=3,
ncol=3))
expect_true(is.fsets(res))
expect_equivalent(as.matrix(res)[1, 1], 1)
expect_equivalent(as.matrix(res)[1, 2], 0)
expect_equivalent(as.matrix(res)[1, 3], 0)
expect_equivalent(as.matrix(res)[2, 1], 0)
expect_equivalent(as.matrix(res)[2, 2], 1)
expect_equivalent(as.matrix(res)[2, 3], 0)
expect_equivalent(as.matrix(res)[3, 1], 1)
expect_equivalent(as.matrix(res)[3, 2], 0)
expect_equivalent(as.matrix(res)[3, 3], 0)
expect_equivalent(as.matrix(res)[4, 1], 0)
expect_equivalent(as.matrix(res)[4, 2], 0)
expect_equivalent(as.matrix(res)[4, 3], 1)
expect_equivalent(as.matrix(res)[5, 1], 0)
expect_equivalent(as.matrix(res)[5, 2], 0)
expect_equivalent(as.matrix(res)[5, 3], 1)
expect_equivalent(as.matrix(res)[6, 1], 0)
expect_equivalent(as.matrix(res)[6, 2], 1)
expect_equivalent(as.matrix(res)[6, 3], 0)
expect_equivalent(as.matrix(res)[7, 1], 0)
expect_equivalent(as.matrix(res)[7, 2], 0)
expect_equivalent(as.matrix(res)[7, 3], 1)
})
test_that('fcut for logical', {
x <- c(TRUE, FALSE, FALSE, TRUE)
res <- fcut(x)
expect_true(is.matrix(res))
expect_equal(ncol(res), 2)
expect_equal(nrow(res), 4)
expect_equal(colnames(res), c('x', 'not.x'))
expect_true(inherits(res, 'fsets'))
expect_equivalent(vars(res), rep('x', 2))
expect_equal(names(vars(res)), NULL)
expect_equal(specs(res), matrix(0, nrow=2, ncol=2))
expect_true(is.fsets(res))
expect_equivalent(as.matrix(res)[1, 1], 1)
expect_equivalent(as.matrix(res)[1, 2], 0)
expect_equivalent(as.matrix(res)[2, 1], 0)
expect_equivalent(as.matrix(res)[2, 2], 1)
expect_equivalent(as.matrix(res)[3, 1], 0)
expect_equivalent(as.matrix(res)[3, 2], 1)
expect_equivalent(as.matrix(res)[4, 1], 1)
expect_equivalent(as.matrix(res)[4, 2], 0)
})
test_that('fcut of numeric by single triangle', {
x <- 0:100
res <- fcut(x,
breaks=c(0, 50, 100),
type='triangle')
expect_true(is.matrix(res))
expect_equal(ncol(res), 1)
expect_equal(nrow(res), 101)
expect_equal(colnames(res), 'x=1')
expect_true(inherits(res, 'fsets'))
expect_equivalent(vars(res), 'x')
expect_equal(names(vars(res)), NULL)
expect_equal(specs(res), matrix(0,
nrow=1,
ncol=1))
expect_equivalent(as.matrix(res)[1, 1], 0)
expect_equivalent(as.matrix(res)[26, 1], 0.5)
expect_equivalent(as.matrix(res)[51, 1], 1)
expect_equivalent(as.matrix(res)[76, 1], 0.5)
expect_equivalent(as.matrix(res)[101, 1], 0)
expect_true(is.fsets(res))
})
test_that('fcut of numeric by multiple triangles', {
x <- 0:100
res <- fcut(x,
breaks=c(0, 25, 50, 75, 100),
type='triangle')
expect_true(is.matrix(res))
expect_equal(ncol(res), 3)
expect_equal(nrow(res), 101)
expect_equal(colnames(res), c('x=1', 'x=2', 'x=3'))
expect_true(inherits(res, 'fsets'))
expect_equivalent(vars(res), rep('x', 3))
expect_equal(names(vars(res)), NULL)
expect_equal(specs(res), matrix(rep(0, 3*3),
nrow=3,
ncol=3))
expect_equivalent(as.matrix(res)[1, 1], 0)
expect_equivalent(as.matrix(res)[26, 1], 1)
expect_equivalent(as.matrix(res)[51, 1], 0)
expect_equivalent(as.matrix(res)[76, 1], 0)
expect_equivalent(as.matrix(res)[101, 1], 0)
expect_equivalent(as.matrix(res)[1, 2], 0)
expect_equivalent(as.matrix(res)[26, 2], 0)
expect_equivalent(as.matrix(res)[51, 2], 1)
expect_equivalent(as.matrix(res)[76, 2], 0)
expect_equivalent(as.matrix(res)[101, 2], 0)
expect_equivalent(as.matrix(res)[1, 3], 0)
expect_equivalent(as.matrix(res)[26, 3], 0)
expect_equivalent(as.matrix(res)[51, 3], 0)
expect_equivalent(as.matrix(res)[76, 3], 1)
expect_equivalent(as.matrix(res)[101, 3], 0)
expect_true(is.fsets(res))
})
test_that('fcut of numeric with merge 1:3', {
x <- 0:100
res <- fcut(x,
breaks=c(0, 25, 50, 75, 100),
type='triangle',
merge=1:3)
expect_true(is.matrix(res))
expect_equal(ncol(res), 6)
expect_equal(nrow(res), 101)
expect_equal(colnames(res), c('x=1', 'x=2', 'x=3',
'x=1|x=2', 'x=2|x=3',
'x=1|x=2|x=3'))
expect_true(inherits(res, 'fsets'))
expect_equivalent(vars(res), rep('x', 6))
expect_equal(names(vars(res)), NULL)
expect_equal(specs(res), matrix(c(0,0,0,1,0,1,
0,0,0,1,1,1,
0,0,0,0,1,1,
0,0,0,0,0,1,
0,0,0,0,0,1,
0,0,0,0,0,0),
byrow=TRUE,
nrow=6))
expect_true(is.fsets(res))
})
test_that('fcut of numeric with merge 2', {
x <- 0:100
res <- fcut(x,
breaks=c(0, 25, 50, 75, 100),
type='triangle',
merge=2)
expect_true(is.matrix(res))
expect_equal(ncol(res), 2)
expect_equal(nrow(res), 101)
expect_equal(colnames(res), c('x=1|x=2', 'x=2|x=3'))
expect_true(inherits(res, 'fsets'))
expect_equivalent(vars(res), rep('x', 2))
expect_equal(names(vars(res)), NULL)
expect_equal(specs(res), matrix(rep(0, 2*2),
nrow=2,
ncol=2))
expect_equivalent(as.matrix(res)[1, 1], 0)
expect_equivalent(as.matrix(res)[26, 1], 1)
expect_equivalent(as.matrix(res)[30, 1], 1)
expect_equivalent(as.matrix(res)[51, 1], 1)
expect_equivalent(as.matrix(res)[76, 1], 0)
expect_equivalent(as.matrix(res)[101, 1], 0)
expect_equivalent(as.matrix(res)[1, 2], 0)
expect_equivalent(as.matrix(res)[26, 2], 0)
expect_equivalent(as.matrix(res)[51, 2], 1)
expect_equivalent(as.matrix(res)[60, 2], 1)
expect_equivalent(as.matrix(res)[76, 2], 1)
expect_equivalent(as.matrix(res)[101, 2], 0)
expect_true(is.fsets(res))
})
test_that('fcut of numeric with merge 1,3', {
x <- 0:100
res <- fcut(x,
breaks=c(0, 25, 50, 75, 100),
type='triangle',
merge=c(1,3))
expect_true(is.matrix(res))
expect_equal(ncol(res), 4)
expect_equal(nrow(res), 101)
expect_equal(colnames(res), c('x=1', 'x=2', 'x=3',
'x=1|x=2|x=3'))
expect_true(inherits(res, 'fsets'))
expect_equivalent(vars(res), rep('x', 4))
expect_equal(names(vars(res)), NULL)
expect_equal(specs(res), matrix(c(0,0,0,1,
0,0,0,1,
0,0,0,1,
0,0,0,0),
byrow=TRUE,
nrow=4))
expect_true(is.fsets(res))
})
test_that('fcut of matrix', {
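# Each column of the input matrix is treated as a separate variable; with
# the 4 break points below, fcut() creates 2 triangles per variable, hence
# 8 fuzzy sets in total.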
x <- matrix(1:100, byrow=TRUE, ncol=4)
colnames(x) <- letters[1:4]
res <- fcut(x,
breaks=c(1, 30, 60, 100),
type='triangle')
expect_true(is.fsets(res))
expect_equal(ncol(res), 8)
expect_equal(nrow(res), 25)
expect_equal(colnames(res), c('a=1', 'a=2', 'b=1', 'b=2', 'c=1', 'c=2', 'd=1', 'd=2'))
expect_equal(vars(res), c(rep('a', 2), rep('b', 2), rep('c', 2), rep('d', 2)))
expect_equal(names(vars(res)), NULL)
expect_equal(specs(res), matrix(0,
nrow=8,
ncol=8))
expect_equivalent(as.matrix(res)[1, 1], 0)
expect_equivalent(as.matrix(res)[8, 3], 1)
expect_equivalent(as.matrix(res)[15, 7], 0)
})
test_that('fcut of data frame', {
x <- matrix(1:100, byrow=TRUE, ncol=4)
colnames(x) <- letters[1:4]
res <- fcut(as.data.frame(x),
breaks=c(1, 30, 60, 100),
type='triangle')
expect_true(is.fsets(res))
expect_equal(ncol(res), 8)
expect_equal(nrow(res), 25)
expect_equal(colnames(res), c('a=1', 'a=2', 'b=1', 'b=2', 'c=1', 'c=2', 'd=1', 'd=2'))
expect_equal(vars(res), c(rep('a', 2), rep('b', 2), rep('c', 2), rep('d', 2)))
expect_equal(names(vars(res)), NULL)
expect_equal(specs(res), matrix(0,
nrow=8,
ncol=8))
expect_equivalent(as.matrix(res)[1, 1], 0)
expect_equivalent(as.matrix(res)[8, 3], 1)
expect_equivalent(as.matrix(res)[15, 7], 0)
})
test_that('fcut for custom function factory', {
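# fcut() also accepts a user-supplied membership function as `type`: here a
# factory taking the three break points and returning a function of x; the
# next test passes a function taking x and the break points directly.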
func <- function(a, b, c) {
f <- triangular(a, b, c)
return(function(x) f(x)^2)
}
x <- 0:100
res <- fcut(x,
breaks=c(0, 50, 100),
type=func)
expect_true(is.fsets(res))
expect_equal(ncol(res), 1)
expect_equal(nrow(res), 101)
expect_equal(colnames(res), 'x=1')
expect_true(inherits(res, 'fsets'))
expect_equivalent(vars(res), 'x')
expect_equal(names(vars(res)), NULL)
expect_equal(specs(res), matrix(0,
nrow=1,
ncol=1))
expect_equivalent(as.matrix(res)[1, 1], 0)
expect_equivalent(as.matrix(res)[26, 1], 0.25)
expect_equivalent(as.matrix(res)[51, 1], 1)
expect_equivalent(as.matrix(res)[76, 1], 0.25)
expect_equivalent(as.matrix(res)[101, 1], 0)
expect_true(is.fsets(res))
})
test_that('fcut for custom function', {
func <- function(x, a, b, c) {
f <- triangular(a, b, c)
return(f(x)^2)
}
x <- 0:100
res <- fcut(x,
breaks=c(0, 50, 100),
type=func)
expect_true(is.fsets(res))
expect_equal(ncol(res), 1)
expect_equal(nrow(res), 101)
expect_equal(colnames(res), 'x=1')
expect_true(inherits(res, 'fsets'))
expect_equivalent(vars(res), 'x')
expect_equal(names(vars(res)), NULL)
expect_equal(specs(res), matrix(0,
nrow=1,
ncol=1))
expect_equivalent(as.matrix(res)[1, 1], 0)
expect_equivalent(as.matrix(res)[26, 1], 0.25)
expect_equivalent(as.matrix(res)[51, 1], 1)
expect_equivalent(as.matrix(res)[76, 1], 0.25)
expect_equivalent(as.matrix(res)[101, 1], 0)
expect_true(is.fsets(res))
})
|
2148aac0c9d7390340805eb0fb7859a93ee846aa
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/radmixture/examples/tfrdpub.Rd.R
|
93a051321112c44a3e5532aad17c3b854f32d84e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 513
|
r
|
tfrdpub.Rd.R
|
library(radmixture)
### Name: tfrdpub
### Title: Transfer personal genotype raw data according to a public dataset
### Aliases: tfrdpub
### ** Examples
## download.file(url = 'https://github.com/wegene-llc/radmixture/
## raw/master/data/globe4.alleles.RData', destfile = 'K4.RData')
## download.file(url = 'https://github.com/wegene-llc/radmixture/
## raw/master/data/globe4.4.F.RData', destfile = 'K4f.RData')
## load('K4.RData')
## load('K4f.RData')
## res <- tfrdpub(genotype, 4, globe4.alleles, globe4.4.F)
|
c08a9fa1f660ad38dbebc5bf2cc74633eda85697
|
27b0ef3882f6449f34c9e48e5bee46b09166b6ba
|
/man/read_data_csv.Rd
|
9ca9ebd7500d2c42ea2149be3ad15bec0e3d78d8
|
[
"Apache-2.0"
] |
permissive
|
thuzarwin/imputeData
|
e77c40492b51c377082d9b4959889d60205978af
|
bf69a04aaa219e1da941ce45691983e3be9111c0
|
refs/heads/master
| 2021-06-22T10:35:50.563010
| 2017-07-21T15:45:28
| 2017-07-21T15:45:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 565
|
rd
|
read_data_csv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_preprocessing.R
\name{read_data_csv}
\alias{read_data_csv}
\title{Read csv to data.table}
\usage{
read_data_csv(file, sep = ",", header = F, select = c(1, 2, 3, 4),
vars = c("b", "g"))
}
\arguments{
\item{file}{filename to read from}
\item{sep}{field separator used in the csv file}
\item{header}{whether the csv file contains a header row}
\item{select}{which columns to read}
\item{vars}{the variables with missing data}
}
\value{
a data.table with the data read from the file
}
\description{
Read csv to data.table
}
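\examples{
## A minimal usage sketch (the file name is hypothetical; the remaining
## arguments simply restate the documented defaults):
\dontrun{
dt <- read_data_csv("input.csv", sep = ",", header = FALSE,
                    select = c(1, 2, 3, 4), vars = c("b", "g"))
}
}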
|
d26119cd40336d95193c0a3f65b3afde8af06291
|
92f102f304493eb2b2e3de5f4c1e2e6dac864ffc
|
/man/cPlot.Rd
|
f2d713484c13ce603641139247d389091a18abf1
|
[] |
no_license
|
cran/RcmdrPlugin.TextMining
|
554cdd00f22734a778b16196667ae3b53ffa38dc
|
c6d8ac3d5c437e8cb18e950591925f9cb50ebde2
|
refs/heads/master
| 2021-03-12T20:26:57.094202
| 2010-01-01T00:00:00
| 2010-01-01T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 562
|
rd
|
cPlot.Rd
|
\name{cPlot}
\alias{cPlot}
\title{
Dialog for function plot() from package (tm)
}
\description{
This function provides an interface to the function \code{\link[tm]{plot}} from the \pkg{tm} package.
}
\usage{
cPlot()
}
\details{
Visualize correlations between terms of a term-document matrix.\cr
You can also access this function through the \code{Rcmdr} menu.\cr
From the \code{Rcmdr} menu choose \emph{TextMining -> Plot ... }
}
\author{
Dzemil Lushija \email{dzemill@gmail.com}
}
\seealso{
See also \code{\link[tm]{plot}}
}
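\examples{
## Opens the correlation plot dialog; requires an interactive Rcmdr session.
\dontrun{
cPlot()
}
}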
\keyword{ Text mining }
|
d2cc1e7d4c8cf5284d8fc079a5d90195f2bebd97
|
4a2bff98ad5d6ad7ce3be718aba4137ee294a7e6
|
/DiasUteisPreco.R
|
0b01c015c131f6f0f07f7fcd9f0726640f7a6aa3
|
[] |
no_license
|
pmeno/TG_2
|
33162ea92d968e5561091c072c7df3567435fec5
|
b476d9b164915cfb156e863077604528f7a9a41d
|
refs/heads/main
| 2023-06-11T21:04:22.298308
| 2021-07-09T22:07:28
| 2021-07-09T22:07:28
| 356,704,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 207
|
r
|
DiasUteisPreco.R
|
# For each year, compute the price date: 110 business days after 1 January
# on the Brazil/ANBIMA calendar.
library(data.table)  # data.table()
library(bizdays)     # offset() and the 'Brazil/ANBIMA' calendar

DiasUteisPreco <- function(anos)
{
  diasUteis <- data.table(ano = anos)
  # shift each 1 January forward by 110 business days
  diasUteis[, dataPreco := offset(as.Date(paste0(anos, '-01-01')), 110, 'Brazil/ANBIMA')]
  du <- diasUteis[, dataPreco]
  du
}
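# Illustrative usage (a sketch; assumes the 'Brazil/ANBIMA' calendar is
# available, which recent versions of bizdays register on load):
# DiasUteisPreco(2020:2022)  # one price date per year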
|
21944ccd28abb5ab6a1db1d538bd3bf6a2d56f90
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/EGRET/R/plotFour.R
|
ae22caa8d033adde7b25150eb63ce34695384795
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,407
|
r
|
plotFour.R
|
#' Makes four graphs of streamflow statistics on a single page
#'
#' @description
#' Part of the flowHistory system. The four statistics are 1-day maximum, annual mean, annual 7-day minimum, and the running standard deviation of the log daily discharge values.
#'
#' Although there are a lot of optional arguments to this function, most are set to a logical default.
#'
#' Data come from named list, which contains a Daily dataframe with the daily flow data,
#' and an INFO dataframe with metadata.
#'
#' @param eList named list with at least Daily and INFO dataframes
#' @param yearStart A numeric value for year in which the graph should start, default is NA, which indicates that the graph should start with first annual value
#' @param yearEnd A numeric value for year in which the graph should end, default is NA, which indicates that the graph should end with last annual value
#' @param printTitle logical variable, if TRUE title is printed, if FALSE title is not printed, default is TRUE
#' @param runoff logical variable, if TRUE the streamflow data are converted to runoff values in mm/day
#' @param qUnit object of qUnit class \code{\link{printqUnitCheatSheet}}, or numeric representing the short code, or character representing the descriptive name.
#' @param window numeric which is the full width, in years, of the time window over which the standard deviation is computed, default = 15
#' @param cex numerical value giving the amount by which plotting symbols should be magnified
#' @param cex.main magnification to be used for main titles relative to the current setting of cex
#' @param cex.axis magnification to be used for axis annotation relative to the current setting of cex
#' @param col color of points on plot, see ?par 'Color Specification'
#' @param lwd numeric, line width
#' @param \dots arbitrary graphical parameters that will be passed to genericEGRETDotPlot function (see ?par for options)
#' @keywords graphics streamflow statistics
#' @export
#' @seealso \code{\link{plotFlowSingle}}
#' @examples
#' eList <- Choptank_eList
#' \dontrun{
#' #Water year:
#' plotFour(eList)
#' # Graphs consisting of Jun-Aug
#' eList <- setPA(eList,paStart=6,paLong=3)
#' plotFour(eList)
#' }
plotFour<-function (eList,
yearStart = NA, yearEnd = NA, printTitle = TRUE, runoff = FALSE,
qUnit = 1, window=15, cex = 0.8, cex.axis = 1.2,cex.main=1.2,
col="black", lwd=1,...) {
localINFO <- getInfo(eList)
localDaily <- getDaily(eList)
localAnnualSeries <- makeAnnualSeries(eList)
par(mfcol = c(2, 2), oma = c(0, 1.7, 6, 1.7))
setYearStart <- if (is.na(yearStart)) {
min(localAnnualSeries[1, , ], na.rm = TRUE)
} else {
yearStart
}
setYearEnd <- if (is.na(yearEnd)) {
max(localAnnualSeries[1, , ], na.rm = TRUE)
} else {
yearEnd
}
plotFlowSingle(eList, istat = 8, yearStart = setYearStart, yearEnd = setYearEnd,
tinyPlot = TRUE, runoff = runoff, qUnit = qUnit, printPA = FALSE,
printIstat = TRUE, printStaName = FALSE,cex=cex, cex.main=1,
cex.axis = cex.axis, col=col,lwd=lwd,...)
plotFlowSingle(eList, istat = 2, yearStart = setYearStart, yearEnd = setYearEnd,
tinyPlot = TRUE, runoff = runoff, qUnit = qUnit, printPA = FALSE,
printIstat = TRUE, printStaName = FALSE,cex=cex, cex.main=1,
cex.axis = cex.axis, col=col,lwd=lwd, ...)
plotFlowSingle(eList, istat = 5, yearStart = setYearStart, yearEnd = setYearEnd,
tinyPlot = TRUE, runoff = runoff, qUnit = qUnit, printPA = FALSE,
printIstat = TRUE, printStaName = FALSE,cex=cex, cex.main=1,
cex.axis = cex.axis, col=col,lwd=lwd, ...)
plotSDLogQ(eList, yearStart = setYearStart, yearEnd = setYearEnd, window = window,
tinyPlot = TRUE, printPA = FALSE,
printStaName = FALSE, cex=cex, cex.main=1,
cex.axis = cex.axis, col=col,lwd=lwd, ...)
textPA <- setSeasonLabelByUser(paStartInput = localINFO$paStart,
paLongInput = localINFO$paLong)
title <- if (printTitle)
paste(localINFO$shortName, "\n", textPA)
mtext(title, outer = TRUE, font = 2,cex=cex.main)
par(mfcol = c(1, 1), oma = c(0, 0, 0, 0))
}
|
25952b3ced7992a05958e1e3d89f818653e381e2
|
5daf36d44ea5e702a34a346be2fee101e5d72e10
|
/man/SkeletonCohortCharacterization.Rd
|
4f19d10963866e2a677f174f66cbcf282da61a10
|
[
"Apache-2.0"
] |
permissive
|
OHDSI/SkeletonCohortCharacterization
|
786e6ca442a862b419868fe78e46a0dda636cb13
|
7d44dbcea272fa63c9108c98cbd7e1f906fc1798
|
refs/heads/master
| 2023-05-11T12:52:25.973489
| 2022-02-15T15:40:57
| 2022-02-15T15:41:21
| 176,298,556
| 2
| 4
|
Apache-2.0
| 2023-05-02T04:43:06
| 2019-03-18T14:02:34
|
Java
|
UTF-8
|
R
| false
| true
| 653
|
rd
|
SkeletonCohortCharacterization.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Main.R
\docType{package}
\name{SkeletonCohortCharacterization}
\alias{SkeletonCohortCharacterization}
\alias{SkeletonCohortCharacterization-package}
\title{This package is a skeleton for building your own packages with Cohort Characterization analyses. The package can run at any site that has access to an observational database in the Common Data Model.}
\description{
This package is a skeleton for building your own packages with Cohort Characterization analyses. The package can run at any site that has access to an observational database in the Common Data Model.
}
|
98bffcfa9f8384dc0feda54db6be83e79a1c8961
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/eseis/examples/plot_spectrum.Rd.R
|
cdca781ad8900f6abced825d65f0aa08118db8aa
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 361
|
r
|
plot_spectrum.Rd.R
|
library(eseis)
### Name: plot_spectrum
### Title: Plot a spectrum of a seismic signal
### Aliases: plot_spectrum
### Keywords: eseis
### ** Examples
## load example data set
data(rockfall)
## calculate spectrum
spectrum_rockfall <- signal_spectrum(data = rockfall_eseis)
## plot data set with lower resolution
plot_spectrum(data = spectrum_rockfall)
|
f46e713c1f12e5d8eead222454d3574a23e39422
|
ef7738f156befa48a1ef90fa4c05623d75c892df
|
/tests/testthat/test-extract.R
|
04957eb6f90828e270d076a8095a62798177cebd
|
[] |
no_license
|
leifeld/texreg
|
7cb8d1ed1d447f201406bbfabfab715e7872f0d2
|
d1d4a8b3e915f310d426c7b1be662867db9847dc
|
refs/heads/master
| 2023-07-13T02:36:08.352409
| 2023-06-26T21:32:10
| 2023-06-26T21:32:10
| 75,412,704
| 112
| 52
| null | 2023-03-17T20:39:06
| 2016-12-02T16:35:10
|
R
|
UTF-8
|
R
| false
| false
| 41,293
|
r
|
test-extract.R
|
context("extract methods")
suppressPackageStartupMessages(library("texreg"))
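# Each extract() method returns a "texreg" S4 object; the tests below check
# the lengths and values of its slots (coef, coef.names, se, pvalues,
# ci.low/ci.up, gof, gof.names, gof.decimal) for models from many packages.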
# Arima (stats) ----
test_that("extract Arima objects from the stats package", {
testthat::skip_on_cran()
set.seed(12345)
m <- arima(USAccDeaths,
order = c(0, 1, 1),
seasonal = list(order = c(0, 1, 1)))
tr <- extract(m)
expect_length(tr@coef.names, 2)
expect_length(tr@coef, 2)
expect_length(tr@se, 2)
expect_length(tr@pvalues, 2)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_length(tr@gof, 4)
expect_length(tr@gof.names, 4)
expect_length(tr@gof.decimal, 4)
expect_equivalent(which(tr@gof.decimal), 1:3)
expect_equivalent(which(tr@pvalues < 0.05), 1:2)
expect_equivalent(dim(matrixreg(m)), c(9, 2))
})
# forecast_ARIMA (forecast) ----
test_that("extract forecast_ARIMA objects from the forecast package", {
testthat::skip_on_cran()
skip_if_not_installed("forecast")
require("forecast")
set.seed(12345)
air.model <- Arima(window(AirPassengers, end = 1956 + 11 / 12),
order = c(0, 1, 1),
seasonal = list(order = c(0, 1, 1), period = 12),
lambda = 0)
tr <- extract(air.model)
expect_length(tr@coef.names, 2)
expect_length(tr@coef, 2)
expect_length(tr@se, 2)
expect_length(tr@pvalues, 2)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_length(tr@gof, 5)
expect_length(tr@gof.names, 5)
expect_length(tr@gof.decimal, 5)
expect_equivalent(which(tr@gof.decimal), 1:4)
expect_equivalent(which(tr@pvalues < 0.05), 1:2)
expect_equivalent(dim(matrixreg(air.model)), c(10, 2))
m1 <- arima(USAccDeaths,
order = c(0, 1, 1),
seasonal = list(order = c(0, 1, 1)))
m2 <- Arima(USAccDeaths,
order = c(0, 1, 1),
seasonal = list(order = c(0, 1, 1)))
expect_s3_class(m1, "Arima")
expect_s3_class(m2, "Arima")
expect_s3_class(m2, "forecast_ARIMA")
m <- matrixreg(list(m1, m2))
expect_equivalent(dim(m), c(10, 3))
expect_equivalent(m[2:9, 2], m[2:9, 3])
expect_equivalent(m[10, 1], "AICc")
})
# bergm (Bergm) ----
test_that("extract bergm objects from the Bergm package", {
testthat::skip_on_cran()
suppressWarnings(skip_if_not_installed("Bergm", minimum_version = "5.0.2"))
require("Bergm")
set.seed(12345)
data(florentine)
suppressWarnings(suppressMessages(
p.flo <- bergm(flomarriage ~ edges + kstar(2),
burn.in = 10,
aux.iters = 30,
main.iters = 30,
gamma = 1.2)))
tr <- extract(p.flo)
expect_length(tr@se, 0)
expect_length(tr@pvalues, 0)
expect_length(tr@ci.low, 2)
expect_length(tr@ci.up, 2)
expect_length(tr@gof, 0)
expect_length(tr@coef, 2)
expect_equivalent(dim(matrixreg(p.flo)), c(5, 2))
})
# bife (bife) ----
test_that("extract bife objects from the bife package", {
testthat::skip_on_cran()
skip_if_not_installed("bife", minimum_version = "0.7")
require("bife")
set.seed(12345)
mod <- bife(LFP ~ I(AGE^2) + log(INCH) + KID1 + KID2 + KID3 + factor(TIME) | ID, psid)
tr <- extract(mod)
expect_length(tr@coef.names, 13)
expect_length(tr@coef, 13)
expect_length(tr@se, 13)
expect_length(tr@pvalues, 13)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_length(tr@gof, 3)
expect_length(tr@gof.names, 3)
expect_length(tr@gof.decimal, 3)
expect_equivalent(which(tr@gof.decimal), 1:2)
expect_equivalent(which(tr@pvalues < 0.05), c(1:4, 8:13))
expect_equivalent(dim(matrixreg(mod)), c(30, 2))
})
# brmsfit (brms) ----
test_that("extract brmsfit objects from the brms package", {
testthat::skip_on_cran()
skip_if_not_installed("brms", minimum_version = "2.8.8")
skip_if_not_installed("coda", minimum_version = "0.19.2")
require("brms")
require("coda")
# example 2 from brm help page; see ?brm
sink(nullfile())
suppressMessages(
fit2 <- brm(rating ~ period + carry + cs(treat),
data = inhaler, family = sratio("logit"),
prior = set_prior("normal(0,5)"), chains = 1))
sink()
suppressWarnings(tr <- extract(fit2))
expect_length(tr@gof.names, 4)
expect_length(tr@coef, 8)
expect_length(tr@se, 8)
expect_length(tr@pvalues, 0)
expect_length(tr@ci.low, 8)
expect_length(tr@ci.up, 8)
expect_equivalent(which(tr@gof.decimal), c(1, 3, 4))
suppressWarnings(expect_equivalent(dim(matrixreg(fit2)), c(21, 2)))
# example 1 from brm help page; see ?brm
bprior1 <- prior(student_t(5, 0, 10), class = b) + prior(cauchy(0, 2), class = sd)
sink(nullfile())
suppressMessages(
fit1 <- brm(count ~ zAge + zBase * Trt + (1|patient),
data = epilepsy,
family = poisson(),
prior = bprior1))
sink()
expect_warning(suppressMessages(tr <- extract(fit1, use.HDI = TRUE, reloo = TRUE)))
expect_length(tr@gof.names, 5)
expect_length(tr@coef, 5)
expect_length(tr@se, 5)
expect_length(tr@pvalues, 0)
expect_length(tr@ci.low, 5)
expect_length(tr@ci.up, 5)
expect_equivalent(which(tr@gof.decimal), c(1:2, 4:5))
expect_equivalent(suppressWarnings(dim(matrixreg(fit1))), c(16, 2))
})
# btergm (btergm) ----
test_that("extract btergm objects from the btergm package", {
testthat::skip_on_cran()
skip_if_not_installed("btergm", minimum_version = "1.10.10")
set.seed(5)
networks <- list()
for (i in 1:10) { # create 10 random networks with 10 actors
mat <- matrix(rbinom(100, 1, .25), nrow = 10, ncol = 10)
diag(mat) <- 0 # loops are excluded
networks[[i]] <- mat # add network to the list
}
covariates <- list()
for (i in 1:10) { # create 10 matrices as covariate
mat <- matrix(rnorm(100), nrow = 10, ncol = 10)
covariates[[i]] <- mat # add matrix to the list
}
suppressWarnings(fit <- btergm::btergm(networks ~ edges + istar(2) + edgecov(covariates), R = 100, verbose = FALSE))
tr <- extract(fit)
expect_length(tr@se, 0)
expect_length(tr@pvalues, 0)
expect_length(tr@ci.low, 3)
expect_length(tr@ci.up, 3)
expect_length(tr@gof, 1)
expect_length(tr@coef, 3)
expect_equivalent(dim(matrixreg(fit)), c(8, 2))
expect_true(all(tr@ci.low < tr@coef))
expect_true(all(tr@coef < tr@ci.up))
})
# clm (ordinal) ----
test_that("extract clm objects from the ordinal package", {
testthat::skip_on_cran()
skip_if_not_installed("ordinal", minimum_version = "2019.12.10")
set.seed(12345)
fit <- ordinal::clm(Species ~ Sepal.Length, data = iris)
tr <- extract(fit)
expect_length(tr@coef.names, 3)
expect_length(tr@coef, 3)
expect_length(tr@se, 3)
expect_length(tr@pvalues, 3)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_length(tr@gof, 4)
expect_length(tr@gof.names, 4)
expect_length(tr@gof.decimal, 4)
expect_equivalent(which(tr@gof.decimal), 1:3)
expect_equivalent(which(tr@pvalues < 0.05), 1:3)
expect_equivalent(dim(matrixreg(fit)), c(11, 2))
})
# dynlm (dynlm) ----
test_that("extract dynlm objects from the dynlm package", {
testthat::skip_on_cran()
skip_if_not_installed("dynlm")
skip_if_not_installed("datasets")
require("dynlm")
set.seed(12345)
data("UKDriverDeaths", package = "datasets")
uk <- log10(UKDriverDeaths)
dfm <- dynlm(uk ~ L(uk, 1) + L(uk, 12))
tr <- extract(dfm, include.rmse = TRUE)
expect_length(tr@coef.names, 3)
expect_length(tr@coef, 3)
expect_length(tr@se, 3)
expect_length(tr@pvalues, 3)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_length(tr@gof, 4)
expect_length(tr@gof.names, 4)
expect_length(tr@gof.decimal, 4)
expect_equivalent(which(tr@gof.decimal), c(1, 2, 4))
expect_equivalent(which(tr@pvalues < 0.05), 2:3)
expect_equivalent(dim(matrixreg(dfm)), c(10, 2))
})
# ergm (ergm) ----
test_that("extract ergm objects from the ergm package", {
testthat::skip_on_cran()
skip_if_not_installed("ergm", minimum_version = "4.1.2")
require("ergm")
set.seed(12345)
data(florentine)
suppressMessages(gest <- ergm(flomarriage ~ edges + absdiff("wealth")))
tr1 <- extract(gest)
expect_length(tr1@coef.names, 2)
expect_length(tr1@coef, 2)
expect_length(tr1@se, 2)
expect_length(tr1@pvalues, 2)
expect_length(tr1@ci.low, 0)
expect_length(tr1@ci.up, 0)
expect_length(tr1@gof, 3)
expect_length(tr1@gof.names, 3)
expect_length(tr1@gof.decimal, 3)
expect_equivalent(which(tr1@gof.decimal), 1:3)
expect_equivalent(dim(matrixreg(gest)), c(8, 2))
data(molecule)
molecule %v% "atomic type" <- c(1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3,
3, 3, 3, 3, 3)
suppressMessages(gest <- ergm(molecule ~ edges + kstar(2) + triangle +
nodematch("atomic type")))
tr2 <- extract(gest)
expect_length(tr2@coef.names, 4)
expect_length(tr2@coef, 4)
expect_length(tr2@se, 4)
expect_length(tr2@pvalues, 4)
expect_length(tr2@ci.low, 0)
expect_length(tr2@ci.up, 0)
expect_length(tr2@gof, 3)
expect_length(tr2@gof.names, 3)
expect_length(tr2@gof.decimal, 3)
expect_equivalent(which(tr2@gof.decimal), 1:3)
expect_equivalent(dim(matrixreg(gest)), c(12, 2))
})
# feglm (alpaca) ----
test_that("extract feglm objects from the alpaca package", {
testthat::skip_on_cran()
skip_if_not_installed("alpaca", minimum_version = "0.3.2")
require("alpaca")
set.seed(12345)
data <- simGLM(1000L, 20L, 1805L, model = "logit")
mod <- feglm(y ~ x1 + x2 + x3 | i + t, data)
tr <- extract(mod)
expect_length(tr@coef.names, 3)
expect_length(tr@coef, 3)
expect_length(tr@se, 3)
expect_length(tr@pvalues, 3)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_length(tr@gof, 4)
expect_length(tr@gof.names, 4)
expect_length(tr@gof.decimal, 4)
expect_equivalent(which(tr@gof.decimal), 1)
expect_equivalent(which(tr@pvalues < 0.05), 1:3)
expect_equivalent(dim(matrixreg(mod)), c(11, 2))
})
# feis (feisr) ----
test_that("extract feis objects from the feisr package", {
testthat::skip_on_cran()
skip_if_not_installed("feisr", minimum_version = "1.0.1")
require("feisr")
set.seed(12345)
data("mwp", package = "feisr")
feis1.mod <- feis(lnw ~ marry | exp, data = mwp, id = "id")
feis2.mod <- feis(lnw ~ marry + enrol + as.factor(yeargr) | exp,
data = mwp,
id = "id")
tr <- extract(feis1.mod)
expect_equivalent(tr@coef, 0.056, tolerance = 1e-3)
expect_equivalent(tr@se, 0.0234, tolerance = 1e-3)
expect_equivalent(tr@pvalues, 0.0165, tolerance = 1e-3)
expect_equivalent(tr@gof, c(0.002, 0.002, 3100, 268, 0.312), tolerance = 1e-3)
expect_length(tr@gof.names, 5)
tr2 <- extract(feis2.mod)
expect_length(tr2@coef, 6)
expect_length(which(tr2@pvalues < 0.05), 2)
expect_length(which(tr2@gof.decimal), 3)
})
# felm (lfe) ----
test_that("extract felm objects from the lfe package", {
testthat::skip_on_cran()
skip_if_not_installed("lfe", minimum_version = "2.8.5")
require("lfe")
set.seed(12345)
x <- rnorm(1000)
x2 <- rnorm(length(x))
id <- factor(sample(20, length(x), replace = TRUE))
firm <- factor(sample(13, length(x),replace = TRUE))
id.eff <- rnorm(nlevels(id))
firm.eff <- rnorm(nlevels(firm))
u <- rnorm(length(x))
y <- x + 0.5 * x2 + id.eff[id] + firm.eff[firm] + u
est <- felm(y ~ x + x2 | id + firm)
tr <- extract(est)
expect_equivalent(tr@coef, c(1.0188, 0.5182), tolerance = 1e-2)
expect_equivalent(tr@se, c(0.032, 0.032), tolerance = 1e-2)
expect_equivalent(tr@pvalues, c(0.00, 0.00), tolerance = 1e-2)
expect_equivalent(tr@gof, c(1000, 0.7985, 0.575, 0.792, 0.560, 20, 13), tolerance = 1e-2)
expect_length(tr@gof.names, 7)
expect_length(tr@coef, 2)
expect_equivalent(which(tr@pvalues < 0.05), 1:2)
expect_equivalent(which(tr@gof.decimal), 2:5)
# check exclusion of projected model statistics
tr <- extract(est, include.proj.stats = FALSE)
expect_length(tr@gof.names, 5)
expect_false(any(grepl('proj model', tr@gof.names, fixed = TRUE)))
# without fixed effects
OLS1 <- felm(Sepal.Length ~ Sepal.Width |0|0|0, data = iris)
tr1 <- extract(OLS1)
expect_length(tr1@gof, 5)
})
# fixest (fixest) ----
test_that("extract fixest objects created with the fixest package", {
testthat::skip_on_cran()
skip_if_not_installed("fixest", minimum_version = "0.10.5")
require("fixest")
# test ordinary least squares with multiple fixed effects
set.seed(12345)
x <- rnorm(1000)
data <- data.frame(
x = x,
x2 = rnorm(length(x)),
id = factor(sample(20, length(x), replace = TRUE)),
firm = factor(sample(13, length(x),replace = TRUE))
)
id.eff <- rnorm(nlevels(data$id))
firm.eff <- rnorm(nlevels(data$firm))
u <- rnorm(length(x))
data$y <- with(data, x + 0.5 * x2 + id.eff[id] + firm.eff[firm] + u)
est <- feols(y ~ x + x2 | id + firm, data = data)
tr <- extract(est)
expect_equivalent(tr@coef, c(1.0188, 0.5182), tolerance = 1e-2)
# NOTE: standard errors differ from default produced by lfe (tested above)
# see https://cran.r-project.org/web/packages/fixest/vignettes/standard_errors.html
expect_equivalent(tr@se, c(0.021, 0.032), tolerance = 1e-2)
expect_equivalent(tr@pvalues, c(0.00, 0.00), tolerance = 1e-2)
expect_equivalent(tr@gof, c(1000, 20, 13, 0.7985, 0.575, 0.792, 0.57), tolerance = 1e-2)
expect_lte(length(tr@gof.names), 7)
expect_gte(length(tr@gof.names), 5)
expect_length(tr@coef, 2)
expect_equivalent(which(tr@pvalues < 0.05), 1:2)
# test generalized linear model
data$y <- rpois(length(data$x), exp(data$x + data$x2 + id.eff[data$id]))
est <- fepois(y ~ x + x2 | id, data = data)
tr <- extract(est)
expect_equivalent(tr@coef, c(1.00, 1.00), tolerance = 1e-2)
expect_equivalent(tr@se, c(0.01, 0.02), tolerance = 1e-2)
expect_equivalent(tr@pvalues, c(0.00, 0.00), tolerance = 1e-2)
expect_equivalent(tr@gof, c(1000, 20, 955.4, -1479.6, 0.83), tolerance = 1e-2)
expect_length(tr@gof.names, 5)
expect_length(tr@coef, 2)
expect_equivalent(which(!tr@gof.decimal), 1:2)
})
# gamlssZadj (gamlss.inf) ----
test_that("extract gamlssZadj objects from the gamlss.inf package", {
testthat::skip_on_cran()
skip_if_not_installed("gamlss.inf", minimum_version = "1.0.1")
require("gamlss.inf")
set.seed(12345)
sink(nullfile())
y0 <- rZAGA(1000, mu = .3, sigma = .4, nu = .15)
g0 <- gamlss(y0 ~ 1, family = ZAGA)
t0 <- gamlssZadj(y = y0, mu.formula = ~1, family = GA, trace = TRUE)
sink()
tr <- extract(t0)
expect_length(tr@gof.names, 2)
expect_length(tr@coef, 3)
expect_length(tr@se, 3)
expect_length(tr@pvalues, 3)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_equivalent(which(tr@gof.decimal), 2)
expect_equivalent(tr@coef.names, c("$\\mu$ (Intercept)",
"$\\sigma$ (Intercept)",
"$\\nu$ (Intercept)"))
})
# glm.cluster (miceadds) ----
test_that("extract glm.cluster objects from the miceadds package", {
testthat::skip_on_cran()
skip_if_not_installed("miceadds", minimum_version = "3.8.9")
require("miceadds")
data(data.ma01)
dat <- data.ma01
dat$highmath <- 1 * (dat$math > 600)
mod2 <- miceadds::glm.cluster(data = dat,
formula = highmath ~ hisei + female,
cluster = "idschool",
family = "binomial")
tr <- extract(mod2)
expect_equivalent(tr@coef, c(-2.76, 0.03, -0.15), tolerance = 1e-2)
expect_equivalent(tr@se, c(0.25, 0.00, 0.10), tolerance = 1e-2)
expect_equivalent(tr@pvalues, c(0.00, 0.00, 0.13), tolerance = 1e-2)
expect_equivalent(tr@gof, c(3108.095, 3126.432, -1551.047, 3102.095, 3336.000), tolerance = 1e-2)
expect_length(tr@gof.names, 5)
expect_length(tr@coef, 3)
expect_equivalent(which(tr@pvalues < 0.05), 1:2)
expect_equivalent(which(tr@gof.decimal), 1:4)
})
# glmerMod (lme4) ----
test_that("extract glmerMod objects from the lme4 package", {
testthat::skip_on_cran()
skip_if_not_installed("lme4")
require("lme4")
set.seed(12345)
gm1 <- glmer(cbind(incidence, size - incidence) ~ period + (1 | herd),
data = cbpp,
family = binomial)
expect_equivalent(class(gm1)[1], "glmerMod")
tr <- extract(gm1, include.dic = TRUE, include.deviance = TRUE)
expect_equivalent(tr@coef, c(-1.40, -0.99, -1.13, -1.58), tolerance = 1e-2)
expect_equivalent(tr@se, c(0.23, 0.30, 0.32, 0.42), tolerance = 1e-2)
expect_equivalent(tr@pvalues, c(0, 0, 0, 0), tolerance = 1e-2)
expect_length(tr@gof.names, 8)
expect_equivalent(which(tr@gof.decimal), c(1:5, 8))
expect_length(which(grepl("Var", tr@gof.names)), 1)
expect_length(which(grepl("Cov", tr@gof.names)), 0)
tr_profile <- extract(gm1, method = "profile", nsim = 5)
tr_boot <- suppressWarnings(extract(gm1, method = "boot", nsim = 5))
tr_wald <- extract(gm1, method = "Wald")
expect_length(tr_profile@se, 0)
expect_length(tr_profile@ci.low, 4)
expect_length(tr_profile@ci.up, 4)
expect_length(tr_boot@se, 0)
expect_length(tr_boot@ci.low, 4)
expect_length(tr_boot@ci.up, 4)
expect_length(tr_wald@se, 0)
expect_length(tr_wald@ci.low, 4)
expect_length(tr_wald@ci.up, 4)
})
# glmmTMB (glmmTMB) ----
test_that("extract glmmTMB objects from the glmmTMB package", {
testthat::skip_on_cran()
skip_if_not_installed("glmmTMB", minimum_version = "1.0.1")
require("glmmTMB")
set.seed(12345)
m2 <- glmmTMB(count ~ spp + mined + (1|site),
zi = ~ spp + mined,
family = nbinom2, data = Salamanders)
tr <- extract(m2)
expect_length(tr@gof.names, 5)
expect_length(tr@coef, 16)
expect_length(tr@se, 16)
expect_length(tr@pvalues, 16)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_equivalent(which(tr@gof.decimal), c(1, 2, 5))
tr <- extract(m2, beside = TRUE)
expect_length(tr[[1]]@gof.names, 5)
expect_length(tr[[1]]@coef, 8)
expect_length(tr[[2]]@coef, 8)
expect_length(tr[[1]]@se, 8)
expect_length(tr[[2]]@se, 8)
expect_length(tr[[1]]@pvalues, 8)
expect_length(tr[[2]]@pvalues, 8)
expect_length(tr, 2)
expect_equivalent(which(tr[[2]]@gof.decimal), c(1, 2, 5))
})
# ivreg (AER) ----
test_that("extract ivreg objects from the AER package", {
testthat::skip_on_cran()
skip_if_not_installed("AER")
require("AER")
set.seed(12345)
data("CigarettesSW", package = "AER")
CigarettesSW$rprice <- with(CigarettesSW, price / cpi)
CigarettesSW$rincome <- with(CigarettesSW, income/population / cpi)
CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax) / cpi)
fm <- ivreg(log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax/cpi),
data = CigarettesSW,
subset = year == "1995")
tr1 <- extract(fm, vcov = sandwich, df = Inf, diagnostics = TRUE, include.rmse = TRUE)
fm2 <- ivreg(log(packs) ~ log(rprice) | tdiff, data = CigarettesSW,
subset = year == "1995")
tr2 <- extract(fm2)
expect_equivalent(tr1@coef, c(9.89, -1.28, 0.28), tolerance = 1e-2)
expect_equivalent(tr1@se, c(0.93, 0.24, 0.25), tolerance = 1e-2)
expect_equivalent(tr1@pvalues, c(0.00, 0.00, 0.25), tolerance = 1e-2)
expect_equivalent(tr1@gof, c(0.43, 0.40, 48, 0.19), tolerance = 1e-2)
expect_length(tr1@gof.names, 4)
expect_length(tr2@coef, 2)
expect_length(which(tr2@pvalues < 0.05), 2)
expect_equivalent(which(tr2@gof.decimal), 1:2)
})
# lm (stats) ----
test_that("extract lm objects from the stats package", {
set.seed(12345)
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
lm.D90 <- lm(weight ~ group - 1)
tr <- extract(lm.D9)
expect_equivalent(tr@coef, c(5.032, -0.371), tolerance = 1e-3)
expect_equivalent(tr@se, c(0.22, 0.31), tolerance = 1e-2)
expect_equivalent(tr@pvalues, c(0.00, 0.25), tolerance = 1e-2)
expect_equivalent(tr@gof, c(0.07, 0.02, 20), tolerance = 1e-2)
expect_length(tr@gof.names, 3)
tr2 <- extract(lm.D90, include.rmse = TRUE)
expect_length(tr2@coef, 2)
expect_length(which(tr2@pvalues < 0.05), 2)
expect_length(which(tr2@gof.decimal), 3)
})
# lm.cluster (miceadds) ----
test_that("extract lm.cluster objects from the miceadds package", {
testthat::skip_on_cran()
skip_if_not_installed("miceadds", minimum_version = "3.8.9")
require("miceadds")
data(data.ma01)
dat <- data.ma01
mod1 <- miceadds::lm.cluster(data = dat,
formula = read ~ hisei + female,
cluster = "idschool")
tr <- extract(mod1)
expect_equivalent(tr@coef, c(418.80, 1.54, 35.70), tolerance = 1e-2)
expect_equivalent(tr@se, c(6.45, 0.11, 3.81), tolerance = 1e-2)
expect_equivalent(tr@pvalues, c(0.00, 0.00, 0.00), tolerance = 1e-2)
expect_equivalent(tr@gof, c(0.15, 0.15, 3180), tolerance = 1e-2)
expect_length(tr@gof.names, 3)
expect_length(tr@coef, 3)
expect_equivalent(which(tr@pvalues < 0.05), 1:3)
expect_equivalent(which(tr@gof.decimal), 1:2)
})
# lmerMod (lme4) ----
test_that("extract lmerMod objects from the lme4 package", {
testthat::skip_on_cran()
skip_if_not_installed("lme4")
require("lme4")
set.seed(12345)
fm1 <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy)
fm1_ML <- update(fm1, REML = FALSE)
fm2 <- lmer(Reaction ~ Days + (Days || Subject), sleepstudy)
tr1 <- extract(fm1, include.dic = TRUE, include.deviance = TRUE)
tr1_ML <- extract(fm1_ML, include.dic = TRUE, include.deviance = TRUE)
tr2_profile <- extract(fm2, method = "profile", nsim = 5)
tr2_boot <- suppressWarnings(extract(fm2, method = "boot", nsim = 5))
tr2_wald <- extract(fm2, method = "Wald")
expect_equivalent(class(fm1)[1], "lmerMod")
expect_equivalent(tr1@coef, c(251.41, 10.47), tolerance = 1e-2)
expect_equivalent(tr1@coef, tr1_ML@coef, tolerance = 1e-2)
expect_equivalent(tr1@se, c(6.82, 1.55), tolerance = 1e-2)
expect_equivalent(tr1@pvalues, c(0, 0), tolerance = 1e-2)
expect_equivalent(tr1@gof, c(1755.63, 1774.79, 1760.25, 1751.94, -871.81, 180, 18, 611.90, 35.08, 9.61, 654.94), tolerance = 1e-2)
expect_length(tr1@gof.names, 11)
expect_equivalent(which(tr1@gof.decimal), c(1:5, 8:11))
expect_equivalent(tr1@coef, tr1_ML@coef)
expect_length(tr1_ML@gof, 11)
expect_length(tr2_profile@gof, 8)
expect_equivalent(tr1@coef, tr2_profile@coef, tolerance = 1e-2)
expect_equivalent(tr1@coef, tr2_boot@coef, tolerance = 1e-2)
expect_equivalent(tr1@coef, tr2_wald@coef, tolerance = 1e-2)
expect_length(which(grepl("Var", tr1@gof.names)), 3)
expect_length(which(grepl("Var", tr2_wald@gof.names)), 3)
expect_length(which(grepl("Cov", tr1@gof.names)), 1)
expect_length(which(grepl("Cov", tr2_wald@gof.names)), 0)
})
# maxLik (maxLik) ----
test_that("extract maxLik objects from the maxLik package", {
testthat::skip_on_cran()
testthat::skip_if_not_installed("maxLik", minimum_version = "1.4.8")
require("maxLik")
set.seed(12345)
# example 1 from help page
t <- rexp(100, 2)
loglik <- function(theta) log(theta) - theta * t
gradlik <- function(theta) 1 / theta - t
hesslik <- function(theta) -100 / theta^2
sink(nullfile())
a <- maxLik(loglik, start = 1, control = list(printLevel = 2))
sink()
tr1 <- extract(a)
expect_length(tr1@coef.names, 1)
expect_length(tr1@coef, 1)
expect_length(tr1@se, 1)
expect_length(tr1@pvalues, 1)
expect_length(tr1@ci.low, 0)
expect_length(tr1@ci.up, 0)
expect_true(!any(is.na(tr1@coef)))
expect_length(tr1@gof, 2)
expect_length(tr1@gof.names, 2)
expect_length(tr1@gof.decimal, 2)
expect_equivalent(which(tr1@gof.decimal), 1:2)
# example 2 from help page
b <- maxLik(loglik, gradlik, hesslik, start = 1,
control = list(tol = -1, reltol = 1e-12, gradtol = 1e-12))
tr2 <- extract(b)
expect_length(tr2@coef.names, 1)
expect_length(tr2@coef, 1)
expect_length(tr2@se, 1)
expect_length(tr2@pvalues, 1)
expect_length(tr2@ci.low, 0)
expect_length(tr2@ci.up, 0)
expect_true(!any(is.na(tr2@coef)))
expect_length(tr2@gof, 2)
expect_length(tr2@gof.names, 2)
expect_length(tr2@gof.decimal, 2)
expect_equivalent(which(tr2@gof.decimal), 1:2)
# example 3 from help page
loglik <- function(param) {
mu <- param[1]
sigma <- param[2]
ll <- -0.5 * N * log(2 * pi) - N * log(sigma) - sum(0.5 * (x - mu)^2 / sigma^2)
ll
}
x <- rnorm(100, 1, 2)
N <- length(x)
res <- maxLik(loglik, start = c(0, 1))
tr3 <- extract(res)
expect_length(tr3@coef.names, 2)
expect_length(tr3@coef, 2)
expect_length(tr3@se, 2)
expect_length(tr3@pvalues, 2)
expect_length(tr3@ci.low, 0)
expect_length(tr3@ci.up, 0)
expect_true(!any(is.na(tr3@coef)))
expect_length(tr3@gof, 2)
expect_length(tr3@gof.names, 2)
expect_length(tr3@gof.decimal, 2)
expect_equivalent(which(tr3@gof.decimal), 1:2)
# example 4 from help page
resFix <- maxLik(loglik, start = c(mu = 0, sigma = 1), fixed = "sigma")
tr4 <- extract(resFix)
expect_length(tr3@coef.names, 2)
expect_length(tr3@coef, 2)
expect_length(tr3@se, 2)
expect_length(tr3@pvalues, 2)
expect_length(tr3@ci.low, 0)
expect_length(tr3@ci.up, 0)
expect_true(!any(is.na(tr3@coef)))
expect_length(tr3@gof, 2)
expect_length(tr3@gof.names, 2)
expect_length(tr3@gof.decimal, 2)
expect_equivalent(which(tr3@gof.decimal), 1:2)
})
# mlogit (mlogit) ----
test_that("extract mlogit objects from the mlogit package", {
testthat::skip_on_cran()
testthat::skip_if_not_installed("mlogit", minimum_version = "1.1.0")
require("mlogit")
set.seed(12345)
data("Fishing", package = "mlogit")
Fish <- dfidx(Fishing, varying = 2:9, shape = "wide", choice = "mode")
m <- mlogit(mode ~ price + catch | income, data = Fish)
tr1 <- extract(m)
expect_equivalent(sum(abs(tr1@coef)), 3.382753, tolerance = 1e-2)
expect_equivalent(sum(tr1@se), 0.7789933, tolerance = 1e-2)
expect_equivalent(sum(tr1@pvalues), 0.6136796, tolerance = 1e-2)
expect_equivalent(sum(tr1@gof), 2417.138, tolerance = 1e-2)
expect_length(tr1@coef, 8)
expect_length(tr1@gof, 4)
expect_equivalent(which(tr1@gof.decimal), 1:2)
expect_equivalent(tr1@gof[4], 4)
expect_equal(dim(matrixreg(tr1)), c(21, 2))
expect_warning(extract(m, beside = TRUE), "choice-specific covariates")
})
# mnlogit (mnlogit) ----
test_that("extract mnlogit models from the mnlogit package", {
testthat::skip_on_cran()
testthat::skip_if_not_installed("mnlogit", minimum_version = "1.2.6")
require("mnlogit")
set.seed(12345)
data(Fish, package = "mnlogit")
fit <- mnlogit(mode ~ price | income | catch, Fish, ncores = 1)
tr <- extract(fit)
expect_equivalent(sum(abs(tr@coef)), 13.33618, tolerance = 1e-2)
expect_equivalent(sum(tr@se), 3.059299, tolerance = 1e-2)
expect_equivalent(sum(tr@pvalues), 0.4701358, tolerance = 1e-2)
expect_equivalent(sum(tr@gof), 2407.143, tolerance = 1e-2)
expect_length(tr@coef, 11)
expect_length(tr@gof, 4)
expect_equivalent(which(tr@gof.decimal), 1:2)
expect_equivalent(tr@gof[4], 4)
expect_equal(dim(matrixreg(tr)), c(27, 2))
expect_warning(extract(fit, beside = TRUE), "choice-specific covariates")
})
# multinom (nnet) ----
test_that("extract multinom objects from the nnet package", {
testthat::skip_on_cran()
testthat::skip_if_not_installed("nnet", minimum_version = "7.3.12")
require("nnet")
# example from https://thomasleeper.com/Rcourse/Tutorials/nominalglm.html
set.seed(100)
y <- sort(sample(1:3, 600, TRUE))
x <- numeric(length = 600)
x[1:200] <- -1 * x[1:200] + rnorm(200, 4, 2)
x[201:400] <- 1 * x[201:400] + rnorm(200)
x[401:600] <- 2 * x[401:600] + rnorm(200, 2, 2)
sink(nullfile())
m1 <- multinom(y ~ x)
sink()
tr2 <- extract(m1, beside = FALSE)
tr3 <- extract(m1, beside = TRUE)
expect_equivalent(sum(abs(tr2@coef)), 6.845567, tolerance = 1e-2)
expect_equivalent(sum(tr2@se), 0.6671602, tolerance = 1e-2)
expect_equivalent(sum(tr2@pvalues), 1.677308e-16, tolerance = 1e-2)
expect_equivalent(sum(tr2@gof), 2852.451, tolerance = 1e-2)
expect_length(tr2@coef, 4)
expect_length(tr2@gof, 6)
expect_equivalent(which(tr2@gof.decimal), 1:4)
expect_equivalent(tr2@gof[6], 3)
expect_equal(dim(matrixreg(tr2)), c(15, 2))
expect_length(tr3, 2)
expect_length(tr3[[1]]@coef, 2)
expect_length(tr3[[2]]@coef, 2)
})
# nlmerMod (lme4) ----
test_that("extract nlmerMod objects from the lme4 package", {
testthat::skip_on_cran()
skip_if_not_installed("lme4")
require("lme4")
set.seed(12345)
startvec <- c(Asym = 200, xmid = 725, scal = 350)
nm1 <- nlmer(circumference ~ SSlogis(age, Asym, xmid, scal) ~ Asym|Tree,
Orange,
start = startvec)
expect_equivalent(class(nm1)[1], "nlmerMod")
expect_warning(extract(nm1, include.dic = TRUE, include.deviance = TRUE),
"falling back to var-cov estimated from RX")
tr <- suppressWarnings(extract(nm1, include.dic = TRUE, include.deviance = TRUE))
expect_equivalent(tr@coef, c(192.05, 727.90, 348.07), tolerance = 1e-2)
expect_equivalent(tr@se, c(15.58, 34.44, 26.31), tolerance = 1e-2)
expect_equivalent(tr@pvalues, c(0, 0, 0), tolerance = 1e-2)
expect_length(tr@gof.names, 9)
expect_equivalent(which(tr@gof.decimal), c(1:5, 8, 9))
expect_length(which(grepl("Var", tr@gof.names)), 2)
expect_length(which(grepl("Cov", tr@gof.names)), 0)
tr_wald <- suppressWarnings(extract(nm1, method = "Wald"))
expect_length(tr_wald@se, 0)
expect_length(tr_wald@ci.low, 3)
expect_length(tr_wald@ci.up, 3)
})
# pcce (plm) ----
test_that("extract pcce objects from the plm package", {
testthat::skip_on_cran()
skip_if_not_installed("plm", minimum_version = "2.4.1")
require("plm")
set.seed(12345)
data("Produc", package = "plm")
ccepmod <- pcce(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, model="p")
tr <- extract(ccepmod)
expect_length(tr@coef.names, 4)
expect_length(tr@coef, 4)
expect_length(tr@se, 4)
expect_length(tr@pvalues, 4)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_length(tr@gof, 4)
expect_length(tr@gof.names, 4)
expect_length(tr@gof.decimal, 4)
expect_equivalent(which(tr@gof.decimal), 1:3)
ccemgmod <- pcce(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, model="mg")
tr2 <- extract(ccemgmod)
expect_length(tr2@coef.names, 4)
expect_length(tr2@coef, 4)
expect_length(tr2@se, 4)
expect_length(tr2@pvalues, 4)
expect_length(tr2@ci.low, 0)
expect_length(tr2@ci.up, 0)
expect_length(tr2@gof, 4)
expect_length(tr2@gof.names, 4)
expect_length(tr2@gof.decimal, 4)
expect_equivalent(which(tr2@gof.decimal), 1:3)
})
# Sarlm (spatialreg) ----
test_that("extract Sarlm objects from the spatialreg package", {
testthat::skip_on_cran()
skip_if_not_installed("spatialreg", minimum_version = "1.2.1")
require("spatialreg")
set.seed(12345)
# first example from ?lagsarlm
data(oldcol, package = "spdep")
listw <- spdep::nb2listw(COL.nb, style = "W")
ev <- spatialreg::eigenw(listw)
W <- as(listw, "CsparseMatrix")
trMatc <- spatialreg::trW(W, type = "mult")
sink(nullfile())
COL.lag.eig <- spatialreg::lagsarlm(CRIME ~ INC + HOVAL,
data = COL.OLD,
listw = listw,
method = "eigen",
quiet = FALSE,
control = list(pre_eig = ev,
OrdVsign = 1))
sink()
tr <- extract(COL.lag.eig)
expect_length(tr@coef.names, 4)
expect_length(tr@coef, 4)
expect_length(tr@se, 4)
expect_length(tr@pvalues, 4)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_length(tr@gof, 7)
expect_length(tr@gof.names, 7)
expect_length(tr@gof.decimal, 7)
expect_equivalent(which(tr@gof.decimal), 3:7)
# example from ?predict.Sarlm
lw <- spdep::nb2listw(COL.nb)
COL.lag.eig2 <- COL.mix.eig <- lagsarlm(CRIME ~ INC + HOVAL,
data = COL.OLD,
lw,
type = "mixed")
tr2 <- extract(COL.lag.eig2)
expect_length(tr2@coef.names, 6)
expect_length(tr2@coef, 6)
expect_length(tr2@se, 6)
expect_length(tr2@pvalues, 6)
expect_length(tr2@ci.low, 0)
expect_length(tr2@ci.up, 0)
expect_length(tr2@gof, 7)
expect_length(tr2@gof.names, 7)
expect_length(tr2@gof.decimal, 7)
expect_equivalent(which(tr2@gof.decimal), 3:7)
})
# speedglm (speedglm) ----
test_that("extract speedglm objects from the speedglm package", {
testthat::skip_on_cran()
skip_if_not_installed("speedglm", minimum_version = "0.3.2")
require("speedglm")
set.seed(12345)
n <- 50000
k <- 80
y <- rgamma(n, 1.5, 1)
x <- round(matrix(rnorm(n * k), n, k), digits = 3)
colnames(x) <- paste("s", 1:k, sep = "")
da <- data.frame(y, x)
fo <- as.formula(paste("y ~", paste(paste("s", 1:k, sep = ""), collapse = " + ")))
m3 <- speedglm(fo, data = da, family = Gamma(log))
tr <- extract(m3)
expect_length(tr@gof.names, 5)
expect_length(tr@coef, 81)
expect_equivalent(tr@gof.names, c("AIC", "BIC", "Log Likelihood", "Deviance", "Num. obs."))
expect_equivalent(which(tr@pvalues < 0.05), c(1, 4, 5, 17, 20, 21, 43, 65, 68, 73, 80))
expect_equivalent(which(tr@gof.decimal), 1:4)
})
# speedlm (speedglm) ----
test_that("extract speedlm objects from the speedglm package", {
testthat::skip_on_cran()
skip_if_not_installed("speedglm", minimum_version = "0.3.2")
require("speedglm")
set.seed(12345)
n <- 1000
k <- 3
y <- rnorm(n)
x <- round(matrix(rnorm(n * k), n, k), digits = 3)
colnames(x) <- c("s1", "s2", "s3")
da <- data.frame(y, x)
do1 <- da[1:300, ]
do2 <- da[301:700, ]
do3 <- da[701:1000, ]
m1 <- speedlm(y ~ s1 + s2 + s3, data = do1)
m1 <- update(m1, data = do2)
m1 <- update(m1, data = do3)
tr <- extract(m1, include.fstatistic = TRUE)
expect_equivalent(tr@coef, c(0.05, 0.04, -0.01, -0.03), tolerance = 1e-2)
expect_equivalent(tr@se, c(0.03, 0.03, 0.03, 0.03), tolerance = 1e-2)
expect_equivalent(tr@pvalues, c(0.13, 0.22, 0.69, 0.39), tolerance = 1e-2)
expect_equivalent(tr@gof, c(0, 0, 1000, 0.80), tolerance = 1e-2)
expect_length(tr@gof.names, 4)
expect_length(tr@coef, 4)
expect_equivalent(which(tr@pvalues < 0.05), integer())
expect_equivalent(which(tr@gof.decimal), c(1, 2, 4))
})
# truncreg (truncreg) ----
test_that("extract truncreg objects from the truncreg package", {
testthat::skip_on_cran()
skip_if_not_installed("truncreg", minimum_version = "0.2.5")
require("truncreg")
set.seed(12345)
x <- rnorm(100, mean = 1)
y <- rnorm(100, mean = 1.3)
dta <- data.frame(x, y)
dta <- dta[y < quantile(y, 0.8), ]
model <- truncreg(y ~ x, data = dta, point = max(dta$y), direction = "right")
tr <- extract(model)
expect_equivalent(tr@coef, c(1.24, 0.05, 0.96), tolerance = 1e-2)
expect_equivalent(tr@se, c(0.25, 0.12, 0.14), tolerance = 1e-2)
expect_equivalent(tr@pvalues, c(0, 0.67, 0), tolerance = 1e-2)
expect_equivalent(tr@gof, c(80, -81.69, 169.38, 176.53), tolerance = 1e-2)
expect_length(tr@gof.names, 4)
expect_length(tr@coef, 3)
expect_equivalent(which(tr@pvalues < 0.05), c(1, 3))
expect_equivalent(which(tr@gof.decimal), 2:4)
})
# weibreg (eha) ----
test_that("extract weibreg objects from the eha package", {
testthat::skip_on_cran()
skip_if_not_installed("eha", minimum_version = "2.9.0")
require("eha")
set.seed(12345)
# stratified model example from weibreg help page
dat <- data.frame(time = c(4, 3, 1, 1, 2, 2, 3),
status = c(1, 1, 1, 0, 1, 1, 0),
x = c(0, 2, 1, 1, 1, 0, 0),
sex = c(0, 0, 0, 0, 1, 1, 1))
model <- eha::weibreg(Surv(time, status) ~ x + strata(sex), data = dat)
tr <- extract(model)
expect_length(tr@coef, 5)
expect_equivalent(class(tr@coef), "numeric")
expect_length(tr@se, 5)
expect_equivalent(class(tr@se), "numeric")
expect_length(tr@pvalues, 5)
expect_equivalent(class(tr@pvalues), "numeric")
expect_length(tr@coef.names, 5)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_length(tr@gof, 6)
expect_length(tr@gof.names, 6)
expect_length(tr@gof.decimal, 6)
expect_equivalent(tr@gof[5], 5)
expect_equivalent(which(tr@pvalues < 0.05), 2:5)
expect_equivalent(which(tr@gof.decimal), 1:3)
})
# wls (metaSEM) ----
test_that("extract wls objects from the metaSEM package", {
testthat::skip_on_cran()
skip_if_not_installed("metaSEM", minimum_version = "1.2.5.1")
require("metaSEM")
set.seed(12345)
# example 1 from wls help page: analysis of correlation structure
R1.labels <- c("a1", "a2", "a3", "a4")
R1 <- matrix(c(1.00, 0.22, 0.24, 0.18,
0.22, 1.00, 0.30, 0.22,
0.24, 0.30, 1.00, 0.24,
0.18, 0.22, 0.24, 1.00), ncol = 4, nrow = 4,
dimnames = list(R1.labels, R1.labels))
n <- 1000
acovR1 <- metaSEM::asyCov(R1, n)
model1 <- "f =~ a1 + a2 + a3 + a4"
RAM1 <- metaSEM::lavaan2RAM(model1, obs.variables = R1.labels)
wls.fit1a <- metaSEM::wls(Cov = R1, aCov = acovR1, n = n, RAM = RAM1,
cor.analysis = TRUE, intervals = "LB")
tr1 <- extract(wls.fit1a)
expect_length(tr1@coef.names, 4)
expect_length(tr1@coef, 4)
expect_length(tr1@se, 0)
expect_length(tr1@pvalues, 0)
expect_length(tr1@ci.low, 4)
expect_length(tr1@ci.up, 4)
expect_true(!any(is.na(tr1@coef)))
expect_length(tr1@gof, 11)
expect_length(tr1@gof.names, 11)
expect_length(tr1@gof.decimal, 11)
expect_equivalent(tr1@gof[8], 0.23893943, tolerance = 1e-2)
expect_equivalent(which(tr1@gof.decimal), c(1, 3, 4, 5, 6, 7, 8, 10, 11))
# example 2 from wls help page: multiple regression
R2.labels <- c("y", "x1", "x2")
R2 <- matrix(c(1.00, 0.22, 0.24,
0.22, 1.00, 0.30,
0.24, 0.30, 1.00), ncol = 3, nrow = 3,
dimnames = list(R2.labels, R2.labels))
acovR2 <- metaSEM::asyCov(R2, n)
model2 <- "y ~ x1 + x2
## Variances of x1 and x2 are 1
x1 ~~ 1*x1
x2 ~~ 1*x2
## x1 and x2 are correlated
x1 ~~ x2"
RAM2 <- metaSEM::lavaan2RAM(model2, obs.variables = R2.labels)
wls.fit2a <- metaSEM::wls(Cov = R2, aCov = acovR2, n = n, RAM = RAM2,
cor.analysis = TRUE, intervals = "LB")
tr2 <- extract(wls.fit2a)
expect_length(tr2@coef.names, 3)
expect_length(tr2@coef, 3)
expect_length(tr2@se, 0)
expect_length(tr2@pvalues, 0)
expect_length(tr2@ci.low, 3)
expect_length(tr2@ci.up, 3)
expect_true(!any(is.na(tr2@coef)))
expect_length(tr2@gof, 11)
expect_length(tr2@gof.names, 11)
expect_length(tr2@gof.decimal, 11)
expect_equivalent(tr2@gof[8], 0.0738, tolerance = 1e-2)
expect_equivalent(which(tr2@gof.decimal), c(1, 3, 4, 5, 6, 7, 8, 10, 11))
# example 3 from wls help page
R3.labels <- c("a1", "a2", "a3", "a4")
R3 <- matrix(c(1.50, 0.22, 0.24, 0.18,
0.22, 1.60, 0.30, 0.22,
0.24, 0.30, 1.80, 0.24,
0.18, 0.22, 0.24, 1.30), ncol = 4, nrow = 4,
dimnames = list(R3.labels, R3.labels))
n <- 1000
acovS3 <- metaSEM::asyCov(R3, n, cor.analysis = FALSE)
model3 <- "f =~ a1 + a2 + a3 + a4"
RAM3 <- metaSEM::lavaan2RAM(model3, obs.variables = R3.labels)
wls.fit3a <- metaSEM::wls(Cov = R3, aCov = acovS3, n = n, RAM = RAM3,
cor.analysis = FALSE)
tr3 <- extract(wls.fit3a)
expect_length(tr3@coef.names, 8)
expect_length(tr3@coef, 8)
expect_length(tr3@se, 8)
expect_length(tr3@pvalues, 8)
expect_length(tr3@ci.low, 0)
expect_length(tr3@ci.up, 0)
expect_true(!any(is.na(tr3@coef)))
expect_length(tr3@gof, 10)
expect_length(tr3@gof.names, 10)
expect_length(tr3@gof.decimal, 10)
expect_equivalent(which(tr3@gof.decimal), c(1, 3, 4, 5, 6, 7, 9, 10))
expect_true(all(tr3@pvalues < 0.05))
})
# logitr (logitr) ----
test_that("extract logitr objects from the logitr package", {
testthat::skip_on_cran()
skip_if_not_installed("logitr", minimum_version = "0.8.0")
require("logitr")
set.seed(12345)
mnl_pref <- logitr(
data = yogurt,
outcome = "choice",
obsID = "obsID",
pars = c("price", "feat", "brand")
)
tr <- extract(mnl_pref)
expect_equivalent(tr@coef, c(-0.37, 0.49, -3.72, -0.64, 0.73), tolerance = 1e-2)
expect_equivalent(tr@se, c(0.02, 0.12, 0.15, 0.05, 0.08), tolerance = 1e-2)
expect_equivalent(tr@pvalues, c(0, 0, 0, 0, 0), tolerance = 1e-2)
expect_equivalent(tr@gof, c(2412.00, -2656.89, 5323.78, 5352.72), tolerance = 1e-2)
expect_equivalent(which(tr@gof.decimal), c(2, 3, 4))
expect_equivalent(which(tr@pvalues < 0.05), seq(1, 5))
expect_length(tr@coef.names, 5)
expect_length(tr@coef, 5)
expect_length(tr@se, 5)
expect_length(tr@pvalues, 5)
expect_length(tr@ci.low, 0)
expect_length(tr@ci.up, 0)
expect_length(tr@gof.names, 4)
expect_length(tr@gof, 4)
expect_length(tr@gof.decimal, 4)
expect_equivalent(dim(matrixreg(mnl_pref)), c(15, 2))
})
|
582644baae7a094d7097cf7765d87d0126634985
|
0c382c012907bde6cf5f91dedaf236831154dfd9
|
/R/flow_cytometry.R
|
d87e91e80184fc300b1c8ea5a24dc7933d9eb31b
|
[
"MIT"
] |
permissive
|
melaniedavila/orloj
|
d9f991c0348dc22fd8aa05a3467b63758978a59b
|
acb78fdea5c5db05d0bbd5d659ccd7ff445e04d6
|
refs/heads/master
| 2021-04-30T04:34:50.162822
| 2018-02-10T01:11:06
| 2018-02-10T01:11:06
| 121,536,124
| 0
| 0
| null | 2018-02-14T16:54:43
| 2018-02-14T16:54:43
| null |
UTF-8
|
R
| false
| false
| 404
|
r
|
flow_cytometry.R
|
# flow_cytometry.R
# Pre-processing flow cytometry data.
#' Preprocess a flow cytometry sample.
#'
#' Quality control, cleaning, and transformation for a flow cytometry sample.
#'
#' @inheritParams preprocess
#' @return The sample after the above steps are done.
flowPreprocess <- function(sample) {
if (!isSample(sample)) stop("Expecting an Astrolabe sample")
stop("flowPreprocess not implemented yet")
}
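# Intended usage once implemented (a sketch based on the docstring above):
# sample <- flowPreprocess(sample)  # `sample` must be an Astrolabe sample;
# the call currently stops with "flowPreprocess not implemented yet".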
|
2e1eecab751d2b4e2a5748f980e925f7b5dffac0
|
46d2a4c95999f3634e15fc607d7b4fcf40075702
|
/r_packages/np/doc/np.R
|
ed0b864064bce3bdf1c85fcd136d07d78af9895b
|
[] |
no_license
|
gagejane/NYS-fostering
|
9751b8980db55bc4c3f3c82524bfad3fd36e6dd4
|
e7ca5d7077df9453eb92f30320575e618c939d9f
|
refs/heads/master
| 2021-08-17T10:24:10.342672
| 2020-04-18T18:07:07
| 2020-04-18T18:07:07
| 163,456,670
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,560
|
r
|
np.R
|
### R code from vignette source 'np.Rnw'
### Encoding: UTF-8
###################################################
### code chunk number 1: np.Rnw:90-91
###################################################
options(prompt = "R> ", np.messages = FALSE, digits = 3)
###################################################
### code chunk number 2: np.Rnw:554-558
###################################################
library("np")
data("cps71")
model.par <- lm(logwage ~ age + I(age^2), data = cps71)
summary(model.par)
###################################################
### code chunk number 3: np.Rnw:570-576
###################################################
model.np <- npreg(logwage ~ age,
regtype = "ll",
bwmethod = "cv.aic",
gradients = TRUE,
data = cps71)
summary(model.np)
###################################################
### code chunk number 4: np.Rnw:592-593
###################################################
npsigtest(model.np)
###################################################
### code chunk number 5: np.Rnw:651-661 (eval = FALSE)
###################################################
## plot(cps71$age, cps71$logwage, xlab = "age", ylab = "log(wage)", cex=.1)
## lines(cps71$age, fitted(model.np), lty = 1, col = "blue")
## lines(cps71$age, fitted(model.par), lty = 2, col = "red")
## plot(model.np, plot.errors.method = "asymptotic")
##
## plot(model.np, gradients = TRUE)
## lines(cps71$age, coef(model.par)[2]+2*cps71$age*coef(model.par)[3],
## lty = 2,
## col = "red")
## plot(model.np, gradients = TRUE, plot.errors.method = "asymptotic")
###################################################
### code chunk number 6: np.Rnw:666-691
###################################################
options(SweaveHooks = list(multifig = function() par(mfrow=c(2,2),mar=c(4,4,3,2)))) # define the hook before invoking it
getOption("SweaveHooks")[["multifig"]]()
# Plot 1
plot(cps71$age,cps71$logwage,xlab="age",ylab="log(wage)",cex=.1)
lines(cps71$age,fitted(model.np),lty=1,col="blue")
lines(cps71$age,fitted(model.par),lty=2,col="red")
# Plot 2
plot(cps71$age,gradients(model.np),xlab="age",ylab="gradient",type="l",lty=1,col="blue")
lines(cps71$age,coef(model.par)[2]+2*cps71$age*coef(model.par)[3],lty=2,col="red")
# Plot 3
plot(cps71$age,fitted(model.np),xlab="age",ylab="log(wage)",ylim=c(min(fitted(model.np)-2*model.np$merr),max(fitted(model.np)+2*model.np$merr)),type="l")
lines(cps71$age,fitted(model.np)+2*model.np$merr,lty=2,col="red")
lines(cps71$age,fitted(model.np)-2*model.np$merr,lty=2,col="red")
# Plot 4
plot(cps71$age,gradients(model.np),xlab="age",ylab="gradient",ylim=c(min(gradients(model.np)-2*model.np$gerr),max(gradients(model.np)+2*model.np$gerr)),type="l",lty=1,col="blue")
lines(cps71$age,gradients(model.np)+2*model.np$gerr,lty=2,col="red")
lines(cps71$age,gradients(model.np)-2*model.np$gerr,lty=2,col="red")
###################################################
### code chunk number 7: np.Rnw:752-755
###################################################
cps.eval <- data.frame(age = seq(10,70, by=10))
predict(model.par, newdata = cps.eval)
predict(model.np, newdata = cps.eval)
###################################################
### code chunk number 8: np.Rnw:797-805
###################################################
data("wage1")
model.ols <- lm(lwage ~ female +
married +
educ +
exper +
tenure,
data = wage1)
summary(model.ols)
###################################################
### code chunk number 9: np.Rnw:828-848
###################################################
model.ols <- lm(lwage ~ female +
married +
educ +
exper +
tenure,
x = TRUE,
y = TRUE,
data = wage1)
X <- data.frame(wage1$female,
wage1$married,
wage1$educ,
wage1$exper,
wage1$tenure)
output <- npcmstest(model = model.ols,
xdat = X,
ydat = wage1$lwage,
nmulti = 1,
tol = 0.1,
ftol = 0.1)
summary(output)
###################################################
### code chunk number 10: np.Rnw:903-913
###################################################
bw.all <- npregbw(formula = lwage ~ female +
                  married +
                  educ +
                  exper +
                  tenure,
                  regtype = "ll",
                  bwmethod = "cv.aic",
                  data = wage1)
model.np <- npreg(bws = bw.all)
summary(model.np)
###################################################
### code chunk number 11: np.Rnw:946-981
###################################################
set.seed(123)
ii <- sample(seq(1, nrow(wage1)), replace=FALSE)
wage1.train <- wage1[ii[1:400],]
wage1.eval <- wage1[ii[401:nrow(wage1)],]
model.ols <- lm(lwage ~ female +
married +
educ +
exper +
tenure,
data = wage1.train)
fit.ols <- predict(model.ols,
data = wage1.train,
newdata = wage1.eval)
pse.ols <- mean((wage1.eval$lwage - fit.ols)^2)
bw.subset <- npregbw(formula = lwage ~ female +
                       married +
                       educ +
                       exper +
                       tenure,
                     regtype = "ll",
                     bwmethod = "cv.aic",
                     data = wage1.train)
model.np <- npreg(bws = bw.subset)
fit.np <- predict(model.np,
data = wage1.train,
newdata = wage1.eval)
pse.np <- mean((wage1.eval$lwage - fit.np)^2)
bw.freq <- bw.subset
bw.freq$bw[1] <- 0
bw.freq$bw[2] <- 0
model.np.freq <- npreg(bws = bw.freq)
fit.np.freq <- predict(model.np.freq,
data = wage1.train,
newdata = wage1.eval)
pse.np.freq <- mean((wage1.eval$lwage - fit.np.freq)^2)
###################################################
### code chunk number 12: np.Rnw:1017-1020 (eval = FALSE)
###################################################
## plot(model.np,
## plot.errors.method = "bootstrap",
## plot.errors.boot.num = 25)
###################################################
### code chunk number 13: np.Rnw:1025-1028
###################################################
plot(model.np,
plot.errors.method = "bootstrap",
plot.errors.boot.num = 25)
###################################################
### code chunk number 14: np.Rnw:1073-1104
###################################################
data("birthwt", package = "MASS")
birthwt$low <- factor(birthwt$low)
birthwt$smoke <- factor(birthwt$smoke)
birthwt$race <- factor(birthwt$race)
birthwt$ht <- factor(birthwt$ht)
birthwt$ui <- factor(birthwt$ui)
birthwt$ftv <- factor(birthwt$ftv)
model.logit <- glm(low ~ smoke +
race +
ht +
ui +
ftv +
age +
lwt,
family = binomial(link = logit),
data = birthwt)
model.np <- npconmode(low ~
smoke +
race +
ht +
ui +
ftv +
age +
lwt,
tol = 0.1,
ftol = 0.1,
data = birthwt)
cm <- table(birthwt$low,
ifelse(fitted(model.logit) > 0.5, 1, 0))
cm
summary(model.np)
###################################################
### code chunk number 15: np.Rnw:1132-1137
###################################################
data("faithful", package = "datasets")
f.faithful <- npudens(~ eruptions + waiting, data = faithful)
F.faithful <- npudist(~ eruptions + waiting, data = faithful)
summary(f.faithful)
summary(F.faithful)
###################################################
### code chunk number 16: np.Rnw:1142-1144 (eval = FALSE)
###################################################
## plot(f.faithful, xtrim = -0.2, view = "fixed", main = "")
## plot(F.faithful, xtrim = -0.2, view = "fixed", main = "")
###################################################
### code chunk number 17: np.Rnw:1149-1163
###################################################
getOption("SweaveHooks")[["multifig"]]()
options(SweaveHooks = list(multifig = function() par(mfrow=c(1,2),mar=rep(1.5,4))))
plot.output <- plot(f.faithful, xtrim=-0.2, view="fixed", main = "",plot.behavior="data")
# Retrieve data from plot() to create multiple figures
f <- matrix(plot.output$d1$dens, 50, 50)
plot.x1 <- unique(plot.output$d1$eval[,1])
plot.x2 <- unique(plot.output$d1$eval[,2])
persp(plot.x1,plot.x2,f,xlab="eruptions",ylab="waiting",zlab="Joint Density",col="lightblue", ticktype="detailed")
plot.output <- plot(F.faithful, xtrim = -0.2, view = "fixed", main="",plot.behavior="data")
# Retrieve data from plot() to create multiple figures
F <- matrix(plot.output$d1$dist, 50, 50)
plot.x1 <- unique(plot.output$d1$eval[,1])
plot.x2 <- unique(plot.output$d1$eval[,2])
persp(plot.x1,plot.x2,F,xlab="eruptions",ylab="waiting",zlab="Joint Distribution",col="lightblue", ticktype="detailed")
###################################################
### code chunk number 18: np.Rnw:1189-1200
###################################################
data("Italy")
fhat <- npcdens(gdp ~ year,
tol = 0.1,
ftol = 0.1,
data = Italy)
summary(fhat)
Fhat <- npcdist(gdp ~ year,
tol = 0.1,
ftol = 0.1,
data = Italy)
summary(Fhat)
###################################################
### code chunk number 19: np.Rnw:1207-1209 (eval = FALSE)
###################################################
## plot(fhat, view = "fixed", main = "", theta = 300, phi = 50)
## plot(Fhat, view = "fixed", main = "", theta = 300, phi = 50)
###################################################
### code chunk number 20: np.Rnw:1214-1229
###################################################
getOption("SweaveHooks")[["multifig"]]()
options(SweaveHooks = list(multifig = function() par(mfrow=c(1,2),mar=rep(1.25,4))))
plot.output <- plot(fhat, view="fixed", main="",plot.behavior="data")
# Retrieve data from plot() to create multiple figures
f <- matrix(plot.output$cd1$condens, 48, 50)
plot.y1 <- unique(plot.output$cd1$yeval)
plot.x1 <- unique(plot.output$cd1$xeval)
persp(as.integer(levels(plot.x1)),plot.y1,f,xlab="year",ylab="gdp",zlab="Conditional Density",col="lightblue", ticktype="detailed",theta=300,phi=50)
plot.output <- plot(Fhat, view="fixed", main="",plot.behavior="data")
# Retrieve data from plot() to create multiple figures
F <- matrix(plot.output$cd1$condist, 48, 50)
plot.y1 <- unique(plot.output$cd1$yeval)
plot.x1 <- unique(plot.output$cd1$xeval)
persp(as.numeric(plot.x1)+1951,plot.y1,F,xlab="year",ylab="gdp",zlab="Conditional Distribution",col="lightblue", ticktype="detailed",theta=300,phi=50)
###################################################
### code chunk number 21: np.Rnw:1259-1266
###################################################
bw <- npcdistbw(formula = gdp ~ year,
tol = 0.1,
ftol = 0.1,
data = Italy)
model.q0.25 <- npqreg(bws = bw, tau = 0.25)
model.q0.50 <- npqreg(bws = bw, tau = 0.50)
model.q0.75 <- npqreg(bws = bw, tau = 0.75)
###################################################
### code chunk number 22: np.Rnw:1273-1280 (eval = FALSE)
###################################################
## plot(Italy$year, Italy$gdp, main = "",
## xlab = "Year", ylab = "GDP Quantiles")
## lines(Italy$year, model.q0.25$quantile, col = "red", lty = 1, lwd = 2)
## lines(Italy$year, model.q0.50$quantile, col = "blue", lty = 2, lwd = 2)
## lines(Italy$year, model.q0.75$quantile, col = "red", lty = 3, lwd = 2)
## legend(ordered(1951), 32, c("tau = 0.25", "tau = 0.50", "tau = 0.75"),
## lty = c(1, 2, 3), col = c("red", "blue", "red"))
###################################################
### code chunk number 23: np.Rnw:1289-1298
###################################################
plot(Italy$year, Italy$gdp,
main = "",
xlab = "Year",
ylab = "GDP Quantiles")
lines(Italy$year, model.q0.25$quantile, col = "red", lty = 1, lwd = 2)
lines(Italy$year, model.q0.50$quantile, col = "blue", lty = 2, lwd = 2)
lines(Italy$year, model.q0.75$quantile, col = "red", lty = 3, lwd = 2)
legend(ordered(1951), 32, c("tau = 0.25", "tau = 0.50", "tau = 0.75"),
lty = c(1, 2, 3), col = c("red", "blue", "red"))
###################################################
### code chunk number 24: np.Rnw:1341-1347
###################################################
model.pl <- npplreg(lwage ~ female +
married +
educ +
tenure | exper,
data = wage1)
summary(model.pl)
###################################################
### code chunk number 25: np.Rnw:1367-1379
###################################################
model.index <- npindex(low ~
smoke +
race +
ht +
ui +
ftv +
age +
lwt,
method = "kleinspady",
gradients = TRUE,
data = birthwt)
summary(model.index)
###################################################
### code chunk number 26: np.Rnw:1399-1407
###################################################
model <- npindex(lwage ~ female +
married +
educ +
exper +
tenure,
data = wage1,
nmulti = 1)
summary(model)
###################################################
### code chunk number 27: np.Rnw:1433-1452
###################################################
model.ols <- lm(lwage ~ female +
married +
educ +
exper +
tenure,
data = wage1)
wage1.augmented <- wage1
wage1.augmented$dfemale <- as.integer(wage1$female == "Male")
wage1.augmented$dmarried <- as.integer(wage1$married == "Notmarried")
model.scoef <- npscoef(lwage ~ dfemale +
dmarried +
educ +
exper +
tenure | female,
betas = TRUE,
data = wage1.augmented)
summary(model.scoef)
colMeans(coef(model.scoef))
coef(model.ols)
###################################################
### code chunk number 28: np.Rnw:1478-1480
###################################################
fit.lc <- npksum(txdat = cps71$age, tydat = cps71$logwage, bws = 2)$ksum/
npksum(txdat = cps71$age, bws = 2)$ksum
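# The ratio above is the Nadaraya-Watson (local-constant) estimator built by
# hand: a kernel-weighted sum of logwage divided by the sum of the weights.
# As a sanity check (assuming npksum's default second-order Gaussian kernel),
# the same fit can be computed directly; the kernel constant cancels in the
# ratio, so the two should agree up to floating-point error:
fit.lc.check <- sapply(cps71$age, function(x0) {
  w <- dnorm((cps71$age - x0) / 2)   # bandwidth 2, matching the npksum call
  sum(w * cps71$logwage) / sum(w)
})
all.equal(as.numeric(fit.lc), as.numeric(fit.lc.check))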
###################################################
### code chunk number 29: np.Rnw:1485-1487 (eval = FALSE)
###################################################
## plot(cps71$age, cps71$logwage, xlab = "Age", ylab = "log(wage)")
## lines(cps71$age, fit.lc, col = "blue")
###################################################
### code chunk number 30: np.Rnw:1492-1494
###################################################
plot(cps71$age, cps71$logwage, xlab = "Age", ylab = "log(wage)", cex=.1)
lines(cps71$age, fit.lc, col = "blue")
|
5ed9ad2c9230f2b0761773c975149638e98668c9
|
acabe90ad16ea6d151e0fb2b69edd07be012007d
|
/week5/R/Model_smartphones.R
|
02fde94602012faf3b57e9fc0ec26fe59234a201
|
[] |
no_license
|
Taros007/ubiqum_projects
|
098e24db610e54bb93f0d363f6d01d9c2feed01e
|
566500b13e50e4e9fef2230299290fe63d7c1cd5
|
refs/heads/master
| 2020-04-28T22:05:51.003768
| 2019-07-16T08:14:20
| 2019-07-16T08:14:20
| 175,605,769
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,827
|
r
|
Model_smartphones.R
|
## Multiple regression - week 5
## Toine - March 2019
## Load libraries =================================
library(tidyverse)
library(caret)
library(e1071)
library(magrittr)
library(doParallel)
library(corrplot)
library(cowplot)
# Prepare clusters =================================
cl <- makeCluster(3)
registerDoParallel(cl)
## Import dataset =================================
#existingProducts <- readr::read_csv('./input/existingproductattributes2017.csv')
#newProducts <- readr::read_csv('./input/newproductattributes2017.csv')
existingProducts <- readr::read_csv2('./input/existingChristianProud.csv')
newProducts <- readr::read_csv2('./input/newproductChristianProud.csv')
## Preprocessing: cleaning up ==========================
existingProducts %<>% select(-X1)
names(existingProducts) %<>% make.names(.)
## Preprocessing: alter datatypes & calculate new variables ===============
existingProducts %<>%
mutate(
Product_type = as.factor(Product_type),
Depth = as.numeric(Depth),
Age = as.factor(Age),
Professional = as.factor(Professional),
Review_score = (5 * X5Stars + 4 * X4Stars + 3 * X3Stars + 2 * X2Stars + X1Stars) / rowSums(select(existingProducts, X5Stars:X1Stars))
)
existingProducts %<>% filter(Volume>0)
#existingProducts %<>% filter(Product_type != "Extended Warranty")
## Data exploration ==========================================
#plotting dependent variable
ggplot(existingProducts, aes(x = Product_type, y = Volume)) +
geom_boxplot() +
coord_flip()
#plotting all numeric variables
existingProducts %>%
keep(is.numeric) %>%
gather() %>%
ggplot(aes(value)) +
facet_wrap(~ key, scales = "free") +
geom_histogram()
#plotting dependent variable vs most important independent variable (varImp)
ggplot(existingProducts, aes(x = Positive_service_review, y = Volume)) + geom_point()
## Outlier detection & removal ===============================
source('./R/outliers.R')
#Detect outliers based on MAD
is_no_outlier <- isnt_out_mad(existingProducts$Volume, thres = 50)
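# isnt_out_mad() is defined in ./R/outliers.R, which is not shown here; a
# typical MAD-based version (an assumption, not the sourced code) keeps
# values within `thres` scaled median absolute deviations of the median:
isnt_out_mad_sketch <- function(x, thres = 3, na.rm = TRUE) {
  abs(x - median(x, na.rm = na.rm)) <= thres * mad(x, na.rm = na.rm)
}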
# add a column with info whether the Volume is an outlier
existingProducts$is_no_outlier <- is_no_outlier
# look at the same plot as above, with and without outliers
g_withoutliers <- ggplot(existingProducts, aes(Product_type, Volume)) +
  geom_boxplot() +
  coord_flip()
g_withoutoutliers <- ggplot(existingProducts[is_no_outlier == T,], aes(Product_type, Volume)) +
  geom_boxplot() +
  coord_flip()
plot_grid(g_withoutliers, g_withoutoutliers, labels = c("With outliers", "Without outliers"))
existingProducts <- existingProducts[is_no_outlier,]
## Detect collinearity & correlation =========================
corrData <- cor(existingProducts %>% select(-Age,-Product_type, -Professional) %>% na.omit())
corrplot(corrData, type = "upper", tl.pos = "td",
method = "circle", tl.cex = 0.5, tl.col = 'black',
diag = FALSE)
## Bin Best_seller_rank, and convert NAs to 0 ================
existingProducts$Best_seller_rank %<>%
  findInterval(c(-Inf, 50, 100, Inf)) %>%
  replace_na(0) %>% as.factor()
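# findInterval() maps each rank to the index of the half-open interval it
# falls in; with the breaks above, for example:
findInterval(c(10, 75, 200), c(-Inf, 50, 100, Inf)) # returns 1 2 3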
## Feature selection =================================
# existingSelected <- select(existingProducts,
# c(
# Review_score,
# Prices,
# Competitors,
# Positive_service_review,
# Width,
# Volume,
# Product_type
# ))
#existingSelected <- existingProducts %>%
# select(X4Stars,X2Stars....) Pericles
## Dummify data =================================
newDataFrame <- dummyVars(" ~ .", data = existingProducts)
existingDummy <- data.frame(predict(newDataFrame, newdata = existingProducts))
## Missing data =================================
#after feature selection to retain as much data as possible
existingDummy <- na.omit(existingDummy)
## Training of model =================================
set.seed(541)
# train and test
train_ids <- createDataPartition(y = existingDummy$Product_type.Smartphone,
p = 0.75,
list = F)
train <- existingDummy[train_ids,]
test <- existingDummy[-train_ids,]
# cross validation
ctrl <- trainControl(method = "repeatedcv",
number = 4,
repeats = 10
)
#train Random Forest Regression model
rfFit1 <- caret::train(Volume~ Review_score +
Best_seller_rank.0 +
Best_seller_rank.1 +
Best_seller_rank.2 +
Best_seller_rank.3 +
X1Stars +
X2Stars +
Negative_service_review +
Profit_margin,
data = train,
method = "rf",
trControl=ctrl,
importance=T #added to allow for varImp()
)
# Predicting testset ================================
test$Predictions <- predict(rfFit1, test)
postResample(test$Predictions, test$Volume)
Filtered <- filter(test, Product_type.Smartphone == 1)
cat("Amount of Smartphones in testset is:", nrow(Filtered))
postResample(Filtered$Predictions, Filtered$Volume)
ggplot(test, aes(x = Volume, y = Predictions)) +
geom_point() +
geom_abline(intercept = 0, slope = 1)
ggplot(filter(test, Product_type.Smartphone == 1), aes(x = Volume, y = Predictions)) +
geom_point() +
geom_abline(intercept = 0, slope = 1)
#Check important variables
varTun <- varImp(rfFit1)
plot(varTun, main = "Top variance importance")
# Closing actions ================================
# Stop Cluster.
stopCluster(cl)
|
330b76e022812821f1694fd89242cf5075885edd
|
3e90112284c2043f38b70dc3a84691eee2341fd3
|
/AttachPlayDirection.R
|
fd0e288940fa01e6641b5d70a9c6a7863ccbd115
|
[] |
no_license
|
prestonbiro/BigDataBowl2020
|
3743442cc3c835982865a7a3291599ef1151e6b1
|
5162c1407ac439797b02a50ea4fd64997589457a
|
refs/heads/main
| 2023-02-10T19:48:46.426370
| 2021-01-07T21:47:56
| 2021-01-07T21:47:56
| 312,735,734
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 832
|
r
|
AttachPlayDirection.R
|
#Attach play direction
setwd('F:/BigDataBowl2021')
source('InstantExam.R')
source('SplitPlaysByWeek.R')
load('Plays_Off_Def_Ball_Dist.Rdata')
trimmedPlays$PlayDirection = NA
oldWeek = 0
for(i in 1:nrow(trimmedPlays)){
play = trimmedPlays[i,'playId']; game = trimmedPlays[i,'gameId']
newWeek = findWeekNum(game)
if(newWeek != oldWeek) {
trackData = splitTrackingByWeek(newWeek)
routeOpts = levels(trackData$route)
playData = splitPlaysByWeek(newWeek)
}
if(i %% 200 == 1) print(paste(round(100*i/nrow(trimmedPlays),2),'% Complete',sep=''))
#Action
frame = isolateFrame(1,play,game,trackData)
trimmedPlays[i,'PlayDirection'] = levels(frame$playDirection)[frame$playDirection[1]]
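  # Note: levels(f)[f] works because a factor used as an index is coerced to
  # its integer codes; as.character(frame$playDirection[1]) is an equivalent,
  # arguably clearer, form.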
oldWeek = newWeek
}
# save(trimmedPlays,file = 'Plays_Off_Def_Ball_Dist_Dir.Rdata')
|
0756306d554f0ed92994ee398610ae9f4de8385d
|
f7e0f7627e312fdc5ec4ff46191098852676c8f4
|
/api.R
|
beb607a2633a921f31becb68d02ca7bad316731e
|
[] |
no_license
|
arrismo/NYPDComplaintsDataViz
|
7374f8b51004aa90fa4a43c68f120e4c4ee0ecc3
|
6dece499caae8c9da03a0504d54bdda381eb141f
|
refs/heads/main
| 2023-07-13T22:37:53.875057
| 2021-08-25T18:30:11
| 2021-08-25T18:30:11
| 398,148,713
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 259
|
r
|
api.R
|
library(curl)
library(jsonlite)
library(dplyr)
res <- curl_fetch_memory("https://data.cityofnewyork.us/resource/qgea-i56i.json")
(nypd_data <- jsonlite::prettify(rawToChar(res$content)))
(nypd_data <- jsonlite::fromJSON(nypd_data) %>%
as.data.frame())
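# The Socrata endpoint returns 1000 rows by default; larger pulls can be
# paged with the SODA $limit/$offset query parameters, e.g.:
res_page <- curl_fetch_memory(
  "https://data.cityofnewyork.us/resource/qgea-i56i.json?$limit=5&$offset=0"
)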
|
8e8695af58d03c99f3f14384bfadd21fa9b5db56
|
76beb7e70f9381a5bded37834ba8783e16cc8b9a
|
/ipmbook-code/c9/old_code/Monocarp Simulate Evol IBM.R
|
577221859cbf8b55ec31f8615c5d486665959163
|
[] |
no_license
|
aekendig/population-modeling-techniques
|
6521b1d5e5d50f5f3c156821ca5d4942be5a1fc9
|
713a5529dcbe7534817f2df139fbadbd659c4a0c
|
refs/heads/master
| 2022-12-29T20:54:51.146095
| 2020-10-07T12:18:23
| 2020-10-07T12:18:23
| 302,026,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,519
|
r
|
Monocarp Simulate Evol IBM.R
|
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Section 1 - Simulate the IPM
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# initial individual sizes and flowering intercepts
z <- rnorm(init.pop.size, mean = 2.9, sd = m.par.true["rcsz.sd"])
flow.int.ind <- rnorm(init.pop.size, mean = init.mean.flow.int, sd = init.beta.sd)
## calculate initial pop size and mean size
pop.size.t <- init.pop.size
mean.z.t <- mean(z)
mean.flow.int <- mean(flow.int.ind)
# probability of flowering depends on the individual flowering intercept and size z
p_bz_ind<- function(z,flow.ints,m.par) {
linear.p <- flow.ints + m.par["flow.z"] * z # linear predictor
p <- 1/(1+exp(-linear.p)) # logistic transformation to probability
return(p)
}
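# Illustration with made-up values (m.par.true comes from the sourced setup
# script, so these numbers are purely hypothetical): an individual of size
# z = 3 with flowering intercept -10 and slope 3.5 flowers with probability
# 1/(1+exp(-(-10 + 3.5*3))) ~= 0.62
p_bz_ind(z = 3, flow.ints = -10, m.par = c(flow.z = 3.5))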
## iterate the model using the "true" parameters and store data in a data.frame
yr <- 1
while(yr<= n.yrs & length(z) < 1500000) {
## calculate population size
pop.size <- length(z)
## generate binomial random number for the probability of flowering, where the probability of flowering
## depends on your size z, this is a vector of 0's and 1's, you get a 1 if you flower
Repr <- rbinom(n=pop.size, prob=p_bz_ind(z, flow.int.ind,m.par.true ), size=1)
## number of plants that flowered
num.Repr <- sum(Repr)
## calculate seed production
#Seeds <- rep(NA, pop.size)
## we'll assume plant make a Poisson distributed number of seeds with a mean given by
## exp(params["seed.int"]+params["seed.size"] * z)
## rpois generated Poisson distributed random numbers
Seeds<- rpois(num.Repr, b_z(z[Repr==1], m.par.true))
Total.seeds <- sum(Seeds,na.rm=TRUE)
Flow.ints.rec <- rep(flow.int.ind[Repr==1],Seeds)[sample(1:Total.seeds,Recr)]
Flow.ints.rec <- rnorm(Recr,Flow.ints.rec,beta.off.sd)
## generate the number of recruits
## generate new recruit sizes
## rnorm generated normally distributed random numbers
Rcsz <- rnorm(Recr, mean = m.par.true["rcsz.int"], sd = m.par.true["rcsz.sd"])
## for the non-reproductive plants generate random number for survival
Surv <- rep(NA, pop.size)
Surv[Repr==0] <- rbinom(n = pop.size - num.Repr, prob = s_z(z[Repr==0], m.par.true), size = 1)
num.die <- sum(Surv==0, na.rm=TRUE)
## index for individuals that did not flower and survived
i.subset <- which(Repr==0 & Surv==1)
## let them grow
E.z1 <- m.par.true["grow.int"]+m.par.true["grow.z"]*z[i.subset]
z1 <- rnorm(n = pop.size - num.Repr - num.die, mean = E.z1, sd = m.par.true["grow.sd"])
z1 <- c(Rcsz, z1)
flow.int.ind <- c(Flow.ints.rec,flow.int.ind[i.subset])
z <- z1
min.z.t <- if (yr==1) min(z) else min(min.z.t,min(z))
max.z.t <- if (yr==1) max(z) else max(max.z.t,max(z))
mean.fl.z.t <- if (yr==1) mean(exp(z[Repr==1])) else c(mean.fl.z.t,mean(exp(z[Repr==1])))
mean.flow.int <- if (yr==1) mean(flow.int.ind) else c(mean.flow.int,mean(flow.int.ind))
min.flow.int <- if (yr==1) min(flow.int.ind) else min(min.flow.int,min(flow.int.ind))
max.flow.int <- if (yr==1) max(flow.int.ind) else max(max.flow.int,max(flow.int.ind))
var.flow.int <- if (yr==1) var(flow.int.ind) else c(var.flow.int,var(flow.int.ind))
if(yr%%10==1)cat(paste(yr, mean.flow.int[yr]," ",var.flow.int[yr],"\n", sep=" "))
yr <- yr+1
}
|
bc2e6308cdd8a49ef68f586b5018a51aef265e48
|
b0c5e471fb4dfdc91b78560d14a1ad5a4fef5a0e
|
/man/gan.rtorch.Rd
|
e44b13e53b30e43e284e404b8e8962d01e6d4ecc
|
[] |
no_license
|
f0nzie/gan.rtorch
|
f757fa5ae75e36f12e77a317c7d5e57fbd9e961e
|
96cce2a82407d9000966f8db0996eca7c43957fd
|
refs/heads/master
| 2020-07-17T02:01:39.091734
| 2019-09-06T00:58:53
| 2019-09-06T00:58:53
| 205,917,871
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 216
|
rd
|
gan.rtorch.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{package}
\name{gan.rtorch}
\alias{gan.rtorch}
\alias{gan.rtorch-package}
\title{gan.rtorch}
\description{
gan.rtorch
}
|
608b49b26d99f92d342e044d2cb3207893e468b8
|
18f46dd324a7327c5b5780f1472f37e6ab5ce2ee
|
/OLD/map testing.R
|
c0816b70bd204a89a8a2f5ea579c952c94ddf9c6
|
[] |
no_license
|
piersyork/trust-and-stringency-analysis
|
2202ccc17ccd3ac47b023c56bf833baef504a189
|
0773caa396590c9083245fb7fe87151465a1ff0b
|
refs/heads/main
| 2023-07-28T13:30:03.712487
| 2021-09-04T10:41:20
| 2021-09-04T10:41:20
| 376,332,950
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 772
|
r
|
map testing.R
|
library(sparklyr)
library(tidyverse) # provides read_csv(), the dplyr verbs and ggplot2 used below
country <- read_csv("Data/country_data.csv") %>%
select(location, gdp_per_capita)
sc <- spark_connect(master = "local", version = "2.3")
cars <- copy_to(sc, mtcars)
spark_web(sc)
count(cars)
select(cars, hp, mpg) %>%
sample_n(100) %>%
collect() %>%
plot()
maps::map()
help(package = "maps")
world <- map_data("world", ) %>%
filter(region != "Antarctica") %>%
left_join(country, by = c("region" = "location"))
ggplot(world, aes(long, lat, group = group, fill = gdp_per_capita)) +
geom_polygon() +
coord_quickmap()
ggplot(world, aes(x = long, y = lat, group = group)) +
geom_path() +
scale_y_continuous(breaks = (-2:2) * 30) +
scale_x_continuous(breaks = (-4:4) * 45) +
coord_map("ortho", orientation = c(41, -74, 0))
|
c4f9247acd294b22761b02225eb95ba3ff5898af
|
447f4b75b34eacd196f0cd1d2ac29b44a623a864
|
/code/MIRAGE_burden_only_variant_level.R
|
ac820b5ae1494e955aacb4b0a14542fb91d7f771
|
[] |
no_license
|
han16/rare-var-project
|
bc4a44b82cf6ac7d62cafabac99896d79fbdc207
|
4d5d977060d913b40f0cb877abd8d0764b1edecb
|
refs/heads/master
| 2023-07-21T01:51:22.364990
| 2023-07-12T16:32:03
| 2023-07-12T16:32:03
| 86,002,442
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,195
|
r
|
MIRAGE_burden_only_variant_level.R
|
#############################
rm(list=ls())
library(RSQLite)
library(dplyr)
library(knitr)
set.seed(123)
###################################
intergrand=function(aa, var.case, var.contr, bar.gamma, sig, N1, N0)
{
ff=dbinom(var.case, sum(var.case, var.contr), aa*N1/(aa*N1+N0))*dgamma(aa, bar.gamma*sig, sig)
return(ff)
}
# calculate the bayes factor of a single variant via integration
BF.var.inte=function(var.case, var.contr, bar.gamma, sig, N1, N0)
{
marglik0.CC <- dbinom(var.case, sum(var.case, var.contr), N1/(N1+N0)) # Under H0: gamma=1
marglik1.CC <- integrate(intergrand, lower = 0, upper = 100, var.case = var.case,
                         var.contr = var.contr, bar.gamma = bar.gamma, sig = sig,
                         N1 = N1, N0 = N0, stop.on.error = F)$value # Under H1: gamma~gamma(gamma.mean*sigma, sigma)
BF.var <- marglik1.CC/marglik0.CC
return(BF.var)
}
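# Quick illustration with hypothetical counts (the sample sizes match the
# SCZ numbers commented near the bottom of this script): the Bayes factor
# for a variant seen in 5 cases and 1 control, prior mean relative risk 3
BF.var.inte(var.case = 5, var.contr = 1, bar.gamma = 3, sig = 2,
            N1 = 2536, N0 = 2543)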
######################################################
multi.group.func.for.variant=function(new.data, N1, N0, gamma.mean, sigma, delta, beta.init, num.group) # new.data has one column specifying its group index
{
########################
max.iter=1e4
stop.cond=0; iter=1 # parameter settings
thrshd=1e-5
beta.k=matrix(nrow=max.iter, ncol=num.group)
beta.k[1,]=beta.init
full.info.var=list()
num.var=nrow(new.data)
var.BF=numeric()
########################
# calculate the Bayes factor for variant j as initials.
var.index.list=new.data$group.index
if (length(var.index.list)>0) # calculate Bayes factor for variant j
for (j in 1:length(var.index.list))
{
if (new.data$original.group.index[j]<=5)
var.BF[j]=BF.var.inte(new.data$No.case[j], new.data$No.contr[j], bar.gamma=6, sig=sigma, N1, N0)
if (new.data$original.group.index[j]>5)
var.BF[j]=BF.var.inte(new.data$No.case[j], new.data$No.contr[j], bar.gamma=gamma.mean, sig=sigma, N1, N0)
################## split BF of LoF and non LoF
# if (new.data$group.index[var.index.list[j]]<=2)
# bb.LoF=bb.LoF*((1-beta.k[1, new.data$group.index[var.index.list[j]]])+beta.k[1, new.data$group.index[var.index.list[j]]]*var.BF[j])
# if (new.data$group.index[var.index.list[j]]>2)
# bb.nonLoF=bb.nonLoF*((1-beta.k[1, new.data$group.index[var.index.list[j]]])+beta.k[1, new.data$group.index[var.index.list[j]]]*var.BF[j])
}
full.info.var=cbind(new.data, var.BF)
########################## EM algorithm
########################
while (stop.cond==0)
{
iter=iter+1
############## EM algorithm: E step
EZj=numeric() # expectation for variant j
#
# info.single.gene=full.info.genevar[[i]] # this is a small matrix for that single gene. each row is one variant
if (nrow(full.info.var)>0)
for (j in 1:nrow(full.info.var))
{
if (num.group>1)
{
numer=full.info.var$var.BF[j]*beta.k[(iter-1), full.info.var$group.index[j]]
denom=full.info.var$var.BF[j]*beta.k[(iter-1), full.info.var$group.index[j]]+(1-beta.k[(iter-1), full.info.var$group.index[j]])
}
if (num.group==1)
{
numer=full.info.var$var.BF[j]*beta.k[(iter-1)]
denom=full.info.var$var.BF[j]*beta.k[(iter-1)]+(1-beta.k[(iter-1)])
}
EZj[j]=numer/denom
}
############ EM algorithm: M step
for (g in 1:num.group)
{
var.in.group.index=which(new.data$group.index==g)
if (length(var.in.group.index)>0)
beta.k[iter, g]=sum(EZj[var.in.group.index])/length(var.in.group.index)
if (length(var.in.group.index)==0)
beta.k[iter, g]=0
}
################
if (num.group>1)
diff=sum(abs(beta.k[iter,]-beta.k[(iter-1),]))
if (num.group==1)
diff=sum(abs(beta.k[iter]-beta.k[(iter-1)]))
if (diff<thrshd || iter>(max.iter-1))
stop.cond=1
# cat(iter, "th iteration is running", "\n")
} # end of iter
##############################
if (iter<max.iter)
{
if (num.group>1)
beta.k=beta.k[complete.cases(beta.k),]
if (num.group==1)
beta.k=beta.k[complete.cases(beta.k)]
}
################## calculate the likelihood ratio test statistics and p value
# beta.k[(iter-1), -7]=0
lkhd=rep(1,num.var); total.lkhd=0
teststat=numeric(); pvalue=numeric()
num.actu.group=length(unique(full.info.var$group.index))
if (nrow(full.info.var)>0)
for (j in 1:nrow(full.info.var))
{
if (num.group>1)
lkhd[j]=lkhd[j]*((1-beta.k[(iter-1), full.info.var$group.index[j]])+beta.k[(iter-1), full.info.var$group.index[j]]*full.info.var$var.BF[j])
if (num.group==1)
lkhd[j]=lkhd[j]*((1-beta.k[(iter-1)])+beta.k[(iter-1)]*full.info.var$var.BF[j])
teststat[j]=2*log(lkhd[j]); # this is the test statistics of one gene
total.lkhd=total.lkhd+log(lkhd[j])
pvalue[j]=pchisq(teststat[j], num.actu.group, lower.tail=F)
}
teststat[num.var+1]=2*total.lkhd
pvalue[num.var+1]=pchisq(teststat[num.var+1], num.actu.group, lower.tail=F)
############################################## calculate category specific test statistics and p value
##################
cate.lkhd=rep(1,num.group); cate.stat=numeric()
cate.pvalue=numeric(num.group); sum.lkhd=0
if (num.group>1)
for (g in 1:num.group)
{ # g=2
if (nrow(full.info.var)>0)
for (j in 1:nrow(full.info.var))
if (full.info.var$group.index[j]==g)
cate.lkhd[g]=cate.lkhd[g]*((1-beta.k[(iter-1), g])+beta.k[(iter-1), g]*full.info.var$var.BF[j])
cate.stat[g]=2*log(cate.lkhd[g])
cate.pvalue[g]=pchisq(cate.stat[g], 1, lower.tail=F)
} # end of g
if (num.group==1)
{
if (nrow(full.info.var)>0)
for (j in 1:nrow(full.info.var))
cate.lkhd[1]=cate.lkhd[1]*((1-beta.k[(iter-1)])+beta.k[(iter-1)]*full.info.var$var.BF[j])
cate.stat[1]=2*log(cate.lkhd)
cate.pvalue[1]=pchisq(cate.stat, 1, lower.tail=F)
}
######################
if (num.group>1)
beta.est=beta.k[(iter-1),]
if (num.group==1)
beta.est=beta.k[(iter-1)]
return(result=list(beta.est=beta.est, full.info=full.info.var, test.stat=teststat, pvalue=pvalue, cate.stat=cate.stat, cate.pvalue=cate.pvalue))
}
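# Hypothetical usage sketch (not from the original script): new.data needs one
# row per variant with columns No.case, No.contr, group.index and
# original.group.index (the latter selects the prior mean effect size above).
# fit <- multi.group.func.for.variant(order.overlap.data, N1 = 2536, N0 = 2543,
#                                     gamma.mean = 3, sigma = 2, delta = 0.2,
#                                     beta.init = runif(8), num.group = 8)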
#####################################
fixed.beta.func=function(new.data, N1, N0, gamma.mean, sigma, beta) # new.data has one column specifying its group index
{
#######################
beta.k=beta
full.info.genevar=list()
gene.list=new.data$Gene; unique.gene=unique(gene.list) # find the gene list
num.gene=length(unique.gene)
BF.gene=numeric()
########################
for (i in 1:num.gene)
{
cat(i, "th gene of ", "\t", num.gene, "\t", "is running", "\n")
var.index.list=which(gene.list==unique.gene[i])
indi.gene=new.data[var.index.list,] # note var.index.list matches new.data
bb=1; var.BF=numeric()
if (length(var.index.list)>0) # calculate Bayes factor for variant (i,j)
for (j in 1:length(var.index.list))
{
if (new.data$group.index[var.index.list[j]]<=5)
var.BF[j]=BF.var.inte(new.data$No.case[var.index.list[j]], new.data$No.contr[var.index.list[j]], bar.gamma=6, sig=sigma, N1, N0)
if (new.data$group.index[var.index.list[j]]>5)
var.BF[j]=BF.var.inte(new.data$No.case[var.index.list[j]], new.data$No.contr[var.index.list[j]], bar.gamma=gamma.mean, sig=sigma, N1, N0)
bb=bb*((1-beta.k[new.data$group.index[var.index.list[j]]])+beta.k[new.data$group.index[var.index.list[j]]]*var.BF[j])
}
full.info.genevar[[i]]=cbind(indi.gene, var.BF)
BF.gene[i]=bb
}
return(result=list(BayesFactor=data.frame(Gene=unique.gene, BF=BF.gene), full.info=full.info.genevar))
}
#########################################
fifteen.partition=function(cand.data) # given gene data and annotations, do variant partitions
{
par.evid=list()
LoF.def=c("stopgain", "frameshift substitution", "splicing", "stoploss")
par.evid[[1]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.05 & cand.data$ExacAF>=0.01 )
par.evid[[2]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.01 & cand.data$ExacAF>=0.001)
par.evid[[3]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.001 & cand.data$ExacAF>=0.0001)
par.evid[[4]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.0001 & cand.data$ExacAF>0)
par.evid[[5]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF==0)
par.evid[[6]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>=0.01 & cand.data$ExacAF<0.05)
par.evid[[7]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>=0.001 & cand.data$ExacAF<0.01)
par.evid[[8]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>=0.0001 & cand.data$ExacAF<0.001)
par.evid[[9]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>0 & cand.data$ExacAF<0.0001)
par.evid[[10]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF==0 )
par.evid[[11]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>=0.01 & cand.data$ExacAF<0.05)
par.evid[[12]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>=0.001 & cand.data$ExacAF<0.01)
par.evid[[13]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>=0.0001 & cand.data$ExacAF<0.001)
par.evid[[14]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>0 & cand.data$ExacAF<0.0001)
par.evid[[15]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF==0)
group.index=rep(NA, nrow(cand.data))
for (i in 1:length(par.evid))
group.index[par.evid[[i]]]=i
gene.data=data.frame(ID=cand.data$ID, Gene=cand.data$Gene, No.case=cand.data$No.case, No.contr=cand.data$No.contr, group.index=group.index)
gene.data=gene.data[complete.cases(gene.data),]
return(gene.data)
}
eight.partition=function(cand.data) # given gene data and annotations, do variant partitions
{
par.evid=list()
LoF.def=c("stopgain", "frameshift substitution", "splicing", "stoploss")
par.evid[[1]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.05 & cand.data$ExacAF>=0.01 )
par.evid[[2]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.01)
par.evid[[3]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>=0.01 & cand.data$ExacAF<0.05)
par.evid[[4]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>=0.001 & cand.data$ExacAF<0.01)
par.evid[[5]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF<0.001)
par.evid[[6]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>=0.01 & cand.data$ExacAF<0.05)
par.evid[[7]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>=0.001 & cand.data$ExacAF<0.01)
par.evid[[8]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF<0.001)
group.index=rep(NA, nrow(cand.data))
for (i in 1:length(par.evid))
group.index[par.evid[[i]]]=i
gene.data=data.frame(ID=cand.data$ID, Gene=cand.data$Gene, No.case=cand.data$No.case, No.contr=cand.data$No.contr, group.index=group.index)
gene.data=gene.data[complete.cases(gene.data),]
return(gene.data)
}
four.partition=function(cand.data) # given gene data and annotations, do variant partitions
{
par.evid=list()
LoF.def=c("stopgain", "frameshift substitution", "splicing", "stoploss")
par.evid[[1]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.05 & cand.data$ExacAF>=0.01 ) # other LoF sets
par.evid[[2]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.01) # LoF and AF<1%
par.evid1=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>=0.01 & cand.data$ExacAF<0.05)
par.evid2=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>=0.001 & cand.data$ExacAF<0.01)
par.evid[[3]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF<0.001) # damaging and AF<0.1%
par.evid3=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>=0.01 & cand.data$ExacAF<0.05)
par.evid4=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>=0.001 & cand.data$ExacAF<0.01)
par.evid5=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF<0.001)
par.evid[[4]]=c(par.evid1, par.evid2, par.evid3, par.evid4, par.evid5) # union of other missense variants
group.index=rep(NA, nrow(cand.data))
for (i in 1:length(par.evid))
group.index[par.evid[[i]]]=i
gene.data=data.frame(ID=cand.data$ID, Gene=cand.data$Gene, No.case=cand.data$No.case, No.contr=cand.data$No.contr, group.index=group.index)
gene.data=gene.data[complete.cases(gene.data),]
return(gene.data)
}
three.partition=function(cand.data) # given gene data and annotations, do variant partitions
{
par.evid=list()
LoF.def=c("stopgain", "frameshift substitution", "splicing", "stoploss")
#par.evid[[1]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.05 & cand.data$ExacAF>=0.01 ) # other LoF sets
par.evid[[1]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.05) # all LoF
#par.evid1=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>=0.01 & cand.data$ExacAF<0.05)
#par.evid2=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>=0.001 & cand.data$ExacAF<0.01)
#par.evid[[3]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF<0.001) # damaging and AF<0.1%
par.evid[[2]]=which(cand.data$Annotation %in% LoF.def==F & cand.data$ExacAF>=0.001 & cand.data$ExacAF<0.05) # missense and MAF>1%
par.evid[[3]]=which(cand.data$Annotation %in% LoF.def==F & cand.data$ExacAF<0.001 ) # missense and MAF<1%
#par.evid3=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>=0.01 & cand.data$ExacAF<0.05)
#par.evid4=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>=0.001 & cand.data$ExacAF<0.01)
#par.evid5=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF<0.001)
#par.evid[[4]]=c(par.evid1, par.evid2, par.evid3, par.evid4, par.evid5) # union of other missense variants
group.index=rep(NA, nrow(cand.data))
for (i in 1:length(par.evid))
group.index[par.evid[[i]]]=i
gene.data=data.frame(ID=cand.data$ID, Gene=cand.data$Gene, No.case=cand.data$No.case, No.contr=cand.data$No.contr, group.index=group.index)
gene.data=gene.data[complete.cases(gene.data),]
return(gene.data)
}
two.partition=function(cand.data) # given gene data and annotations, do variant partitions
{
par.evid=list()
LoF.def=c("stopgain", "frameshift substitution", "splicing", "stoploss")
#par.evid[[1]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.05 & cand.data$ExacAF>=0.01 ) # other LoF sets
par.evid[[1]]=which(cand.data$Annotation %in% LoF.def==T & cand.data$ExacAF<0.05) # LoF
#par.evid1=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>=0.01 & cand.data$ExacAF<0.05)
#par.evid2=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF>=0.001 & cand.data$ExacAF<0.01)
#par.evid[[3]]=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))>=0.957 & cand.data$ExacAF<0.001) # damaging and AF<0.1%
par.evid[[2]]=which(cand.data$Annotation %in% LoF.def==F & cand.data$ExacAF<0.05) # missense
#par.evid[[3]]=which(cand.data$Annotation %in% LoF.def==F & cand.data$ExacAF<0.01 ) # missense and MAF<1%
#par.evid3=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>=0.01 & cand.data$ExacAF<0.05)
#par.evid4=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF>=0.001 & cand.data$ExacAF<0.01)
#par.evid5=which(cand.data$Annotation %in% LoF.def==F & as.numeric(as.character(cand.data$Polyphen2.HDIV.score))<0.957 & cand.data$ExacAF<0.001)
#par.evid[[4]]=c(par.evid1, par.evid2, par.evid3, par.evid4, par.evid5) # union of other missense variants
group.index=rep(NA, nrow(cand.data))
for (i in 1:length(par.evid))
group.index[par.evid[[i]]]=i
gene.data=data.frame(ID=cand.data$ID, Gene=cand.data$Gene, No.case=cand.data$No.case, No.contr=cand.data$No.contr, group.index=group.index)
gene.data=gene.data[complete.cases(gene.data),]
return(gene.data)
}
#########################################
#All.Anno.Data=read.table("D:\\ResearchWork\\StatisticalGenetics\\Rare-variant-project\\AnnotatedTrans.txt", header=T)
#All.Anno.Data=read.table("C:\\han\\ResearchWork\\StatGene\\AutismData\\AnnotatedTrans.txt", header=T)
#All.Anno.Data=read.table("..\\AnnotatedTrans.txt", header=T)
#All.Anno.Data=read.table("C:\\han\\ResearchWork\\StatGene\\SCZData\\AnnotatedSCZ.txt", header=T)
#N1=4315; N0=4315
#N1=2536; N0=2543
#All.Anno.Data[All.Anno.Data =="."] <- NA
#All.Anno.Data$ExacAF[is.na(All.Anno.Data$ExacAF)]=0 # set AF of NA to zero
#Anno.Data=All.Anno.Data[which(All.Anno.Data$ExacAF<0.05 & All.Anno.Data$Annotation!="synonymous SNV"),] # use AF cutoff and exclude synonumous SNV
#var.data=data.frame(ID=Anno.Data$ID, No.case=Anno.Data$No.case, No.contr=Anno.Data$No.contr)
#CADD.cutoff=quantile(as.numeric(as.character(All.Anno.Data$CADD.raw)), prob=0.9, na.rm=TRUE)
########################################
################ whole genome
#gene.set=as.character(unique(All.Anno.Data$Gene))
#gene.set=as.character(read.csv("data\\GeneSet\\Samocha_2014NG_contraintgene.csv", header=T)$gene)
#gene.set=as.character(read.csv("C:\\Users\\han\\Dropbox\\StatisticalGenetics\\Samocha_2014NG_contraintgene.csv", header=T)$gene)
#vart.set=as.character(Anno.Data$ID[which(Anno.Data$Gene %in% gene.set)])
#cand.data=Anno.Data[which(Anno.Data$ID %in% vart.set),]
#cand.data=Anno.Data[which(Anno.Data$ID %in% comb.evid),]
#gene.data=eight.partition(cand.data)
#overlap.data=gene.data[gene.data$ID %in% comb.evid,]
#order.overlap.data=overlap.data[order(overlap.data$group.index, decreasing=F),]
#psbl.index=unique(order.overlap.data$group.index); actu.num.group=length(psbl.index)
#delta.init=runif(1); beta.init=runif(actu.num.group)
#for (j in 1:actu.num.group)
# order.overlap.data$group.index[order.overlap.data$group.index==psbl.index[j]]=j # re-index the group labels
# delta=runif(1)
# para.est=multi.group.func.for.variant(order.overlap.data, N1, N0, gamma.mean=3, sigma=2, delta=0.2, beta.init, actu.num.group)
|
feac3374f921d675e406c10dc3373a414ceffd5e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/vegan/examples/tsallis.Rd.R
|
8300f7cdf26c997a0bbc36f8f1c55af9da20f1fb
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 518
|
r
|
tsallis.Rd.R
|
library(vegan)
### Name: tsallis
### Title: Tsallis Diversity and Corresponding Accumulation Curves
### Aliases: tsallis tsallisaccum persp.tsallisaccum
### Keywords: multivariate
### ** Examples
data(BCI)
i <- sample(nrow(BCI), 12)
x1 <- tsallis(BCI[i,])
x1
diversity(BCI[i,],"simpson") == x1[["2"]]
plot(x1)
x2 <- tsallis(BCI[i,],norm=TRUE)
x2
plot(x2)
mod1 <- tsallisaccum(BCI[i,])
plot(mod1, as.table=TRUE, col = c(1, 2, 2))
persp(mod1)
mod2 <- tsallisaccum(BCI[i,], norm=TRUE)
persp(mod2,theta=100,phi=30)
|
5218510e4c65146218e968101573b76bea90deab
|
55da83ee66e7cebbacb384e4b24df28965f862e0
|
/motivating_example.R
|
06e375182039322803b6312d24ba8dd6dc72fce6
|
[] |
no_license
|
tomiaJO/CEU_TEXT_MINING
|
b2d340424df9f47b3892bd35e9f72e0ace8aedf5
|
09b99e102ac35893352e50084e76ac3909930dcb
|
refs/heads/master
| 2021-04-30T05:05:37.932093
| 2018-02-25T20:40:12
| 2018-02-25T20:40:12
| 121,408,245
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,437
|
r
|
motivating_example.R
|
install.packages("tidytext")
library(twitteR)
## copied from: https://gist.github.com/earino/65faaa4388193204e1c93b8eb9773c1c
library(tidyverse)
library(tidytext)
# library(broom)
#authenticate to distant service
setup_twitter_oauth(
consumer_key = Sys.getenv("TWITTER_CONSUMER_KEY"),
consumer_secret = Sys.getenv("TWITTER_CONSUMER_SECRET"),
access_token = Sys.getenv("TWITTER_ACCESS_TOKEN"),
access_secret = Sys.getenv("TWITTER_ACCESS_SECRET")
)
trump <- userTimeline('realDonaldTrump', n = 3200)
obama <- userTimeline('BarackObama', n = 3200)
raw_tweets <- bind_rows(twListToDF(trump), twListToDF(obama))
words <- raw_tweets %>%
unnest_tokens(word, text) #global, should work for hungarian as well
data("stop_words")
words <- words %>%
anti_join(stop_words, by = "word") %>%
filter(! str_detect(word, "\\d"))
words_to_ignore <- tibble(word = c("https", "amp", "t.co"))
words <- words %>%
anti_join(words_to_ignore, by = "word")
tweets <- words %>%
group_by(screenName, id, word) %>%
summarise(contains = 1) %>%
ungroup() %>%
spread(key = word, value = contains, fill = 0) %>%
mutate(tweet_by_trump = as.integer(screenName == "realDonaldTrump")) %>%
select(-screenName, -id)
library(glmnet)
fit <- cv.glmnet(
x = tweets %>% select(-tweet_by_trump) %>% as.matrix(),
y = tweets$tweet_by_trump,
family = "binomial"
)
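# Instead of the hand-picked penalty s = exp(-3) used below, the
# cross-validated choice from cv.glmnet can be used directly:
# coef(fit, s = "lambda.min")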
temp <- coef(fit, s = exp(-3)) %>% as.matrix()
coefficients <- data.frame(word = row.names(temp), beta = temp[, 1])
data <- coefficients %>%
filter(beta != 0) %>%
filter(word != "(Intercept)") %>%
arrange(desc(beta)) %>%
mutate(i = row_number())
ggplot(data, aes(x = i, y = beta, fill = ifelse(beta > 0, "Trump", "Obama"))) +
geom_bar(stat = "identity", alpha = 0.75) +
scale_x_continuous(breaks = data$i, labels = data$word, minor_breaks = NULL) +
xlab("") +
ylab("Coefficient Estimate") +
coord_flip() +
scale_fill_manual(
guide = guide_legend(title = "Word typically used by:"),
values = c("#446093", "#bc3939")
) +
theme_bw() +
theme(legend.position = "top")
library(wordcloud)
words %>%
filter(screenName == "realDonaldTrump") %>%
count(word) %>%
with(wordcloud(word, n, max.words = 20))
words %>%
filter(screenName == "BarackObama") %>%
count(word) %>%
with(wordcloud(word, n, max.words = 20))
ggplot(raw_tweets, aes(x = created, y = screenName)) +
geom_jitter(width = 0) +
theme_bw() +
ylab("") +
xlab("")
|
af89638d44c33c79a73d3207c31d56a8d9d942b9
|
360df3c6d013b7a9423b65d1fac0172bbbcf73ca
|
/FDA_Pesticide_Glossary/2-methoxy-4H-1,3,2-b.R
|
5a56529edd700b75e6360e833f031bb4d1790996
|
[
"MIT"
] |
permissive
|
andrewdefries/andrewdefries.github.io
|
026aad7bd35d29d60d9746039dd7a516ad6c215f
|
d84f2c21f06c40b7ec49512a4fb13b4246f92209
|
refs/heads/master
| 2016-09-06T01:44:48.290950
| 2015-05-01T17:19:42
| 2015-05-01T17:19:42
| 17,783,203
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 276
|
r
|
2-methoxy-4H-1,3,2-b.R
|
library("knitr")
library("rgl")
#knit("2-methoxy-4H-1,3,2-b.Rmd")
#markdownToHTML('2-methoxy-4H-1,3,2-b.md', '2-methoxy-4H-1,3,2-b.html', options=c("use_xhml"))
#system("pandoc -s 2-methoxy-4H-1,3,2-b.html -o 2-methoxy-4H-1,3,2-b.pdf")
knit2html('2-methoxy-4H-1,3,2-b.Rmd')
|
8e77fbc1e5996c73cd3240b17acde7a4f172dd02
|
c85d3b3332b3af21d9260f348c9d7558dad41d71
|
/plot3.R
|
7077cc12a7b63ce3e773365eb0cdd133b89aebfb
|
[] |
no_license
|
downtownadam/EDA-Course-Project
|
ea8299f04b43e2310e746554146977e18fae0808
|
9d1e505c30de0f8f5fdcd43809a9cc6a52aa8d35
|
refs/heads/master
| 2021-01-10T12:19:23.720353
| 2016-03-04T05:20:49
| 2016-03-04T05:20:49
| 53,109,380
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 914
|
r
|
plot3.R
|
library(ggplot2)
library(dplyr)
options(scipen=5)
sourceRDS<-readRDS(file="C:/Users/mila_/Documents/Coursera/Source_Classification_Code.rds")
summaryRDS<-readRDS(file="C:/Users/mila_/Documents/Coursera/summarySCC_PM25.rds")
summaryRDS$Pollutant<-as.factor(summaryRDS$Pollutant)
summaryRDS$type<-as.factor(summaryRDS$type)
summaryRDS$year<-as.factor(summaryRDS$year)
totals <- summaryRDS %>% filter(fips=="24510") %>% group_by(type,year) %>% summarize(Total_Emissions=sum(Emissions))
png(filename="plot3.png",width=640, height=480)
fancygraph <- ggplot(totals,aes(year,Total_Emissions,fill=type)) +
facet_grid(.~type,scales="free", space="free") + geom_bar(stat="identity") +
theme_dark() + ylim(0,2500) + theme(legend.position="none") + labs(x="Year",y="Total Emissions in Tons",title="Baltimore City, Maryland\nTotal Emissions in Tons by Type of Source")
print(fancygraph)
dev.off()
|
f3a823b036c372b66e0b15c99e0fa196bdb049da
|
23cc9b41b64dbf6b8da86fd8ce348fd108ed3ab1
|
/man/plot_cycle.Rd
|
5def8eb001d9a27fa75295966dd9b83a5fb54316
|
[] |
no_license
|
DavidPitteloud/Reddit_Package
|
6a7a637cac1f42aa9649fdfdd6a0f5b71201c37b
|
9f34c51354bd6231266cf3c462ef50fcba7a220d
|
refs/heads/master
| 2022-12-01T21:24:42.520584
| 2020-08-11T18:22:17
| 2020-08-11T18:22:17
| 285,858,771
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 291
|
rd
|
plot_cycle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_cycle.R
\name{plot_cycle}
\alias{plot_cycle}
\title{Plot cycle}
\usage{
plot_cycle(df)
}
\arguments{
\item{df}{a data frame of results to plot}
}
\value{
ggplot with results
}
\description{
Plot cycle
}
|
4b3f2d4ffdcda887c7bdaafa8f031883bf4e1dbd
|
5244a0e46d0b4d3059336338875efe00b252e07a
|
/inst/shiny-examples/gradientApp/ui.R
|
48b5c49c23828c7005d255295df61bbebdca33ce
|
[] |
no_license
|
vonthein/illustrator
|
dbc8a4259946a20924bec49b5863835331fea1f2
|
998e97bce66b40ae88162d996e3ff2ff4e428ccd
|
refs/heads/master
| 2020-06-26T04:55:49.626830
| 2019-10-29T09:49:09
| 2019-10-29T09:49:09
| 97,004,707
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,530
|
r
|
ui.R
|
source("init.R")
shinyUI(
fluidPage(
sidebarLayout(
sidebarPanel(
sliderInput("n",
label = "n Symbols per class",
value = 10, min = 1, max = 25),
sliderInput("near",
label = "near each other",
value = 0.5, min = 0.2, max = 2, step=0.1),
sliderInput("b1",
label = "b1 slope",
value = 1, min = -1, max = 2, step=0.01),
sliderInput("b0",
label = "b0 intercept",
value = 0, min = -20, max = 50, step=1),
sliderInput("seed",
label = "seed",
value = 1,
min = 0, max = 99999),
colourpicker::colourInput("filcol1", "Fill color from", "darkgreen"),
colourpicker::colourInput("filcol2", "Fill color to", "green")
),# panel
mainPanel(
selectInput("icon", label = "icon Symbol",
choices = choices,
selected = "fir"),
sliderInput("m",
label = "m magnification of symbol",
value = 2, min = 0, max = 5, step=0.1),
sliderInput("p",
label = "p Proportion of symbol, < 1 is narrower",
value = 1, min = 0.1, max = 10, step=0.01),
p("Gradient"),
plotOutput("pdfPlot"),
textOutput("pdfDescription"),
strong(textOutput("pdfSum"))
) # panel
) # layout
) # page
)
|
4ef7b670283ae85a9e1c3f1504fecbb428563049
|
820c0a5f34c4e9899db608c6ccbdc3e2e853f2d8
|
/man/combine_date_time_cols.Rd
|
dac80f6a08c32c353f1425d306b850007b4df52a
|
[
"MIT"
] |
permissive
|
alwinw/epocakir
|
887c0fd0b66251c67d922613e1420effff003611
|
a1bcd4567fb2d91cde53453dff5991af967c4860
|
refs/heads/master
| 2023-05-23T06:05:55.385634
| 2023-01-06T12:52:15
| 2023-01-06T12:52:15
| 296,596,576
| 4
| 1
|
NOASSERTION
| 2022-12-16T10:25:30
| 2020-09-18T11:05:01
|
R
|
UTF-8
|
R
| false
| true
| 896
|
rd
|
combine_date_time_cols.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{combine_date_time_cols}
\alias{combine_date_time_cols}
\title{Combine date and time columns into a single DateTime column}
\usage{
combine_date_time_cols(.data, tz = NULL)
}
\arguments{
\item{.data}{(data.frame) A data frame or data frame extension (e.g. a tibble)}
\item{tz}{(character) a time zone name (default: time zone of the POSIXt
object x)}
}
\value{
(data.frame) An object of the same type as \code{.data}
}
\description{
Combine date and time columns into a single DateTime column
}
\examples{
df <- data.frame(
date_a = as.Date(c("2020-01-01", "2020-01-02")),
date_b = as.POSIXct(c("2020-02-01", "2020-02-02")),
time_a = as.POSIXct(c("1900-01-01 01:01:01", "1900-01-01 02:02:02")),
time_b = as.POSIXct(c("1900-01-01 01:01:01", "1900-01-01 02:02:02"))
)
combine_date_time_cols(df)
}
|
88c08f8d0a9e2f9d61dc723bc8a325559a78a4ca
|
7f683baba5fe554d7a996ae5416f46b64bb1c6e3
|
/ninjaR/generate_data.R
|
d5edef6d60eac3fb0b0c8f907a24911561962f64
|
[] |
no_license
|
ninjacrash/geogo
|
dec0f50e76d5887e9081e3e64bd889aa9be3f315
|
0d5069fc2ef54e68d84725002fe6bf1f96d7669e
|
refs/heads/master
| 2021-01-17T18:57:22.587517
| 2016-10-23T13:43:25
| 2016-10-23T13:43:25
| 71,632,229
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,158
|
r
|
generate_data.R
|
generate_data <- function(size, seed = 1, time.start = as.POSIXct('2014-01-01 09:00.00 CDT', tz="America/Chicago"), outlier_prob = seq(0.05, 0.1, by = 0.01)){
# Function generates a data frame of given size
##########################################################
# Hard coded variables, can be used as command line params
##########################################################
set.seed(seed)
DATA_SIZE <- size
TIME_START <- time.start
OUTLIER_PROBABILITIES <- outlier_prob
SAMPLE_MESSAGES <- as.character(read.csv("./data/stories.csv",header=F)$V1)
UIDS <- as.character(read.csv("./data/UIDs.csv",header=F)$V1)
##########################################################
# Generating features for dataset
##########################################################
# Generating user_id
user_id <- sample(x = UIDS, size = DATA_SIZE, replace = TRUE)
# Generating reason_to_contact
# rtc.values<-c("could not pay rent","have nowhere to sleep","lost my job","potential family violence")
# my.control.rtc<-as.vector(rmultinom(1, size = DATA_SIZE, prob = c(0.2,0.6,0.1,0.1)))
# rtc<-unlist(mapply(rep, times=my.control.rtc, rtc.values))
# Generating gender
# gender.values<-c("M","F")
# my.control.gender<-as.vector(rmultinom(1, size = DATA_SIZE, prob = c(0.6,0.4)))
# gender<-unlist(mapply(rep, times=my.control.gender, gender.values))
# Generating education
# ed.values<-c("some high school","high school","college")
# my.control.ed<-as.vector(rmultinom(1, size = DATA_SIZE, prob = c(0.4,0.4,0.2)))
# educ<-unlist(mapply(rep, times=my.control.ed, ed.values))
# Generating what_person_wants
wpw.values<-c(SAMPLE_MESSAGES)
my.control.wpw<-as.vector(rmultinom(1, size = DATA_SIZE, prob = c(rep(0.08, 12), 0.04)))
wpw<- sample(unlist(mapply(rep, times=my.control.wpw, wpw.values)))
# Creating missing values
holes <- as.logical(rbinom(n = DATA_SIZE, size = 1, prob = sample(OUTLIER_PROBABILITIES, 1)))
wpw[holes] <- NA
# Generating shelter_checked_at
shelter_checked_at <- as.factor(sample(c("Shelter1", "Shelter2", "Shelter3"),
DATA_SIZE,
prob = c(0.25, 0.5, 0.25),
replace = TRUE))
# Creating missing values
holes <- as.logical(rbinom(n = DATA_SIZE, size = 1, prob = sample(OUTLIER_PROBABILITIES, 1)))
shelter_checked_at[holes] <- NA
# Generating time bounds of data collection
# To be used in next section
time.now <- Sys.time()
# Generating date_checked
date_checked <- sample(seq(from = time.start, to = time.now, by = "mins"), DATA_SIZE, replace = TRUE)
date_checked <- strftime(as.POSIXlt(date_checked), format = "%m/%d/%Y %H:%M:%S %Z", tz = 'America/Chicago')
# Generating intervention
intervention <- as.factor(sample(c("gave money", "did not give money", "gave bed", "job training", "family therapy"),
DATA_SIZE,
prob = c(0.15, 0.27, 0.39, 0.11, 0.08),
replace = TRUE))
# Generating checked_at_shelter
checked_at_shelter <- as.factor(sample(c("No", "Yes"),
DATA_SIZE,
prob = c(0.5, 0.5),
replace = TRUE))
# Creating missing values
holes <- which(checked_at_shelter == 'No')
date_checked[holes] <- NA
##########################################################
# Dataset creation
##########################################################
data<-data.frame(user_id = user_id,
# gender = gender,
# education = educ,
message = wpw,
intervention = intervention,
shelter_name = shelter_checked_at,
event_dt = date_checked,
checked_at_shelter = checked_at_shelter)
#########################################################
#########################################################
data
}
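# Example usage (hypothetical size/seed; requires the CSVs under ./data/):
# df <- generate_data(size = 100, seed = 42)
# str(df)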
|
ac4289e297889bc4055ff026a244d017e5656bff
|
5d5ea66bc7c9c749e004c99132f430e1bfb7f651
|
/shinyApp/Volcano_Shiny/ui.R
|
88acf8cfb01e8a4a25d286a9bda15aab50234eba
|
[] |
no_license
|
bgcasey/RV_connectivity
|
014e8eaab6935fdffecbd5ddcc383e9134579613
|
c8eab4ca206dfa0ec58fc8b3a01d3daacced3aaa
|
refs/heads/main
| 2023-06-29T00:48:04.426083
| 2021-08-05T20:54:34
| 2021-08-05T20:54:34
| 369,054,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,371
|
r
|
ui.R
|
#------------------------------------------------------------------------
# UI, or "User Interface" Script
# this script designs the layout of everything the user will see in this Shiny App
#------------------------------------------------------------------------
library(shiny)
library(shinydashboard)
library(shinyWidgets)
library(leaflet)
library(dplyr)
library(ggplot2)
# make dashboard header
header <- dashboardHeader(
title = " Characterizing Connectivity Pinch-Points in Edmonton's Ribbon of Green",
titleWidth = 800 # since we have a long title, we need to extend width element in pixels
)
# create dashboard body - this is the major UI element
body <- dashboardBody(
# make first row of elements (actually, this will be the only row)
fluidRow(
# make first column, 25% of page - width = 3 of 12 columns
column(width = 5,
# Box 1: text explaining what this app is
#-----------------------------------------------
box( width = NULL,
                 status="primary", # sets the box's accent color; options are "info", "primary", "warning", "danger", and "success"
title = NULL,
# background = "black",
# add some text in bold
strong("River Valley Connectivity Project" ,
# linebreak
br(),
a("A sustainability scholars project", href="https://www.ualberta.ca/sustainability/experiential/sustainability-scholars/index.html", target = "_blank"),
),
# linebreak
br(),
# text in normal
p("This application can be used to help visualize the biophysical conditions contributing to
movement pinch-points in Edmonton's River valley"),
p("Created by Brendan Casey.",
br(),
strong(a("See application code", href="https://github.com/bgcasey/RV_connectivity", target = "_blank")),
br(),
strong(a("See project workflow", href="https://bookdown.org/bgcasey/RV_connectivity", target = "_blank"))),
#
), # end box 1
# box 2 : input for selecting pinch-points and variables
#-----------------------------------------------
box(width = NULL, status = "primary",
title = "Selection Criteria", solidHeader = T,
collapsible = T,
# Widget specifying the seasonal pinch-points to be included on the plot
checkboxGroupButtons(
inputId = "season_select",
label = "Season",
choices = c("Winter" , "Summer"),
checkIcon = list(
yes = tags$i(class = "fa fa-check-square",
style = "color: steelblue"),
no = tags$i(class = "fa fa-square-o",
style = "color: steelblue"))
), # end checkboxGroupButtons
# Widget specifying the Ribbon of Green reach
checkboxGroupButtons(
inputId = "RoG_reach_select",
label = "RoG reach",
choices = c("Big Island Woodbend", "Big Lake", "Blackmud", "Cameron Oleskiw River Valley",
"Confluence", "East Ravines", "Edmonton East", "Horsehills North", "Horsehills South",
"Irvine Creek to Blackmud South", "Marquis River Valley", "Mill Creek North", "Mill Creek South",
"North Saskatchewan Central", "North Saskatchewan East", "North Saskatchewan West",
"SW Annex", "Wedgewood Ravine", "Whitemud", "Whitemud North", "Whitemud South Annex"),
checkIcon = list(
yes = tags$i(class = "fa fa-check-square",
style = "color: steelblue"),
no = tags$i(class = "fa fa-square-o",
style = "color: steelblue"))
), # end checkboxGroupButtons
# Widget specifying the species to be included on the plot
checkboxGroupButtons(
inputId = "variable_select",
label = "Variable of interest",
choices = c("Distance to road" , "Slope" , "Vegetation type" , "UPLVI STYPE" , "Volcanic Field",
"Complex" , "Other", "Lava Dome" , "Submarine" ),
checkIcon = list(
yes = tags$i(class = "fa fa-check-square",
style = "color: steelblue"),
no = tags$i(class = "fa fa-square-o",
style = "color: steelblue"))
), # end checkboxGroupButtons
# strong("Space for your additional widget here:"),
#
# br(), br(), br(), br(), br(), # add a bunch of line breaks to leave space. these can be removed when you add your widget
#
# # space for your addition here:
#-------------------------------------------
# --- --- --- --- HINT --- --- --- ---
# here, you will paste code for another Widget to filter volcanoes on the map.
# you'll need to paste code for some widget, name it, then call it at the top of the server page
# when we are filtering the selected_volcanoes() reactive object.
# see the columns in the volcanoes dataset, and add a widget to further filter your selected_volcanoes() server object
# --- --- --- some suggestions: --- --- ---
# 1. slider bar to only show volcanoes population_within_30_km > xxxx
# 2. slider input to show volcanoes with last_eruption_year > xxxx
# 3. slider input to only show volcanoes with elevation > xxxx
# 4. checkbox input to only show volcanoes in evidence category c("xx", "xx")
# see available widgets here: http://shinyapps.dreamrs.fr/shinyWidgets/
# and here: https://shiny.rstudio.com/gallery/widget-gallery.html
), # end box 2
# box 3: ggplot of selected volcanoes by continent
#------------------------------------------------
box(width = NULL, status = "primary",
solidHeader = TRUE, collapsible = T,
title = "Volcanoes by Continent",
plotOutput("continentplot", # this calls to object continentplot that is made in the server page
height = 325)
) # end box 3
), # end column 1
# second column - 75% of page (8 of 12 columns)
#--------------------------------------------------
column(width = 7,
# Box 3: leaflet map
box(width = NULL, background = "light-blue",
leafletOutput("pinchPoint_map", height = 850)
# this draws element called "pinchPoint_map", which is created in the "server" tab
) # end box with map
) # end second column
), # end fluidrow
# Make a CSS change so this app shows at 90% zoom on browsers
# only adding this because it looked more zoomed in on my web browser than it did on my RStudio viewer
tags$style(" body {
-moz-transform: scale(0.9, 0.9); /* Moz-browsers */
zoom: 0.9; /* Other non-webkit browsers */
              zoom: 90%; /* Webkit browsers */}")
) # end body
# compile dashboard elements
dashboardPage(
skin = "blue",
header = header,
sidebar = dashboardSidebar(disable = TRUE), # here, we only have one tab, so we don't need a sidebar, we will just disable it.
body = body
)
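## ----------------------------------------------------------------------
## Editor's sketch of a matching server.R (assumption: the real server lives
## in a separate file; `pinch_points`, its `season`/`reach` columns, and the
## filter logic are hypothetical, inferred only from the input/output IDs above).
## server <- function(input, output, session) {
##   selected_points <- reactive({
##     dplyr::filter(pinch_points,
##                   season %in% input$season_select,
##                   reach %in% input$RoG_reach_select)
##   })
##   output$pinchPoint_map <- leaflet::renderLeaflet({
##     leaflet::leaflet(selected_points()) %>%
##       leaflet::addTiles() %>%
##       leaflet::addCircleMarkers()
##   })
##   output$continentplot <- renderPlot({
##     ggplot2::ggplot(selected_points(), ggplot2::aes(x = reach)) +
##       ggplot2::geom_bar()
##   })
## }
## ----------------------------------------------------------------------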
|
4fb1e621d0286b0eaef31131eb6be087696aee44
|
3def96d7907d3ee7e8bfca496ee4b0a715f38901
|
/TP1 - Statistique descriptive, ACP/paris.r
|
4324e0e40891fb9a48a69d99633aa088494a211b
|
[] |
no_license
|
Anaig/SY09
|
3889c5dd65c81fad17e04fb9eff7741e32ec92e4
|
3da310d2e996552e2ee79bccd6660fedacee141d
|
refs/heads/master
| 2020-12-25T16:47:33.960579
| 2016-09-25T21:02:26
| 2016-09-25T21:02:26
| 67,801,829
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,694
|
r
|
paris.r
|
books <- read.csv("anonymous-betting-data.csv")
source("pretraitements.R")
names(books.sel) # column names
attach(books.sel) # attach as the default table
##################QUESTION 1###########################
summary(books.sel)
nlevels(books.sel$match_book_uid) # number of bets
nlevels(books.sel$match_uid) # number of matches
length(players.total <- union(loser,winner)) # number of players
length(unique(loser)) # number of losers
length(unique(winner)) # number of winners
length(unique(book)) # number of bookmakers
##################QUESTION 2###########################
matches <- books.sel[which(!duplicated(books.sel$match_uid)),
-c(1,2,3,4,5,7,8,9,10,11,12)]
matches <- matches[sort.int(as.character(matches$match_uid), index.return=T)$ix,]
win <- aggregate(matches$match_uid~matches$winner, data=matches, FUN=length)
los <-aggregate(matches$match_uid~matches$loser, data=matches, FUN=length)
# does not include players with 0 wins or 0 losses, hence a dimension problem
# Table: player_uid / number of matches won / number of matches lost
players_data<-data.frame(
  player_uid=factor(levels(matches$winner),levels=levels(matches$winner)),
  player_win=table(matches$winner),
  player_los=table(matches$loser)) # minor issue: the player ids are still included
# Table of each player's propensity to win matches
players <- data.frame(
  players_data[,-c(2,4)], # drop the duplicated columns
  player_level=players_data[3]/(players_data[5]+players_data[3]) # column representing the player's level (wins/(wins+losses))
)
colnames(players)[2]="victoires" # rename the columns
colnames(players)[3]="defaites"
colnames(players)[4]="ratio"
players<-players[order(-players$ratio),]
# plot the distribution of player levels
hist(players$ratio,main="Player level",xlab="Wins/matches played",ylab="Number of players",col="darkblue")
##################QUESTION 3###########################
# Suspicious matches: implied probability moved by more than 0.1
books.sel$prob_evo=abs(books.sel$implied_prob_winner_open-books.sel$implied_prob_winner_close)
pari_suspect<-subset(books.sel,books.sel$prob_evo>=0.1 & books.sel$moved_towards_winner==TRUE)
# A BET IS SUSPICIOUS IF THE PROBABILITY MOVED IN FAVOUR OF THE WINNER
dim(pari_suspect) # number of suspicious bets: 2657
## old version ##
pari_suspect=(prob_winner_evo >= 0.1) # a bet is suspicious if the shift is > 0.1
table(pari_suspect) # number of suspicious bets: 4298
##________________##
# Suspicious bookmakers
table(pari_suspect$book) # number of suspicious matches per bookmaker
# bookmakers A, B and C are involved far more often than the others
# Suspicious matches
match_suspect<-unique(pari_suspect$match_uid)
length(match_suspect) # 1752 suspicious matches (2nd method)
## old version ##
match_suspect<-books.sel$match_uid[pari_suspect$match_book_uid]
nb_pari<-data.frame(table(match_suspect))
Match_suspect<-subset(nb_pari,Freq>0) # table with each suspicious match uid and the number of associated bets
dim(Match_suspect) # 2798 suspicious matches (1st method)
##________________##
# a match is truly suspicious only if the winner's odds moved in their favour
# Suspicious players
player_suspect <- books.sel$loser[match_suspect] # losing on purpose is easier than winning on purpose
length(unique(player_suspect)) # number of suspicious players: 310 (1st method), 273 (2nd method)
nb_match_suspect <- data.frame(table(player_suspect))
Player_suspect<-subset(nb_match_suspect,Freq>10) # more than 10 suspicious losses
dim(Player_suspect) # 145 suspicious players (1st method), 64 (2nd method): much better
# bar plot of the number of suspicious losses per player
display_player<-Player_suspect[order(-Player_suspect$Freq),] # sort by frequency (descending)
barplot(display_player$Freq,main="Number of suspicious losses per implicated player",ylab="Suspicious matches",col="darkgreen")
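## Editor's note (sketch): the suspicious-bet filter above rewritten with dplyr;
## assumes only the books.sel columns already used in this script.
## library(dplyr)
## pari_suspect <- books.sel %>%
##   mutate(prob_evo = abs(implied_prob_winner_open - implied_prob_winner_close)) %>%
##   filter(prob_evo >= 0.1, moved_towards_winner)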
|
d4059eccf7c11841b4f84c3f0e887d2dd5eb7a08
|
c719d9f4b158f92d3ab81af6a1556260b715d793
|
/dendl_cgMLSTvsLyveSET-color.R
|
2d5331b43b23e5da9c9bb5ab2b091e1b653533f5
|
[] |
no_license
|
jchen232/TanglegramCampyPuppy
|
6a1a90fe2301c0f401bd742ba7906efbd65ee37f
|
60d0eb5eeb6f5dff8570c89a912376a365230c87
|
refs/heads/master
| 2022-11-17T23:38:13.048738
| 2020-07-17T19:01:55
| 2020-07-17T19:01:55
| 199,073,612
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,085
|
r
|
dendl_cgMLSTvsLyveSET-color.R
|
#!/usr/bin/env Rscript
# Authors: Beau Bruce and Weidong Gu
# Modified by Lee Katz and Jess Chen
library("phytools")
library("ape")
library("dendextend")
treefile1 <- read.tree(file = "labpaper2018_cgMLST_newprod.dnd")
treefile2 <- read.tree(file = "out.RAxML_bipartitions.dnd")
outbreak <- read.table('clade_numbers2.txt',
sep="\t", header=T, stringsAsFactors=F)
tree1 <- reorder(midpoint.root((treefile1)), order = "cladewise")
tree2 <- reorder(midpoint.root((treefile2)), order = "cladewise")
dend_tree1 <- force.ultrametric(tree1)
dend_tree2 <- force.ultrametric(tree2)
min_length <- 1e-21  # floor the zero-length edges that force.ultrametric can produce
dend_tree1$edge.length[ dend_tree1$edge.length < min_length ] <- min_length
dend_tree2$edge.length[ dend_tree2$edge.length < min_length ] <- min_length
dend_tree1=(midpoint.root(dend_tree1))
dend_tree2=(midpoint.root(dend_tree2))
clade1 <- match(outbreak$WGS_id[outbreak$Clade == 1],dend_tree1$tip.label)
clade2 <- match(outbreak$WGS_id[outbreak$Clade == 2],dend_tree1$tip.label)
myColors <- c()
myColors[clade1] <- 'red'
myColors[clade2] <- 'blue'
myNewColors <- c(myColors[myColors=="blue"],myColors[myColors=="red"])
print("untangle")
dendl <- dendextend::untangle(as.dendrogram(dend_tree1),
as.dendrogram(dend_tree2),
method = "step2side")
# Make the branches look nice
dendl %>% set("branches_lwd", 1) %>%
set("labels_col", "white") -> dendl
print("entanglement...");
myEntanglement <- entanglement(dendl)
cophenetic <- cor.dendlist(dendl, method = "cophenetic")
baker <- cor.dendlist(dendl, method = "baker")
# Start off the viz
tiff(file="tangle-dendl-mlst-hqsnp-color2.tiff", width = 7, height = 7, unit = "in",res =600 )
tanglegram(dendl,
main_left='cgMLST',
main_right='lyve-SET',
lab.cex=0.3,
highlight_distinct_edges = FALSE,
color_lines=myNewColors,
lwd=1,
common_subtrees_color_lines = FALSE
)
myReturn <- dev.off();
|
d4fd40770509096afb2c7c982a10ab9fe677b84c
|
1b47e06672d6807db473988aef2f24ea0e4c6734
|
/R/pothole_model.R
|
38a6e5f97cc0f302a0fe0b7e54d548c51fdb2052
|
[] |
no_license
|
StevenMMortimer/pothole
|
a17e2a4589a369dca0ee2b5b0e73102e03b706c2
|
bb4d957fb3c0969e4b4666a909f8dbd5f1788d98
|
refs/heads/master
| 2021-05-30T20:58:05.632847
| 2016-01-22T05:50:30
| 2016-01-22T05:50:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 335
|
r
|
pothole_model.R
|
#' @name pothole_model
#' @title Edmonton Pothole Prediction Model
#' @export
#' @description Simple example model that predicts number of potholes filled monthly in Edmonton
#' @docType data
#' @usage pothole_model
#' @format a \code{\link{ets}} object
#' @source https://dev.socrata.com/foundry/#/dashboard.edmonton.ca/i3wp-57z9
NULL
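## Editor's sketch (assumption): since `pothole_model` is documented as an
## `ets` object, a forecast could be produced with the forecast package:
## library(forecast)
## fc <- forecast(pothole_model, h = 12)  # predict the next 12 months
## plot(fc)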
|
47729b2c8c3ce8e92f302290930d5c116c3725bb
|
b5ec2092fabc9668b0369504fe6c711becc67e8b
|
/variants.R
|
dbc3a9118bfa8cd68fe48ed3480abf19c0d0a5a7
|
[] |
no_license
|
ZhongruiHu/ZBS
|
23a9cf90889da9e153288a176bfcf2d8af0f51e0
|
19a7ba8c6b7ead8fb4fef4c99bc62e7067ec0cb0
|
refs/heads/master
| 2021-01-12T16:51:35.677086
| 2014-03-19T03:51:30
| 2014-03-19T03:51:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,278
|
r
|
variants.R
|
pdf("ZBS-variants.pdf", width=12, height=8, family="Times")
par(xpd=NA, mfrow=c(2,2), mar=c(6,4,1.5,0.5), cex=1.1) # margin: bottom left top right
area_l <- c(100, 120, 140, 160, 180, 200, 220, 240)
num_FFD <- c(300, 400, 500, 600, 700, 800, 900, 1000)
legend <- c("DVHU", "DHU", "DU", "CVHU", "CHU", "CU", "C")
pch <- c(seq(1,5),15,16)
# # # # # # # # # # # # # # # # # Fixed density # # # # # # # # # # # # # # # # #
# # Average maximum latency # #
# D1VHU
#avg_max_lat <- c(19.09, 18.11, 17.88, 18.62, 18.61, 19.90, 20.14, 20.97)
# D1HU
#avg_max_lat <- c(21.01, 23.63, 25.76, 27.92, 29.14, 31.98, 33.73, 35.57)
#lines(area_l, avg_max_lat, type='o', pch=pch[2])
# D2VHU
avg_max_lat <- c(18.38, 16.89, 17.08, 17.21, 17.92, 18.84, 19.42, 20.47)
plot(area_l, avg_max_lat, type='o', pch=pch[1], ylim=c(18,138),
xlab='Area length (m)', ylab='Average maximum latency (slots)', main='(a) Fixed network density')
legend(180, -35, legend=legend, pch=pch, ncol=length(legend), bty='o')
# D2HU
avg_max_lat <- c(20.85, 22.40, 24.23, 26.01, 27.12, 29.52, 30.66, 32.29)
lines(area_l, avg_max_lat, type='o', pch=pch[2])
# D2U
avg_max_lat <- c(40.73, 45.12, 48.63, 50.55, 53.21, 56.06, 58.57, 61.31)
lines(area_l, avg_max_lat, type='o', pch=pch[3])
# CVHU
avg_max_lat <- c(18.28, 16.84, 16.84, 17.27, 17.98, 18.94, 19.60, 20.35)
lines(area_l, avg_max_lat, type='o', pch=pch[4])
# CHU
avg_max_lat <- c(20.74, 22.47, 24.21, 25.94, 27.43, 29.36, 30.77, 32.55)
lines(area_l, avg_max_lat, type='o', pch=pch[5])
# CU
avg_max_lat <- c(41.35, 44.96, 47.98, 50.68, 52.86, 56.17, 58.77, 62.43)
lines(area_l, avg_max_lat, type='o', pch=pch[6])
# C
avg_max_lat <- c(102.15, 135.46, 142.60, 137.22, 137.26, 137.65, 137.84, 136.73)
lines(area_l, avg_max_lat, type='o', pch=pch[7])
# # Average latency # #
# D1VHU
#avg_lat <- c(8.633156, 8.140718, 8.143710, 8.445711, 8.639501, 9.054342, 9.395622, 9.812672)
# D1HU
#avg_lat <- c(9.365611, 10.300120, 11.109248, 11.884447, 12.428119, 13.516939, 14.165949, 14.903600)
#lines(area_l, avg_lat, type='o', pch=pch[2])
# D2VHU
avg_lat <- c(8.838037, 8.336745, 8.485482, 8.735454, 9.138091, 9.605691, 10.063384, 10.579800)
plot(area_l, avg_lat, type='o', pch=pch[1], ylim=c(8,75),
xlab='Area length (m)', ylab='Average latency (slots)', main='(b) Fixed network density')
# D2HU
avg_lat <- c(9.939890, 10.640719, 11.404108, 12.308732, 13.055188, 13.918293, 14.756973, 15.639346)
lines(area_l, avg_lat, type='o', pch=pch[2])
# D2U
avg_lat <- c(19.878539, 21.917822, 23.438281, 24.595909, 26.022779, 27.156799, 28.620674, 30.176503)
lines(area_l, avg_lat, type='o', pch=pch[3])
# CVHU
avg_lat <- c(8.919318, 8.411400, 8.518859, 8.791262, 9.151941, 9.641454, 10.186824, 10.584161)
lines(area_l, avg_lat, type='o', pch=pch[4])
# CHU
avg_lat <- c(9.843791, 10.667241, 11.524022, 12.301896, 13.104550, 14.002915, 14.680621, 15.589984)
lines(area_l, avg_lat, type='o', pch=pch[5])
# CU
avg_lat <- c(19.945279, 21.901543, 23.417573, 24.632448, 25.730835, 27.287988, 28.673944, 30.504444)
lines(area_l, avg_lat, type='o', pch=pch[6])
# C
avg_lat <- c(51.575000, 65.114137, 68.197776, 68.838323, 70.928022, 73.804502, 74.698971, 74.511222)
lines(area_l, avg_lat, type='o', pch=pch[7])
# # # # # # # # # # # # # # # # # # Fixed area # # # # # # # # # # # # # # # # #
par(mar=c(4,4,1.5,0.5)) # margin: bottom left top right
# # Average maximum latency # #
# D1VHU
#avg_max_lat <- c(18.04, 18.41, 19.95, 20.68, 21.69, 23.36, 24.39, 25.61)
# D1HU
#avg_max_lat <- c(25.74, 28.83, 32.14, 35.28, 38.65, 41.20, 44.65, 47.00)
#lines(num_FFD, avg_max_lat, type='o', pch=pch[2])
# D2VHU
avg_max_lat <- c(18.45, 19.20, 19.99, 20.64, 21.58, 22.38, 23.37, 24.00)
plot(num_FFD, avg_max_lat, type='o', pch=pch[1], ylim=c(18,141),
xlab='Number of nodes', ylab='Average maximum latency (slots)', main='(c) Fixed simulation area')
# D2HU
avg_max_lat <- c(25.88, 28.61, 30.54, 32.60, 34.85, 36.93, 39.34, 41.46)
lines(num_FFD, avg_max_lat, type='o', pch=pch[2])
# D2U
avg_max_lat <- c(41.83, 48.23, 53.94, 59.81, 65.83, 73.10, 78.42, 84.83)
lines(num_FFD, avg_max_lat, type='o', pch=pch[3])
# CVHU
avg_max_lat <- c(18.69, 19.18, 20.11, 20.92, 21.75, 22.33, 23.27, 24.13)
lines(num_FFD, avg_max_lat, type='o', pch=pch[4])
# CHU
avg_max_lat <- c(26.33, 28.60, 30.45, 33.00, 34.72, 37.38, 39.54, 41.43)
lines(num_FFD, avg_max_lat, type='o', pch=pch[5])
# CU
avg_max_lat <- c(41.98, 47.87, 54.54, 61.21, 66.52, 72.73, 78.94, 85.39)
lines(num_FFD, avg_max_lat, type='o', pch=pch[6])
# C
avg_max_lat <- c(128.00, 129.72, 130.58, 136.56, 136.44, 140.62, 139.56, 137.27)
lines(num_FFD, avg_max_lat, type='o', pch=pch[7])
# # Average latency # #
# D1VHU
#avg_lat <- c(8.911326, 9.018336, 9.465811, 9.736929, 10.147071, 10.750860, 11.067770, 11.527987)
# D1HU
#avg_lat <- c(12.145766, 12.890715, 13.874480, 14.887266, 16.099299, 17.264806, 18.288610, 19.209590)
#lines(num_FFD, avg_lat, type='o', pch=pch[2])
# D2VHU
avg_lat <- c(9.522748, 9.886576, 10.206525, 10.637270, 11.090729, 11.401455, 11.849753, 12.328218)
plot(num_FFD, avg_lat, type='o', pch=pch[1], ylim=c(8.5,75),
xlab='Number of nodes', ylab='Average latency (slots)', main='(d) Fixed simulation area')
# D2HU
avg_lat <- c(12.766289, 13.639792, 14.520882, 15.514725, 16.700901, 17.644869, 18.718732, 19.663173)
lines(num_FFD, avg_lat, type='o', pch=pch[2])
# D2U
avg_lat <- c(21.209673, 23.746466, 26.708998, 29.437496, 32.494077, 35.789249, 38.411602, 41.863744)
lines(num_FFD, avg_lat, type='o', pch=pch[3])
# CVHU
avg_lat <- c(9.691743, 9.884853, 10.286174, 10.633576, 11.136235, 11.487778, 11.897385, 12.300913)
lines(num_FFD, avg_lat, type='o', pch=pch[4])
# CHU
avg_lat <- c(12.970214, 13.934990, 14.714850, 15.890067, 16.590730, 17.798586, 18.634638, 19.673634)
lines(num_FFD, avg_lat, type='o', pch=pch[5])
# CU
avg_lat <- c(21.068575, 23.701878, 26.658477, 30.097262, 32.577425, 35.778511, 38.385506, 41.531341)
lines(num_FFD, avg_lat, type='o', pch=pch[6])
# C
avg_lat <- c(75.044535, 75.914949, 75.333225, 76.202849, 74.931317, 74.261421, 73.977030, 72.968245)
lines(num_FFD, avg_lat, type='o', pch=pch[7])
dev.off()
|
6e6610721aa0e097d2f1cd199db7ebab9fd52d8a
|
de5e61b489ee3bf8d2c1a1093ff978579b4caa70
|
/man/vim.Rd
|
03f93104d1d0154f9933d7a13cead7743bfb503b
|
[
"MIT"
] |
permissive
|
minghao2016/vimp
|
575cad2831811af9796d07b3c5d962d5731f4110
|
cd0599fea1ccf66cec7a1013706459850be91d01
|
refs/heads/master
| 2023-05-11T23:20:34.840425
| 2021-06-03T16:28:07
| 2021-06-03T16:28:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 8,591
|
rd
|
vim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vim.R
\name{vim}
\alias{vim}
\title{Nonparametric Intrinsic Variable Importance Estimates and Inference}
\usage{
vim(
Y = NULL,
X = NULL,
f1 = NULL,
f2 = NULL,
indx = 1,
type = "r_squared",
run_regression = TRUE,
SL.library = c("SL.glmnet", "SL.xgboost", "SL.mean"),
alpha = 0.05,
delta = 0,
scale = "identity",
na.rm = FALSE,
sample_splitting = TRUE,
sample_splitting_folds = NULL,
stratified = FALSE,
C = rep(1, length(Y)),
Z = NULL,
ipc_weights = rep(1, length(Y)),
ipc_est_type = "aipw",
scale_est = TRUE,
bootstrap = FALSE,
b = 1000,
...
)
}
\arguments{
\item{Y}{the outcome.}
\item{X}{the covariates.}
\item{f1}{the fitted values from a flexible estimation technique
regressing Y on X.}
\item{f2}{the fitted values from a flexible estimation technique
regressing either (a) \code{f1} or (b) Y on X withholding the columns in
\code{indx}.}
\item{indx}{the indices of the covariate(s) to calculate variable
importance for; defaults to 1.}
\item{type}{the type of importance to compute; defaults to
\code{r_squared}, but other supported options are \code{auc},
\code{accuracy}, \code{deviance}, and \code{anova}.}
\item{run_regression}{if outcome Y and covariates X are passed to
\code{vimp_accuracy}, and \code{run_regression} is \code{TRUE},
then Super Learner will be used; otherwise, variable importance
will be computed using the inputted fitted values.}
\item{SL.library}{a character vector of learners to pass to
\code{SuperLearner}, if \code{f1} and \code{f2} are Y and X,
respectively. Defaults to \code{SL.glmnet}, \code{SL.xgboost},
and \code{SL.mean}.}
\item{alpha}{the level to compute the confidence interval at.
Defaults to 0.05, corresponding to a 95\% confidence interval.}
\item{delta}{the value of the \eqn{\delta}-null (i.e., testing if
importance < \eqn{\delta}); defaults to 0.}
\item{scale}{should CIs be computed on original ("identity") or
logit ("logit") scale?}
\item{na.rm}{should we remove NAs in the outcome and fitted values
in computation? (defaults to \code{FALSE})}
\item{sample_splitting}{should we use sample-splitting to estimate the full and
reduced predictiveness? Defaults to \code{TRUE}, since inferences made using
\code{sample_splitting = FALSE} will be invalid for variables with truly zero
importance.}
\item{sample_splitting_folds}{the folds used for sample-splitting;
these identify the observations that should be used to evaluate
predictiveness based on the full and reduced sets of covariates, respectively.
Only used if \code{run_regression = FALSE}.}
\item{stratified}{if \code{run_regression = TRUE}, should the generated
folds be stratified based on the outcome (helps to ensure class balance
across cross-validation folds)?}
\item{C}{the indicator of coarsening (1 denotes observed, 0 denotes
unobserved).}
\item{Z}{either (i) NULL (the default, in which case the argument
\code{C} above must be all ones), or (ii) a character vector
specifying the variable(s) among Y and X that are thought to play a
role in the coarsening mechanism.}
\item{ipc_weights}{weights for the computed influence curve (i.e.,
inverse probability weights for coarsened-at-random settings).
Assumed to be already inverted (i.e., ipc_weights = 1 / [estimated
probability weights]).}
\item{ipc_est_type}{the type of procedure used for coarsened-at-random
settings; options are "ipw" (for inverse probability weighting) or
"aipw" (for augmented inverse probability weighting).
Only used if \code{C} is not all equal to 1.}
\item{scale_est}{should the point estimate be scaled to be greater than 0?
Defaults to \code{TRUE}.}
\item{bootstrap}{should bootstrap-based standard error estimates be computed?
Defaults to \code{FALSE} (and currently may only be used if
\code{sample_splitting = FALSE}).}
\item{b}{the number of bootstrap replicates (only used if \code{bootstrap = TRUE}
and \code{sample_splitting = FALSE}).}
\item{...}{other arguments to the estimation tool, see "See also".}
}
\value{
An object of classes \code{vim} and the type of risk-based measure.
See Details for more information.
}
\description{
Compute estimates of and confidence intervals for nonparametric intrinsic
variable importance based on the population-level contrast between the oracle
predictiveness using the feature(s) of interest versus not.
}
\details{
We define the population variable importance measure (VIM) for the
group of features (or single feature) \eqn{s} with respect to the
predictiveness measure \eqn{V} by
\deqn{\psi_{0,s} := V(f_0, P_0) - V(f_{0,s}, P_0),} where \eqn{f_0} is
the population predictiveness maximizing function, \eqn{f_{0,s}} is the
population predictiveness maximizing function that is only allowed to access
the features with index not in \eqn{s}, and \eqn{P_0} is the true
data-generating distribution. VIM estimates are obtained by obtaining
estimators \eqn{f_n} and \eqn{f_{n,s}} of \eqn{f_0} and \eqn{f_{0,s}},
respectively; obtaining an estimator \eqn{P_n} of \eqn{P_0}; and finally,
setting \eqn{\psi_{n,s} := V(f_n, P_n) - V(f_{n,s}, P_n)}.
In the interest of transparency, we return most of the calculations
within the \code{vim} object. This results in a list including:
\describe{
\item{s}{the column(s) to calculate variable importance for}
\item{SL.library}{the library of learners passed to \code{SuperLearner}}
\item{type}{the type of risk-based variable importance measured}
\item{full_fit}{the fitted values of the chosen method fit to the full data}
\item{red_fit}{the fitted values of the chosen method fit to the reduced data}
\item{est}{the estimated variable importance}
\item{naive}{the naive estimator of variable importance (only used if \code{type = "anova"})}
\item{eif}{the estimated efficient influence function}
\item{eif_full}{the estimated efficient influence function for the full regression}
\item{eif_reduced}{the estimated efficient influence function for the reduced regression}
\item{se}{the standard error for the estimated variable importance}
\item{ci}{the \eqn{(1-\alpha) \times 100}\% confidence interval for the variable importance estimate}
\item{test}{a decision to either reject (TRUE) or not reject (FALSE) the null hypothesis, based on a conservative test}
\item{p_value}{a p-value based on the same test as \code{test}}
\item{full_mod}{the object returned by the estimation procedure for the full data regression (if applicable)}
\item{red_mod}{the object returned by the estimation procedure for the reduced data regression (if applicable)}
\item{alpha}{the level, for confidence interval calculation}
\item{sample_splitting_folds}{the folds used for sample-splitting (used for hypothesis testing)}
\item{y}{the outcome}
\item{ipc_weights}{the weights}
\item{mat}{a tibble with the estimate, SE, CI, hypothesis testing decision, and p-value}
}
}
\examples{
# generate the data
# generate X
p <- 2
n <- 100
x <- data.frame(replicate(p, stats::runif(n, -1, 1)))
# apply the function to the x's
f <- function(x) 0.5 + 0.3*x[1] + 0.2*x[2]
smooth <- apply(x, 1, function(z) f(z))
# generate Y ~ Bernoulli (smooth)
y <- matrix(rbinom(n, size = 1, prob = smooth))
# set up a library for SuperLearner; note simple library for speed
library("SuperLearner")
learners <- c("SL.glm")
# using Y and X; use class-balanced folds
est_1 <- vim(y, x, indx = 2, type = "accuracy",
alpha = 0.05, run_regression = TRUE,
SL.library = learners, cvControl = list(V = 2),
stratified = TRUE)
# using pre-computed fitted values
set.seed(4747)
V <- 2
full_fit <- SuperLearner::SuperLearner(Y = y, X = x,
SL.library = learners,
cvControl = list(V = V))
full_fitted <- SuperLearner::predict.SuperLearner(full_fit)$pred
# fit the data with only X1
reduced_fit <- SuperLearner::SuperLearner(Y = full_fitted,
X = x[, -2, drop = FALSE],
SL.library = learners,
cvControl = list(V = V))
reduced_fitted <- SuperLearner::predict.SuperLearner(reduced_fit)$pred
est_2 <- vim(Y = y, f1 = full_fitted, f2 = reduced_fitted,
indx = 2, run_regression = FALSE, alpha = 0.05,
stratified = TRUE, type = "accuracy",
sample_splitting_folds = est_1$sample_splitting_folds)
}
\seealso{
\code{\link[SuperLearner]{SuperLearner}} for specific usage of the
\code{SuperLearner} function and package.
}
|
6104b4730ddee8283dbe23d330c961e1458e939d
|
a3489a94b96f64bd6f3fe166d3f0affecd23c17a
|
/R/shiny.R
|
aeb18ca4f2d0a264f975a05354dac91c5fb1e00c
|
[
"MIT"
] |
permissive
|
fragla/acreular
|
a9e9f52bab3576b30ce3b498b1de4d0b003c3317
|
60491f59e7332a916a07f782d53206e345941775
|
refs/heads/master
| 2021-08-07T23:22:55.761150
| 2020-06-03T08:02:38
| 2020-06-03T08:02:38
| 186,041,450
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 910
|
r
|
shiny.R
|
#' Launch shiny acreular interface
#'
#' \code{shiny_acreular} launches a shiny interface for browser based ACR/EULAR calculations.
#'
#' @param display.mode The display mode to be passed to \link[shiny]{runApp}
#' @return NULL
#' @examples
#' \dontrun{
#' shiny_acreular()
#' shiny_acreular(display.mode="normal")
#' }
#' @export
shiny_acreular <- function(display.mode = "normal") {
pkgs <- c("shiny", "DT", "FSA", "ggplot2", "ggiraph", "ggiraphExtra", "mime", "parsedate", "PMCMRplus", "readxl", "shinycssloaders", "shinyWidgets")
  missing <- vapply(pkgs, function(x) !requireNamespace(x, quietly = TRUE), logical(1))
if (any(missing)) {
stop(paste("The following package(s) are required for shiny_acreular to work:",
paste(pkgs[missing], collapse=", ")),
call. = FALSE)
}
app_dir <- system.file("shiny", package = "acreular")
shiny::runApp(app_dir, display.mode = display.mode)
}
|
4dcf676dfeb817bcfbcb61a138afe3ecb295603b
|
ad4f1f1450f1ce443684565653cc76b61a070190
|
/R/treatmentFit.R
|
25aae872c6e88ad9063424cdbc4567a6e2341f12
|
[] |
no_license
|
guhjy/bartCause
|
a76cb1dd0e09c4455c2462d8a13716d5feaf3c85
|
4b51aea89bb37b3316c75c3dfe596ab0c449a070
|
refs/heads/master
| 2020-03-20T20:51:19.009332
| 2018-06-14T13:54:54
| 2018-06-14T13:54:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,180
|
r
|
treatmentFit.R
|
getGLMTreatmentFit <- function(treatment, confounders, data, subset, weights, ...)
{
treatmentIsMissing <- missing(treatment)
confoundersAreMissing <- missing(confounders)
dataAreMissing <- missing(data)
matchedCall <- match.call()
callingEnv <- parent.frame(1L)
if (treatmentIsMissing)
stop("'treatment' variable must be specified")
if (confoundersAreMissing)
stop("'confounders' variable must be specified")
glmCall <- evalEnv <- NULL
if (!dataAreMissing && is.data.frame(data)) {
evalEnv <- NULL
dataCall <- addCallArgument(redirectCall(matchedCall, quoteInNamespace(getTreatmentDataCall)), "fn", quote(stats::glm))
massign[glmCall, evalEnv] <- eval(dataCall, envir = callingEnv)
} else {
df <- NULL
literalCall <- addCallArgument(redirectCall(matchedCall, quoteInNamespace(getTreatmentLiteralCall)), "fn", quote(stats::glm))
dataEnv <- if (dataAreMissing) callingEnv else list2env(data, parent = callingEnv)
massign[glmCall, df] <- eval(literalCall, envir = dataEnv)
evalEnv <- sys.frame(sys.nframe())
}
glmCall <- addCallArgument(glmCall, 2L, quote(stats::binomial))
glmCall <- addCallArguments(glmCall, list(...))
glmFit <- eval(glmCall, envir = evalEnv)
list(fit = glmFit, p.score = fitted(glmFit), samples = NULL)
}
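# Fit the propensity score model with BART (probit link). Returns the fit, the
# posterior mean propensity scores, and the full matrix of posterior samples.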
getBartTreatmentFit <- function(treatment, confounders, data, subset, weights, ...)
{
treatmentIsMissing <- missing(treatment)
confoundersAreMissing <- missing(confounders)
dataAreMissing <- missing(data)
matchedCall <- match.call()
callingEnv <- parent.frame(1L)
if (treatmentIsMissing)
stop("'treatment' variable must be specified")
if (confoundersAreMissing)
stop("'confounders' variable must be specified")
bartCall <- NULL
if (!dataAreMissing && is.data.frame(data)) {
evalEnv <- NULL
dataCall <- addCallArgument(redirectCall(matchedCall, quoteInNamespace(getTreatmentDataCall)), "fn", quote(dbarts::bart2))
massign[bartCall, evalEnv] <- eval(dataCall, envir = callingEnv)
} else {
df <- NULL
literalCall <- addCallArgument(redirectCall(matchedCall, quoteInNamespace(getTreatmentLiteralCall)), "fn", quote(dbarts::bart2))
dataEnv <- if (dataAreMissing) callingEnv else list2env(data, parent = callingEnv)
massign[bartCall, df] <- eval(literalCall, envir = dataEnv)
evalEnv <- sys.frame(sys.nframe())
}
bartCall$verbose <- FALSE
bartCall <- addCallArguments(bartCall, list(...))
if (is.null(bartCall$keepTrees)) bartCall$keepTrees <- FALSE
bartFit <- eval(bartCall, envir = evalEnv)
x <- NULL ## R CMD check
samples <- evalx(pnorm(bartFit$yhat.train), if (length(dim(x)) > 2L) aperm(x, c(3L, 1L, 2L)) else t(x))
list(fit = bartFit,
p.score = apply(samples, 1L, mean),
samples = samples)
}
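# Candidate values of the BART prior hyperparameter k, evaluated below by
# cross-validation; the value minimizing the cross-validated loss is used to
# refit the final model.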
TEST_K <- c(0.5, 1, 2, 4, 8)
getBartXValTreatmentFit <- function(treatment, confounders, data, subset, weights, ...)
{
treatmentIsMissing <- missing(treatment)
confoundersAreMissing <- missing(confounders)
dataAreMissing <- missing(data)
matchedCall <- match.call()
callingEnv <- parent.frame(1L)
if (treatmentIsMissing)
stop("'treatment' variable must be specified")
if (confoundersAreMissing)
stop("'confounders' variable must be specified")
xbartCall <- NULL
if (!dataAreMissing && is.data.frame(data)) {
evalEnv <- NULL
dataCall <- addCallArgument(redirectCall(matchedCall, quoteInNamespace(getTreatmentDataCall)), "fn", quote(dbarts::xbart))
massign[xbartCall, evalEnv] <- eval(dataCall, envir = callingEnv)
} else {
df <- NULL
literalCall <- addCallArgument(redirectCall(matchedCall, quoteInNamespace(getTreatmentLiteralCall)), "fn", quote(dbarts::xbart))
dataEnv <- if (dataAreMissing) callingEnv else list2env(data, parent = callingEnv)
massign[xbartCall, df] <- eval(literalCall, envir = dataEnv)
evalEnv <- sys.frame(sys.nframe())
}
xbartCall$verbose <- FALSE
xbartCall$k <- TEST_K
bartCall <- xbartCall
bartCall[[1L]] <- quote(dbarts::bart2)
dotsList <- list(...)
xbartCall <- addCallArguments(xbartCall, dotsList)
bartCall <- addCallArguments(bartCall, dotsList)
if (is.null(bartCall$keepTrees)) bartCall$keepTrees <- FALSE
if (!is.null(matchedCall$n.burn) && is.list(dotsList[["n.burn"]]) && length(dotsList[["n.burn"]]) > 1L) {
xbartCall$n.burn <- dotsList[["n.burn"]][[1L]]
bartCall$n.burn <- dotsList[["n.burn"]][[2L]]
}
if (!is.null(matchedCall$n.samples) && is.list(dotsList[["n.samples"]]) && length(dotsList[["n.samples"]]) > 1L) {
xbartCall$n.samples <- dotsList[["n.samples"]][[1L]]
bartCall$n.samples <- dotsList[["n.samples"]][[2L]]
}
xbartFit <- eval(xbartCall, envir = evalEnv)
bartCall$k <- TEST_K[which.min(apply(xbartFit, 2L, mean))]
bartFit <- eval(bartCall, envir = evalEnv)
x <- NULL ## R CMD check
samples <- evalx(pnorm(bartFit$yhat.train), if (length(dim(x)) > 2L) aperm(x, c(3L, 1L, 2L)) else t(x))
list(fit = bartFit,
p.score = apply(samples, 1L, mean),
samples = samples)
}
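## Editor's sketch (assumption): these helpers appear to be internal to
## bartCause, so the call below is illustrative only; the literal-argument
## interface is inferred from the matched-call manipulation above.
## df <- data.frame(z = rbinom(100, 1, 0.5), x1 = rnorm(100), x2 = rnorm(100))
## fit <- getGLMTreatmentFit(treatment = z, confounders = x1 + x2, data = df)
## head(fit$p.score)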
|
321a4ba6b49eb54bcebfb938772a5fe65acaac9f
|
27074580be9e0f8540c308bcfddbb5ee2ca5f2ad
|
/man/plot.bHP.Rd
|
31a505aa687cb5dad988753391068e56b8ce1c88
|
[] |
no_license
|
lyig123/test2
|
e04f763ed67b21916e3baab2dd918230cecc383b
|
f4dc874d09ffb644e96d646677ecfda0109ce256
|
refs/heads/master
| 2022-01-29T17:45:56.251855
| 2019-07-21T15:44:44
| 2019-07-21T15:44:44
| 198,075,118
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,485
|
rd
|
plot.bHP.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.bHP.R
\name{plot.bHP}
\alias{plot.bHP}
\title{Plot method for boosted HP filter (\code{bHP}) objects}
\usage{
\method{plot}{bHP}(x, plot_type = "dynamic", interval_t = 0.3,
ylab = "", col_raw = "#2D5375", col_trend_h = "#FBB545",
col_trend_f = "red", col_pvalue_BIC = "red", raw_alpha = 255,
trend_h_alpha = 75, trend_f_alpha = 255, pvalue_BIC_alpha = 255,
legend_location = "upleft", iteration_location = "downright",
cex_text = 1.7, cex_legend = 1.5, main = paste0("Figure of ",
x$test_type, " bHP (", plot_type, ")"))
}
\arguments{
\item{x}{an object of class \code{bHP} returned by \code{BoostedHP}}

\item{plot_type}{the type of plot to draw; defaults to \code{"dynamic"}}

\item{interval_t}{the time interval between frames of a dynamic plot; defaults to 0.3}

\item{ylab}{label for the y axis}

\item{col_raw, col_trend_h, col_trend_f, col_pvalue_BIC}{colors of the raw
series, the intermediate trend paths, the final trend, and the p-value/BIC
trace, respectively}

\item{raw_alpha, trend_h_alpha, trend_f_alpha, pvalue_BIC_alpha}{opacities
(0 to 255) of the corresponding plot elements}

\item{legend_location, iteration_location}{positions of the legend and of the
iteration counter; default to \code{"upleft"} and \code{"downright"}}

\item{cex_text, cex_legend}{magnification of the annotation text and of the legend}

\item{main}{the plot title}
}
\value{
a plot of the raw series together with the estimated trend path(s).
}
\description{
Plot method for objects of class \code{bHP} produced by the boosted
(iterated) HP filter.
}
\examples{
lam <- 100 # tuning parameter for the annual data

# raw HP filter
bx_HP <- BoostedHP(IRE, lambda = lam, iter = FALSE)
plot(bx_HP)

# by BIC
bx_BIC <- BoostedHP(IRE, lambda = lam, iter = TRUE, test_type = "BIC")

# by ADF
bx_ADF <- BoostedHP(IRE, lambda = lam, iter = TRUE, test_type = "adf", sig_p = 0.050)

# summarize the outcome
outcome <- cbind(IRE, bx_HP$trend, bx_BIC$trend, bx_ADF$trend)
}
|