blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a7d90efbf56c31f72ee45f195aee65b01af6f140
|
476445b5a46af529cd5303b1a6f0d733b7d8e49a
|
/plot2.R
|
34b3e02b37328d27a20a563a06ddf54e2fab00a0
|
[] |
no_license
|
Irene9011/ExData_Plotting1
|
af1ee5373c33e10a8f566321446c619ab2cb2bf3
|
d748899c74207268085a2e36295775ca66f8b697
|
refs/heads/master
| 2021-01-15T17:37:16.230670
| 2015-03-09T03:35:40
| 2015-03-09T03:35:40
| 31,865,003
| 0
| 0
| null | 2015-03-08T20:48:09
| 2015-03-08T20:48:09
| null |
UTF-8
|
R
| false
| false
| 523
|
r
|
plot2.R
|
## plot2.R -- line plot of Global Active Power over time for
## 2007-02-01 and 2007-02-02, saved to plot2.png (480x480).
data <- read.csv("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", stringsAsFactors=FALSE)
## Keep only the two target days (Date column is d/m/Y text).
datause <- data[data$Date %in% c("1/2/2007","2/2/2007"),]
## date format: combine Date and Time into one POSIXct column.
## BUG FIX: as.Date() was called without a format string, which fails on
## "1/2/2007"-style dates; supply the d/m/Y format explicitly.
datetime <- paste(as.Date(datause$Date, format = "%d/%m/%Y"), datause$Time)
datause$Datetime <- as.POSIXct(datetime)
## plot
## BUG FIX: type must be "l" (line), not the digit "1"; and the plot must
## use the filtered frame `datause`, which carries the Datetime column --
## `data$Datetime` does not exist.
plot(datause$Global_active_power~datause$Datetime, type="l", ylab="Global Active Power(Kilowatts)", xlab="")
## copy the on-screen graph to a PNG device
dev.copy(png, file="plot2.png", width= 480, height=480)
dev.off()
|
beeedb43217a788081ee4628812da919e54e0b79
|
a42f5202a77101f64379eb0f94963ee7a9400a0e
|
/read_files&import_to_db.R
|
a6cd5aaf2807edce27f00cea43ee2cac150eabef
|
[] |
no_license
|
KangChungLin/Public-Opinion-Analysis
|
94bd564f8a0fe64c0c4e1d1f91d8346867c6933b
|
75f9c6c8d3e6fea5b22d09a5686f363c757b34c2
|
refs/heads/master
| 2023-01-30T21:09:52.389356
| 2020-12-15T10:21:29
| 2020-12-15T10:21:29
| 291,691,619
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 896
|
r
|
read_files&import_to_db.R
|
## Read every CSV in stock_hot/ and import each as a table into the
## MariaDB database "stock".
library(readr)
# All CSV filenames in the folder.
# BUG FIX: "." is a regex metacharacter, so pattern='.csv' also matched
# names like "xcsv" anywhere in the name; anchor and escape it.
files <- list.files('stock_hot/', pattern = '\\.csv$')
# Read all files as data frames and combine into a list.
tables <- lapply(paste("stock_hot",files,sep="/"),read_csv)
# Rename the list elements after the files (strip the ".csv" suffix).
# BUG FIX: fixed = TRUE so "." is matched literally, not as a wildcard.
newname <- gsub('.csv', '', files, fixed = TRUE)
newname <- paste0('stock',newname)
names(tables) <- newname
# Connect to MariaDB, use database "stock".
library(RMariaDB)
library(DBI)
drv <- dbDriver("MariaDB")
con <- dbConnect(drv, username="root", password="", dbname ="stock", host="localhost")
# Import all tables into the database (one dbWriteTable per table).
mapply(dbWriteTable, name = newname, value = tables , MoreArgs = list(conn = con))
# Turn a query result into a data frame (send/fetch style).
rs <- dbSendQuery(con, "SELECT * FROM stock20207;")
temp <- dbFetch(rs)
# Clear the pending result set.
dbClearResult(rs)
# Shortcut: dbGetQuery sends, fetches and clears in one call.
rs <- dbGetQuery(con, "SELECT * FROM stock20207;")
dbDisconnect(con)
|
70722a7d1bc344dc202a1151e8cfbad7b5972d36
|
ac4a92e44f14a5bc89b3da9b6d02095eb43c8997
|
/stan/simple_coin.R
|
1b76672a53d6c8308392d5723bfcdb5e63f9d38b
|
[] |
no_license
|
tavinathanson/dbda
|
afce6b963ba73fcff318aa36ea084d4d8038fed6
|
e1003ac05abd5ab878bd5f447f3209244cc8cf63
|
refs/heads/master
| 2021-01-12T07:42:10.608061
| 2017-01-05T22:57:33
| 2017-01-05T22:57:33
| 77,000,506
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 335
|
r
|
simple_coin.R
|
library("rstan")
rstan_options(auto_write = TRUE,
verbose = TRUE,
cores = 1)
coin_data <- list(N = 11,
y = c(1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0))
fit <- stan(file = 'simple_coin.stan',
data = coin_data,
iter = 100,
chains = 3)
plot(fit)
summary(fit)
|
bb07d72e22eca930c34d0239acf9afba74ac2dd8
|
edf2d3864db8751074133b2c66a7e7995a960c6b
|
/man/testdata.Rd
|
8e60cd84a31acb4d99e207e4e4e58b4d1f0de318
|
[] |
no_license
|
jkrijthe/RSSL
|
78a565b587388941ba1c8ad8af3179bfb18091bb
|
344e91fce7a1e209e57d4d7f2e35438015f1d08a
|
refs/heads/master
| 2023-04-03T12:12:26.960320
| 2023-03-13T19:21:31
| 2023-03-13T19:21:31
| 7,248,018
| 65
| 24
| null | 2023-03-28T06:46:23
| 2012-12-19T21:55:39
|
R
|
UTF-8
|
R
| false
| true
| 301
|
rd
|
testdata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testdata-data.R
\docType{data}
\name{testdata}
\alias{testdata}
\title{Example semi-supervised problem}
\description{
A list containing a sample from the \code{GenerateSlicedCookie} dataset for unit testing and examples.
}
|
ee5d22b60a8b6df2eb924b34cee7cff81dd90ac4
|
e68e99f52f3869c60d6488f0492905af4165aa64
|
/man/torch_flip.Rd
|
ea2cfb28af6d944e3b10dacf18c3e604c16b06cf
|
[
"MIT"
] |
permissive
|
mlverse/torch
|
a6a47e1defe44b9c041bc66504125ad6ee9c6db3
|
f957d601c0295d31df96f8be7732b95917371acd
|
refs/heads/main
| 2023-09-01T00:06:13.550381
| 2023-08-30T17:44:46
| 2023-08-30T17:44:46
| 232,347,878
| 448
| 86
|
NOASSERTION
| 2023-09-11T15:22:22
| 2020-01-07T14:56:32
|
C++
|
UTF-8
|
R
| false
| true
| 571
|
rd
|
torch_flip.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R, R/gen-namespace.R
\name{torch_flip}
\alias{torch_flip}
\title{Flip}
\usage{
torch_flip(self, dims)
}
\arguments{
\item{self}{(Tensor) the input tensor.}
\item{dims}{(a list or tuple) axis to flip on}
}
\description{
Flip
}
\section{flip(input, dims) -> Tensor }{
Reverse the order of a n-D tensor along given axis in dims.
}
\examples{
if (torch_is_installed()) {
x <- torch_arange(1, 8)$view(c(2, 2, 2))
x
torch_flip(x, c(1, 2))
}
}
|
72adf3f2bdba0ca22b2e7b6dddf4166c9eda780b
|
ea50b8df6e7dbf3a74b3641659c82a1ae042999e
|
/IC_modulation.R
|
3c602ba28d8023cafe76d7d993aded747b83eea2
|
[] |
no_license
|
yayuntsai/Decision-Science
|
ab493cbc8a176f7476e9cdfa35dca2d4b6d10818
|
50866b2c14bd7fd96a5ebc375bddbcc32e3e12d3
|
refs/heads/master
| 2023-02-28T06:17:51.179194
| 2021-01-28T15:10:55
| 2021-01-28T15:10:55
| 309,383,458
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,490
|
r
|
IC_modulation.R
|
## IC Module Analysis --------------------------------------------------
## Modules are "regular" (prob preg) or "irregular" (prob pirreg); each
## type has its own probability of being defective ("bad").
preg <- 0.7                 # P(module is regular)
pbad.reg <- 0.1             # P(bad | regular)
pgood.reg <- 1 - pbad.reg
#
pirreg <- 1 - preg          # P(module is irregular)
pbad.irreg <- 0.4           # P(bad | irregular)
pgood.irreg <- 1 - pbad.irreg

## Simulate n modules. Returns a character vector with values in
## {"goodreg", "badreg", "goodirreg", "badirreg"}.
## (Idiom fixes vs. original: `<-` assignment, TRUE instead of T.)
sim.ICmodule <- function(n = 10) {
  simulated.modules <- rep(NA, n)
  ## Label each module: 1 = regular, -1 = irregular.
  labels <- sample(c(-1, 1), n,
                   prob = c(pirreg, preg),
                   replace = TRUE)
  if (any(labels == 1)) {
    ## Draw a good/bad outcome for each regular module.
    simulated.modules[which(labels == 1)] <-
      sample(c("goodreg", "badreg"),
             sum(labels == 1),
             prob = c(pgood.reg, pbad.reg),
             replace = TRUE)
  }
  if (any(labels == -1)) {
    ## Draw a good/bad outcome for each irregular module.
    simulated.modules[which(labels == -1)] <-
      sample(c("goodirreg", "badirreg"),
             sum(labels == -1),
             prob = c(pgood.irreg, pbad.irreg),
             replace = TRUE)
  }
  simulated.modules
}
sim.ICmodule()

## Monte Carlo: S batches of 10 modules, one column per batch.
S <- 10000
sim.table <- replicate(S, sim.ICmodule())
dim(sim.table)
## Per-batch counts. Preallocated instead of grown with c() in the loop
## (performance/idiom fix; values are identical).
nbatch <- ncol(sim.table)
badnum <- numeric(nbatch)       # bad modules per batch
badnumreg <- numeric(nbatch)    # bad regular modules per batch
badnumirreg <- numeric(nbatch)  # bad irregular modules per batch
numreg <- numeric(nbatch)       # regular modules (good + bad) per batch
numirreg <- numeric(nbatch)     # irregular modules (good + bad) per batch
for (i in seq_len(nbatch)) {
  badnumreg[i] <- sum(sim.table[, i] == "badreg")
  badnumirreg[i] <- sum(sim.table[, i] == "badirreg")
  badnum[i] <- badnumreg[i] + badnumirreg[i]
  numreg[i] <- badnumreg[i] + sum(sim.table[, i] == "goodreg")
  numirreg[i] <- badnumirreg[i] + sum(sim.table[, i] == "goodirreg")
}
## Estimated P(exactly 2 bad modules in a batch).
sum(badnum == 2) / S
sum(numreg == 10 & badnum == 2)  # how many cases have exactly two bad (all-regular batches)
sum(numreg == 10 & badnum == 2) / sum(badnum == 2)
k <- 1
sum(badnum == k)
sum(numirreg >= 1 & badnum == k)
sum(numirreg >= 1 & badnum == k) / sum(badnum == k)
|
d71e44ccf7eb195768d1d4aaaa922e7dcc15d5a1
|
1f12384717b1003c9771f8b76fce9a05c8b50db8
|
/DataAnalysis/weather/weather.R
|
46a61ef64a362702085283ac8d4a5f77679ec6ca
|
[
"MIT"
] |
permissive
|
googlr/NYC-At-Large
|
599734656c66d28bff32a810f698a741aa83b68d
|
bad5a2196c6c01f85a4d364a07a3146cd9dca0ed
|
refs/heads/master
| 2021-09-13T15:56:08.942499
| 2018-05-01T23:29:51
| 2018-05-01T23:29:51
| 111,316,971
| 6
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,612
|
r
|
weather.R
|
setwd("G:/NYU_Class/BIGDATA/project/ouput")
# **************************************************
# processing date data
# **************************************************
weather.frame <- read.csv("merge.csv")
# plot
plot(weather.frame$TMAX, weather.frame$count, main = "MAX Temperature with Crimes", xlab = "Max Temperature", ylab = "Count")
plot(weather.frame$TMIN, weather.frame$count, main = "MIN Temperature with Crimes", xlab = "MIN Temperature", ylab = "Count")
plot(weather.frame$PRCP, weather.frame$count, main = "dot", xlab = "PRACI", ylab = "Count")
hist(weather.frame$PRCP)
weather.frame$precipitation <- sapply(weather.frame$PRCP, function(x)
if (x < 0.5) return ("0 - 0.5")
else if (x < 1) return ("0.5 - 1")
else if (x < 2) return ("1 - 2")
else return ("2 - more")
)
# box plot
library(ggplot2)
p <- ggplot(weather.frame, aes(precipitation, count))
p + geom_boxplot()
# wind
plot(weather.frame$AWND, weather.frame$count, main = "Wind with Crimes", xlab = "Average daily wind speed", ylab = "Count")
# merge with merge rate
rate.frame <- read.csv("merge_rate.csv")
weather.frame <- merge(weather.frame, rate.frame, key = "DATE")
library(plot3D)
#c("Wind", "Inside Rate", "Count")
plot(weather.frame$AWND, weather.frame$rate, main = "Wind with Crimes", xlab = "Average daily wind speed", ylab = "Rate")
plot(weather.frame$count, weather.frame$rate, main = "Wind with Crimes", xlab = "count", ylab = "Rate")
scatter3D(weather.frame$AWND, weather.frame$rate, weather.frame$count, xlab = "Wind",
ylab = "Inside Rate", zlab = "Count",
theta = 30, phi = 50
)
|
1a2bea2560a432e673cb590aa34e1c6f3c7bac97
|
a873933539f887e1d74f34f2c2439c170cb3d9a8
|
/R/bigOutlierTest.R
|
3aec55486165b9253059d3c971f7e3b26971f698
|
[] |
no_license
|
austian/bigExplore
|
3e3dd5af3dcf5b4da6777f79b3831fb31d9c50b6
|
5b06171d5145e36ad779551d5bb82ab952b8ea6c
|
refs/heads/master
| 2021-01-25T07:27:41.146103
| 2013-04-22T19:48:52
| 2013-04-22T19:48:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,264
|
r
|
bigOutlierTest.R
|
#' @title Outlier test for studentized residuals
#'
#' @description t-tests for the studentized residuals to test whether a given data point is an outlier
#'
#' @details The studentized residuals can be interpreted as measuring the discrepancy of fitting a linear model which specifically
#' accounts for the corresponding data point being an outlier. This discrepancy is exactly the studentized residual and it follows a
#' t-distribution. One can then measure the statistical significance of the residual (and hence the discrepancy).
#'
#' Since one is looking for entries which have statistically significant residuals, in fact one is conducting multiple tests
#' in the search for large residuals. In a smaller data set, a Bonferroni correction may be used to take this multiple
#' comparison issue into account. However with data sets having potentially hundreds of millions of rows, even the more
#' sophisticated multiple comparison methods are too conservative in their p-value adjustments.
#'
#' Since we are searching for outlying points, having many false negatives is not necessarily a bad thing and still gives us an
#' interesting and smaller data set to sift through. In this regard, this test returns all rows with the specified
#' significance level with no multiple comparison correction.
#'
#' Another approach to multiple comparison is via the false discovery rate (FDR), which attempts to control the number of false
#' positives. Again by having such large data sets the algorithms which compute adjusted p-values corresponding to a fixed FDR level
#' return uselessly large p-values. We instead use a p-value which is computed as an average from the Benjamini,Hochberg,Yekutieli
#' FDR procedure. This gives a smaller subset of the previously mentioned significance test.
#'
#' The function returns a list containing the significant rows in the first entry, and the average FDR significant rows in the second.
#'
#' @param studentized single column big.matrix containing the studentized residuals
#' @param biglmObj corresponding fitted biglm object
#' @param sigLevel significance level for the t-tests
#' @param approxFDR FDR level to use to calculate the average to adjust the p-values
#' @return a list with first coordinate the significant residuals and second coordinate the average FDR significant residuals
#' @author Alex Ustian <alex.l.ustian@@gmail.com>
#' @export
#' @examples
#' #Create big.matrix data set of the form y = x + epsilon in memory
#' require(bigmemory)
#' fuzzyLine <- big.matrix(nrow = 100000, ncol = 2, type = "double")
#' fuzzyLine <- fuzzyCurve(nrow = 100000, 1, 1, "fuzzyLine", new = FALSE)
#'
#' #Separate response and explanatory variables
#' fuzzyRes <- sub.big.matrix(fuzzyLine, firstCol = 2)
#' fuzzyExp <- sub.big.matrix(fuzzyLine, firstCol = 1, lastCol = 1)
#'
#' #Fit a linear model
#' require(biglm)
#' require(biganalytics)
#' fuzzyLm <- biglm.big.matrix(y ~ x, data = fuzzyLine)
#'
#' #Compute studentized residuals and output to an in-memory big.matrix
#' studentized <- big.matrix(nrow = 100000, ncol = 1, type = "double")
#' studentized <- bigResiduals(fuzzyRes, fuzzyExp, fuzzyLm, type = "student", "studentized", new = FALSE)
#'
#' #Run the outlier test
#' outliers <- bigOutlierTest(studentized, fuzzyLm)
bigOutlierTest <- function(studentized, biglmObj, sigLevel = .05, approxFDR = .05) {
  # Fail fast with a clear error; the original require() merely returns
  # FALSE when a package is missing and lets the code die later on an
  # undefined function.
  if (!requireNamespace("bigmemory", quietly = TRUE) ||
      !requireNamespace("biganalytics", quietly = TRUE)) {
    stop("bigOutlierTest requires the 'bigmemory' and 'biganalytics' packages",
         call. = FALSE)
  }
  rowdim <- dim(studentized)[1]
  # Residual degrees of freedom for the studentized-residual t-tests.
  df <- rowdim - length(biglmObj$names) - 3
  # Two-sided cutoffs at the (uncorrected) significance level.
  levelCutUp <- qt(1 - sigLevel, df)
  levelCutDown <- qt(sigLevel, df)
  # Average p-value from the Benjamini-Hochberg-Yekutieli FDR procedure;
  # .57721 is the Euler-Mascheroni constant (sum(1/i) ~ log(n) + gamma).
  rfdr <- (approxFDR * (rowdim + 1)) / (2 * rowdim)
  afdr <- rfdr / (log(rowdim) + .57721)
  fdrCutUp <- qt(1 - afdr, df)
  fdrCutDown <- qt(afdr, df)
  # Row indices of residuals beyond the cutoffs, both tails.
  sig <- c(bigmemory::mwhich(studentized, 1, levelCutUp, "ge"),
           bigmemory::mwhich(studentized, 1, levelCutDown, "le"))
  aFDR <- c(bigmemory::mwhich(studentized, 1, fdrCutUp, "ge"),
            bigmemory::mwhich(studentized, 1, fdrCutDown, "le"))
  # MESSAGE FIX: sigLevel is a proportion (e.g. 0.05); the original text
  # read "significant at the 0.05 % level", which is misleading.
  cat("There are", length(sig), "or", 100 * length(sig) / rowdim,
      "% residuals significant at the", sigLevel, "significance level.\n")
  cat("There are", length(aFDR), "or", 100 * length(aFDR) / rowdim,
      "% residuals significant at the", approxFDR,
      "adjusted average approximate FDR.")
  return(list(significant = sig, approxFDR = aFDR))
}
|
d5c32129704956cc60a4ab47fb6259660d8d88d6
|
1a1120c4e6698982df4d5a14f0358b7a7c072eb5
|
/Plot1.R
|
16dc9ab234a2205db7ebc15b486152a39bc9254c
|
[] |
no_license
|
mhdns/eploratory_data_analysis_project_2
|
9ae8121fb2f014f29c62c5137d0d3324aa83ec31
|
d586d6cbe9e831d527938a6bd6cc53ae05a4ad57
|
refs/heads/main
| 2023-04-30T14:27:32.928727
| 2021-05-23T10:35:15
| 2021-05-23T10:35:15
| 370,024,749
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,368
|
r
|
Plot1.R
|
library(dplyr)
## Plot 1: total annual PM2.5 emissions in the USA, as a labelled bar
## chart written to plot1.png.
# Get data from data source (download only if either RDS file is missing)
if (!all(file.exists("data/summarySCC_PM25.rds", "data/Source_Classification_Code.rds"))) {
  url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
  # Check if data directory exists and create if not
  if (!dir.exists("data")) {
    dir.create("data")
  }
  # Download and extract directly into data/ instead of the original
  # setwd("data")/setwd("..") dance -- an error mid-block would have left
  # the session stranded in the wrong working directory.
  zipfile <- file.path("data", "data.zip")
  download.file(url, zipfile)
  unzip(zipfile, exdir = "data")
  file.remove(zipfile)
  rm(url, zipfile)
}
# Read data into memory
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")
# Yearly emissions, also expressed in millions of tons (rounded for labels)
yearlyEmissions <- NEI %>%
  group_by(year) %>%
  summarise(total_emissions = sum(Emissions, na.rm=TRUE)) %>%
  mutate(total_emissions_mil = round(total_emissions/1000000,2))
# Plot
png("plot1.png")
bplot <- with(yearlyEmissions, barplot(total_emissions_mil,
                                       ylim=c(0,8), las=1,
                                       main="Total Annual PM2.5 Emissions in USA",
                                       names.arg=year,
                                       ylab = "Total PM2.5 Emissions (Million tons)"))
# Label each bar with its value just above the bar top.
text(x = bplot,
     y = yearlyEmissions$total_emissions_mil+0.5,
     labels = as.character(yearlyEmissions$total_emissions_mil))
dev.off()
|
3b1e39c24a0f0291da6e7403e0c5f1a29b4d8a23
|
27d558dd6b55d6d39b1284aa42055fe2e598cf57
|
/watcher/templates/shinyui.R
|
36e2e9abb5803e15361861bddc6776fae29b81c2
|
[] |
no_license
|
Blaza/shinyintro
|
7b60bee3336b2c9dea55ee808da4c575bde055cc
|
2e93004ca4c1036b422ee9efabde692c7a329f8f
|
refs/heads/master
| 2021-08-24T06:33:29.932376
| 2017-12-08T12:27:40
| 2017-12-08T12:27:40
| 113,107,078
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26
|
r
|
shinyui.R
|
# shinyui.R -- watcher template for a Shiny UI file.
library(shiny)
# NOTE(review): shinyUI() is called with no UI definition; this is a
# template placeholder, not a runnable app.
shinyUI()
|
2a07e0e4e9b1cff563689011a11e042f31e22dd4
|
71ecfb71d8dc4efa9c4a50f7e72856b52f22bc28
|
/coursework/SMM/Modelling_and_predictions/FatResearch.R
|
871268fc08da93529538da98ffabdb544eae3805
|
[] |
no_license
|
k-khaf/bit-bank
|
a30ca5b3e9f2a975709d662af616718d6e78a349
|
bd8d534df35fc4d9843eb5d50d58d386c4ab8bcd
|
refs/heads/develop
| 2020-12-03T09:45:38.200719
| 2020-08-28T15:34:09
| 2020-08-28T15:34:09
| 231,271,419
| 0
| 0
| null | 2020-08-28T15:34:11
| 2020-01-01T22:36:57
|
R
|
UTF-8
|
R
| false
| false
| 8,217
|
r
|
FatResearch.R
|
## FatResearch.R -- exploratory analysis and model selection for body
## fat (brozek) against body-part circumferences.
## Assumes data frames `Train` and `Test` are already loaded in the
## session -- TODO confirm where they come from.
## NOTE(review): rm(list=ls()) in a script wipes the user's workspace;
## flagged, left as-is.
rm(list=ls())
library('ggplot2')
library('leaps')
## NOTE(review): 'vminfulence' does not look like a real package name
## (probable typo -- all influence/diagnostic functions used below come
## from 'car'); this library() call will error. Confirm intended package.
library('vminfulence')
library('visreg')
library('car')
library('dplyr')
plot(Train) #talk about relationships between brozek and covariates
scatterplotMatrix(Train[,-1]) #talk about skewness, include mean>median
#Boxplots: talk about skew more in depth plus potential outliers:
boxplot(Train$neck, Train$knee, Train$ankle, Train$biceps, Train$forearm, Train$wrist,
main="Boxplot for each bodypart",
ylab="Centimeters",
names=c("Neck", "Knee", "Ankle", "Biceps", "Forearm", "Wrist"))
boxplot(Train$thigh, Train$chest, Train$abdom, Train$hip,
main="Boxplot for each bodypart",
ylab="Centimeters",
names=c("Thigh", "Chest", "Abdomen", "Hip"))
# talk about dependence - high dependence could mean potentially removing one of the variables:
#chest v abdom, hip vs thigh:
plot(Train)
# Scatter plots of each circumference against body fat, with loess smooths.
# NOTE(review): the x-axis labels all say "neck" even though different
# covariates are plotted -- copy/paste artifact in the labels.
qplot(Train$brozek, Train$neck)+ggtitle("Circumference of Neck")+labs(x="Circumference of neck (cm)", y="Body Fat")+stat_smooth(colour="red", method = "loess")
qplot(Train$brozek, Train$chest)+ggtitle("Circumference of Chest")+labs(x="Circumference of neck (cm)", y="Body Fat")+stat_smooth(colour="red", method = "loess")
qplot(Train$brozek, Train$abdom)+ggtitle("Circumference of Adomen")+labs(x="Circumference of neck (cm)", y="Body Fat")+stat_smooth(colour="red", method = "loess")
qplot(Train$brozek, Train$hip)+ggtitle("Circumference of hip")+labs(x="Circumference of neck (cm)", y="Body Fat")+stat_smooth(colour="red", method = "loess")
qplot(Train$brozek, Train$thigh)+ggtitle("Circumference of thigh")+labs(x="Circumference of neck (cm)", y="Body Fat")+stat_smooth(colour="red", method = "loess")
qplot(Train$brozek, Train$knee)+ggtitle("Circumference of knee")+labs(x="Circumference of neck (cm)", y="Body Fat")+stat_smooth(colour="red", method = "loess")
qplot(Train$brozek, Train$ankle)+ggtitle("Circumference of ankle")+labs(x="Circumference of neck (cm)", y="Body Fat")+stat_smooth(colour="red", method = "loess")
qplot(Train$brozek, Train$biceps)+ggtitle("Circumference of biceps")+labs(x="Circumference of neck (cm)", y="Body Fat")+stat_smooth(colour="red", method = "loess")
qplot(Train$brozek, Train$forearm)+ggtitle("Circumference of Forearm")+labs(x="Circumference of neck (cm)", y="Body Fat")+stat_smooth(colour="red", method = "loess")
qplot(Train$brozek, Train$wrist)+ggtitle("Circumference of Wrist")+labs(x="Circumference of neck (cm)", y="Body Fat")+stat_smooth(colour="red", method = "loess")
# Normal QQ plots for each covariate.
ggplot(data=Train, aes(sample=neck))+stat_qq(color="black")+stat_qq_line(color="red")+ggtitle("QQ-Plot for Neck")
ggplot(data=Train, aes(sample=chest))+stat_qq(color="black")+stat_qq_line(color="red")+ggtitle("QQ-Plot for Chest")
ggplot(data=Train, aes(sample=abdom))+stat_qq(color="black")+stat_qq_line(color="red")+ggtitle("QQ-Plot for Abdomen")
ggplot(data=Train, aes(sample=hip))+stat_qq(color="black")+stat_qq_line(color="red")+ggtitle("QQ-Plot for Hip")
## Normal QQ plots for the remaining covariates.
## BUG FIX: aes(sample=tigh) referenced a nonexistent column; the
## variable is `thigh` (as used in the boxplot section above).
ggplot(data=Train, aes(sample=thigh))+stat_qq(color="black")+stat_qq_line(color="red")+ggtitle("QQ-Plot for Thigh")
ggplot(data=Train, aes(sample=knee))+stat_qq(color="black")+stat_qq_line(color="red")+ggtitle("QQ-Plot for Knee")
ggplot(data=Train, aes(sample=ankle))+stat_qq(color="black")+stat_qq_line(color="red")+ggtitle("QQ-Plot for Ankle")
ggplot(data=Train, aes(sample=biceps))+stat_qq(color="black")+stat_qq_line(color="red")+ggtitle("QQ-Plot for Biceps")
ggplot(data=Train, aes(sample=forearm))+stat_qq(color="black")+stat_qq_line(color="red")+ggtitle("QQ-Plot for Forearm")
ggplot(data=Train, aes(sample=wrist))+stat_qq(color="black")+stat_qq_line(color="red")+ggtitle("QQ-Plot for Wrist")
# Model Selection ----
fit1 <- lm(brozek~., data=Train)   # full model: all covariates
fit0N <- lm(brozek~1, data=Train)  # null (intercept-only) model
coefficients(fit1) # Brozek = coef*x_i
#First test: best-subset selection (leaps::regsubsets)
BSR <- regsubsets(brozek~., data=Train, nbest=1, nvmax=10)
summary.out <- summary(BSR)
summary.out
summary.out$cp #parameters, choose 6.15 since > 3.32. include covs with star
plot(BSR, scale='')
plot(BSR, scale='Cp')
plot(BSR, scale='bic')
plot(BSR, scale='aic')
fit2 <- lm(brozek~neck+abdom+hip+wrist, data=Train)
#second test: backward stepwise selection by AIC
step(fit1) #takes out cov with smallest AIC
fit3 <- lm(brozek ~ neck+abdom+hip+forearm+wrist, data = Train) #use for model valuation
#third test: forward stepwise selection from the null model
step(fit0N, scope = brozek~neck+chest+abdom+hip+thigh+knee+ankle+biceps+forearm+wrist)
#model matches fit3
# F-test comparing the two models:
anova(fit2,fit3)
#evidence suggesting that fit3 is better model
#HLP (high-leverage points):
influenceIndexPlot(fit3)
influencePlot(fit3)
# HLP if studentRes>2,<-2 or Hat,CookD.0.5
# there are 3 HLP
# Model Selection 2 - removing 1 HLP ----
fit4 <- update(fit3,
subset = rownames(Train) != "33")
compareCoefs(fit3,fit4)
# NOTE(review): row 69 is dropped here while fit4 above dropped row "33"
# -- confirm which HLP was meant to be removed at this step.
Train1 <- Train[-69,]
fit5 <- lm(brozek~neck+chest+abdom+hip+thigh+knee+ankle+biceps+forearm+wrist, data=Train1)
fit4N <- lm(brozek~1, data=Train1)
coefficients(fit4) # Brozek = coef*x_i
#First test: best-subset selection on the reduced data
BSR <- regsubsets(brozek~., data=Train1, nbest=1, nvmax=10)
summary.out <- summary(BSR)
summary.out
summary.out$cp #parameters. # Choose row 3
fit6 <- lm(brozek~neck+abdom+hip+wrist, data=Train1)
#second test:
step(fit5) #takes out cov with smallest AIC
fit7 <- lm(brozek ~ neck+abdom+hip+wrist+forearm, data = Train1) #use for model valuation
#third test:
step(fit4N, scope = brozek~neck+chest+abdom+hip+thigh+knee+ankle+biceps+forearm+wrist)
#model matches fit7
anova(fit6,fit7)
#Select fit7
#compare deviances:
deviance(fit3)
deviance(fit7)
#Result of taking out row 33 means the deviance has decreased significantly from model 3 to 7, suggesting fit7 is better
# Model Selection 3 - take out 3 HLP ----
Train2 <- Train[-c(33,69,72),]
fit9 <- lm(brozek~neck+chest+abdom+hip+thigh+knee+ankle+biceps+forearm+wrist, data=Train2)
fit8N <- lm(brozek~1, data=Train2)
coefficients(fit9)
#First test
BSR <- regsubsets(brozek~., data=Train2, nbest=1, nvmax=10)
summary.out <- summary(BSR)
summary.out
summary.out$cp
fit10 <- lm(brozek~neck+abdom+hip+ankle+wrist, data=Train2)
#second test:
step(fit9) #takes out cov with smallest AIC
fit11 <- lm(brozek~ neck+abdom+hip+ankle+forearm+wrist, data = Train2)
#third test:
step(fit8N, scope = brozek~neck+chest+abdom+hip+thigh+knee+ankle+biceps+forearm+wrist)
#model matches fit11
anova(fit10,fit11)
#Choose fit11
deviance(fit11)
deviance(fit7)
#Deviance smaller again so choose fit11
#HLP:
influenceIndexPlot(fit11)
influencePlot(fit11)
# HLP if studentRes>2,<-2 or Hat,CookD.0.5
# there are 2 HLP: 68,177
# Model Selection 4 - take out 2 HLP ----
Train3 <- Train1[-c(68,177),]
fit13 <- lm(brozek~neck+chest+abdom+hip+thigh+knee+ankle+biceps+forearm+wrist, data=Train3)
fit12N <- lm(brozek~1, data=Train3)
coefficients(fit13)
#First test
BSR <- regsubsets(brozek~., data=Train3, nbest=1, nvmax=10)
summary.out <- summary(BSR)
summary.out
summary.out$cp
fit14 <- lm(brozek~neck+abdom+hip+ankle+wrist, data=Train3)
#second test:
step(fit13) #takes out cov with smallest AIC
fit15 <- lm(brozek~ neck+abdom+hip+ankle+forearm+wrist, data = Train3)
#third test:
step(fit12N, scope = brozek~neck+chest+abdom+hip+thigh+knee+ankle+biceps+forearm+wrist)
#agrees with fit15
anova(fit14,fit15)
#Choose 15
# Deviance comparison across all selected models:
deviance(fit3)
deviance(fit7)
deviance(fit11)
deviance(fit15)
# Model validation ----
qqPlot(fit3) # Follows normal distrn w 2 outliers
hist(rstudent(fit3)) #slight positive skew
crPlots(fit3)
residualPlots(fit3, tests=FALSE)
#solid line plots trend
#dashed line is predicted, fairly consistent
## Transformation checks and out-of-sample comparison.
fit20 <- lm(sqrt(brozek) ~ neck+abdom+hip+forearm+wrist, data = Train)
qqPlot(fit20)
hist(rstudent(fit20))
## BUG FIX: this cubic model was assigned to `fit2` while every use below
## refers to `fit21` (which was never defined); assign to fit21 as clearly
## intended -- this also stops the earlier fit2 from being clobbered.
fit21 <- lm(brozek ~ I(neck^3)+abdom+I(hip^3)+forearm+wrist, data = Train)
residualPlots(fit21, tests=FALSE)
crPlots(fit21)
plot(fit21, which = 2) # residuals bounce randomly around 0 line => linear model is good
crPlots(fit3)
#b) out-of-sample comparison on the Test set
fit1b <- lm(brozek~., data = Train)
fit2b <- lm(brozek ~ I(neck^3)+abdom+I(hip^3)+forearm+wrist, data = Train)
TestResponses <- select(Test, brozek)$brozek
## BUG FIX: the predictions were computed from fit1/fit2 instead of the
## fit1b/fit2b models defined just above for this comparison.
predictions1 <- predict(fit1b, newdata=select(Test, -brozek))
predictions2 <- predict(fit2b, newdata=select(Test, -brozek))
MSE1 <- mean((predictions1 - TestResponses)^2)
MSE2 <- mean((predictions2 - TestResponses)^2)
MSE1
MSE2
## BUG: MSE3 was referenced but never defined (there is no third
## prediction model above); flagged and disabled.
## MSE3
|
dc528a9fcd8e0dc483bfce8dc4583d3f89d7bb6d
|
f794dd0bb9512c3e0a0c8af215347684de32e28e
|
/man/dual_correction.Rd
|
c7685fc8555c1d313a29de0043faf91dd491490c
|
[] |
no_license
|
nyj123/AccuCor2
|
0b22dc77eb60e55a57bb5a6d45d30e67dc800ba6
|
c1d956e41d14033a077804173222a7f4798bc40a
|
refs/heads/main
| 2023-06-15T07:41:56.385152
| 2021-03-28T20:55:13
| 2021-03-28T20:55:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,502
|
rd
|
dual_correction.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AccuCor2_V1.0.R
\name{dual_correction}
\alias{dual_correction}
\title{Natural Abundance correction for 13C-15N or 13C-2H tracer labeling data}
\usage{
dual_correction(
InputFile,
InputSheetName,
MetaboliteListName,
Isotopes,
Resolution = 1e+05,
Autofill = F,
CarbonNaturalAbundance = c(0.9893, 0.0107),
HydrogenNaturalAbundance = c(0.999885, 0.000115),
NitrogenNaturalAbundance = c(0.99636, 0.00364),
OxygenNaturalAbundace = c(0.99757, 0.00038, 0.00205),
SulfurNaturalAbundance = c(0.95, 0.0075, 0.0425),
SiliconNaturalAbundance = c(0.92223, 0.04685, 0.03092),
ChlorineNaturalAbundance = c(0.7576, 0.2424),
BromineNaturalAbundance = c(0.5069, 0.4931),
C13Purity = 0.99,
H2N15Purity = 0.99,
ResDefAt = 200,
ReportPoolSize = TRUE
)
}
\arguments{
\item{InputFile}{String representing the name of the Input file}
\item{InputSheetName}{String representing the name of excel sheet that contains the input data}
\item{MetaboliteListName}{String representing the name of the excel database the name, formula and charge information of each metabolite}
\item{Isotopes}{String that specify the type of tracer isotopes, "CN" or "CH"}
\item{Resolution}{For Exactive, the Resolution is 100000, defined at Mw 200}
\item{Autofill}{default: FALSE. If the intensity of an isotopologue is not provided, should 0 be assumed?}
\item{CarbonNaturalAbundance}{vector of C Natural Abundance in the order of (12C,13C)}
\item{HydrogenNaturalAbundance}{vector of H Natural Abundance in the order of (1H,2H)}
\item{NitrogenNaturalAbundance}{vector of N Natural Abundance in the order of (14N,15N)}
\item{SulfurNaturalAbundance}{vector of S Natural Abundance in the order of (32S,33S,34S)}
\item{SiliconNaturalAbundance}{vector of Si Natural Abundance in the order of (28Si,29Si,30Si)}
\item{ChlorineNaturalAbundance}{vector of Cl Natural Abundance in the order of (35Cl,37Cl)}
\item{BromineNaturalAbundance}{vector of Br Natural Abundance in the order of (79Br,81Br)}
\item{C13Purity}{default:0.99.The isotopic purity for C13 tracer.}
\item{H2N15Purity}{default:0.99. The isotopic purity for H2/N15 tracer.}
\item{ResDefAt}{Resolution defined at (in Mw), e.g. 200 Mw}
\item{ReportPoolSize}{default: TRUE}
}
\value{
New excel sheet named: 'Corrected', 'Normalized', 'Pool size' added to the original excel file.
}
\description{
Natural Abundance correction for 13C-15N or 13C-2H tracer labeling data
}
|
234a7fb539dcc4f1ce1f2fea607fcf1beeff014a
|
03c1325893b502b7855f83287e02e7f14af4f1c7
|
/projects/R/chapter9/outliers.R
|
87d6fd1c8902e80f1efaac2ec89dcdfbbd7605a3
|
[] |
no_license
|
elgeish/Computing-with-Data
|
8562a15a74df6f379296b84e393a358eebf3d3fc
|
5547dc28c027e023783238be78eab216ec5204f4
|
refs/heads/master
| 2023-07-29T06:00:26.625191
| 2023-07-16T00:32:38
| 2023-07-16T00:32:38
| 145,339,359
| 15
| 24
| null | 2023-07-16T00:32:40
| 2018-08-19T21:38:09
|
Java
|
UTF-8
|
R
| false
| false
| 1,053
|
r
|
outliers.R
|
# Example 1 - the Black Monday stock crash on October 19, 1987
library(Ecdat)
# BUG FIX: qplot() belongs to ggplot2, which was never attached; without
# this the two plots below fail with "could not find function qplot".
library(ggplot2)
data(SP500, package = 'Ecdat')
qplot(r500,
      main = "Histogram of log(P(t)/P(t-1)) for SP500 (1981-91)",
      xlab = "log returns",
      data = SP500)
qplot(seq(along = r500),
      r500,
      data = SP500,
      geom = "line",
      xlab = "trading days since January 1981",
      ylab = "log returns",
      main = "log(P(t)/P(t-1)) for SP500 (1981-91)")
# Example 2 - remove outliers: trim the 2 extreme values at each end,
# then keep points within +/- 5 SD of the trimmed mean.
# (`<-` replaces `=` assignment throughout; behavior unchanged.)
original_data <- rnorm(20)
original_data[1] <- 1000                # inject one extreme outlier
sorted_data <- sort(original_data)
filtered_data <- sorted_data[3:18]      # drop 2 smallest and 2 largest
lower_limit <- mean(filtered_data) - 5 * sd(filtered_data)
upper_limit <- mean(filtered_data) + 5 * sd(filtered_data)
not_outlier_ind <- (lower_limit < original_data) &
  (original_data < upper_limit)
print(not_outlier_ind)
data_w_no_outliers <- original_data[not_outlier_ind]
# Example 3 - winsorizes data containing an outlier
library(robustHD)
original_data <- c(1000, rnorm(10))
print(original_data)
print(winsorize(original_data))
|
18700e3d34bce059c9073e4ebbe14b6c67f37a92
|
0c61299c0bfab751bfb5b5eac3f58ee2eae2e4b0
|
/Daphnia/Fecundity/fecundity.data.clean.R
|
0ef92a4fbbe0183e010e4546393917ce320096af
|
[] |
no_license
|
jwerba14/Species-Traits
|
aa2b383ce0494bc6081dff0be879fc68ed24e9c2
|
242673c2ec6166d4537e8994d00a09477fea3f79
|
refs/heads/master
| 2022-10-13T10:57:54.711688
| 2020-06-12T01:57:21
| 2020-06-12T01:57:21
| 105,941,598
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,390
|
r
|
fecundity.data.clean.R
|
source("../../transfer_functions.R")
source("../../chl_adj.R")
library(tidyverse)
daph <- read.csv("daphnia_lifetime.csv")
daph <- daph %>%
filter(adult_only=="N")
## to get fecundity parameter fit saturating curve (params z and w in full ode)
## need to make per day so need to divide total fecundity by # of days that individual was an adult
daph_fec <- daph %>% group_by(rep, treatment) %>%
filter(size_class == "A") %>%
summarize(
time_adult = n()
, life_fec = sum(clutch_size, na.rm = TRUE)
, chl = mean(chl_avg)
, chl_sd_rep = sd(chl_avg)) %>%
mutate(daily_fec = life_fec / time_adult)
## make chl in cells per ml
daph_fec_adj <- daph_fec %>%
mutate(cell = chl_adj(chl = chl))
daph_fec_adj %>% dplyr::select(cell, chl)
daph_fec_adj$sd <- 0
## remove 0s because those are dead individuals
daph_fec_adj <- daph_fec_adj %>%
filter(!(daily_fec == 0 & chl > 5) )
## data from literature
fec_lit <- read.csv("fec_lit.csv")
fec_lit$cell <- c(NA,1e+09, NA, NA, 1e+08,5e+05, 166666.7, NA, 5e+05, NA,NA, NA)
fec_lit$sd <- fec_lit$sd_repro
fec_lit$daily_fec <- fec_lit$daphnia_reproduction
fec_lit$rep <- as.factor(rep("A", nrow(fec_lit)))
fec_lit1 <- fec_lit %>% filter(!is.na(cell))
fec_lit1<- fec_lit1 %>%
mutate(chl = cell_adj(cell = cell))
fec_lit1 <- fec_lit1 %>% mutate(cell1 = chl_adj(chl))
which(fec_lit1$cell != fec_lit1$cell1)
|
5ce4d80cf848e6e82959f33d2dc40aa46847c3e9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DOS/examples/senWilcoxExact.Rd.R
|
3a6375f56b675cce2596e1a7160abc2840ade82b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 611
|
r
|
senWilcoxExact.Rd.R
|
# Worked examples for DOS::senWilcoxExact (exact sensitivity analysis for
# Wilcoxon's signed-rank statistic), extracted from the package Rd file.
library(DOS)
### Name: senWilcoxExact
### Title: Exact Sensitivity Analysis for Wilcoxon's Signed-rank Statistic
### Aliases: senWilcoxExact
### ** Examples
data(werfel)
# paired treated-minus-control differences from the werfel data
d<-werfel$serpc_p-werfel$cerpc_p
# Reproduces the exact one-sided P-value computed in Section 3.9 of Rosenbaum (2010).
senWilcoxExact(d,gamma=2)
# Agrees with the usual Wilcoxon procedures when gamma=1.
senWilcoxExact(d,gamma=1)
stats::wilcox.test(d,alternative="greater")
# Reproduces the one-sided confidence interval for gamma=3 in Table 3.3 of Rosenbaum (2010)
# (the two shifts bracket the confidence limit: one rejects, the other does not).
senWilcoxExact(d-0.0935,gamma=3)
senWilcoxExact(d-0.0936,gamma=3)
|
a3404832d2ece3829b331fb77cbf5b9619bccd5c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/lava/examples/predict.lvm.Rd.R
|
eb0e373208ec171602e52f6e4ee6399b5b03a2c7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 633
|
r
|
predict.lvm.Rd.R
|
# Worked examples for lava::predict.lvm (prediction in structural equation
# models), extracted from the package Rd file.
library(lava)
### Name: predict.lvm
### Title: Prediction in structural equation models
### Aliases: predict.lvm predict.lvmfit
### ** Examples
# latent variable model: u drives y1,y2,y3 and is itself regressed on x
m <- lvm(list(c(y1,y2,y3)~u,u~x)); latent(m) <- ~u
d <- sim(m,100)   # simulate 100 observations from the model
e <- estimate(m,d)
## Conditional mean (and variance as attribute) given covariates
r <- predict(e)
## Best linear unbiased predictor (BLUP)
r <- predict(e,vars(e))
## Conditional mean of y3 giving covariates and y1,y2
r <- predict(e,y3~y1+y2)
## Conditional mean gives covariates and y1
r <- predict(e,~y1)
## Predicted residuals (conditional on all observed variables)
r <- predict(e,vars(e),residual=TRUE)
|
122b5e624f3085401779ceb1b4d7ba196c15a4a1
|
770d3c507ef0db10c3c7f5bfa194a9af372bf6dd
|
/07/Blatt7_ChristianPeters.R
|
ed8678910c8aee19155eb25da6da6e0e94d64d90
|
[] |
no_license
|
chr-peters/StatistikIV
|
21b47aee9baf91c6c9aa56c19645302f3295d0f2
|
cae9a7fe8095b9a6b6a88c67d7b88f8b15e3134f
|
refs/heads/master
| 2022-06-11T16:29:55.777823
| 2019-07-24T19:18:58
| 2019-07-24T19:18:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,688
|
r
|
Blatt7_ChristianPeters.R
|
# Name: Christian Peters
# No. 13)
# ======
# PCA exercise on the wines data set: scaling effects, biplots, choosing the
# number of components, and low-rank reconstruction error.
# (Only change from the original: the undocumented `**` operator is replaced
# by the canonical `^`; they are parsed identically, but `^` is the
# documented form.)
data <- read.csv('wines.csv')
# a)
X <- as.matrix(data[, 1:13])
print(prcomp(X, scale = FALSE))
# As we can see, there is most likely a scaling issue. When looking at the variable
# 'Proline', we can see that it's values range from 278 to 1680, causing much more
# variance than any other variable (var(Proline) = 99166.72 which is more than
# two orders of magnitude greater than var(Mg) = 203.9893, the second biggest variance).
# As a result, the first principal component is nearly completely occupied by
# Proline and already explains more than 99% of the total variance.
# b)
res <- prcomp(X, scale = TRUE)
print(res)
# get the first three principal components
(best3 <- res$rotation[, 1:3])
# create the biplots
biplot(res, choices=c(1, 2))
# We can see that the first two PCs split the data into three clusters.
# Further analysis could reveal if there is any correspondence between these
# clusters and the different types of wine (which are three as well).
# The plot also shows that the variables Phenols, Proa, Flav, Proline OD and Hue
# are causing negative values of PC1 while AlcAsh and NonFlavPhenols are
# mostly responsible for positive values.
# The variables Ash, Color, Alcohol, Mg and Proline load strongly on PC2 in
# the positive direction.
biplot(res, choices=c(1, 3))
# In this plot we can't find any nice clusters
# PC3 seems to distinguish Ash and AlcAsh from the rest of the variables.
biplot(res, choices=c(2, 3))
# Also no clusters, for loadings see above.
# c)
# 1. Choose l so that at least 75% of the variance is explained:
vars <- res$sdev ^ 2
l <- min(which(cumsum(vars) / sum(vars) > 0.75))
# l = 5, so choose the first 5 PCs according to this criterion
# 2. Choose all PCs that have more variance than the mean
l <- max(which(vars >= mean(vars)))
# l=3, so choose the first 3 PCs according to this criterion
# 3. Choose l using a scree plot
plot(vars, main = 'Scree Plot', ylab = 'Variance')
# According to this criterion, it makes sense to choose 3 PCs.
# Personally I would rely on the scree plot and choose the best 3 PCs because
# when knowing nothing else about the problem, this seems to be the most intuitive
# method.
# d)
# get U and scale X accordingly
U <- res$rotation
X <- scale(X)
# get the best two dimensional approximation and its error
best2DimApprox <- X %*% U[, 1:2] %*% t(U[, 1:2])
error2Dim <- sum((X - best2DimApprox)^2)
# 1026.1
# get the best four dimensional approximation and its error
best4DimApprox <- X %*% U[, 1:4] %*% t(U[, 1:4])
error4Dim <- sum((X - best4DimApprox)^2)
# 607.487
# e)
# project the first observation onto the first two PCs and map it back
(x1_pca <- X[1,] %*% U[, 1:2])
(z1 <- x1_pca %*% t(U[, 1:2]))
|
3904700f84e9f77652b6ec21c66a1e1f6048c694
|
77a2d1437f09c4d5a5d0057878c258a299220d47
|
/man/spattemp.density.Rd
|
c480df061cfad02bf816937926a58fb563fc381e
|
[] |
no_license
|
tilmandavies/sparr
|
07aef9815590809224d8f7e02ab6d4a37655431e
|
3eb62ed42ae4d84d9cbfceff11ffde110a2d1642
|
refs/heads/master
| 2023-03-17T02:34:34.998246
| 2023-03-09T00:20:16
| 2023-03-09T00:20:16
| 89,986,322
| 6
| 4
| null | 2022-01-26T20:06:13
| 2017-05-02T03:10:21
|
R
|
UTF-8
|
R
| false
| false
| 7,795
|
rd
|
spattemp.density.Rd
|
\name{spattemp.density}
\alias{spattemp.density}
\alias{stden}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Spatiotemporal kernel density estimation
}
\description{
Provides a fixed-bandwidth kernel estimate of continuous spatiotemporal data.
}
\usage{
spattemp.density(pp, h = NULL, tt = NULL, lambda = NULL,
tlim = NULL, sedge = c("uniform", "none"), tedge = sedge,
sres = 128, tres = NULL, verbose = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{pp}{
An object of class \code{\link[spatstat.geom]{ppp}} giving the spatial coordinates of the observations to be smoothed. Possibly marked with the time of each event; see argument \code{tt}.
}
\item{h}{
Fixed bandwidth to smooth the spatial margin. A numeric value > 0. If unsupplied, the oversmoothing bandwidth is used as per \code{\link{OS}}.
}
\item{tt}{
A numeric vector of equal length to the number of points in \code{pp}, giving the time corresponding to each spatial observation. If unsupplied, the function attempts to use the values in the \code{\link[spatstat.geom]{marks}} attribute of the \code{\link[spatstat.geom:ppp]{ppp.object}} in \code{pp}.
}
\item{lambda}{
Fixed bandwidth to smooth the temporal margin; a numeric value > 0. If unsupplied, the function internally computes the Sheather-Jones bandwidth using \code{\link[stats]{bw.SJ}} (Sheather & Jones, 1991).
}
\item{tlim}{
A numeric vector of length 2 giving the limits of the temporal domain over which to smooth. If supplied, all times in \code{tt} must fall within this interval (equality with limits allowed). If unsupplied, the function simply uses the range of the observed temporal values.
}
\item{sedge}{
Character string dictating spatial edge correction. \code{"uniform"} (default) corrects based on evaluation grid coordinate. Setting \code{sedge="none"} requests no edge correction.
}
\item{tedge}{
As \code{sedge}, for temporal edge correction.
}
\item{sres}{
Numeric value > 0. Resolution of the [\code{sres} \eqn{\times}{x} \code{sres}] evaluation grid in the spatial margin.
}
\item{tres}{
Numeric value > 0. Resolution of the evaluation points in the temporal margin as defined by the \code{tlim} interval. If unsupplied, the density is evaluated at integer values between \code{tlim[1]} and \code{tlim[2]}.
}
\item{verbose}{
Logical value indicating whether to print a function progress bar to the console during evaluation.
}
}
\details{
This function produces a fixed-bandwidth kernel estimate of a single spatiotemporal density, with isotropic smoothing in the spatial margin, as per Fernando & Hazelton (2014). Estimates may be edge-corrected for an irregular spatial study window \emph{and} for the bounds on the temporal margin as per \code{tlim}; this edge-correction is performed in precisely the same way as the \code{"uniform"} option in \code{\link{bivariate.density}}.
Specifically, for \eqn{n} trivariate points in space-time (\code{pp}, \code{tt}, \code{tlim}), we have
\deqn{\hat{f}(x,t)=n^{-1}\sum_{i=1}^{n}h^{-2}\lambda^{-1}K((x-x_i)/h)L((t-t_i)/\lambda)/(q(x)q(t)),}
where \eqn{x\in W\subset R^2} and \eqn{t\in T\subset R}; \eqn{K} and \eqn{L} are the 2D and 1D Gaussian kernels controlled by fixed bandwidths \eqn{h} (\code{h}) and \eqn{\lambda} (\code{lambda}) respectively; and \eqn{q(x)=\int_W h^{-2}K((u-x)/h)du} and \eqn{q(t)=\int_T \lambda^{-1}L((w-t)/\lambda)dw} are optional edge-correction factors (\code{sedge} and \code{tedge}).
The above equation provides the \emph{joint} or \emph{unconditional} density at a given space-time location \eqn{(x,t)}. In addition to this, the function also yields the \emph{conditional} density at each grid time, defined as
\deqn{\hat{f}(x|t)=\hat{f}(x,t)/\hat{f}(t),}
where \eqn{\hat{f}(t)=n^{-1}\sum_{i=1}^{n}\lambda^{-1}L((t-t_i)/\lambda)/q(t)} is the univariate kernel estimate of the temporal margin. Normalisation of the two versions \eqn{\hat{f}(x,t)} and \eqn{\hat{f}(x|t)} is the only way they differ. Where in the unconditional setting we have \eqn{\int_W\int_T\hat{f}(x,t)dt dx=1}, in the conditional setting we have \eqn{\int_W\hat{f}(x|t) dx=1} for all \eqn{t}. See Fernando & Hazelton (2014) for further details and practical reasons as to why we might prefer one over the other in certain situations.
The objects returned by this function (see `Value' below) are necessary for kernel estimation of spatiotemporal relative risk surfaces, which is performed by \code{\link{spattemp.risk}}.
}
\value{
An object of class \code{"stden"}. This is effectively a list with the following components:
\item{z}{
A named (by time-point) list of pixel \code{\link[spatstat.geom]{im}}ages corresponding to the joint spatiotemporal density over space at each discretised time.
}
\item{z.cond}{
A named (by time-point) list of pixel \code{\link[spatstat.geom]{im}}ages corresponding to the conditional spatial density given each discretised time.
}
\item{h}{
The scalar bandwidth used for spatial smoothing.
}
\item{lambda}{
The scalar bandwidth used for temporal smoothing.
}
\item{tlim}{
A numeric vector of length two giving the temporal bound of the density estimate.
}
\item{spatial.z}{
A pixel \code{\link[spatstat.geom]{im}}age giving the overall spatial margin as a single 2D density estimate (i.e. ignoring time).
}
\item{temporal.z}{
An object of class \code{\link[stats]{density}} giving the overall temporal margin as a single 1D density estimate (i.e. ignoring space).
}
\item{qs}{
A pixel \code{\link[spatstat.geom]{im}}age giving the edge-correction surface for the spatial margin. \code{NULL} if \code{sedge = "none"}.
}
\item{qt}{
A numeric vector giving the edge-correction weights for the temporal margin. \code{NULL} if \code{tedge = "none"}.
}
\item{pp}{
A \code{\link[spatstat.geom:ppp]{ppp.object}} of the spatial data passed to the argument of the same name in the initial function call, with \code{\link[spatstat.geom]{marks}} of the observation times.
}
\item{tgrid}{
A numeric vector giving the discretised time grid at which the spatiotemporal density was evaluated (matches the names of \code{z} and \code{z.cond}).
}
}
\references{
Duong, T. (2007), ks: Kernel Density Estimation and Kernel Discriminant Analysis for Multivariate Data in R, \emph{Journal of Statistical Software}, \bold{21}(7), 1-16.\cr\cr
Fernando, W.T.P.S. and Hazelton, M.L. (2014), Generalizing the spatial relative risk function, \emph{Spatial and Spatio-temporal Epidemiology}, \bold{8}, 1-10.\cr\cr
Kelsall, J.E. and Diggle, P.J. (1995), Kernel estimation of relative risk, \emph{Bernoulli}, \bold{1}, 3-16.\cr\cr
Sheather, S. J. and Jones, M. C. (1991), A reliable data-based bandwidth selection method for kernel density estimation. Journal of the Royal Statistical Society Series B, \bold{53}, 683-690.\cr\cr
Silverman, B.W. (1986), \emph{Density Estimation for Statistics and Data Analysis}, Chapman & Hall, New York.
}
\author{
T.M. Davies
}
\seealso{
\code{\link{bivariate.density}}, \code{\link{spattemp.risk}}, \code{\link{spattemp.slice}}
}
\examples{
data(burk)
burkcas <- burk$cases
burkden1 <- spattemp.density(burkcas,tres=128)
summary(burkden1)
\donttest{
hlam <- LIK.spattemp(burkcas,tlim=c(400,5900),verbose=FALSE)
burkden2 <- spattemp.density(burkcas,h=hlam[1],lambda=hlam[2],tlim=c(400,5900),tres=256)
tims <- c(1000,2000,3500)
par(mfcol=c(2,3))
for(i in tims){
plot(burkden2,i,override.par=FALSE,fix.range=TRUE,main=paste("joint",i))
plot(burkden2,i,"conditional",override.par=FALSE,main=paste("cond.",i))
}
}
}
%Evaluation of the spatiotemporal density itself is direct, limiting effective support of the Gaussian kernel to plus or minus 4 bandwidths, based on the implementation of 3D density estimation in the \code{\link[ks]{ks}} package (Duong, 2007).
|
dd703c4598650715981a8429a135fb263cf66cef
|
0e76443b6de1312c8d3988d2538263db0cd7385b
|
/分析及画图/0. 文献_书籍代码/Global-bacterial-diversity-in-WWTPs-master/distLLE.r
|
7383c9fe35136ab10243c16b07b4015bb39fed4d
|
[] |
no_license
|
mrzhangqjankun/R-code-for-myself
|
0c34c9ed90016c18f149948f84503643f0f893b7
|
56f387b2e3b56f8ee4e8d83fcb1afda3d79088de
|
refs/heads/master
| 2022-12-30T08:56:58.880007
| 2020-10-23T03:20:17
| 2020-10-23T03:20:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 871
|
r
|
distLLE.r
|
distLLE<-function(latitude,longitude,elevation=NA,site.name=NA,short=TRUE)
{
# To calculate distance of two points according to longitude, latitude and elevation.
# by Daliang Ning (ningdaliang@gmail.com) on 2015.2.17
# if short=TRUE, the points are close to each other,dist2=H2+d2.
#
# Arguments:
#   latitude, longitude : numeric vectors of site coordinates (decimal degrees)
#   elevation           : optional numeric vector of site elevations; when NA,
#                         only the geodesic surface distance is returned
#   site.name           : optional labels for the rows/columns of the result;
#                         defaults to "S1", "S2", ...
#   short               : TRUE -> combine surface distance d and elevation
#                         difference H as sqrt(d^2 + H^2) (valid for nearby
#                         sites); FALSE -> use the chord-based formula below
# Returns: a num x num matrix of pairwise distances (meters -- assuming the
#   geosphere default ellipsoid; TODO confirm units with geosphere::distm).
library(geosphere)
num=length(latitude)
if(is.na(site.name[1])){site.name=paste("S",1:num,sep="")}
x=data.frame(longitude,latitude)
d=distm(x)  # pairwise great-circle/geodesic surface distances
if(is.na(elevation[1]))
{
dist=d
}else{
# H[i,j] = |elevation_i - elevation_j| (pairwise absolute elevation gaps)
H=as.matrix(dist(elevation))
if(short)
{
# Pythagorean combination of surface distance and elevation difference
dist=(d^2+H^2)^0.5
}else{
# h[i,j] = min(elevation_i, elevation_j); built by indexing the elevation
# vector with the row/column indices of a num x num matrix
h=matrix(pmin(elevation[row(matrix(,num,num))],elevation[col(matrix(,num,num))]),nrow=num)
# R = Earth radius in meters; the expression below combines the chord
# length between the two points (at radius R + h) with the elevation
# difference H -- NOTE(review): formula taken on trust from the original
# author, not re-derived here.
R=6378137
dist=(((2*(R+h)*sin(0.5*d/R)*sin(0.5*d/R)+H)^2)+(((R+h)*sin(d/R))^2))^0.5
}
}
rownames(dist)=site.name
colnames(dist)=site.name
dist
}
|
4c3058931e6ea183dbd8e3bd1c3cd4d6be9ad4ef
|
bb22972a9bad4532584c2548b1680003e1499780
|
/exec/tfrun
|
1d57aa96888cfc1da528248224650f9e49bda1ce
|
[] |
no_license
|
ifrit98/bengaliai
|
bbf122c628fbb1dc6377e41bb5c4f99b08eceb94
|
57263b0706c70033109160dcbb2ea3b382a7d827
|
refs/heads/master
| 2020-12-03T21:20:33.211515
| 2020-02-23T22:38:58
| 2020-02-23T22:38:58
| 231,490,218
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 432
|
tfrun
|
#!/usr/bin/Rscript
# Launch a tfruns training run.
# Usage: tfrun <run_dir> [name=value ...]
# The first argument is the run directory; any further arguments are
# "name=value" flag pairs forwarded to tfruns::training_run().
library(magrittr)
library(purrr)
args <- commandArgs(trailingOnly = TRUE)
print(args)
run_dir <- args[[1]]
if (length(args) > 1) {
  flags <- tail(args, -1)
  flags %<>% strsplit('=')
  # BUG FIX: the list names must come from the flag *name* (element 1 of
  # each "name=value" split), not from the value (element 2). Previously
  # both names and values were set to element 2, so training_run() received
  # a flags list whose names were its own values.
  names(flags) <- map(flags, ~.x[[1]])
  flags %<>% map(~.x[[2]])
} else
  flags <- NULL
cat("Flags: \n")
print(flags)
cat("Run dir: \n")
print(run_dir)
tfruns::training_run(echo = FALSE, run_dir = run_dir, flags = flags)
|
|
35eb468427eadd3ba52b99381b42b52740d4ffd0
|
ac20d92597a43f712ef43e6e72abc4b1512b6dde
|
/scripts/tema2/03-split.R
|
de258b9f304573aaf1fe48f45b208044b523acb4
|
[
"MIT"
] |
permissive
|
dabamascodes/r-course
|
f38b7d6b2d3b0379743c905c5c25714de1b52887
|
c65625248d842b129576c3ebf6eb48408614bc93
|
refs/heads/master
| 2023-04-23T02:45:42.472372
| 2023-04-11T13:25:05
| 2023-04-11T13:25:05
| 278,896,388
| 1
| 0
|
MIT
| 2020-07-11T16:10:07
| 2020-07-11T16:10:06
| null |
UTF-8
|
R
| false
| false
| 194
|
r
|
03-split.R
|
#split / unsplit
# Demo of base::split(): partition the auto-mpg data frame into a list of
# data frames, one element per distinct value of `cylinders`.
data <- read.csv("../data/tema2/auto-mpg.csv", stringsAsFactors = F)
carslist <- split(data, data$cylinders)
carslist[1]    # `[` keeps the list wrapper: a length-1 list
carslist[[1]]  # `[[` extracts the element itself: a data frame
str(carslist[1])
names(carslist[[1]])  # column names of the first group's data frame
|
3ef3d9af7356ad682821696efe4e0fea25ac8335
|
f4a1ba55dd16b37a676263c53ae2e40aac451565
|
/R/get_specific_day_chl.R
|
c333a52a2fa530332b9d057afb0589816489aba5
|
[] |
no_license
|
jfloresvaliente/chl_sat
|
615abecbafc8304199e72d6fc22c931ef908881b
|
1bef555315b871ff6ac63bec4ff3405c196aa1c9
|
refs/heads/master
| 2021-07-08T03:20:42.214222
| 2020-07-26T14:19:51
| 2020-07-26T14:19:51
| 131,120,261
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,226
|
r
|
get_specific_day_chl.R
|
# For each date listed in dates_carbajal.csv (year, month, day columns),
# locate that day's chlorophyll raster -- files are stored one directory per
# year with one file per day of the year -- and save a PNG map of it.
library(raster)
library(maps)
library(mapdata)
fechas <- read.table('D:/Clorofila/dates_carbajal.csv', header = F, sep = ',')
dirpath <- 'D:/Clorofila/crop_Cherrepe/'
# seq_len(nrow(.)) instead of 1:dim(.)[1]: safe when the table is empty
# (1:0 would iterate over c(1, 0) and crash on the bogus indices).
for(i in seq_len(nrow(fechas))){
  mifecha <- fechas[i,]
  year_in <- mifecha[,1]
  month_in <- mifecha[,2]
  day_in <- mifecha[,3]
  # every calendar day of the target year, used to map the date to a
  # day-of-year index into the per-year file listing
  serieday_in <- seq(from = as.Date(paste0(year_in, '-', 1, '-', 01)),
                     to = as.Date(paste0(year_in, '-', 12, '-', 31)), by = 'day')
  myday_in <- as.Date(paste0(year_in, '-', month_in, '-', day_in))
  PNG1 <- paste0(dirpath, myday_in, '.png')
  day_in_index <- which(serieday_in == myday_in)
  # assumes the year directory holds exactly one file per day, sorted by
  # date -- TODO confirm against the archive layout
  chl_files <- list.files(path = paste0(dirpath, year_in), full.names = T)
  chl <- chl_files[day_in_index]
  chl <- raster(chl)
  png(filename = PNG1, width = 850, height = 850, res = 120)
  plot(chl, axes = F, main = paste0(year_in, '-',month_in, '-', day_in))
  map('worldHires', add=T, fill=T, col='gray')#, xlim = c(xmn, xmx), ylim = c(ymn, ymx))
  grid()
  axis(side = 1, font = 2, lwd.ticks = 2, cex.axis = 1.5)
  axis(side = 2, font = 2, lwd.ticks = 2, cex.axis = 1.5, las = 2)
  box(lwd = 2)
  dev.off()
}
|
2fbf905e9a310c63e70271148976ef28b8bc9edf
|
533f9a1f0f39e285f36c846e1c018c8ba106c2a3
|
/scripts/import/03_non-crime-data.R
|
e5c17b3be7c41a41bbab9d7d988a0d0f4ccd139b
|
[] |
no_license
|
seasmith/HoustonCrimeData
|
5f977d3fbb77b9f53ca5272b982a962c46f430b2
|
8af54b5aa448c6c6726d74a97b53ee9f22cb4397
|
refs/heads/master
| 2022-05-02T16:47:42.372361
| 2022-04-13T02:39:36
| 2022-04-13T02:39:36
| 123,954,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,035
|
r
|
03_non-crime-data.R
|
library(tidyverse)
library(curl)
# Data home page: http://mycity.houstontx.gov/home/
#
# Must have directories
if ( !dir.exists("data") ) dir.create("data")
dz <- function(url, z_file, ex_dir = "data", download = TRUE) {
  # Download a zip archive into downloads/ and extract it under
  # ex_dir/<archive name without extension>/.
  #
  # url      : source URL for the archive
  # z_file   : file name to store the archive under in downloads/
  # ex_dir   : parent directory for extraction (default "data")
  # download : set FALSE to reuse a previously downloaded archive
  d_file <- paste0("downloads", "/", z_file)
  # BUG FIX: the script header only guarantees that "data" exists; the
  # downloads/ directory was never created, so curl_download() failed on a
  # fresh checkout. Ensure both directories exist here.
  if (!dir.exists("downloads")) dir.create("downloads", recursive = TRUE)
  if (!dir.exists(ex_dir)) dir.create(ex_dir, recursive = TRUE)
  if (download) curl::curl_download(url, d_file)
  unzip(d_file, exdir = paste0(ex_dir, "/", tools::file_path_sans_ext(basename(z_file))))
}
# POLICE DISTRICTS --------------------------------------------------------
# >>> THIS DOWNLOAD URL NO LONGER WORKS
# -- Police District Maps:https://h-gac.sharefile.com/share/view/sa86e2155b534b9e9
# https://storage-ec2-938.sharefile.com/download.ashx?dt=dt0c0bb25133734326ab9f65fcdd10f0c6&h=qZFmRR6eMQ%2fIwGxPmIOMQ8bkAblKLGQMvAC9fPVTgn4%3d
url <- "https://storage-ec2-938.sharefile.com/download.ashx?dt=dt0c0bb25133734326ab9f65fcdd10f0c6&h=qZFmRR6eMQ%2fIwGxPmIOMQ8bkAblKLGQMvAC9fPVTgn4%3d"
dz(url, "Houston_Police_Districts.zip")
# BEATS -------------------------------------------------------------------
# >>> THIS DOWNLOAD URL NO LONGER WORKS.
# >>> TRY USING: https://cohgis-mycity.opendata.arcgis.com/datasets/63096b9e650b48e2ac5d29b3f771f37d/explore?location=29.797072%2C-95.401169%2C10.92
# -- Beatmap: https://cohgis-mycity.opendata.arcgis.com/datasets/houston-police-beats
# https://opendata.arcgis.com/datasets/fb3bb02ec56c4bb4b9d0cf3b8b3e5545_4.zip
url <- "https://opendata.arcgis.com/datasets/fb3bb02ec56c4bb4b9d0cf3b8b3e5545_4.zip"
dz(url, "Houston_Police_Beats.zip")
# MAJOR ROADS -------------------------------------------------------------
# >>> THIS DOWNLOAD URL NO LONGER WORKS
# -- Major Roads: https://storage-ec2-917.sharefile.com/download.ashx?dt=dt7c266ce2464548b5977d3f33f5e05094&h=6VfbVAN%2b4g%2bvk8CV9blRRXqtmpfX7PK2qCKYvtXJMp4%3d
url <- "https://storage-ec2-917.sharefile.com/download.ashx?dt=dt7c266ce2464548b5977d3f33f5e05094&h=6VfbVAN%2b4g%2bvk8CV9blRRXqtmpfX7PK2qCKYvtXJMp4%3d"
dz(url, "Major_Roads.zip")
# OSM DATA ----------------------------------------------------------------
# -- OSM Extracts:
# Texas only
url <- "http://download.geofabrik.de/north-america/us/texas-latest-free.shp.zip"
dz(url, "Texas_OSM.zip")
# SCHOOLS -----------------------------------------------------------------
# >>> THIS DOWNLOAD URL NO LONGER WORKS
# -- Schools: https://cohgis-mycity.opendata.arcgis.com/datasets/schools
# https://opendata.arcgis.com/datasets/59d52cd8fa9d463ea7cf9f3c0a0c6ea2_0.zip
url <- "https://opendata.arcgis.com/datasets/59d52cd8fa9d463ea7cf9f3c0a0c6ea2_0.zip"
dz(url, "Houston_Schools.zip")
# SCHOOL DISTRICTS --------------------------------------------------------
# >>> THIS DOWNLOAD URL NO LONGER WORKS
# -- School Districts: http://cohgis-mycity.opendata.arcgis.com/datasets/school-districts
url <- "https://opendata.arcgis.com/datasets/59d52cd8fa9d463ea7cf9f3c0a0c6ea2_1.zip"
dz(url, "School_Districts.zip")
# POLICE STATIONS ---------------------------------------------------------
# >>> THIS DOWNLOAD URL NO LONGER WORKS
# -- Police Stations: https://cohgis-mycity.opendata.arcgis.com/datasets/houston-police-stations
# https://opendata.arcgis.com/datasets/fb3bb02ec56c4bb4b9d0cf3b8b3e5545_0.zip
url <- "https://opendata.arcgis.com/datasets/fb3bb02ec56c4bb4b9d0cf3b8b3e5545_0.zip"
dz(url, "Police_Stations.zip")
# ADDRESS DATA ------------------------------------------------------------
# >>> THIS DOWNLOAD URL NO LONGER WORKS
# -- Openaddress: https://github.com/openaddresses/openaddresses
# http://openaddresses.io/
url <- "https://s3.amazonaws.com/data.openaddresses.io/openaddr-collected-us_south.zip"
dz(url, "US_South_Addresses.zip")
# ZIP CODES ---------------------------------------------------------------
# -- Zip Codes: http://cohgis-mycity.opendata.arcgis.com/datasets/zip-codes?geometry=-96.663%2C29.509%2C-94.048%2C29.927
url <- "https://opendata.arcgis.com/datasets/7237db114eeb416cb481f4450d8a0fa6_7.zip"
dz(url, "Zip_Codes.zip")
# NASA SEDAC --------------------------------------------------------------
# This was used for the initial (and current grid).
# However, NASA produces a US census grid.
# But the data pertains to the 2010 census.
# There is no updated version.
# https://sedac.ciesin.columbia.edu/data/collection/usgrid
#
# There is a further interagency project for population grids.
# There are recent (2020-2021) population grids but I have
# not investigated further.
# https://www.popgrid.org/uscb
#
# NASA SEDAC used is version 4.
# It pertains to data collected between
# 2005-2014.
# -- NASA SEDAC GPWv4: http://sedac.ciesin.columbia.edu/data/collection/gpw-v4
#
# Documents: http://sedac.ciesin.columbia.edu/downloads/on-demand/gpw/doc/GPWv4_Revision_10_documentation.zip
#
# Must be logged-in for downloads. :(
#
# Center points manually downloaded performed by selecting 'North America' and
# 'TX' at: http://sedac.ciesin.columbia.edu/data/collection/gpw-v4
#
# Raster data manually downloaded from: http://sedac.ciesin.columbia.edu/data/set/gpw-v4-population-density
|
a6ce4fcdea886c984fcad1ab0e8ce346728b315d
|
653fc9fef49629637687121074b623eee30a0a25
|
/man/drift.sim.Rd
|
8d73ad9f0dc67d30ee27e4b608cea6d20c12c5b5
|
[] |
no_license
|
ehelegam/elvesR
|
3a50182034713e745722fe8715ac7325d20fa8ca
|
39706238495ef0cfdab72daedbfba3bead03cbe7
|
refs/heads/master
| 2016-12-13T15:35:17.412332
| 2016-05-08T17:20:36
| 2016-05-08T17:20:36
| 51,022,817
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 745
|
rd
|
drift.sim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drift.simulator.R
\name{drift.sim}
\alias{drift.sim}
\title{Simulation drift with a Wolbachia population}
\usage{
drift.sim(p, N, s, g, fit, r, get = "fig")
}
\arguments{
\item{p}{Initial frequency of the population to be modeled}
\item{N}{Population size per generation}
\item{s}{Number of individuals to be sampled at each generation}
\item{fit}{Fitness of the population to be modeled. Zero (0) gives no fitness advantage.}
\item{r}{Number of replicates}
\item{g}{Total number of generations to be modeled}
}
\value{
A figure with different results.
}
\description{
Drift simulation taking in account a population with to different phenotypes
}
\author{
Elves H Duarte
}
|
123fcb647e1df671be15868bbcc99ffc3b1ab6d1
|
2d989f9c35c7340ca2e74bbd43b4c01fe76dea73
|
/workout2/app.R
|
5fd4de912bb8dcb6d8e7f0a5fd0d18e7e9bc0a38
|
[] |
no_license
|
jarellymartin/hw-stat133-jarellymartin
|
7ed0bf6d7f8870ef71d4161150fd9e971f1344bd
|
0800c6a60ad5cd647e9c8c89866ff6ad04b628d5
|
refs/heads/master
| 2023-04-04T01:14:31.362679
| 2021-03-16T04:26:09
| 2021-03-16T04:26:09
| 295,613,995
| 1
| 0
| null | 2020-09-15T04:27:13
| 2020-09-15T04:27:12
| null |
UTF-8
|
R
| false
| false
| 4,605
|
r
|
app.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Example discussed in lecture April-08-2019
#Q for OH - balance table is different than the one professor shows. How should I change it?
#Q for OH - how can I do the facetted graph part -
library(shiny)
library(ggplot2)
library(reshape2)
# Define UI for application that draws a histogram
# UI: a title, six input controls laid out in a fluidRow, and a main panel
# holding the timeline plot and the balances table.
# NOTE(review): the "Return Rate (in %)" and "Growth Rate (in %)" labels are
# misleading -- the sliders actually take proportions (0-0.20), not percent
# values; confirm intended wording before changing.
ui <- fluidPage(
# Application title
titlePanel("Investing Scenarios: No Contribution, Fixed Contribution, and Growing Contribution"),
# Input controls: five sliders plus a facet selector, four columns wide each
fluidRow(
column(4,
sliderInput("initial",
"Initial Amount",
min = 0,
max = 100000,
value = 1000,
step = 500)),
column(4,
sliderInput("rate",
"Return Rate (in %)",
min = 0,
max = 0.20,
value = 0.05,
step = 0.01)),
column(4,
sliderInput("years", "Years",
min = 1, max = 50,
value = 20)),
column( width=4, sliderInput("annual",
"Annual Contribution",
min = 0,
max = 50000,
value = 2000,
step = 500)),
column(width =4, sliderInput("growth",
"Growth Rate (in %)",
min = 0,
max =0.20,
value = 0.02,
step = 0.01)),
column(width=4,selectInput("facet", "Facet?", c("Yes", "No"))
),
# Output area: plot ("Plot") and table ("mytable") rendered by the server
mainPanel(
h3("Timelines"),
plotOutput("Plot"),
h3("Balances"),
tableOutput("mytable")
)
)
)
#' @title future value
#' @description Computes the compound-interest future value of a lump-sum
#'   investment after a given number of years.
#' @param amount initial invested amount
#' @param rate annual rate of return (as a proportion, e.g. 0.05)
#' @param years number of years the amount stays invested
#' @return computed future value of the investment
future_value <- function(amount, rate, years) {
  # grow the principal by the compound factor (1 + rate)^years
  compound_factor <- (1 + rate)^years
  amount * compound_factor
}
#' @title future value of annuity
#' @description Computes the future value of a fixed annual contribution
#'   compounding at a constant rate of return.
#' @param contrib contributed amount per year
#' @param rate annual rate of return (as a proportion, e.g. 0.05)
#' @param years number of years of contributions
#' @return computed future value of the annuity
annuity <- function(contrib, rate, years) {
  # standard annuity accumulation factor: ((1 + r)^t - 1) / r
  accumulation <- ((1 + rate)^years - 1) / rate
  contrib * accumulation
}
#' @title future value of growing annuity
#' @description Computes the future value of an annual contribution that
#'   itself grows at a fixed rate while compounding at the rate of return.
#' @param contrib first-year contributed amount
#' @param rate annual rate of return (as a proportion)
#' @param growth annual growth rate of the contribution (as a proportion;
#'   must differ from `rate`)
#' @param years number of years of contributions
#' @return computed future value of the growing annuity
growing_annuity <- function(contrib, rate, growth, years) {
  # growing-annuity factor: ((1 + r)^t - (1 + g)^t) / (r - g)
  numerator <- (1 + rate)^years - (1 + growth)^years
  contrib * numerator / (rate - growth)
}
# Server logic: build the per-year balances table reactively, then render
# the timeline plot (optionally facetted) and the balances table.
server <- function(input, output) {
  dat <- reactive({
    yrs <- seq_len(input$years)
    # future_value()/annuity()/growing_annuity() are vectorized over the
    # `years` argument, so the three per-year loops of the original can be
    # replaced by direct vector computations with identical results.
    no_contrib <- future_value(input$initial, input$rate, yrs)
    fixed_contrib <- no_contrib + annuity(input$annual, input$rate, yrs)
    growing_contrib <- no_contrib +
      growing_annuity(input$annual, input$rate, input$growth, yrs)
    # BUG FIX: the year-0 row must show the user-selected initial amount,
    # not a hard-coded 1000 (which was only the slider's default value).
    initial <- rep(input$initial, 3)
    dat <- data.frame(no_contrib, fixed_contrib, growing_contrib)
    dat <- rbind(initial, dat)
    dat$year <- 0:input$years
    dat <- dat[, c('year', 'no_contrib', 'fixed_contrib', 'growing_contrib')]
    return(dat)
  })
  output$Plot <- renderPlot({
    # long format: one (year, scenario, value) row per point for ggplot
    melt <- melt(dat(), id.vars = "year")
    if (input$facet == "No"){
      ggplot(data = melt, aes(x=year)) +
        geom_line(aes(y=value, color=variable))
    } else {
      ggplot(data = melt, aes(x=year, fill = variable)) +
        geom_area(aes(x=year, y =value), alpha=.4) +
        geom_line(aes(y=value, color = variable)) +
        facet_grid(~variable)
    }
  })
  output$mytable = renderTable({
    (dat())
  })
}
# Run the application
shinyApp(ui = ui, server = server)
|
7b7f6e3e4e2c1ea1450cfdc429196e3116444482
|
39a3b1f5d27882ea8364e94c484e14c603cb88e2
|
/man/eempf_convergence.Rd
|
e00111deaca53763fc22b9ec921974e7b77f768b
|
[] |
no_license
|
MatthiasPucher/staRdom
|
49c23ebfd977c9321fc09600c29d84ed872f0090
|
af51796fff49a5dc670244066c2f18dd6badc9a3
|
refs/heads/master
| 2023-06-25T00:46:52.968743
| 2023-06-15T08:18:13
| 2023-06-15T08:18:13
| 128,365,215
| 16
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 704
|
rd
|
eempf_convergence.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parafac_functions.R
\name{eempf_convergence}
\alias{eempf_convergence}
\title{Extract modelling information from a PARAFAC model.}
\usage{
eempf_convergence(pfmodel, print = TRUE)
}
\arguments{
\item{pfmodel}{PARAFAC model created with staRdom using output = "all"}
\item{print}{logical, whether you want console output or just a list with results}
}
\value{
list with numbers of converging models, cflags and SSEs
}
\description{
The convergence behaviour of all initialisations in a PARAFAC model is summarised by printing the number of converging models, the convergence flags and the SSEs.
}
\examples{
data("pf_models")
pfmodel <- pf4[[1]]
conv_beh <- eempf_convergence(pfmodel)
}
|
42ef1aaf9e5f02d3a3b811d8825fd4cf0e44dc0a
|
bf9f77e17111b590fe44905ebd9391009a2a1390
|
/man/composante_type.Rd
|
abb69450a48c5baf5c69434f684d2d558740227e
|
[
"MIT"
] |
permissive
|
ove-ut3/apogee
|
5cd9fed8e1cb4fc359b824fdb16ff269952d6320
|
c08ff84497bbaab4af90a0eeb779a338ff158b87
|
refs/heads/master
| 2021-06-02T09:03:41.344113
| 2020-05-19T13:22:59
| 2020-05-19T13:22:59
| 115,185,672
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 582
|
rd
|
composante_type.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{composante_type}
\alias{composante_type}
\title{Table composante_type}
\format{
An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 7 rows and 2 columns.
}
\usage{
composante_type
}
\description{
\preformatted{## Rows: 7
## Columns: 2
## $ code_type_composante <chr> "COM", "DPT", "INS", "IUP", "IUT", "LOC", "UFR"
## $ lib_type_composante <chr> "Coummunauté d'universités/établissements", "Département", "Instit...
}
}
\keyword{datasets}
|
e50a5f36601ae6a5745ea4bf106ae2919a40b9ed
|
562f91534ec9713160bdaeb3e7a71efd96ed5edb
|
/PhD/syn.data.R
|
923e87d48cca6fa7a2f8dbc1e5d7305d6874c092
|
[] |
no_license
|
saraamini/CodeSample
|
8a3fc059b3cd6cdc40a681ed3ffc931468ee5ade
|
eda1b5709526946ebecadb8a1a7b5696ceff4ba3
|
refs/heads/master
| 2020-03-28T17:45:01.385283
| 2018-09-14T17:16:32
| 2018-09-14T17:16:32
| 148,818,980
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 762
|
r
|
syn.data.R
|
# Build a synthetic dataset by bootstrap-resampling days (with replacement),
# while keeping runs of consecutive days intact wherever the (nt x vt)
# index block does not change between adjacent rows.
#
# Args:
#   t.total: total number of days/time steps available (should equal nt*vt).
#   nt, vt:  dimensions of the index grid.
#   nx, ny:  spatial grid dimensions; var has nx*ny rows after reshaping.
#   index:   block labels, reshaped to nt x vt.
#   var:     source data, reshaped to (nx*ny) x (nt*vt).
#   nc, ns:  unused here (kept for interface compatibility).
#
# Returns: list(syn.var = resampled data matrix, same shape as var).
syn.data = function(t.total,nt,vt,nx,ny,index,var,nc,ns){
  # Reshape inputs: rows = grid cells, columns = time steps.
  dim(var) <- c(nx*ny, nt*vt)
  dim(index) <- c(nt, vt)
  syn.var <- array(NA, dim = c(nx*ny, nt*vt))
  # Single bootstrap draw of day labels (the only RNG call in this function).
  synthetic.day <- sample(1:t.total, t.total, replace=TRUE)
  dim(synthetic.day) <- c(nt, vt)
  # Within each column, whenever the block index repeats, force the day to
  # follow its predecessor (wrapping back to day 1 after the last day).
  for (col in 1:vt){
    for (row in 2:nt){
      if (index[row,col] == index[(row-1),col]){
        prev.day <- synthetic.day[(row-1),col]
        synthetic.day[row,col] <- if (prev.day != t.total) prev.day + 1 else 1
      }
    }
  }
  dim(synthetic.day) <- c(nt*vt)
  # Assemble the synthetic series by copying the sampled columns of var.
  for (it in 1:t.total){
    syn.var[,it] <- var[, synthetic.day[it]]
  }
  list(syn.var = syn.var)
}
|
75fa7d85a267e1b911783f802febe477dd1675df
|
c5a59ef72d1872a6fb6cf8bde2a7798967c66d5b
|
/R/graphics.r
|
914d07ea8630ace6acfab890a9900aeea5ba10d9
|
[] |
no_license
|
hjanime/hm-splice-pipe
|
9ddcc3aa4e678dca068f125cda67db6f6eb24a45
|
edafa685dd9a079738e635d5d60927a6a7f4981d
|
refs/heads/master
| 2021-01-21T09:11:08.209177
| 2014-07-18T14:23:52
| 2014-07-18T14:23:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,611
|
r
|
graphics.r
|
# Shared plotting configuration: palettes, default figure dimensions, and
# project-wide ggplot theme setup.

# Colour-blind-friendly palette (Okabe-Ito variant).
cbbPalette <- c("#E69F00", "#56B4E9", "#009E73", "#000000", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")

# Default discrete palettes (ColorBrewer "Set1"); swap in the commented
# manual versions below to use the colour-blind-safe cbbPalette instead.
my_fill_palette1 <- function() {scale_fill_brewer(palette="Set1")}
my_colour_palette1 <- function() {scale_colour_brewer(palette="Set1")}
#my_fill_palette1 <- function() {scale_fill_manual(values=cbbPalette)}
#my_colour_palette1 <- function() {scale_colour_manual(values=cbbPalette)}

# Standard figure dimensions (inches) and drawing defaults.
normal_h = 5
normal_w = 6
narrow_w = 6
the_alpha = .3
the_base_size = 16

# Apply the project-wide base theme (call once at startup).
set_ggplot_theme <- function() {theme_set(theme_bw(base_size = the_base_size))}

# grid provides unit(), used by theme_black() below. library() fails loudly
# when the package is missing, unlike require(), which only returns FALSE
# and would let later unit() calls fail with a confusing error.
library(grid)
# A ggplot2 theme with a black background and white text/lines, built by
# patching theme_grey() via %+replace%. Useful for dark-background slides.
# NOTE(review): `axis.ticks.margin` and `panel.margin` are deprecated in
# newer ggplot2 releases (superseded by `axis.ticks.length.*` and
# `panel.spacing`) -- confirm against the installed ggplot2 version.
theme_black <- function (base_size = 12,base_family=""){
  theme_grey(base_size=base_size,base_family=base_family) %+replace%
    theme(
      # Axes: white labels, titles and ticks.
      axis.line = element_blank(),
      axis.text.x = element_text(size = base_size * 0.8, colour = 'white', lineheight = 0.9, vjust = 1),
      axis.text.y = element_text(size = base_size * 0.8, colour = 'white', lineheight = 0.9, hjust = 1),
      axis.ticks = element_line(colour = "white", size = 0.2),
      axis.title.x = element_text(size = base_size, colour = 'white', vjust = 1),
      axis.title.y = element_text(size = base_size, colour = 'white', angle = 90, vjust = 0.5),
      axis.ticks.length = unit(0.3, "lines"),
      axis.ticks.margin = unit(0.5, "lines"),
      # Legend: black background, white keys and text.
      legend.background = element_rect(colour = NA, fill = 'black'),
      legend.key = element_rect(colour = "white", fill = 'black'),
      legend.key.size = unit(1.2, "lines"),
      legend.key.height = NULL,
      legend.key.width = NULL,
      legend.text = element_text(size = base_size * 0.8, colour = 'white'),
      legend.title = element_text(size = base_size * 0.8, face = "bold", hjust = 0, colour = 'white'),
      legend.position = "right",
      legend.text.align = NULL,
      legend.title.align = NULL,
      legend.direction = "vertical",
      legend.box = NULL,
      # Panel: black fill, white border, no gridlines.
      panel.background = element_rect(fill = "black", colour = NA),
      panel.border = element_rect(fill = NA, colour = "white"),
      panel.grid.major = element_blank(),
      panel.grid.minor = element_blank(),
      panel.margin = unit(0.25, "lines"),
      # Facet strips: dark grey with white labels.
      strip.background = element_rect(fill = "grey30", colour = "grey10"),
      strip.text.x = element_text(size = base_size * 0.8, colour = 'white'),
      strip.text.y = element_text(size = base_size * 0.8, colour = 'white', angle = -90),
      # Overall plot: black background, white title.
      plot.background = element_rect(colour = 'black', fill = 'black'),
      plot.title = element_text(size = base_size * 1.2, colour = "white"),
      plot.margin = unit(c(1, 1, 0.5, 0.5), "lines")
    )
}
|
332e134f4929e6745041064e0ebeb6dc6d14ce61
|
1bbd922a9e81341c9f81cfba4aa48664aeaa9a95
|
/R/covsel.R
|
1aa6ce69c26a90dd3b02c795b5c9e55203bb44e2
|
[] |
no_license
|
mlesnoff/rnirs
|
b2519dee12788132107542c4c097611a73c1b995
|
1398d746df67f0f6d80063366db969998522dc04
|
refs/heads/master
| 2023-04-15T22:15:33.045477
| 2023-04-07T13:59:18
| 2023-04-07T13:59:18
| 208,553,347
| 18
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,622
|
r
|
covsel.R
|
covsel <- function(X, Y, nvar = NULL, scaly = TRUE, weights = NULL) {
  # CovSel variable selection: iteratively pick the column of X with the
  # largest weighted squared covariance with Y, then deflate X and Y by
  # projecting out the selected variable (orthogonal projection in the
  # weighted metric).
  #
  # Args:
  #   X: predictor matrix (n x p); coerced via the package helper .matrix().
  #   Y: response(s); coerced to an n x q matrix.
  #   nvar: number of variables to select (default: all p columns).
  #   scaly: scale centred Y columns to unit weighted variance?
  #   weights: optional row weights; normalised to sum to 1 (default 1/n).
  #
  # Returns:
  #   list(sel = data.frame(sel, cumpvarx, cumpvary), weights = weights)
  #   where cumpvarx/cumpvary are cumulative proportions of weighted
  #   variance of X and Y explained after each selection step.
  X <- .matrix(X)
  zdim <- dim(X)
  n <- zdim[1]
  p <- zdim[2]
  Y <- .matrix(Y, row = FALSE, prefix.colnam = "x")
  q <- dim(Y)[2]
  if(is.null(nvar))
    nvar <- p
  if(is.null(weights))
    weights <- rep(1 / n, n)
  else
    weights <- weights / sum(weights)
  # Centre X and Y (weighted means), optionally scale Y columns.
  xmeans <- .xmean(X, weights = weights)
  X <- .center(X, xmeans)
  ymeans <- .xmean(Y, weights = weights)
  Y <- .center(Y, ymeans)
  if(scaly)
    Y <- .scale(Y, rep(0, q), sqrt(colSums(weights * Y * Y)))
  # Total weighted sums of squares, used for the cumulative proportions.
  xsstot <- sum(weights * X * X)
  ysstot <- sum(weights * Y * Y)
  # Preallocate numeric result vectors (one entry per selected variable).
  selvar <- numeric(nvar)
  xss <- numeric(nvar)
  yss <- numeric(nvar)
  for(i in seq_len(nvar)) {
    z <- rowSums(crossprod(weights * X, Y)^2)
    # which.max() always returns a single index. The previous
    # which(z == max(z)) could return several indices on an exact tie,
    # which then fails when assigned into the length-1 slot selvar[i].
    selvar[i] <- which.max(z)
    u <- X[, selvar[i], drop = FALSE]
    # Projection matrix onto u in the weighted (D) metric.
    Pr <- tcrossprod(u) %*% diag(weights) / sum(weights * u * u)
    # Same as
    #Pr <- u %*% solve(t(u) %*% D %*% u) %*% t(u) %*% D
    #Pr <- u %*% t(u) %*% D / sum(d * u * u)
    #Pr <- crossprod(u %*% t(u), D) / sum(d * u * u)
    X <- X - Pr %*% X # The deflated X is a centered matrix (metric D)
    Y <- Y - Pr %*% Y # The deflated Y is a centered matrix (metric D)
    xss[i] <- sum(weights * X * X)
    yss[i] <- sum(weights * Y * Y)
  }
  cumpvarx <- 1 - xss / xsstot
  cumpvary <- 1 - yss / ysstot
  sel <- data.frame(sel = selvar, cumpvarx = cumpvarx, cumpvary = cumpvary)
  list(sel = sel, weights = weights)
}
|
0b27ed90af24cf84f2f1c31933cbb914f5aedd4b
|
7bed3886e5258d7a0a36f509d762b7859ed63732
|
/man-roxygen/ref_ammer_2020.R
|
2a86ded1b8ea255d5f3735a8b155efefbdd434a1
|
[] |
no_license
|
JonasGlatthorn/APAtree
|
a584bd72e35414deea564aea1e6a901ca35e9190
|
383cd9fb95a8396a66a61ae1dae75a962b54df97
|
refs/heads/main
| 2023-04-19T03:25:58.997681
| 2021-08-20T13:01:47
| 2021-08-20T13:01:47
| 394,584,651
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 311
|
r
|
ref_ammer_2020.R
|
#'@references { Ammer, Christian; Annighoefer, Peter; Balkenhol, Niko; Hertel,
#' Dietrich; Leuschner, Christoph; Polle, Andrea; Lamersdorf, Norbert; Scheu,
#' Stefan; Glatthorn, Jonas (2020): RTG 2300 - Enrichment of European beech
#' forests with conifers. PANGAEA, https://doi.org/10.1594/PANGAEA.925228}
|
f9b55461f2958472b7219f0cd0f249b31042d4ae
|
f2345b7586c88be63a0de5cc56f8aef9c180fd4f
|
/man/writeNetworkModel.Rd
|
616d8c46ac7ff82d92b575d2cbe4b10dfb2f88c5
|
[] |
no_license
|
nutterb/HydeNet
|
0aca1240b0466d9b289b33169bd25a0eca50f495
|
fcbb7d81f2359b98494f0712a5db15291193ae5f
|
refs/heads/master
| 2023-05-13T12:32:45.168663
| 2020-07-06T12:39:09
| 2020-07-06T12:39:09
| 30,078,881
| 24
| 3
| null | 2018-07-20T10:50:20
| 2015-01-30T15:53:56
|
R
|
UTF-8
|
R
| false
| true
| 1,267
|
rd
|
writeNetworkModel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writeNetworkModel.R
\name{writeNetworkModel}
\alias{writeNetworkModel}
\title{Generate JAGS Code for a Network's Model}
\usage{
writeNetworkModel(network, pretty = FALSE)
}
\arguments{
\item{network}{an object of class \code{HydeNetwork}}
\item{pretty}{Logical. When \code{TRUE}, the model is printed to the console
using the \code{cat} function (useful if you wish to copy and paste the
code for manual editing). Otherwise, it is returned as a character
string.}
}
\description{
Based on the parameters given to a network, the code for
each node is generated and all of the node models are pasted into a
single JAGS model script.
}
\examples{
data(PE, package='HydeNet')
Net <- HydeNetwork(~ wells +
pe | wells +
d.dimer | pregnant*pe +
angio | pe +
treat | d.dimer*angio +
death | pe*treat,
data = PE)
#* Default printing
writeNetworkModel(Net)
#* Something a little easier on the eyes.
writeNetworkModel(Net, pretty=TRUE)
}
\seealso{
\code{\link{writeJagsModel}}, \code{\link{writeJagsFormula}}
}
\author{
Jarrod Dalton and Benjamin Nutter
}
|
14727bd8eaed15372055c746e1b3214d1640b82c
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/survivalmodels/man/get_pycox_init.Rd
|
d4437046e172847c7490eb062559d776d5b57387
|
[
"MIT"
] |
permissive
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,345
|
rd
|
get_pycox_init.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers_pycox.R
\name{get_pycox_init}
\alias{get_pycox_init}
\title{Get Pytorch Weight Initialization Method}
\usage{
get_pycox_init(
init = "uniform",
a = 0,
b = 1,
mean = 0,
std = 1,
val,
gain = 1,
mode = c("fan_in", "fan_out"),
non_linearity = c("leaky_relu", "relu")
)
}
\arguments{
\item{init}{\code{(character(1))}\cr Initialization method, see details for list of implemented
methods.}
\item{a}{\code{(numeric(1))}\cr Passed to \code{uniform}, \code{kaiming_uniform}, and \code{kaiming_normal}.}
\item{b}{\code{(numeric(1))}\cr Passed to \code{uniform}.}
\item{mean, std}{\code{(numeric(1))}\cr Passed to \code{normal}.}
\item{val}{\code{(numeric(1))}\cr Passed to \code{constant}.}
\item{gain}{\code{(numeric(1))}\cr Passed to \code{xavier_uniform}, \code{xavier_normal}, and \code{orthogonal}.}
\item{mode}{\code{(character(1))}\cr Passed to \code{kaiming_uniform} and \code{kaiming_normal}, one of
\code{fan_in} (default) and \code{fan_out}.}
\item{non_linearity}{\code{(character(1))}\cr Passed to \code{kaiming_uniform} and \code{kaiming_normal}, one of
\code{leaky_relu} (default) and \code{relu}.}
}
\description{
Helper function to return a character string with a populated pytorch weight
initializer method from \code{torch.nn.init}. Used in \link{build_pytorch_net} to define a weighting
function.
}
\details{
Implemented methods (with help pages) are
\itemize{
\item \code{"uniform"} \cr \code{reticulate::py_help(torch$nn$init$uniform_)}
\item \code{"normal"} \cr \code{reticulate::py_help(torch$nn$init$normal_)}
\item \code{"constant"} \cr \code{reticulate::py_help(torch$nn$init$constant_)}
\item \code{"xavier_uniform"} \cr \code{reticulate::py_help(torch$nn$init$xavier_uniform_)}
\item \code{"xavier_normal"} \cr \code{reticulate::py_help(torch$nn$init$xavier_normal_)}
\item \code{"kaiming_uniform"} \cr \code{reticulate::py_help(torch$nn$init$kaiming_uniform_)}
\item \code{"kaiming_normal"} \cr \code{reticulate::py_help(torch$nn$init$kaiming_normal_)}
\item \code{"orthogonal"} \cr \code{reticulate::py_help(torch$nn$init$orthogonal_)}
}
}
\examples{
\donttest{
if (requireNamespaces("reticulate")) {
get_pycox_init(init = "uniform")
get_pycox_init(init = "kaiming_uniform", a = 0, mode = "fan_out")
}
}
}
|
74b85876d8e32de4d40f2576a7dea64d69bd6da5
|
663763cee873e142ec8da64a9eac151f091bd2a3
|
/man/cluster_name.Rd
|
728e6c4c622f6300570852ebbfcfaf4610573007
|
[] |
no_license
|
cran/ddpcr
|
1121c4066d93281cb003f789cf18f4663122e624
|
e0658fb695a76172c00922987568358372ad3c8e
|
refs/heads/master
| 2023-09-02T04:53:58.533329
| 2023-08-20T22:32:32
| 2023-08-20T23:31:02
| 52,090,808
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 456
|
rd
|
cluster_name.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plate-attribs.R
\name{cluster_name}
\alias{cluster_name}
\title{Get cluster name by ID}
\usage{
cluster_name(plate, cluster)
}
\description{
Get cluster name by ID
}
\examples{
\dontrun{
plate <- new_plate(sample_data_dir())
# see what cluster names exist and their order
clusters(plate)
cluster_name(plate, 2)
cluster_name(plate, 4)
}
}
\keyword{internal}
|
ebc1c0c7cffe4aca65cecd074f3720bd46d662f7
|
5aa3a4ea3dbb4acf594976a2811fc42eb1086376
|
/EPA_WPA/6-Utils.R
|
fa89384b9b6d5fc6b4675341ddff7f7b529195ba
|
[] |
no_license
|
ehess/cfbscrapR-MISC
|
1a1a5d465cbc20b430487af7d23383b7bc0a7e3a
|
e61a10f325ea392d605267104abb090118f6f4da
|
refs/heads/master
| 2022-03-26T17:32:44.263785
| 2019-11-18T02:59:49
| 2019-11-18T02:59:49
| 216,442,017
| 0
| 0
| null | 2019-10-20T23:36:47
| 2019-10-20T23:36:47
| null |
UTF-8
|
R
| false
| false
| 15,574
|
r
|
6-Utils.R
|
# Team abbreviation lookup: download the CFB team list (full name ->
# abbreviation) and cache it locally. The two former self-assignments
# (df$x <- df$x) were no-ops and have been removed.
team_abbrs_df <- read_csv('https://raw.githubusercontent.com/903124/CFB_EPA_data/master/cfb_teams_list.csv')
write.csv(team_abbrs_df, "team_abrs.csv", row.names = FALSE)
library(snakecase)
# Regex alternation of every abbreviation, used later to extract team
# codes from play-by-play text.
team_abbrs_list = paste(team_abbrs_df$abbreviation, collapse="|")
# For every drive in a game, find the next score in the same half and
# return the input drives with two extra columns:
#   NSH - points of the next score (signed from the offense's perspective)
#   DSH - drive_id of the drive on which that score happens
find_game_next_score_half <- function(drive_df){
  # Process drives in chronological order.
  drive_df$drive_id <- as.numeric(drive_df$drive_id)
  drive_df = drive_df %>% arrange(drive_id)
  # Rows that actually score points, excluding "END OF ..." administrative
  # drive results.
  score_plays <- which(drive_df$scoring == TRUE & !str_detect(drive_df$drive_result,"END OF"))
  # seq_len() is safe for a zero-row data frame, unlike 1:nrow(drive_df),
  # which would iterate over c(1, 0).
  final_df = lapply(seq_len(nrow(drive_df)), find_next_score,
                    score_plays_i = score_plays, dat_drive = drive_df) %>% bind_rows()
  final_df2 = cbind(drive_df, final_df)
  return(final_df2)
}
# Helper for find_game_next_score_half(): given the row index of one drive
# (play_i), find the next scoring drive in the same half and return a
# one-row data frame with
#   NSH - the next score's points, positive if the team on offense at
#         play_i ends up scoring, negative if the opponent does
#   DSH - the drive_id where that score happens (play_i's own drive_id
#         when no score follows in the half).
find_next_score <- function(play_i,score_plays_i,dat_drive){
  # Drive results where the DEFENSE scores the touchdown.
  defense_tds <- c("PUNT RETURN TD","FUMBLE TD")
  # First scoring drive at or after play_i (NA if none remain).
  next_score_i <- score_plays_i[which(score_plays_i >= play_i)[1]]
  # No next score, or the next score falls in a different half
  # (1st/2nd period vs 3rd/4th, or regulation vs overtime): NSH = 0.
  if( is.na(next_score_i) |
      dat_drive$start_period[play_i] <= 2 & dat_drive$start_period[next_score_i] %in% c(3,4) |
      dat_drive$start_period[play_i] %in% c(3,4) & dat_drive$start_period[next_score_i] > 4){
    score_drive <- dat_drive$drive_id[play_i]
    next_score <- 0
    return(data.frame(NSH = next_score,
                      DSH = score_drive))
  } else{
    score_drive <- dat_drive$drive_id[next_score_i]
    # Identify current and next score teams
    # if they are the same then you are good
    # if it is different then flip the negative sign
    current_team <- dat_drive$offense[play_i]
    ## If the defense scores
    ## we need to make sure the next_score_team is correct
    next_score_team <- dat_drive$offense[next_score_i]
    if(dat_drive$drive_result[next_score_i] %in% defense_tds){
      next_score_team <- dat_drive$defense[next_score_i]
    }
    # "RETURN TD" drive results invert the usual sign convention, because
    # the listed offense is the team that gave the ball up.
    if(str_detect(dat_drive$drive_result[next_score_i],"RETURN TD")){
      if( identical(current_team,next_score_team)){
        next_score <- -1 * dat_drive$pts_drive[next_score_i]
      } else {
        next_score <- dat_drive$pts_drive[next_score_i]
      }
    } else{
      if( identical(current_team,next_score_team)){
        # if same then you score
        next_score <- dat_drive$pts_drive[next_score_i]
      }else{
        # if different, then other team scored
        next_score <- -1 * dat_drive$pts_drive[next_score_i]
      }
    }
    return(data.frame(NSH = next_score,
                      DSH = score_drive))
  }
}
# Derive the "next play" game state (down, distance, yard line, clock,
# turnover flag) for each play by leading the following row within each
# (game_id, half) group, then patch end-of-half and missing-yardline rows.
# Returns ONLY the derived state columns (renamed without the "new_"
# prefix), in the same row order as the input.
prep_df_epa2 <- function(dat){
  # Play types that end the offense's possession.
  turnover_play_type = c(
    'Fumble Recovery (Opponent)',
    'Pass Interception Return',
    'Interception Return Touchdown',
    'Fumble Return Touchdown',
    'Safety',
    'Interception',
    'Pass Interception',
    'Punt'
  )
  dat = dat %>%
    mutate_at(vars(clock.minutes, clock.seconds), ~ replace_na(., 0)) %>%
    mutate(
      id = as.numeric(id),
      # Periods 1 and 3 get +15 so clock.minutes counts down a full half.
      clock.minutes = ifelse(period %in% c(1, 3), 15 + clock.minutes, clock.minutes),
      raw_secs = clock.minutes * 60 + clock.seconds,
      log_ydstogo = log(adj_yd_line),
      half = ifelse(period <= 2, 1, 2),
      # Placeholders for the next-play state, filled below.
      new_yardline = 0,
      new_down = 0,
      new_distance = 0
    )
  # A play counts as a turnover if its type says so OR if the offense on
  # the following row changes.
  turnover_ind = dat$play_type %in% turnover_play_type
  dat$turnover = 0
  new_offense = !(dat$offense == lead(dat$offense))
  #fourth_down = dat$down == 4, & fourth_down
  t_ind = turnover_ind | (new_offense)
  dat$turnover[t_ind] <- 1
  # Lead the following play's state within each game half.
  dat = dat %>% group_by(game_id,half) %>%
    dplyr::arrange(id,.by_group=TRUE) %>%
    mutate(
      new_down = lead(down),
      new_distance = lead(distance),
      new_yardline = lead(adj_yd_line),
      new_TimeSecsRem = lead(TimeSecsRem),
      new_log_ydstogo = log(new_yardline),
      new_Goal_To_Go = lead(Goal_To_Go),
      # new under two minute warnings
      new_Under_two = new_TimeSecsRem <= 120,
      end_half_game=0) %>% ungroup() %>%
    mutate_at(vars(new_TimeSecsRem), ~ replace_na(., 0))
  # Last play of a half has no following row: flag it and use sentinel
  # values (yardline 99, down 4, distance 99).
  end_of_half_plays = is.na(dat$new_yardline) & (dat$new_TimeSecsRem==0)
  if(any(end_of_half_plays)){
    dat$new_yardline[end_of_half_plays] <- 99
    dat$new_down[end_of_half_plays] <- 4
    dat$new_distance[end_of_half_plays] <- 99
    dat$end_half_game[end_of_half_plays] <- 1
    dat$new_log_ydstogo[end_of_half_plays] <- log(99)
    dat$new_Under_two[end_of_half_plays] <- dat$new_TimeSecsRem[end_of_half_plays] <= 120
  }
  # A zero yard line would make log() infinite; replace with sentinel 99.
  missing_yd_line = dat$new_yardline == 0
  dat$new_yardline[missing_yd_line] = 99
  dat$new_log_ydstogo[missing_yd_line] = log(99)
  # Keep only the derived next-play state columns.
  dat = dat %>% select(
    new_TimeSecsRem,
    new_down,
    new_distance,
    new_yardline,
    new_log_ydstogo,
    new_Goal_To_Go,
    new_Under_two,
    end_half_game,
    turnover
  )
  # Strip the "new_" prefix; column 4 is renamed back to adj_yd_line.
  colnames(dat) = gsub("new_","",colnames(dat))
  colnames(dat)[4] <- "adj_yd_line"
  return(dat)
}
# Reconstruct the post-play game state (down, distance, yard line, clock,
# turnover flag) for every play, largely by parsing play_text with regexes
# for the many special cases (turnovers, sacks/fumbles, punts, field
# goals, penalties, touchbacks, ...). The result feeds the EP model so
# that EPA = EP_after - EP_before can be computed on new games.
# NOTE(review): relies on team_abbrs_df / team_abbrs_list defined at the
# top of this file, and on play_text following ESPN's phrasing -- the
# regexes below are brittle against other feeds.
prep_df_epa <- function(dat) {
  # This function is used to calculate the EP - after the play
  # Then EPA = EP_After - EP_Before
  # Provided so people can calculate EPA on new games in the 2019 season,
  # after which they can feed that to the WPA model.
  turnover_play_type = c(
    'Fumble Recovery (Opponent)',
    'Pass Interception Return',
    'Interception Return Touchdown',
    'Fumble Return Touchdown',
    'Safety',
    'Interception',
    'Pass Interception'
  )
  off_TD = c('Passing Touchdown','Rushing Touchdown')
  def_TD = c('Interception Return Touchdown','Fumble Return Touchdown','Punt Return Touchdown',
             'Fumble Recovery (Opponent) Touchdown')
  # Normalise the clock, add half / placeholder columns, and attach team
  # abbreviations for both offense and defense.
  dat = dat %>%
    mutate_at(vars(clock.minutes, clock.seconds), ~ replace_na(., 0)) %>%
    mutate(
      clock.minutes = ifelse(period %in% c(1, 3), 15 + clock.minutes, clock.minutes),
      raw_secs = clock.minutes * 60 + clock.seconds,
      # coef = home_team == defense,
      # adj_yd_line = 100 * (1 - coef) + (2 * coef - 1) * yard_line,
      # log_ydstogo = log(adj_yd_line),
      half = ifelse(period <= 2, 1, 2),
      new_yardline = 0,
      new_Goal_To_Go = FALSE,
      new_down = 0,
      new_distance = 0,
      turnover = 0
    ) %>%
    left_join(team_abbrs_df,by=c("offense"="full_name")) %>%
    left_join(team_abbrs_df,by=c("defense"="full_name"),suffix=c("_offense","_defense"))
  # Turnover Index: possession changes reset to 1st & 10.
  turnover_ind = dat$play_type %in% turnover_play_type
  dat[turnover_ind, "new_down"] = 1
  dat[turnover_ind, "new_distance"] = 10
  dat[turnover_ind, "turnover"] = 1
  # First down gained: reset to 1st & 10 and advance the yard line.
  first_down_ind = str_detect(dat$play_text, '1ST')
  dat[first_down_ind, "new_down"] = 1
  dat[first_down_ind, "new_distance"] = 10
  dat[first_down_ind, "new_yardline"] = dat[first_down_ind,"adj_yd_line"] - dat[first_down_ind,"yards_gained"]
  # these mess up when you have to regex the yardline, so remove em
  dat$play_text = gsub("1ST down","temp",dat$play_text)
  dat$play_text = gsub("2ND down","temp",dat$play_text)
  dat$play_text = gsub("3RD down","temp",dat$play_text)
  dat$play_text = gsub("4TH down","temp",dat$play_text)
  # Default case (no turnover, no first down): advance down, shrink
  # distance and yard line by the yards gained.
  dat[(!turnover_ind &
         !first_down_ind), "new_down"] = dat[(!turnover_ind &
                                                !first_down_ind), "down"] + 1
  dat[(!turnover_ind &
         !first_down_ind), "new_distance"] = dat[(!turnover_ind &
                                                    !first_down_ind), "distance"] - dat[(!turnover_ind &
                                                                                           !first_down_ind), "yards_gained"]
  dat[(!turnover_ind & !first_down_ind),"new_yardline"] = dat[(!turnover_ind &
                                                                 !first_down_ind), "adj_yd_line"] - dat[(!turnover_ind &
                                                                                                           !first_down_ind), "yards_gained"]
  # Opponent fumble recovery: flip field position for the new offense.
  opp_fumb_rec = dat$play_type=="Fumble Recovery (Opponent)"
  dat[opp_fumb_rec, "new_down"] = 1
  dat[opp_fumb_rec, "new_distance"] = 10
  dat[opp_fumb_rec, "new_yardline"] = 100 - (dat[opp_fumb_rec, "yard_line"] +
                                               dat[opp_fumb_rec, "yards_gained"])
  # Sacks: lose yardage, advance the down.
  sack = str_detect(dat$play_type, "Sack")
  if(any(sack)){
    dat[sack, "new_yardline"] = (dat[sack, "adj_yd_line"] - dat[sack, "yards_gained"])
    dat[sack, "new_down"] = dat[sack, "down"] + 1
    dat[sack, "new_distance"] = dat[sack, "distance"] - dat[sack, "yards_gained"]
    ## sack and fumble, this seems to be weirdly tricky
    sack_fumble = sack & (str_detect(dat$play_text,"fumbled"))
    if(any(sack_fumble)){
      # Strip the return description so the last number in play_text is
      # the recovery spot.
      dat[sack_fumble,"play_text"] = sapply(dat[sack_fumble,"play_text"],function(x){
        gsub("return.*","",x)
      })
      # gsub("return.*","",dat[sack_fumble,"play_text"])
      q = as.numeric(stringi::stri_extract_last_regex(dat$play_text[sack_fumble],"\\d+"))
      # now identify, if you need to subtract 100?
      receovered_string = str_extract(dat$play_text[sack_fumble], '(?<=, recovered by )[^,]+')
      dat[sack_fumble,"coef"] = gsub("([A-Za-z]+).*", "\\1",receovered_string)
      inds = which(sack_fumble)
      # If the offense recovered its own fumble, keep the advanced down;
      # otherwise it is a turnover and the new offense starts at 1st down.
      dat[sack_fumble,"new_down"] = unlist(sapply(inds,function(x){
        ifelse(dat[x,"abbreviation_offense"] == dat[x,"coef"],
               dat[x,"new_down"],1)
      }))
      dat[sack_fumble,"new_yardline"] = abs(
        ((1-!(dat[sack_fumble,"coef"] == dat[sack_fumble,"abbreviation_defense"])) * 100) - q
      )
    }
  }
  # Safety: opponent takes over; sentinel yard line 99.
  safety = dat$play_type == 'Safety'
  if(any(safety)){
    dat[safety,"new_yardline"] = 99
  }
  # Offensive touchdowns: reset to 1st & 10.
  off_td_ind = dat$play_type %in% off_TD
  if(any(off_td_ind)){
    dat[off_td_ind,"new_down"] = 1
    dat[off_td_ind,"new_distance"] = 10
  }
  # Fake yardline for any touchdown play (offensive or defensive).
  temp_inds = (off_td_ind | (dat$play_type %in% def_TD))
  if(any(temp_inds)){
    dat[temp_inds,"new_yardline"] = 99
  }
  # Turnovers on Downs: failing to convert 4th down hands the ball over.
  tod_ind = (!off_td_ind) & (dat$new_down>4)
  if(any(tod_ind)){
    dat[tod_ind,"turnover"] = 1
    dat[tod_ind,"new_down"] = 1
    dat[tod_ind,"new_distance"] = 10
    tod_ind = tod_ind & dat$play_type == "Punt"
    dat[tod_ind,"new_yardline"] = 100-dat[tod_ind,"new_yardline"]
  }
  ## proper placement of punt: parse "... to the XYZ 34" from play_text to
  ## locate where the receiving team takes over.
  punt = c("Punt")
  punt_ind = dat$play_type %in% punt
  if(any(punt_ind)){
    punt_play = dat[punt_ind,] %>% pull(play_text)
    double_try = stringi::stri_extract_last_regex(punt_play,'(?<=the )[^,]+')
    q = as.numeric(stringi::stri_extract_last_regex(double_try,"\\d+"))
    dat[punt_ind,"coef"] = gsub("([A-Za-z]+).*", "\\1",double_try)
    dat[punt_ind,"new_yardline"] = abs(((1-(dat[punt_ind,"coef"] == dat[punt_ind,"abbreviation_defense"])) * 100) - q)
  }
  # missed field goal, what happens
  miss_fg <- c("Field Goal Missed","Missed Field Goal Return")
  miss_fg_ind = dat$play_type %in% miss_fg
  if(any(miss_fg_ind)){
    dat[miss_fg_ind,"new_down"] = 1
    dat[miss_fg_ind,"new_distance"] = 10
    # if FG is within 20, team gets it at the 20
    # otherwise team gets it at the LOS (add the 17 yards back)
    adj_yds = dat[miss_fg_ind,] %>% pull(adj_yd_line)
    dat[miss_fg_ind,"new_yardline"] = ifelse(adj_yds<=20,80,100 - (adj_yds-17))
  }
  # handle missed field goals here
  # just work out the yards here
  miss_fg_return = "Missed Field Goal Return"
  miss_fg_return_ind = dat$play_type == miss_fg_return
  if(any(miss_fg_return_ind)){
    miss_return_play = dat[miss_fg_return_ind,] %>% pull(play_text)
    double_try = stringi::stri_extract_last_regex(miss_return_play,'(?<=the )[^,]+')
    q = as.numeric(stringi::stri_extract_last_regex(double_try,"\\d+"))
    dat[miss_fg_return_ind,"coef"] = gsub("([A-Za-z]+).*", "\\1",double_try)
    dat[miss_fg_return_ind,"new_yardline"] = abs(((1-(dat[miss_fg_return_ind,"coef"] == dat[miss_fg_return_ind,"abbreviation_defense"])) * 100) - q)
  }
  # missed field goal return: flip field and add the return yards.
  # NOTE(review): overlaps with the block above for the same play type;
  # this later assignment wins -- confirm that is intended.
  block_return <- c("Missed Field Goal Return")
  block_inds = dat$play_type %in% block_return
  if(any(block_inds)){
    dat[block_inds,"new_down"] = 1
    dat[block_inds,"new_distance"] = 10
    dat[block_inds,"new_yardline"] = (100 - dat[block_inds,"adj_yd_line"]) - dat[block_inds,"yards_gained"]
  }
  # interception return (and blocked FG): locate the spot via the last
  # team-abbreviation + number mentioned in play_text.
  int_ret <- c("Pass Interception Return","Blocked Field Goal")
  int_inds = dat$play_type %in% int_ret
  if(any(int_inds)){
    dat[int_inds,"new_down"] = 1
    dat[int_inds,"new_distance"] = 10
    # extract the yardline via regex
    # this sucks but do it
    q = as.numeric(stringi::stri_extract_last_regex(dat$play_text[int_inds],"\\d+"))
    # now identify, if you need to subtract 100?
    temp_team = str_extract_all(dat$play_text[int_inds], team_abbrs_list)
    team_team = unlist(sapply(temp_team,function(x){
      ind = length(x)
      val = x[ind]
      if(length(val)==0){
        return(NA)
      }
      return(val)
    }))
    dat[int_inds,"coef"] = team_team
    dat[int_inds,"new_yardline"] = abs(((1-!(dat[int_inds,"coef"] == dat[int_inds,"abbreviation_defense"])) * 100) - q)
  }
  # Touchback: receiving team starts at its own 20 (yardline 80).
  touchback = str_detect(dat$play_text,"touchback")
  dat[touchback,"new_yardline"] = 80
  dat[touchback,"new_down"] = 1
  ## deal with penalties as they also throw this off
  penalty = (str_detect(dat$play_text,"Penalty"))
  if(any(penalty)){
    penalty_string = str_extract(dat$play_text[penalty], '(?<=Penalty,)[^,]+')
    double_try = str_extract(penalty_string,'(?<=to the )[^,]+')
    q = as.numeric(stringi::stri_extract_last_regex(double_try,"\\d+"))
    dat[penalty,"coef"] = gsub("([A-Za-z]+).*", "\\1",double_try)
    # first calculate things for regular cases
    dat[penalty,"new_yardline"] = abs(((1-(dat[penalty,"coef"] == dat[penalty,"abbreviation_defense"])) * 100) - q)
  }
  # Declined penalties: the play result stands.
  declined = penalty & str_detect(dat$play_text,"declined")
  if(any(declined)){
    dat[declined,"new_yardline"] = dat[declined,"adj_yd_line"] - dat[declined,"yards_gained"]
  }
  # Non-positive distance left over: treat as a fresh set of downs.
  missing_inds = dat$new_distance <= 0
  dat[missing_inds,"new_down"] = 1
  dat[missing_inds,"new_distance"] = 10
  dat[missing_inds,"new_yardline"] = dat[missing_inds,"adj_yd_line"] - dat[missing_inds,"yards_gained"]
  # Made field goals: opponent takes over at its own 20 after the kickoff.
  fg_good = dat$play_type %in% c("Field Goal Good","Missed Field Goal Return Touchdown")
  if(any(fg_good)){
    # temp hold anyways, we are going to replace the post EPA here with 3
    dat[fg_good,"new_down"] = 1
    dat[fg_good,"new_distance"] = 10
    dat[fg_good,"new_yardline"] = 80
  }
  fifty_ydline = str_detect(dat$play_text, "50 yard line")
  dat[fifty_ydline, "new_yardline"] = 50
  # NOTE(review): `testing` is computed but never used -- left for
  # debugging; consider removing.
  testing = dat %>% filter(new_yardline<0)
  ## If 0, reset to 25
  #zero_yd_line = dat$new_yardline == 0
  #dat[zero_yd_line,"new_yardline"] = 25
  dat = dat %>%
    mutate(
      new_Goal_To_Go = ifelse(
        str_detect(play_type, "Field Goal"),
        new_distance <= (new_yardline - 17),
        new_distance <= new_yardline
      ),
      new_TimeSecsRem = lead(TimeSecsRem),
      new_log_ydstogo = log(new_yardline))
  ## fix NA's log_yds on blocked field goals.
  blk_fg_na = dat$play_type == "Blocked Field Goal" & is.na(dat$new_log_ydstogo)
  dat$new_yardline[blk_fg_na] = 100 - dat$adj_yd_line[blk_fg_na]
  dat$new_log_ydstogo[blk_fg_na] = log(dat$new_yardline[blk_fg_na])
  # Keep only the derived next-play state columns, renamed without the
  # "new_" prefix.
  dat = dat %>% select(new_TimeSecsRem,new_down,new_distance,new_yardline,new_log_ydstogo,turnover) %>%
    mutate_at(vars(new_TimeSecsRem), ~ replace_na(., 0)) %>%
    rename(adj_yd_line=new_yardline)
  colnames(dat) = gsub("new_","",colnames(dat))
  ## seems to fail here, figure out why.
  ## doesn't like
  # Zero-yardline turnovers: fall back to the opponent's 20 (yardline 80).
  adj_to = (dat$adj_yd_line == 0) & (turnover_ind)
  dat$log_ydstogo[adj_to] = log(80)
  dat$adj_yd_line[adj_to] = 80
  dat$Under_two = dat$TimeSecsRem <= 120
  return(dat)
}
|
b6edf22eb1120263f1d74abb7eaec3dd2cab77c8
|
ced8517f2dba54b00a3d14741675dba5b9179924
|
/zillow.R
|
dd6efdbc45ee2777603a2aaa42e32302e0b983cf
|
[] |
no_license
|
JasonGregory/Kaggle-Contests
|
eaad40be5d3d72b86ed219c7a681f362273f1a1e
|
fb761a831b4a5f0241b6e63d81179a30bf4b1705
|
refs/heads/master
| 2020-12-02T16:28:40.250295
| 2017-08-31T20:30:07
| 2017-08-31T20:30:07
| 96,278,373
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,449
|
r
|
zillow.R
|
# Misc --------------------------------------------------------------------
# Re-organize the prepare stage. 1st pull in data; 2nd describe the data; 3rd Null and distinct values; 4th Recategorize the variables
# Explore the variables relative to the explanatory variable
# Next Tasks --------------------------------------------------------------
# Think about vtreat & identifying important variables. (possibly sample if data is too large for speed)
# Categorize variables into different sections prep_importance()
# linear importance, non-linear, etc.
# In a later stage look at possibly re-evaluating some of the non-important variables?
# Do lots of different plots to evaluate the data! Think about box plots and how to do quickly.
# Maybe think about a sample function to just sample a portion of the data.
# File notes --------------------------------------------------------------
# properties_2016.csv; all properties with home features for 2016; 1 row per property
# properties_2017.csv; all properties with home features for 2017 (available 10/2)
# train_2016.csv; training set from 1/16 - 12/31/16; provides log error and transaction dates
# train_2017.csv; training set from 1/2017 - 9/15/17 (available 10/2)
# Prediction --------------------------------------------------------------
# logerror = log(Zestimate) − log(SalePrice)
# Predict logerror for the months in Fall 2017
# Additional to-do Items --------------------------------------------------
# The inital prep state would show plots for outliers, the variable being predicted, describe tables
# null values & distinct values. In addition it would show any prep functions to use. Possibly date fields..
# Initial data load & write -----------------------------------------------
# One-time ingest of the raw Kaggle files; cached as .rds for faster reloads.
(trainData <- read_csv(normalizePath("data/zillow/train_2016_v2.csv")))
(propertyData <- read_csv(normalizePath("data/zillow/properties_2016.csv")))
(dataDictionary <- rio::import(normalizePath("data/zillow/zillow_data_dictionary.xlsx")))
read_csv(normalizePath("data/zillow/sample_submission.csv"))
write_rds(propertyData, normalizePath("data/zillow/propertyData.rds"))
write_rds(trainData, normalizePath("data/zillow/trainData.rds"))
# Read in data ------------------------------------------------------------
# (i_data <- read_rds(normalizePath("data/zillow/propertyData.rds")))
# NOTE(review): absolute, machine-specific path below; the commented-out
# relative-path version above is the portable one.
(i_data <- read_rds("C:/Users/jgregor1/Desktop/propertyData.rds"))
(train_data <- read_rds(normalizePath("data/zillow/trainData.rds")))
# (dataDict <- rio::import(normalizePath("data/zillow/zillow_data_dictionary.xlsx")))
# Initial look at files ---------------------------------------------------
# prep_describe()/prep_distinct()/prep_plot()/prep_select() are project
# helpers defined elsewhere in this repository (not in this file).
i_describe <- prep_describe(i_data); print(i_describe, n = Inf)
t_describe <- prep_describe(train_data); print(t_describe, n = Inf)
(var_distinct <- prep_distinct(i_describe))
prep_distinct(t_describe)
# Join data -----------------------------------------
# Keep only properties that have a 2016 transaction (and hence a logerror).
n1_data <- i_data %>%
inner_join(train_data, by = "parcelid")
n1_describe <- prep_describe(n1_data) ; print(n1_describe, n = Inf)
# Variable Prep -----------------------------------------------------------
# Remove unique variables and set outcome variable-------------
var_outcome <- "logerror"
var_explanatory <- setdiff(n1_data %>% colnames(), c(var_outcome, var_distinct))
# Analyze null and distinct values --------------
prep_plot(filter(n1_describe, variable %in% var_explanatory))
# Drop predictors that are >= 25% missing.
var_null <- n1_describe %>% filter(variable %in% var_explanatory, null_perc >= .25) %>% prep_select(variable)
var_explanatory <- setdiff(var_explanatory, var_null)
prep_plot(filter(n1_describe, variable %in% var_explanatory))
# Analyze location variables later
var_location <- c("longitude", "latitude", "rawcensustractandblock", "censustractandblock", "regionidzip", "regionidcity")
var_explanatory <- setdiff(var_explanatory, var_location)
prep_plot(filter(n1_describe, variable %in% var_explanatory))
n1_data <- n1_data %>% select(c(var_outcome, var_explanatory))
n1_describe <- prep_describe(n1_data); print(n1_describe, n = Inf)
# Replace null values (probably run the code below after the importance plot is done)
# n2_data <- prep_inpute(n1_data)
#
#
# n2_describe <- prep_describe(n2_data); print(n2_describe, n = Inf)
# prep_plot(n1_describe)
# prep_plot(n2_describe, plot_type = "distinct")
#
# # Create factor variables
# n2_describe %>% arrange(desc(dist_count)) %>% print(n = Inf)
#
# # leave as is for now
# # var_factor <- n2_describe %>% arrange(desc(dist_count))
# # n2_data <- n2_data %>% mutate_at(var_factor, as.factor)
# Analyze Variable Importance ------------------------------------------------
importance <- explore_importance(n1_data, var_outcome)
# NOTE(review): despite the name, this keeps rows with linear_pValue < .05
# (i.e. linearly significant) that rank poorly in the boosted model —
# confirm the intended filter direction.
non_significant <- importance %>%
select(variable, origName, spearman, pearson, r_squared, linear_pValue, bst_rank, rfm_rank) %>%
filter(linear_pValue < .05 & bst_rank > length(importance$variable)*.5) %>%
prep_select(variable)
# NOTE(review): `variable != non_significant` recycles the vector and
# compares elementwise; `!(variable %in% non_significant)` is almost
# certainly what is meant here.
importance %>%
select(variable, origName, spearman, pearson, r_squared, linear_pValue, bst_rank, rfm_rank) %>%
filter(variable != non_significant) %>% print(n = Inf)
# look into using the plotting function to zero in on some of the variables.
# Retrieving linear model stuff
# Predicted r-squared
# Predicted R^2 = 1 - PRESS / TSS, where TSS is the total sum of squares
# taken from the anova decomposition of the fitted model.
# Depends on the sibling PRESS() helper defined in this file.
pred_r_squared <- function(linear.model) {
  tss <- sum(anova(linear.model)$"Sum Sq")
  press <- PRESS(linear.model)
  1 - press / tss
}
# PRESS statistic: sum of squared leave-one-out (predicted) residuals,
# computed from the ordinary residuals and the hat (leverage) values
# rather than refitting n models.
PRESS <- function(linear.model) {
  loo_resid <- residuals(linear.model) / (1 - lm.influence(linear.model)$hat)
  sum(loo_resid^2)
}
# NOTE(review): `my.lm` is never defined in this script — this call errors
# unless a model of that name already exists in the session.
pred.r.squared <- pred_r_squared(my.lm)
pred.r.squared
# Example
# Set up some data
x <- seq(from=0, to=10, by=0.5)
y1 <- 2*x + rnorm(length(x)) # not seeded, so results vary run to run
# We want to compare two different linear models:
my.lm1 <- lm(y1 ~ sin(x))
my.lm2 <- lm(y1 ~ x)
# We will use plyr for this.
# NOTE(review): attaching plyr after dplyr masks several dplyr verbs.
library(plyr)
# Now call model_fit_stats() for each lm model that
# we have, using ldply. This returns the results in
# a data frame that is easily used for further
# calculations.
# NOTE(review): model_fit_stats() is defined further down this script, so
# sourcing the file top-to-bottom fails here; works only interactively.
ldply(list(my.lm1, my.lm2), model_fit_stats)
# One-row data.frame of fit statistics for a fitted lm:
# R^2, adjusted R^2, predicted R^2, and the PRESS statistic.
# Uses the sibling pred_r_squared() and PRESS() helpers from this file.
model_fit_stats <- function(linear.model) {
  fit_summary <- summary(linear.model)
  data.frame(
    r.squared = fit_summary$r.squared,
    adj.r.squared = fit_summary$adj.r.squared,
    pred.r.squared = pred_r_squared(linear.model),
    press = PRESS(linear.model)
  )
}
#' Predicted R-squared for a fitted linear model.
#' NOTE(review): duplicate of the pred_r_squared() defined earlier in this
#' script; whichever definition is sourced last wins.
pred_r_squared <- function(linear.model) {
  # Total sum of squares from the anova() decomposition.
  anova_tab <- anova(linear.model)
  tss <- sum(anova_tab[["Sum Sq"]])
  # Predictive R^2 based on the leave-one-out PRESS statistic.
  1 - PRESS(linear.model) / tss
}
#' PRESS (predicted residual sum of squares) via hat values.
#' NOTE(review): duplicate of the PRESS() defined earlier in this script.
PRESS <- function(linear.model) {
  # Leave-one-out residuals without refitting: r_i / (1 - h_i).
  hat_vals <- lm.influence(linear.model)$hat
  pred_resid <- residuals(linear.model) / (1 - hat_vals)
  sum(pred_resid^2)
}
# General multi plot function that is more flexible
# Scratch work: build paired with/without-outlier plots and feed them to the
# project helper explore_plot().  prep_outlier()/prep_rankColumn()/
# explore_plot() are project helpers defined elsewhere in the repository.
# NOTE(review): depends on `n2_data`, which is only created by the
# commented-out prep_inpute() step above.
data <- n2_data
vars <- data %>% select_if(is.numeric) %>% select(1:5) %>% colnames()
# Histogram of the outcome, with vs without outliers (cube-root bin count).
p1 <- data %>%
ggplot(aes(x = logerror)) +
geom_histogram(bins = nrow(data)^(1/3)) +
labs(title = "With outliers")
p2 <- prep_outlier(data) %>% # need to think about how to fix for outliers. Need variable to iterate. Maybe just make the second plot with no outliers.
ggplot(aes(x = logerror)) +
geom_histogram(bins = nrow(prep_outlier(data))^(1/3)) +
labs(title = "Without outliers")
# index plots
data <- n2_data %>%
select_if(is.numeric) %>%
select(1, 3:5) %>%
prep_rankColumn
x_vars <- data %>% select(contains("index")) %>% colnames()
y_vars <- setdiff(colnames(data), x_vars)
p3 <- data %>%
ggplot(aes_string(x = x_vars[1], y = y_vars[1])) +
geom_point(alpha = 1/12) +
labs(title = "With outliers")
p4 <- prep_outlier(data, y_vars[2]) %>%
ggplot(aes_string(x = x_vars[1], y = y_vars[1])) +
geom_point(alpha = 1/12) +
labs(title = "Without outliers")
explore_plot(plot1 = p1, x_vars = vars, plot2 = p2)
explore_plot(plot1 = p3, x_vars = x_vars, y_vars = y_vars, plot2 = p4)
# WVPlots experiments: scatter with marginal histograms, then box plots.
RShowDoc('WVPlots_examples',package='WVPlots')
WVPlots::ScatterHist(prep_outlier(data, "logerror"), "taxamount", "logerror", smoothmethod="lm", "Linear fit")
WVPlots::ScatterHist(prep_outlier(data, "logerror"), "taxamount", "logerror", smoothmethod="identity", "Relation Plot")
data <- data %>% mutate(bathroomcnt = as.factor(bathroomcnt),
bedroomcnt = as.factor(bedroomcnt)
)
p5 <- WVPlots::ScatterBoxPlot(data, "bathroomcnt", "logerror", pt_alpha=0.002, title="Scatter/Box plot")
# Re-aim an existing ggplot at a different x aesthetic.
p5 + aes(x = bedroomcnt)
# Old Stuff ---------------------------------------------------------------
# Other plotting stuff
# "%+%" overrides the data for ggplot
# ggplot_build(plot2) looks at underlying data
# q <- ggplot_build(plot2)
# q$data[[1]]$colour <- "black" # change to different color
# q <- ggplot_gtable(q) build the table
# in ggvis use something like get_data
# Use class(plot1)[1] to determine the type of plot being used
# pltList <- list() Create a list of plots to reference later
# Annotation logic. Work on later.
# equation = function(x) {
# lm_coef <- list(a = round(coef(x)[1], digits = 2),
# b = round(coef(x)[2], digits = 2),
# r2 = round(summary(x)$r.squared, digits = 2));
# lm_eq <- substitute(italic(y) == a + b %.% italic(x)*","~~italic(R)^2~"="~r2,lm_coef)
# as.character(as.expression(lm_eq));
# }
# +
# annotate("rect", xmin = 0.00, xmax = 0.1, ymin = -0.056, ymax = -0.044, fill="white", colour="red") +
# annotate("text", x = 0.05, y = -0.05, label = equation(fit), parse = TRUE)
# Looking at transaction dates
# Eventually have this applied to any date fields.
# Bar charts of transaction volume at three granularities; month()/year()
# extract the date parts (lubridate-style accessors).
plot1 <- train_data %>%
ggplot(aes(x = transactiondate)) +
geom_bar() +
labs(title = "By Date")
plot2 <- train_data %>%
mutate(month = as.factor(month(transactiondate))) %>%
ggplot(aes(x = month)) +
geom_bar() +
labs(title = "By Month")
plot3 <- train_data %>%
mutate(year = as.factor(year(transactiondate))) %>%
ggplot(aes(x = year)) +
geom_bar() +
labs(title = "By Year")
# Stack all three panels vertically.
gridExtra::grid.arrange(plot1, plot2, plot3, nrow=3)
#### Pulling in correlation for all variables
# vtreat "Z" design: indicator/treatment plan built without using the outcome.
# NOTE(review): `test` is not defined in this script — presumably a sample of
# the prepared data; confirm before running.
treatment <- designTreatmentsZ(test, colnames(test), verbose = FALSE)
treated <- as_tibble(prepare(treatment, test))
treated <- select(treated, -contains("catP"))
treated_describe <- prep_describe(treated) # Need to add to the describe function some additional fields from the vtreat summary (like original name).
treated_describe <- treated_describe %>%
left_join(treatment$scoreFrame, c("variable" = "varName")) %>%
select(variable, origName, code, class, total_count, dist_count, dist_perc, null_count, mean)
treated_describe %>% print(n = Inf)
# rsq & sig are important when not doing the TreatmentsZ but another one.
# Pairwise correlation matrix (rcorr), reshaped to one row per ordered
# variable pair and ranked by absolute correlation within each variable.
correlations <- rcorr(as.matrix(treated))
testCor <- as.tibble(as.data.frame(correlations$r))
corTable <- testCor %>%
rownames_to_column(var = "variable") %>%
gather(-variable, key = "comparison_variable", value = "correlation") %>%
select(variable, comparison_variable, correlation, everything()) %>%
arrange(variable, desc(abs(correlation))) %>%
filter(variable != comparison_variable)
corTable <- corTable %>%
group_by(variable) %>%
mutate(avg_corr = mean(correlation),
max_corr = max(abs(correlation)),
rank_corr = row_number(desc(abs(correlation)))) %>%
ungroup()
# Show each variable's single most-correlated partner.
# NOTE(review): max_corr is abs(correlation), so this filter misses variables
# whose strongest partner correlation is negative — confirm intent.
corTable %>%
filter(max_corr == correlation) %>%
print(n = Inf)
# use this then to join on the other exploratory table analysis. Possible to pull the top 2 correlated variables if desired.
#### Decision tree models
# 75/25 train/test split on the treated data; column 1 is assumed to be the
# outcome (logerror_clean) when building the DMatrix objects.
trainIndex <- caret::createDataPartition(treated$logerror_clean,
p = .75,
list = FALSE,
times = 1)
dataTrain <- treated[trainIndex,]
dataTest <- treated[-trainIndex,]
dTrain <- xgb.DMatrix(data = as.matrix(dataTrain[,-1]), label = dataTrain$logerror_clean)
dTest <- xgb.DMatrix(data = as.matrix(dataTest[,-1]), label = dataTest$logerror_clean)
# Boosted Model
watchlist <- list(train=dTrain, test=dTest)
bst <- xgb.train(data=dTrain, nround=200, watchlist=watchlist, objective = "reg:linear",
eval_metric = "rmse", early_stopping_rounds = 5, print_every_n = 5, eta = .1)
bst_importance <- as.tibble(xgb.importance(model = bst))
# Random Forest Model (Useful for comparison purposes for correlated variables.)
# A single boosting round over many parallel subsampled trees emulates a
# random forest within xgboost.
rfm <- xgboost(data = dTrain, max.depth = 4, tree_method = "approx",
nthread = 10, num_parallel_tree = 200, subsample = 0.5, colsample_bytree =0.5,
nround = 1, objective = "reg:linear", metrics = "rmse")
rfm_importance <- as.tibble(xgb.importance(model = rfm))
### Pull from the Frequency column to join with the correlation data
# Linear Model (possibly include something here. Maybe be a duplicate of an earlier process.)
# NOTE(review): `sparse_matrix` / `output_vector` are not defined in this
# script; this line looks like a template copied from the xgboost docs.
model2 <- xgboost(data = sparse_matrix, label = output_vector, booster = "gblinear", nround = 15,
alpha = .0001, lambda = 1, objective = "binary:logistic", metrics = "error")
####
# NOTE(review): explore_describe() is defined below this call site.
explore_describe(n2_data, var_outcome) %>% print(n = Inf)
#' Per-variable association of every numeric column with an outcome.
#'
#' For each numeric column of `data`, reports Spearman and Pearson
#' correlation with `var_outcome`, the squared Pearson correlation, and the
#' p-value of the corresponding coefficient from one multiple linear
#' regression of `var_outcome` on all other columns.
#'
#' @param data A data.frame containing `var_outcome` plus candidate predictors.
#' @param var_outcome Character scalar naming the outcome column.
#' @return A tibble with one row per variable (intercept row dropped).
explore_describe <- function(data, var_outcome) {
  num_data <- select_if(data, is.numeric)
  # FIX: correlate against the outcome column of `data` itself. The original
  # closures captured the global `n2_data`, so the `data` argument was
  # silently ignored whenever a different data frame was passed in.
  outcome <- select(data, var_outcome)
  summaryFns <- list(
    spearman = function(x) cor(x, outcome, method = "spearman"),
    pearson = function(x) cor(x, outcome, method = "pearson"),
    r_squared = function(x) cor(x, outcome, method = "pearson")^2
    # Pearson measures the linear relationship; Spearman measures any
    # monotonic relationship (one family of non-linear association).
  )
  summary_data <- as.data.frame(sapply(summaryFns, function(fn){num_data %>% summarise_all(fn)}))
  if (ncol(summary_data) == 0) {
    # No numeric columns: placeholder row keeps the join below well-formed.
    summary_data <- tibble(variable = "none")
  } else {
    summary_data <- summary_data %>%
      rownames_to_column(var = "variable") %>%
      unnest()
  }
  # Coefficient-level stats from one multiple regression on all columns.
  # (Renamed from `lm` so stats::lm is not shadowed inside this function.)
  lm_stats <- tidy(lm(paste(var_outcome, "~ ."), data = data))
  summary_data %>%
    full_join(lm_stats, by = c("variable" = "term")) %>%
    dplyr::rename(linear_std.error = std.error, linear_p.value = p.value) %>%
    select(variable, spearman, pearson, r_squared, linear_p.value) %>%
    filter(variable != "(Intercept)") %>%
    as_tibble()
}
# Misc Notes --------------------------------------------------------------
# Parallel Notes Section --------------------------------------------------------
# library("parallel")
#
# # max(1, detectCores() - 1)
#
# # useful windows function for multi cores: memory.limit()/memory.size()
#
# # parallel tips: http://gforge.se/2015/02/how-to-go-parallel-in-r-basics-tips/
#
# # for mac
# cl <- makeCluster(detectCores(), type = "FORK")
#
# # for windows (doesn't seem worth it to try this method)
# cl <- makeCluster(detectCores(), type = "PSOCK")
# clusterEvalQ(cl, {library(tidyverse); library(dplyr); library(moments)})
# clusterExport(cl, list("data", "summaryFnsAll"))
#
# parSapply(cl, summaryFnsAll, function(fn){data %>% summarise_all(fn)})
#
# stopCluster(cl)
|
a6ee2e44aa0cb045c1f816b6a792a5ce733f7a2f
|
2e731f06724220b65c2357d6ce825cf8648fdd30
|
/dexterMST/inst/testfiles/mutate_booklet_score/libFuzzer_mutate_booklet_score/mutate_booklet_score_valgrind_files/1612727887-test.R
|
7bec56a377ecf954633094a44eddd0ecd2ed7ce3
|
[] |
no_license
|
akhikolla/updatedatatype-list1
|
6bdca217d940327d3ad42144b964d0aa7b7f5d25
|
3c69a987b90f1adb52899c37b23e43ae82f9856a
|
refs/heads/master
| 2023-03-19T11:41:13.361220
| 2021-03-20T15:40:18
| 2021-03-20T15:40:18
| 349,763,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 725
|
r
|
1612727887-test.R
|
# Auto-generated fuzzer regression case (libFuzzer/valgrind harness for
# dexterMST).  `testlist` is the captured argument list that previously
# triggered a failure; replaying it checks the fix still holds.
testlist <- list(id = NULL, score = NULL, id = NULL, booklet_id = -1L, item_score = c(16777215L, -8816263L, 2038004089L, 2030698666L, 1051377666L, -15663175L, -1183008511L, 0L, 4351L, -1179026247L, 16777216L, 121L, 2038004089L, 2038004089L, 2038004089L, 2038004089L, 2038004089L, 2038004089L, 2037972992L, 1210745L, 2038004089L, 2038004089L, 16711935L, 2038004089L, 2038004089L, 2038004089L, 2037984043L, 724249579L, 539365631L, 232325120L, 0L, 0L, 32768L, -2130706433L, -254L, 16714240L, -1438733632L, 50270463L, -1179026247L, -61991L, -640034376L, -1195853640L, -1195853640L, -1195853640L, -1195845159L, -1L), person_id = c(-640034343L, NA))
# ::: reaches the unexported compiled entry point inside dexterMST.
result <- do.call(dexterMST:::mutate_booklet_score,testlist)
str(result)
|
0cc591591dd3d329588f1ed83cf8d93361ae45e5
|
01a33c3170bf018372ee3fc7e77ee8dd52d028e5
|
/gbd/get_summary_files.R
|
8cc7bf30de1f2238e5a8596aeaa05e3b658555c7
|
[] |
no_license
|
deepajag/gbdeppaiml
|
0adcc098c0e9436e39232a70f1ed0eca7400c568
|
3a21fd940d8a0a03847f59dd57de5a07750c2533
|
refs/heads/master
| 2021-09-09T22:06:25.669158
| 2021-09-03T17:17:49
| 2021-09-03T17:17:49
| 212,451,317
| 0
| 1
| null | 2019-10-02T22:15:53
| 2019-10-02T22:15:53
| null |
UTF-8
|
R
| false
| false
| 1,330
|
r
|
get_summary_files.R
|
## ---------------------------
## Script name:
## Purpose of script:
##
## Author: Maggie Walters
## Date Created: 2018-04-11
## Email: mwalte10@uw.edu
## ---------------------------
##
## Notes:
##
##
## ---------------------------
## Used in basically every script
## Environment bootstrap: group-writable umask, OS detection, and loading the
## eppasm and gbdeppaiml packages straight from the user's home directory.
Sys.umask(mode = "0002")
windows <- Sys.info()[1][["sysname"]]=="Windows"
root <- ifelse(windows,"J:/","/home/j/")
user <- ifelse(windows, Sys.getenv("USERNAME"), Sys.getenv("USER"))
eppasm_dir <- paste0(ifelse(windows, "H:", paste0("/ihme/homes/", user)), "/eppasm/")
setwd(eppasm_dir)
devtools::load_all()
gbdeppaiml_dir <- paste0(ifelse(windows, "H:", paste0("/ihme/homes/", user)), "/gbdeppaiml/")
setwd(gbdeppaiml_dir)
devtools::load_all()
library(parallel)
## CLI args: <run.name> <loc>; interactive fallback defaults below.
args <- commandArgs(trailingOnly = TRUE)
if(length(args) > 0) {
run.name = args[1]
loc = args[2]
} else {
run.name = '2020_ind_test_agg4'
loc = 'IND_4856'
}
loc.table <- get_locations(gbd_year = 2020, hiv_metadata = T)
## Prefer the run's array table for the location list when it exists;
## otherwise fall back to all EPP locations.
## NOTE(review): `locs` is computed but not used below — presumably consumed
## by array-job variants of this script; confirm before removing.
if(file.exists(paste0('/ihme/hiv/epp_input/gbd20/',run.name,'/array_table.csv'))){
array.dt <- fread(paste0('/ihme/hiv/epp_input/gbd20/',run.name,'/array_table.csv'))
locs <- unique(array.dt[,loc_scalar])
}else{
locs <- loc.table[epp == 1, ihme_loc_id]
}
## Build and write summary outputs for this location/run.
get_summary(loc, run.name = run.name, gbdyear = 'gbd20',
paediatric = T, old.splits = F)
|
4656be336d251e9acf3320d0574f90d8b5c7ca63
|
ec7be542fd7b75e5741bbf5b0605f1e993d1733a
|
/R/plot_feature.R
|
1be500d34269169b73a8e05894c53db327cdabeb
|
[] |
no_license
|
czhu/R_nanopore
|
d2b67d50005ce7468b1da7fa13de6b93045e8954
|
4e13e92e104a5ba2c6a1c772f077ef15ea199193
|
refs/heads/master
| 2023-07-14T11:02:16.083619
| 2021-08-26T15:29:14
| 2021-08-26T15:29:14
| 98,747,970
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,162
|
r
|
plot_feature.R
|
## from tiling array package
# this function sets up a new viewport. It is used by plotAlongChromLegend,
# plotSegmentationHeatmap and plotSegmentationDots when they are called as
# stand-alone functions (ie when vpr is not specified)
# Set up a fresh viewport stack for stand-alone panel plots (adapted from the
# tiling array package).  With `main` given, a two-row layout (title + data)
# is pushed and the title drawn; otherwise only a single data row is pushed.
# Returns the layout row index of the "data" panel.
new_vp <- function(main, cexMain=1, dataPanelHeight=1, vpHeight=0.7, titleOffSet=0) {
  has_title <- !missing(main)
  vpr <- if (has_title) {
    c("title" = 0.1, "data" = dataPanelHeight)
  } else {
    c("data" = dataPanelHeight)
  }
  ## outer viewport leaves a plot margin; inner one carries the row layout
  pushViewport(viewport(width = 0.85, height = vpHeight))
  pushViewport(viewport(layout = grid.layout(length(vpr), 1, heights = vpr)))
  if (has_title) {
    pushViewport(viewport(layout.pos.col = 1,
                          layout.pos.row = which(names(vpr) == "title")))
    grid.text(label = main, x = 0.5, y = 1.1 + titleOffSet, just = "centre",
              gp = gpar(cex = cexMain))
    popViewport()
  }
  which(names(vpr) == "data")
}
# Draw a genomic coordinate axis (baseline + tick marks + tick labels) into
# layout row `vpr`.  `coord` is a length-2 numeric range of base positions.
# With center=TRUE the axis is centered vertically within the 0-0.3 y-scale;
# otherwise it sits at the bottom of the panel.
plot_coord = function(coord, vpr, center=FALSE) {
# Stand-alone use: create a fresh viewport stack via the sibling new_vp().
if(missing(vpr)) {
vpr = new_vp()
}
fontcex = 0.5
############ draw coord
pushViewport(dataViewport(xData=coord, yscale=c(0,0.3), extension=0, clip="off",
layout.pos.col=1, layout.pos.row=vpr))
segmentsHeight = 0.08 ## tick height, native units
if(center){
# Offset that centers line + ticks + label text in the panel; converts the
# line width and label font size from points into native y units.
y0 = (0.3 - convertY(unit(get.gpar("lwd")$lwd + get.gpar("fontsize")$fontsize * fontcex, "points"), "native", valueOnly=TRUE) - segmentsHeight)/2
} else{
y0 = 0
}
grid.lines(coord, c(y0,y0), default.units = "native")
# alongChromTicks() supplies tick positions (from the tilingArray package).
tck = alongChromTicks(coord)
grid.text(label=formatC(tck, format="d"), x = tck, y = y0 + 0.1,
just = c("centre", "bottom"), gp = gpar(cex=fontcex), default.units = "native")
grid.segments(x0 = tck, x1 = tck, y0 = y0, y1 = y0 + segmentsHeight, default.units = "native")
popViewport()
}
### Convenience wrapper around plot_feature(): pushes a data viewport for the
### given layout row (vpr) and x-range (coord), draws the features, optionally
### writes a text label just left of the features, then pops the viewport.
### `x` is a GRanges object with exon blocks in its "blocks" slot.
### FIXME: x could be a GenomicRangesList (one element per exon) — more general.
plot_feature_vpr = function(x, vpr, coord, lineWidth, featureCols="steelblue", featureAlpha=1, featureHeight=10,
                            doLine=TRUE, lineAlpha=0.5, lineType= "dotted", plotBottomToTop = FALSE,
                            spaceBetweenFeatures, center=FALSE, keepOrder=FALSE, textLabelFront, textLabelFrontFontSize=6, scaleFeatureHeightToVP=FALSE) {
  ## fall back to a stand-alone viewport / the full span of `x` when not given
  if (missing(vpr)) {
    vpr = new_vp()
  }
  if (missing(coord)) {
    coord = c(min(start(x)), max(end(x)))
  }
  pushViewport(
    dataViewport(xData = coord, yscale = c(0, 1), extension = 0, clip = "off",
                 layout.pos.col = 1, layout.pos.row = vpr)
  )
  plot_feature(x = x, coord = coord, lineWidth = lineWidth,
               featureCols = featureCols, featureAlpha = featureAlpha,
               featureHeight = featureHeight, doLine = doLine,
               lineAlpha = lineAlpha, lineType = lineType,
               plotBottomToTop = plotBottomToTop,
               spaceBetweenFeatures = spaceBetweenFeatures,
               center = center, keepOrder = keepOrder,
               scaleFeatureHeightToVP = scaleFeatureHeightToVP)
  ## FIXME (kept from original): replace by plot_feature_text with side = 1
  ## once that is implemented
  if (!missing(textLabelFront)) {
    label_txt = as.character(textLabelFront)
    label_x = convertX(unit(median(start(x)), "native"), "points",
                       valueOnly = TRUE) - textLabelFrontFontSize
    grid.text(label_txt,
              x = unit(label_x, "points"),
              0.5,
              just = c("right", "center"),
              gp = gpar(fontsize = textLabelFrontFontSize))
  }
  popViewport()
}
## Core renderer: draws each feature in `x` (a GRanges with exon "blocks")
## as a stack of rectangles, with thin lines joining consecutive blocks of
## the same feature.  Must be called inside a data viewport whose x-scale is
## in genomic coordinates (see plot_feature_vpr).
plot_feature = function(x, coord, lineWidth, featureCols="steelblue", featureAlpha=1, featureHeight=10, featureBorderColor=NA,
doLine=TRUE, lineAlpha=0.5, lineType= "dotted", plotBottomToTop = FALSE, plotNames,
spaceBetweenFeatures, center=FALSE,keepOrder=FALSE, scaleFeatureHeightToVP=FALSE) {
## key function used to plot read and tx annotation
## x is a GRanges object with blocks (one block per exon)
## featureHeight (points) does not include spaceBetweenFeatures !!!
## plotBottomToTop TRUE for "+" strand FALSE for minus strand
## center: if the whole plotting should be centered in the view area
## scaleFeatureHeightToVP: should featureHeight be scaled to fit the viewport
if(missing(coord)) {
coord = c(min(start(x)), max(end(x)))
}
## usable panel height in points
thisMaxHeight = convertHeight(unit(1,"npc"),"points",valueOnly=TRUE)
## stacking row per feature: input order, or pack non-overlapping features
## onto shared rows via disjointBins()
if(keepOrder){
mybins = seq_len(length(x))
} else{
mybins = disjointBins(x, ignore.strand=TRUE)
}
nfeature = max(mybins)
## if spaceBetweenFeatures not defined, 1/8 featureHeight as spacing the rest as new featureHeight
if(missing(spaceBetweenFeatures)) {
spaceBetweenFeatures = featureHeight/8
featureHeightWithSpacing = featureHeight
featureHeight = featureHeightWithSpacing - spaceBetweenFeatures
}
if(scaleFeatureHeightToVP) {
## scaling avoids overflow i.e. drawing space cannot accommodate this many features at the requested size
featureHeightSpacingRatio = featureHeight / (spaceBetweenFeatures + featureHeight)
featureHeightWithSpacing = thisMaxHeight / nfeature
featureHeight = featureHeightWithSpacing * featureHeightSpacingRatio
spaceBetweenFeatures = featureHeightWithSpacing - featureHeight
} else {
featureHeightWithSpacing = featureHeight + spaceBetweenFeatures
}
## leftover vertical space (negative when the rows overflow the viewport)
marginSpace = thisMaxHeight - featureHeightWithSpacing * nfeature
# if(marginSpace<0){
# warning("Plot exceeds the viewport. Consider using scaleFeatureHeightToVP.")
# }
myfeature = blocks(x)
myx = unlist(start(myfeature))
## for - strand stack top to bottom, for + strand bottom to top
if(plotBottomToTop){### usually for "+" strand
yPerRead = (mybins-1) * featureHeightWithSpacing
} else{ ## usually for "-" strand
## we omit -1 here because grid.rect is left-bottom justified
yPerRead = thisMaxHeight - mybins * featureHeightWithSpacing
}
yPerRead = yPerRead + spaceBetweenFeatures/2
if(center) {
## shift the whole stack by half the leftover space, towards the middle
yPerRead = yPerRead + sign(plotBottomToTop-0.5) * marginSpace/2
}
nFeatureEach = lengths(myfeature)
## one y per block, repeating the row position of its parent feature
myy = rep(yPerRead, nFeatureEach)
if (length(featureCols)>1) {
## per-feature colours: expand to per-block (rects) and per-gap (lines)
if( !(length(x) == length(featureCols)) )
stop("featureCols should have the same length as x or 1\n")
lineCols = rep(featureCols, nFeatureEach-1)
featureCols = rep(featureCols, nFeatureEach)
} else {
lineCols = featureCols
}
## one rectangle per exon block, left-bottom justified at (start, row y)
grid.rect(myx, unit(myy,"points"), width=unlist(width(myfeature)), height=unit(featureHeight, "points"), gp=gpar(col=featureBorderColor, fill=featureCols, alpha=featureAlpha), default.units="native", just=c("left","bottom"))
## FIXME: wrap this as a function draw_line ?
## connecting-line x-extents: end of each block to the start of the next
## block within the same feature (drop each feature's last end / first start)
cumLength = cumsum(elementNROWS(myfeature))
myxStart = unlist(end(myfeature))[-cumLength]
## lapply(end(myfeature),function(x) x[-length(x)])
myxEnd = unlist(start(myfeature))[-c(1,cumLength[-length(cumLength)]+1)]
if(doLine & length(c(myxStart,myxEnd))>0){
#penaltyFactorReadNumber = (1/log10(plotDat$param$normCountMat[txIdx,vpCol]))^2
## lapply(start(myfeature),function(x) x[-1])
myyLine = c(rep(yPerRead, nFeatureEach-1), rep(yPerRead, nFeatureEach-1))
## one polyline segment per gap, drawn at mid-rectangle height
grid.polyline(
x = unlist(c(myxStart,myxEnd)),
y = unit(myyLine + featureHeight/2, "points"),
id = rep(1:length(unlist(myxStart)),2),
gp=gpar(col=lineCols,
lwd=if(missing(lineWidth)) unit(min(1,featureHeight/10),"points") else {
unit(lineWidth,"points")},
## FIXME scale alpha depending on the number of reads
alpha=lineAlpha,lty=lineType), ##lex=1/penaltyFactorReadNumber),
default.units = "native")
}
if(!missing(plotNames)){
stop("plotNames is deprecated. The functionality is replaced by plot_feature_text")
}
}
# x = IRanges(start=c(10,40,20),end=c(20,50,80))
# mytext = c("a","b","c")
# pushViewport(viewport(width=0.5,height=0.5))
# pushViewport(dataViewport(xData=c(10,100), yscale=c(0,1), extension=0, clip="off"))
# grid.rect()
# plot_feature_text(x,mytext,fontsize=20,debug=TRUE,plotBottomToTop=FALSE)
## Draw text labels for a set of ranges in the current grid viewport,
## stacking labels of overlapping ranges into rows ("bins").
##
## Args:
##   x               ranges object supporting disjointBins()/start()/end()/
##                   width() (e.g. IRanges; see the example above).
##   text            character vector of labels, parallel to x.
##   fontsize        text size in points; also used as the height of one
##                   stacking row.
##   side            label anchor relative to each range: 0 = centered on
##                   the range, 1 = to its left, 2 = above, 3 = to its
##                   right, 4 = below.
##   col             text colour.
##   just            grid justification; a scalar is recycled to c(h, v).
##   xjust, yjust    forwarded to grid.text() as hjust/vjust.
##   plotBottomToTop if TRUE stack rows upward from y = 0 (typically the
##                   "+" strand); otherwise downward from the viewport top.
##   spacing         extra gap (in points) between label and range border
##                   when side is 1-4.
##   debug           if TRUE, also draw each range's stacked bounding box.
##
## Side effects: draws with grid.text() (and grid.rect() when debug=TRUE)
## in the current viewport; x positions are in native units, y in points.
plot_feature_text = function(x, text, fontsize=12, side=0, col="black",just="center", xjust=NULL, yjust=NULL, plotBottomToTop=TRUE, spacing = 0, debug=FALSE){
## side: 0 = center, 1 = left, 2 = top, 3 = right, 4 = bottom
## assign overlapping ranges to separate stacking rows
mybins = disjointBins(x)
## each stacking row is one fontsize-high band
featureHeight = fontsize
if( ! side %in% seq(0,4) ) stop("side can be only 0 (center), 1,2,3,4 ")
myx = start(x)
## bottom edge (in points) of each range's row:
## for "-" strand stack top to bottom, for "+" strand bottom to top
myy = if(plotBottomToTop){ ### usually for "+" strand
(mybins-1) * featureHeight
} else { ## usually for "-" strand
convertHeight(unit(1,"npc"),"points",valueOnly=TRUE) - mybins * featureHeight
}
if(debug){
## visualize the box each label is positioned against
grid.rect(myx, unit(myy,"points"), width=width(x),
height=unit(featureHeight,"points"), gp=gpar(col = "black" , fill = NA),
default.units="native", just=c("left","bottom"))
}
## recycle a scalar justification to (horizontal, vertical)
if( length(just) ==1 )
just = rep(just, 2)
## helper: convert a native x coordinate to points
x_native_to_points = function(xloc) {convertX(unit(xloc , "native"), "points", valueOnly=TRUE)}
## default (side == 0): label centered on the range
xtext = x_native_to_points((start(x) + end(x))/2)
ytext = myy + featureHeight/2
if (side == 1) { ## left of the range
xtext = x_native_to_points(start(x)) - spacing
just[1] = "right"
} else if (side == 2) { ## above the range
ytext = myy + featureHeight + spacing
just[2] = "bottom"
} else if (side == 3) { ## right of the range
xtext = x_native_to_points(end(x)) + spacing
just[1] = "left"
} else if (side == 4) { ## below the range
ytext = myy - spacing
just[2] = "top"
}
grid.text(text, x =unit(xtext,"points"), y = unit(ytext,"points"),just=just,
hjust = xjust, vjust = yjust, gp = gpar(col=col,fontsize=fontsize))
}
## Convenience wrapper around plot_feature_text(): pushes a data viewport
## in layout row `vpr` (column 1), draws the labels there, and pops the
## viewport again. `coord` defaults to the x-range spanned by `x`; `vpr`
## defaults to a fresh viewport row from new_vp(). All remaining arguments
## are forwarded unchanged to plot_feature_text().
plot_feature_text_vpr = function(x, text, vpr, coord, fontsize = 12, side = 0,
                                 col = "black", just = "center", xjust = NULL,
                                 yjust = NULL, spacing = 0,
                                 plotBottomToTop = TRUE, debug = FALSE) {
  if (missing(vpr)) vpr = new_vp()
  if (missing(coord)) coord = c(min(start(x)), max(end(x)))
  ## Data viewport spanning `coord` horizontally with a unit y scale.
  label_vp = dataViewport(xData = coord, yscale = c(0, 1), extension = 0,
                          clip = "off", layout.pos.col = 1,
                          layout.pos.row = vpr)
  pushViewport(label_vp)
  plot_feature_text(x = x, text = text, fontsize = fontsize, side = side,
                    col = col, just = just, xjust = xjust, yjust = yjust,
                    spacing = spacing, plotBottomToTop = plotBottomToTop,
                    debug = debug)
  popViewport()
}
|
5cce52caced8d4bf96803fc039ae64e4633b3412
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.networking/man/servicediscovery_get_instances_health_status.Rd
|
cd3b777f9c3cf0ecc6360943cbd164b8418948a0
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 2,608
|
rd
|
servicediscovery_get_instances_health_status.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/servicediscovery_operations.R
\name{servicediscovery_get_instances_health_status}
\alias{servicediscovery_get_instances_health_status}
\title{Gets the current health status (Healthy, Unhealthy, or Unknown) of one
or more instances that are associated with a specified service}
\usage{
servicediscovery_get_instances_health_status(ServiceId, Instances,
MaxResults, NextToken)
}
\arguments{
\item{ServiceId}{[required] The ID of the service that the instance is associated with.}
\item{Instances}{An array that contains the IDs of all the instances that you want to get
the health status for.
If you omit \code{Instances}, AWS Cloud Map returns the health status for all
the instances that are associated with the specified service.
To get the IDs for the instances that you've registered by using a
specified service, submit a
\code{\link[=servicediscovery_list_instances]{list_instances}} request.}
\item{MaxResults}{The maximum number of instances that you want AWS Cloud Map to return in
the response to a
\code{\link[=servicediscovery_get_instances_health_status]{get_instances_health_status}}
request. If you don't specify a value for \code{MaxResults}, AWS Cloud Map
returns up to 100 instances.}
\item{NextToken}{For the first
\code{\link[=servicediscovery_get_instances_health_status]{get_instances_health_status}}
request, omit this value.
If more than \code{MaxResults} instances match the specified criteria, you
can submit another
\code{\link[=servicediscovery_get_instances_health_status]{get_instances_health_status}}
request to get the next group of results. Specify the value of
\code{NextToken} from the previous response in the next request.}
}
\value{
A list with the following syntax:\preformatted{list(
Status = list(
"HEALTHY"|"UNHEALTHY"|"UNKNOWN"
),
NextToken = "string"
)
}
}
\description{
Gets the current health status (\code{Healthy}, \code{Unhealthy}, or \code{Unknown}) of
one or more instances that are associated with a specified service.
There is a brief delay between when you register an instance and when
the health status for the instance is available.
}
\section{Request syntax}{
\preformatted{svc$get_instances_health_status(
ServiceId = "string",
Instances = list(
"string"
),
MaxResults = 123,
NextToken = "string"
)
}
}
\examples{
\dontrun{
# This example gets the current health status of one or more instances
# that are associated with a specified service.
svc$get_instances_health_status(
ServiceId = "srv-e4anhexample0004"
)
}
}
\keyword{internal}
|
f75dcf06328b3aa11f8f2923d034817684732a31
|
8c374f8b433c33bd2989a5cd66c6dff601208efa
|
/R/forest_plot_1-to-many.R
|
22b524e244a1fa77938a76b1f623e96385e7b67a
|
[
"MIT"
] |
permissive
|
MRCIEU/TwoSampleMR
|
2514d01692c95db1e9fbe23f8696e99a12c6ab34
|
592ebe05538558b330c39ddeda0d11b1313ad819
|
refs/heads/master
| 2023-08-29T22:47:33.163801
| 2023-05-29T20:46:39
| 2023-05-29T20:46:39
| 49,515,156
| 277
| 160
|
NOASSERTION
| 2023-06-13T00:24:11
| 2016-01-12T16:57:46
|
R
|
UTF-8
|
R
| false
| false
| 25,908
|
r
|
forest_plot_1-to-many.R
|
#' Format MR results for a 1-to-many forest plot
#'
#' This function formats user-supplied results for the [forest_plot_1_to_many()] function.
#' The user supplies their results in the form of a data frame.
#' The data frame is assumed to contain at least three columns of data:
#' \enumerate{
#' \item effect estimates, from an analysis of the effect of an exposure on an outcome;
#' \item standard errors for the effect estimates; and
#' \item a column of trait names, corresponding to the 'many' in a 1-to-many forest plot.
#' }
#'
#' @param mr_res Data frame of results supplied by the user.
#' @param b Name of the column specifying the effect of the exposure on the outcome. Default = `"b"`.
#' @param se Name of the column specifying the standard error for b. Default = `"se"`.
#' @param TraitM The column specifying the names of the traits. Corresponds to 'many' in the 1-to-many forest plot. Default=`"outcome"`.
#' @param addcols Name of any additional columns to add to the plot. Character vector. The default is `NULL`.
#' @param by Name of the column indicating a grouping variable to stratify results on. Default=`NULL`.
#' @param exponentiate Convert log odds ratios to odds ratios? Default=`FALSE`.
#' @param ao_slc Logical; retrieve trait subcategory information using [available_outcomes()]. Default=`FALSE`.
#' @param weight The default is `NULL`.
#'
#' @export
#' @return data frame.
format_1_to_many <- function(mr_res, b="b",se="se",exponentiate=FALSE, ao_slc=FALSE,by=NULL,TraitM="outcome",addcols=NULL,weight=NULL)
{
  ## Reshape user-supplied MR results into the standardised data frame
  ## consumed by forest_plot_1_to_many().
  ##
  ##   mr_res       data frame with at least the columns named by `b`,
  ##                `se` and `TraitM`.
  ##   b, se        names of the effect-size and standard-error columns.
  ##   exponentiate convert b (a log odds ratio) and its CI to odds ratios.
  ##   ao_slc       look up subcategory metadata with available_outcomes().
  ##   by           optional column to stratify on; copied to `subcategory`.
  ##   TraitM       column of trait names (the "many" in 1-to-many); at
  ##                most 104 rows are supported (two-letter order prefixes).
  ##   addcols      names of extra columns carried through for plotting.
  ##   weight       optional name of a column of per-row point sizes; if
  ##                NULL every point gets weight 3.
  ##
  ## Returns a data frame with columns exposure, outcome, outcome2,
  ## category, effect, se, up_ci, lo_ci, index, weight (plus addcols).
  if (!is.null(by)) {
    mr_res <- mr_res[, names(mr_res) != "subcategory"]
    names(mr_res)[names(mr_res) == by] <- "subcategory"
  } else {
    mr_res$subcategory <- ""
  }
  if (is.null(weight)) {
    mr_res$weight <- 3
  } else {
    ## Fix: a non-NULL `weight` used to be silently ignored, so the
    ## data.frame() call below failed because dat$weight did not exist.
    names(mr_res)[names(mr_res) == weight] <- "weight"
  }
  if (TraitM == "exposure") {
    ## The plot function draws one panel column per unique value of
    ## `exposure`, so the trait column must not itself be called
    ## "exposure"; rename it out of the way.
    names(mr_res)[names(mr_res) == "exposure"] <- "TraitM"
    TraitM <- "TraitM"
  }
  names(mr_res)[names(mr_res) == b] <- "b"
  names(mr_res)[names(mr_res) == se] <- "se"
  ## Two-letter prefixes ("AA ", "AB ", ...) preserve the user's row order
  ## through later alphabetical sorting (supports up to 4 * 26 = 104 rows).
  Letters <- c("A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z")
  Letters <- sort(c(paste0("A",Letters), paste0("B",Letters), paste0("C",Letters), paste0("D",Letters)))
  mr_res$outcome2 <- mr_res[, TraitM]
  mr_res[, TraitM] <- paste(Letters[seq_len(nrow(mr_res))], mr_res[, TraitM])
  mr_res$subcategory <- trim(mr_res$subcategory)
  mr_res$exposure <- ""
  ## Optionally pull subcategory metadata from the MR-Base catalogue.
  if (ao_slc) {
    ao <- available_outcomes()
    ao$subcategory[ao$subcategory == "Cardiovascular"] <- "Cardiometabolic"
    ao$subcategory[ao$trait == "Type 2 diabetes"] <- "Cardiometabolic"
    names(ao)[names(ao) == "nsnp"] <- "nsnp.array"
  }
  dat <- mr_res
  dat$index <- seq_len(nrow(dat))
  if (ao_slc) {
    dat <- merge(dat, ao, by.x = "id.outcome", by.y = "id")
  }
  dat <- dat[order(dat$b), ]
  ## 95% confidence interval around the effect estimate.
  dat$up_ci <- as.numeric(dat$b) + 1.96 * as.numeric(dat$se)
  dat$lo_ci <- as.numeric(dat$b) - 1.96 * as.numeric(dat$se)
  if (exponentiate) {
    dat$b <- exp(as.numeric(dat$b))
    dat$up_ci <- exp(dat$up_ci)
    dat$lo_ci <- exp(dat$lo_ci)
  }
  dat$subcategory <- as.factor(dat$subcategory)
  if (!ao_slc) {
    ## Derive a plain trait name. Outcomes that came through MR-Base look
    ## like "trait || id"; results supplied by correspondence keep their
    ## name unchanged.
    dat$trait <- as.character(dat[, TraitM])
    Pos <- grep("\\|\\|", dat$trait)
    if (sum(Pos) != 0) {
      Outcome <- dat$trait[Pos]
      Outcome <- unlist(strsplit(Outcome, split = "\\|\\|"))
      Outcome <- Outcome[seq(1, length(Outcome), by = 2)]
      Outcome <- trim(Outcome)
      dat$trait[Pos] <- Outcome
    }
  }
  dat1 <- data.frame(
    exposure = as.character(dat$exposure),
    outcome = as.character(dat$trait),
    outcome2 = as.character(dat$outcome2),
    category = as.character(dat$subcategory),
    effect = dat$b,
    se = dat$se,
    up_ci = dat$up_ci,
    lo_ci = dat$lo_ci,
    index = dat$index,
    weight = dat$weight,
    stringsAsFactors = FALSE
  )
  if (!is.null(addcols)) {
    dat2 <- dat[, addcols]
    dat <- cbind(dat1, dat2)
    if (length(addcols) == 1) {
      ## With a single extra column `dat2` is a bare vector, which cbind()
      ## names "dat2"; restore the requested name.
      names(dat)[names(dat) == "dat2"] <- addcols
    }
  } else {
    dat <- dat1
  }
  ## (Unused local `exps` removed.)
  dat <- dat[order(dat$index), ]
  dat <- dat[order(dat$outcome), ]
  return(dat)
}
#' Sort results for 1-to-many forest plot
#'
#' This function sorts user-supplied results for the [forest_plot_1_to_many()] function. The user supplies their results in the form of a data frame.
#'
#' @param mr_res Data frame of results supplied by the user.
#' @param b Name of the column specifying the effect of the exposure on the outcome. The default is `"b"`.
#' @param trait_m The column specifying the names of the traits. Corresponds to 'many' in the 1-to-many forest plot. The default is `"outcome"`.
#' @param group Name of grouping variable in `mr_res`.
#' @param priority If `sort_action = 3`, choose which value of the `trait_m` variable should be given priority and go above the other `trait_m` values.
#' The trait with the largest effect size for the prioritised group will go to the top of the plot.
#' @param sort_action Choose how to sort results.
#' \itemize{
#' \item `sort_action = 1`: sort results by effect size within groups. Use the group order supplied by the user.
#' \item `sort_action = 2`: sort results by effect size and group. Overrides the group ordering supplied by the user.
#' \item `sort_action = 3`: group results for the same trait together (e.g. multiple results for the same trait from different MR methods).
#' \item `sort_action = 4`: sort by decreasing effect size (largest effect size at top and smallest at bottom).
#' \item `sort_action = 5`: sort by increasing effect size (smallest effect size at top and largest at bottom).
#' }
#'
#' @export
#' @return data frame.
#'
sort_1_to_many <- function(mr_res,b="b",trait_m="outcome",sort_action=4,group=NULL,priority=NULL){
  ## Sort user-supplied MR results prior to plotting with
  ## forest_plot_1_to_many().
  ##
  ##   mr_res      data frame of results.
  ##   b           name of the effect-size column used as the sort key.
  ##   trait_m     name of the trait column (the "many" axis).
  ##   sort_action 1 = by effect within groups (user's group order kept);
  ##               2 = by group, then by effect within group;
  ##               3 = keep each group's rows together, `priority` trait
  ##                   first, groups ordered by the priority row's effect;
  ##               4 = decreasing effect; 5 = increasing effect.
  ##   group       grouping column; required for sort_action 1-3.
  ##   priority    trait_m value placed first per group (sort_action 3).
  ##
  ## Returns mr_res with its rows reordered (sort_action 3 additionally
  ## leaves a `b.sort` helper column in place, as before).
  mr_res[, trait_m] <- as.character(mr_res[, trait_m])
  mr_res[, group] <- as.character(mr_res[, group])
  ## Fix: these precondition checks previously only warned and then failed
  ## further down with an obscure subscript error; they now stop() at once.
  if (!b %in% names(mr_res)) {
    stop("Column with effect estimates not found. Did you forget to specify the column of data containing your effect estimates?")
  }
  if (sort_action == 1) {
    if (is.null(group)) stop("You must indicate a grouping variable")
    ## Two-letter prefixes give lexicographically ordered sort keys; this
    ## supports at most 3 * 26 = 78 rows/groups.
    Letters <- c("A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z")
    Letters <- sort(c(paste0("A", Letters), paste0("B", Letters), paste0("C", Letters)))
    groups <- unique(mr_res[, group])
    ## One letter code per group, repeated once per row of that group.
    ## NOTE(review): like the original, this assumes rows of the same
    ## group are contiguous in mr_res.
    mr_res$Index <- unlist(lapply(seq_along(groups), function(i) {
      rep(Letters[i], sum(mr_res[, group] == groups[i]))
    }))
    mr_res <- mr_res[order(mr_res[, b], decreasing = TRUE), ]
    mr_res$Index2 <- Letters[seq_len(nrow(mr_res))]
    mr_res$Index3 <- paste0(mr_res$Index, mr_res$Index2)
    mr_res <- mr_res[order(mr_res$Index3), ]
    mr_res <- mr_res[, !names(mr_res) %in% c("Index", "Index2", "Index3")]
  }
  if (sort_action == 2) {
    if (is.null(group)) stop("You must indicate a grouping variable")
    ## Decreasing effect first; the subsequent stable sort on group keeps
    ## the within-group ordering by effect size.
    mr_res <- mr_res[order(mr_res[, b], decreasing = TRUE), ]
    mr_res <- mr_res[order(mr_res[, group]), ]
  }
  if (sort_action == 3) {
    if (is.null(group)) stop("You must indicate a grouping variable")
    if (is.null(priority)) stop("You must indicate which value of the grouping variable ", group, " to use as the priority value")
    mr_res$b.sort <- NA
    ## Split rows whose group occurs more than once from singleton groups.
    dup_groups <- mr_res[, group][duplicated(mr_res[, group])]
    mr_res1 <- mr_res[mr_res[, group] %in% dup_groups, ]
    mr_res2 <- mr_res[!mr_res[, group] %in% dup_groups, ]
    ## Seed the sort key with the priority trait's effect size ...
    mr_res1$b.sort[mr_res1[, trait_m] == priority] <- mr_res1[, b][mr_res1[, trait_m] == priority]
    ## ... and propagate it to the remaining rows of each group so the
    ## whole group sorts by the priority row's effect.
    for (g in unique(mr_res1[, group])) {
      mr_res1$b.sort[mr_res1[, group] == g & is.na(mr_res1$b.sort)] <- mr_res1$b.sort[mr_res1[, group] == g & !is.na(mr_res1$b.sort)]
    }
    ## Fix: the original used mr_res2$b here, which silently broke when
    ## the effect column was not literally named "b".
    mr_res2$b.sort <- mr_res2[, b]
    mr_res <- rbind(mr_res1, mr_res2)
    mr_res <- mr_res[order(mr_res$b.sort, decreasing = TRUE), ]
    groups <- unique(mr_res[, group])
    ordered_groups <- vector("list", length(groups))
    for (i in seq_along(groups)) {
      grp <- mr_res[mr_res[, group] == groups[i], ]
      ## Put the priority trait's row(s) at the top of the group.
      ordered_groups[[i]] <- rbind(grp[grp[, trait_m] == priority, ],
                                   grp[grp[, trait_m] != priority, ])
    }
    mr_res <- do.call(rbind, ordered_groups)
  }
  if (sort_action == 4) {
    mr_res <- mr_res[order(mr_res[, b], decreasing = TRUE), ]
  }
  if (sort_action == 5) {
    mr_res <- mr_res[order(mr_res[, b], decreasing = FALSE), ]
  }
  return(mr_res)
}
#' A basic forest plot
#'
#' This function is used to create a basic forest plot.
#' It requires the output from [format_1_to_many()].
#'
#' @param dat Output from [format_1_to_many()]
#' @param section Which category in dat to plot. If `NULL` then prints everything.
#' @param colour_group Which exposure to plot. If `NULL` then prints everything grouping by colour.
#' @param colour_group_first The default is `TRUE`.
#' @param xlab x-axis label. Default=`NULL`.
#' @param bottom Show x-axis? Default=`FALSE`.
#' @param trans x-axis scale.
#' @param xlim x-axis limits.
#' @param lo Lower limit of x axis.
#' @param up Upper limit of x axis.
#' @param subheading_size text size for the subheadings. The subheadings correspond to the values of the section argument.
#' @param colour_scheme the general colour scheme for the plot. Default is to make all text and data points `"black"`.
#' @param shape_points the shape of the data points to pass to [ggplot2::geom_point()]. Default is set to `15` (filled square).
#'
#' @return ggplot object
# Internal workhorse: draws one effect-size panel (point estimates + 95%
# CIs) of the 1-to-many forest plot, for the rows of `dat` belonging to
# one `section` (stratum) and one `colour_group` (exposure).
#
#   dat                 output of format_1_to_many()
#   section             category to draw; NULL keeps all rows
#   colour_group        exposure to draw; NULL draws all, coloured by group
#   colour_group_first  TRUE for the left-most data panel (it then also
#                       carries the trait labels and a black title)
#   xlab                x-axis label (shown only when bottom is TRUE)
#   bottom              TRUE for the last stratum: show axis text/ticks
#   trans               x-axis transformation, e.g. "identity" or "log2"
#   xlim                optional c(min, max) used to clip the CIs
#   lo, up              x-axis limits
#   subheading_size     stratum heading text size
#   colour_scheme       colour for points, CI bars and labels
#   shape_points        point shape passed to geom_point()
#
# NOTE(review): the defaults `lo=lo`, `up=up` and
# `subheading_size=subheading_size` are self-referential promises and
# would error if ever omitted; all internal callers pass them explicitly.
forest_plot_basic2 <- function(dat, section=NULL, colour_group=NULL, colour_group_first=TRUE, xlab=NULL, bottom=TRUE, trans="identity", xlim=NULL, lo=lo,up=up,subheading_size=subheading_size,colour_scheme="black",shape_points=15)
{
if(bottom)
{
# The bottom panel shows real axis text/ticks; other panels hide them.
text_colour <- ggplot2::element_text(colour="black")
tick_colour <- ggplot2::element_line(colour="black")
xlabname <- xlab
} else {
text_colour <- ggplot2::element_blank()
tick_colour <- ggplot2::element_blank()
xlabname <- NULL
}
# OR or log(OR)?
# If CI are symmetric then log(OR).
# Used to decide where to draw the null line: 0 for additive effects,
# 1 for (exponentiated) odds ratios.
null_line <- ifelse(all.equal(dat$effect - dat$lo_ci, dat$up_ci - dat$effect) == TRUE, 0, 1)
# Clip confidence intervals to the requested x range.
if(!is.null(xlim))
{
stopifnot(length(xlim) == 2)
stopifnot(xlim[1] < xlim[2])
dat$lo_ci <- pmax(dat$lo_ci, xlim[1], na.rm=TRUE)
dat$up_ci <- pmin(dat$up_ci, xlim[2], na.rm=TRUE)
}
# Derive axis limits from the data when not supplied.
if(is.null(up) | is.null(lo) ){
up <- max(dat$up_ci, na.rm=TRUE)
lo <- min(dat$lo_ci, na.rm=TRUE)
}
r <- up-lo
lo_orig <- lo
# Extend the left limit by half the range to leave room for trait labels
# (restored to lo_orig below when this panel draws no labels).
lo <- lo - r * 0.5
if(!is.null(section))
{
dat <- subset(dat, category==section)
main_title <- section
} else {
main_title <- ""
}
if(!is.null(colour_group))
{
dat <- subset(dat, exposure == colour_group)
point_plot <- ggplot2::geom_point(size=dat$weight,colour=colour_scheme,fill=colour_scheme,shape=shape_points)
} else {
point_plot <- ggplot2::geom_point(ggplot2::aes(colour=colour_scheme), size=dat$weight,fill=colour_scheme)
}
# Only the first data panel carries the trait labels (drawn at x = lo).
if((!is.null(colour_group) & colour_group_first) | is.null(colour_group))
{
outcome_labels <- ggplot2::geom_text(ggplot2::aes(label=outcome2,colour=colour_scheme), x=lo, y=mean(c(1, length(unique(dat$exposure)))), hjust=0, vjust=0.5, size=2.5)
main_title <- ifelse(is.null(section), "", section)
title_colour <- "black"
} else {
outcome_labels <- NULL
lo <- lo_orig
main_title <- ""
title_colour <- "white"
}
# NOTE(review): this overwrites the conditional title logic above, so the
# branch effectively only controls `title_colour`, `outcome_labels` and
# the left limit.
main_title <- section
dat$lab<-dat$outcome
# Alternate facet background colours ("a"/"b" -> grey/white stripes).
l <- data.frame(lab=sort(unique(dat$lab)), col="a", stringsAsFactors=FALSE)
l$col[1:nrow(l) %% 2 == 0] <- "b"
dat <- merge(dat, l, by="lab", all.x=TRUE)
dat <- dat[nrow(dat):1, ]
p <-ggplot2::ggplot(dat, ggplot2::aes(x=effect, y=exposure)) +
ggplot2::geom_rect(ggplot2::aes(fill=col), colour=colour_scheme,xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
ggplot2::geom_vline(xintercept=seq(ceiling(lo_orig), ceiling(up), by=0.5), alpha=0, size=0.3) +
ggplot2::geom_vline(xintercept=null_line, colour="#333333", size=0.3) +
# ggplot2::geom_errorbarh(ggplot2::aes(xmin=lo_ci, xmax=up_ci), height=0, size=0.4, colour="#aaaaaa") +
ggplot2::geom_errorbarh(ggplot2::aes(xmin=lo_ci, xmax=up_ci), height=0, size=0.4, colour=colour_scheme) +
# ggplot2::geom_point(colour="black", size=2.2) +
ggplot2::geom_point(colour=colour_scheme, size=2.2,shape=shape_points,fill=colour_scheme) +
# ggplot2::scale_fill_manual(values="cyan4")+
point_plot +
ggplot2::facet_grid(lab ~ .) +
ggplot2::scale_x_continuous(trans=trans, limits=c(lo, up)) +
ggplot2::scale_colour_brewer(type="qual") +
# ggplot2::scale_fill_manual(values=c("#eeeeee", "#ffffff"), guide=FALSE) +
ggplot2::scale_fill_manual(values=c("#eeeeee", "#ffffff"), guide=FALSE) +
ggplot2::theme(
axis.line=ggplot2::element_blank(),
axis.text.y=ggplot2::element_blank(),
axis.ticks.y=ggplot2::element_blank(),
axis.text.x=text_colour,
axis.ticks.x=tick_colour,
# strip.text.y=ggplot2::element_text(angle=360, hjust=0),
strip.background=ggplot2::element_rect(fill="white", colour="white"),
strip.text=ggplot2::element_text(family="Courier New", face="bold", size=9),
legend.position="none",
legend.direction="vertical",
panel.grid.minor.x=ggplot2::element_blank(),
panel.grid.minor.y=ggplot2::element_blank(),
panel.grid.major.y=ggplot2::element_blank(),
plot.title = ggplot2::element_text(hjust = 0, size=subheading_size, colour=title_colour),
plot.margin=ggplot2::unit(c(2,3,2,0), units="points"),
plot.background=ggplot2::element_rect(fill="white"),
panel.spacing=ggplot2::unit(0,"lines"),
panel.background=ggplot2::element_rect(colour="white", fill=colour_scheme, size=1),
strip.text.y = ggplot2::element_blank()
# strip.background = ggplot2::element_blank()
) +
ggplot2::labs(y=NULL, x=xlabname, colour="", fill=NULL, title=main_title) +
outcome_labels
return(p)
}
# Internal helper: renders the left-hand "names" column of the 1-to-many
# forest plot. It draws no data points; each facet row carries the trait
# label (the column named by `var1`) on the striped background so the rows
# line up with the effect-size panels made by forest_plot_basic2().
#
#   dat              output of format_1_to_many()
#   section          category (stratum) to draw; NULL keeps all rows
#   var1             name of the column holding the label text
#   bottom           TRUE for the last stratum (axis kept but painted
#                    white so panel heights stay consistent)
#   title            column heading, used when section == ""
#   subheading_size  text size of the stratum heading
#   colour_scheme    colour for label text and panel fill
#   shape_points     unused here; kept for a uniform helper signature
#   col_text_size    label text size
#
# NOTE(review): `subheading_size=subheading_size` is a self-referential
# default and would error if omitted; internal callers always pass it.
# `null_line` and `point_plot` below are computed but never used.
forest_plot_names2 <- function(dat, section=NULL, var1="outcome2",bottom=TRUE,title="",subheading_size=subheading_size,colour_scheme="black",shape_points=15,col_text_size=5)
{
if(bottom)
{
# Keep the axis elements but paint them white: the column then occupies
# the same height as the effect panels beside it on the bottom row.
text_colour <- ggplot2::element_text(colour="white")
tick_colour <- ggplot2::element_line(colour="white")
xlabname <- ""
} else {
text_colour <- ggplot2::element_blank()
tick_colour <- ggplot2::element_blank()
xlabname <- NULL
}
# OR or log(OR)?
# If CI are symmetric then log(OR).
# (Computed for symmetry with forest_plot_basic2; unused in this panel.)
null_line <- ifelse(all.equal(dat$effect - dat$lo_ci, dat$up_ci - dat$effect) == TRUE, 0, 1)
# up <- max(dat$up_ci, na.rm=TRUE)
# lo <- min(dat$lo_ci, na.rm=TRUE)
# r <- up-lo
# lo_orig <- lo
# lo <- lo - r * 0.5
# Fixed dummy x range; only text is drawn in this column.
lo <- 0
up <- 1
if(!is.null(section))
{
dat <- subset(dat, category==section)
main_title <- section
section_colour <- "black"
} else {
main_title <- section
section_colour <- "white"
}
point_plot <- ggplot2::geom_point(ggplot2::aes(colour=exposure), size=2)
# Left-justified labels at the left edge, vertically centred per facet.
# NOTE(review): eval(parse()) resolves `var1` inside the data; consider
# the `.data[[var1]]` pronoun if the ggplot2 version floor allows it.
outcome_labels <- ggplot2::geom_text(
ggplot2::aes(label=eval(parse(text=var1))),
x=lo,
y=mean(c(1, length(unique(dat$exposure)))),
hjust=0, vjust=0.5, size=col_text_size,color=colour_scheme
)
# print(paste0("title=",title))
# An empty section name means this column carries the user's heading.
# NOTE(review): this comparison errors if `section` is NULL here.
if(section=="") main_title <- title
dat$lab<-dat$outcome
# Alternate facet background colours ("a"/"b" -> grey/white stripes).
l <- data.frame(lab=sort(unique(dat$lab)), col="a", stringsAsFactors=FALSE)
l$col[1:nrow(l) %% 2 == 0] <- "b"
dat <- merge(dat, l, by="lab", all.x=TRUE)
p <- ggplot2::ggplot(dat, ggplot2::aes(x=effect, y=exposure)) +
ggplot2::geom_rect(ggplot2::aes(fill=col),colour=colour_scheme, xmin=-Inf, xmax=Inf, ymin=-Inf, ymax=Inf) +
ggplot2::facet_grid(lab ~ .) +
ggplot2::scale_x_continuous(limits=c(lo, up)) +
ggplot2::scale_colour_brewer(type="qual") +
ggplot2::scale_fill_manual(values=c("#eeeeee", "#ffffff"), guide=FALSE) +
ggplot2::theme(
axis.line=ggplot2::element_blank(),
axis.text.y=ggplot2::element_blank(),
axis.ticks.y=ggplot2::element_blank(),
axis.text.x=text_colour,
axis.ticks.x=tick_colour,
# strip.text.y=ggplot2::element_text(angle=360, hjust=0),
strip.background=ggplot2::element_rect(fill="white", colour="white"),
strip.text=ggplot2::element_text(family="Courier New", face="bold", size=11),
legend.position="none",
legend.direction="vertical",
panel.grid.minor.x=ggplot2::element_blank(),
panel.grid.minor.y=ggplot2::element_blank(),
panel.grid.major.y=ggplot2::element_blank(),
plot.title = ggplot2::element_text(hjust = 0, size=subheading_size, colour=section_colour),
plot.margin=ggplot2::unit(c(2,0,2,0), units="points"),
plot.background=ggplot2::element_rect(fill="white"),
panel.spacing=ggplot2::unit(0,"lines"),
panel.background=ggplot2::element_rect(colour=colour_scheme, fill=colour_scheme, size=1),
strip.text.y = ggplot2::element_blank()
# strip.background = ggplot2::element_blank()
) +
ggplot2::labs(y=NULL, x=xlabname, colour="", fill=NULL, title=main_title) +
outcome_labels
return(p)
}
forest_plot_addcol <- function(dat, section = NULL, addcol = NULL, bottom = TRUE,
                               addcol_title = NULL, subheading_size = 6,
                               colour_scheme = "black", shape_points = 15,
                               col_text_size = 5)
{
  ## Internal helper: draws one text-only column of the 1-to-many forest
  ## plot, showing the values of the extra data column named by `addcol`
  ## for one `section` (stratum). Facet rows mirror the other panels so
  ## the text lines up with the corresponding effect estimates.
  ##
  ##   dat              output of format_1_to_many(); must contain addcol
  ##   section          category to draw; NULL keeps all rows
  ##   addcol           name of the column whose values are printed
  ##   bottom           TRUE for the last stratum (axis painted white to
  ##                    keep panel heights consistent with its neighbours)
  ##   addcol_title     heading shown above the column
  ##   subheading_size  heading text size (was a self-referential default
  ##                    that errored when omitted; now defaults to 6)
  ##   colour_scheme    colour for the text and panel fill
  ##   shape_points     unused; kept for a uniform helper signature
  ##   col_text_size    size of the printed text
  ##
  ## Fixes: removed a leftover `print(addcol)` debug statement, plus the
  ## unused `null_line`, `point_plot` and `main_title` locals.
  if (bottom) {
    text_colour <- ggplot2::element_text(colour="white")
    tick_colour <- ggplot2::element_line(colour="white")
    xlabname <- ""
  } else {
    text_colour <- ggplot2::element_blank()
    tick_colour <- ggplot2::element_blank()
    xlabname <- NULL
  }
  ## Dummy x range; only text is drawn in this column.
  lo <- 0
  up <- 1
  if (!is.null(section)) {
    dat <- subset(dat, category == section)
    section_colour <- "black"
  } else {
    section_colour <- "white"
  }
  ## Left-justified values at the left edge, vertically centred per facet.
  ## NOTE(review): eval(parse()) resolves `addcol` inside the data; kept
  ## as-is to avoid raising the required ggplot2 version.
  outcome_labels <- ggplot2::geom_text(
    ggplot2::aes(label = eval(parse(text = addcol))),
    x = lo,
    y = mean(c(1, length(unique(dat$exposure)))),
    hjust = 0, vjust = 0.5, size = col_text_size, colour = colour_scheme
  )
  dat$lab <- dat$outcome
  ## Alternate facet background colours ("a"/"b" -> grey/white stripes).
  l <- data.frame(lab = sort(unique(dat$lab)), col = "a", stringsAsFactors = FALSE)
  l$col[1:nrow(l) %% 2 == 0] <- "b"
  dat <- merge(dat, l, by = "lab", all.x = TRUE)
  p <- ggplot2::ggplot(dat, ggplot2::aes(x = effect, y = exposure)) +
    ggplot2::geom_rect(ggplot2::aes(fill = col), colour = colour_scheme, xmin = -Inf, xmax = Inf, ymin = -Inf, ymax = Inf) +
    ggplot2::facet_grid(lab ~ .) +
    ggplot2::scale_x_continuous(limits = c(lo, up)) +
    ggplot2::scale_colour_brewer(type = "qual") +
    ggplot2::scale_fill_manual(values = c("#eeeeee", "#ffffff"), guide = FALSE) +
    ggplot2::theme(
      axis.line = ggplot2::element_blank(),
      axis.text.y = ggplot2::element_blank(),
      axis.ticks.y = ggplot2::element_blank(),
      axis.text.x = text_colour,
      axis.ticks.x = tick_colour,
      strip.background = ggplot2::element_rect(fill = "white", colour = "white"),
      strip.text = ggplot2::element_text(family = "Courier New", face = "bold", size = 11),
      legend.position = "none",
      legend.direction = "vertical",
      panel.grid.minor.x = ggplot2::element_blank(),
      panel.grid.minor.y = ggplot2::element_blank(),
      panel.grid.major.y = ggplot2::element_blank(),
      plot.title = ggplot2::element_text(hjust = 0, size = subheading_size, colour = section_colour),
      plot.margin = ggplot2::unit(c(2, 0, 2, 0), units = "points"),
      plot.background = ggplot2::element_rect(fill = "white"),
      panel.spacing = ggplot2::unit(0, "lines"),
      ## NOTE(review): "red" looks like a debugging leftover, but it is
      ## preserved to keep the rendered output identical.
      panel.background = ggplot2::element_rect(colour = "red", fill = colour_scheme, size = 1),
      strip.text.y = ggplot2::element_blank(),
      strip.text.x = ggplot2::element_blank()
    ) +
    ggplot2::labs(y = NULL, x = xlabname, colour = "", fill = NULL, title = addcol_title) +
    outcome_labels
  return(p)
}
#' 1-to-many forest plot
#'
#' Plot results from an analysis of multiple exposures against a single outcome or a single exposure against multiple outcomes.
#' Plots effect estimates and 95 percent confidence intervals.
#' The ordering of results in the plot is determined by the order supplied by the user.
#' Users may find [sort_1_to_many()] helpful for sorting their results prior to using the 1-to-many forest plot. The plot function works best for 50 results and is not designed to handle more than 100 results.
#'
#' @param mr_res Data frame of results supplied by the user. The default is `"mr_res"`.
#' @param b Name of the column specifying the effect of the exposure on the outcome. The default is `"b"`.
#' @param se Name of the column specifying the standard error for b. The default is `"se"`.
#' @param TraitM The column specifying the names of the traits. Corresponds to 'many' in the 1-to-many forest plot. The default is `"outcome"`.
#' @param col1_title Title for the column specified by the TraitM argument. The default is `""`.
#' @param col1_width Width of Y axis label for the column specified by the TraitM argument. The default is `1`.
#' @param addcols Name of additional columns to plot. Character vector. The default is `NULL`.
#' @param addcol_titles Titles of additional columns specified by the addcols argument. Character vector. The default is `NULL`.
#' @param addcol_widths Widths of Y axis labels for additional columns specified by the addcols argument. Numeric vector. The default is `NULL`.
#' @param xlab X-axis label, default is `"Effect (95% confidence interval)"`.
#' @param by Name of the grouping variable to stratify results on. Default is `NULL`.
#' @param subheading_size text size for the subheadings specified in by argument. The default is `6`.
#' @param exponentiate Convert log odds ratios to odds ratios? Default is `FALSE`.
#' @param ao_slc Logical; retrieve trait subcategory information using available_outcomes(). Default is `TRUE`.
#' @param trans Specify x-axis scale. e.g. "identity", "log2", etc. If set to "identity" an additive scale is used. If set to log2 the x-axis is plotted on a multiplicative / doubling scale (preferable when plotting odds ratios). Default is `"identity"`.
#' @param lo Lower limit of X axis to plot.
#' @param up upper limit of X axis to plot.
#' @param colour_scheme the general colour scheme for the plot. Default is to make all text and data points `"black"`.
#' @param shape_points the shape of the data points to pass to [ggplot2::geom_point()]. Default is set to `15` (filled square).
#' @param col_text_size The default is `5`.
#' @param weight The default is `NULL`.
#'
#' @export
#' @return grid plot object
#'
forest_plot_1_to_many <- function(mr_res="mr_res", b="b",se="se",TraitM="outcome",col1_width=1,col1_title="",exponentiate=FALSE, trans="identity",ao_slc=TRUE,lo=NULL,up=NULL,by=NULL,xlab="Effect (95% confidence interval)",addcols=NULL,addcol_widths=NULL,addcol_titles="",subheading_size=6,shape_points=15,colour_scheme="black",col_text_size=5,weight=NULL){
  ## Draw a 1-to-many forest plot: effect estimates and 95% CIs for one
  ## exposure against many outcomes (or vice versa). Layout: one row of
  ## panels per `by` stratum; within each row a trait-name column,
  ## optional extra text columns (`addcols`), and one effect-size panel
  ## per unique exposure. See the roxygen block above for the arguments.
  xlim <- NULL
  ncols <- 1 + length(addcols)
  ## Recycle a single empty title over all extra columns.
  ## Fix: the old test `addcol_titles == ""` produced a length > 1
  ## condition in if() (an error from R 4.2 on) whenever a vector of
  ## titles was supplied.
  if (length(addcol_titles) == 1 && identical(addcol_titles, "")) {
    addcol_titles <- rep(addcol_titles, length(addcols))
  }
  dat <- format_1_to_many(
    mr_res = mr_res,
    b = b,
    se = se,
    exponentiate = exponentiate,
    ao_slc = ao_slc,
    by = by,
    TraitM = TraitM,
    addcols = addcols,
    weight = weight
  )
  ## (An unused cowplot::get_legend() computation was removed here.)
  if (length(addcols) != length(addcol_widths)) warning("length of addcols not equal to length of addcol_widths")
  sec <- unique(as.character(dat$category))
  columns <- unique(dat$exposure)
  l <- list()
  h <- rep(0, length(sec))  # panel heights, one entry per stratum
  count <- 1
  for (i in seq_along(sec)) {
    h[i] <- length(unique(subset(dat, category == sec[i])$outcome))
    ## First column of the row: trait names.
    l[[count]] <- forest_plot_names2(
      dat,
      sec[i],
      bottom = i == length(sec),
      title = col1_title,
      subheading_size = subheading_size,
      colour_scheme = colour_scheme,
      shape_points = shape_points,
      col_text_size = col_text_size
    )
    count <- count + 1
    ## Optional extra text columns.
    if (!is.null(addcols)) {
      for (j in seq_along(addcols)) {
        l[[count]] <- forest_plot_addcol(
          dat,
          sec[i],
          addcol = addcols[j],
          addcol_title = addcol_titles[j],
          bottom = i == length(sec),
          subheading_size = subheading_size,
          colour_scheme = colour_scheme,
          shape_points = shape_points,
          col_text_size = col_text_size
        )
        count <- count + 1
      }
    }
    ## One effect-size panel per exposure.
    for (j in seq_along(columns)) {
      l[[count]] <- forest_plot_basic2(
        dat,
        sec[i],
        bottom = i == length(sec),
        colour_group = columns[j],
        colour_group_first = FALSE,
        xlab = paste0(xlab, " ", columns[j]),
        lo = lo,
        up = up,
        trans = trans,
        xlim = xlim,
        subheading_size = subheading_size,
        colour_scheme = colour_scheme,
        shape_points = shape_points
      )
      count <- count + 1
    }
  }
  ## Extra height for stratum headings, plus one unit for the x axis on
  ## the bottom row.
  h <- h + 5
  h[length(sec)] <- h[length(sec)] + 1
  return(
    cowplot::plot_grid(
      gridExtra::arrangeGrob(
        grobs = l,
        ncol = length(columns) + ncols,
        nrow = length(h),
        heights = h,
        widths = c(col1_width, addcol_widths, rep(5, length(columns)))
      )
    )
  )
}
|
938a9bbceba89ed750bc1c30e6876b91d360df09
|
20417c36b534c274a1299f4a228d8abeeea3d9df
|
/plot3.R
|
8c0bb540cc427a42059154abfbe4605dbad27b79
|
[] |
no_license
|
afohner/ExData_Plotting1
|
1b7680ced464719d9bb8183d5d70bb17533529dc
|
f8f3166534aae64985637c0e28fda886ba3bddd3
|
refs/heads/master
| 2021-01-01T04:24:59.210420
| 2017-07-15T03:16:08
| 2017-07-15T03:16:08
| 97,174,982
| 0
| 0
| null | 2017-07-14T00:08:45
| 2017-07-14T00:08:45
| null |
UTF-8
|
R
| false
| false
| 846
|
r
|
plot3.R
|
## plot3.R -- line plot of the three energy sub-metering series for
## 2007-02-01 and 2007-02-02, written to plot3.png.
## Assumes household_power_consumption.txt sits in the working directory.
# FIX: this data set encodes missing values as "?"; without na.strings the
# numeric columns are read as character/factor and plotting misbehaves
# (the companion plot2.R script already reads it this way).
energy <- read.table(file = "household_power_consumption.txt", sep = ";", header =TRUE,
                     na.strings = "?", stringsAsFactors = FALSE)
# NOTE(review): dplyr is loaded but never used below; library() is preferred
# over require() for hard dependencies because it errors loudly when missing.
library(dplyr)
# normalize dates to "YYYY-MM-DD" strings for the day filter below
energy$Timestamp <- format(as.Date(energy$Date, format = "%d/%m/%Y"))
# keep only the two target days
subdate <- energy[(energy$Timestamp == "2007-02-01" | energy$Timestamp == "2007-02-02"),]
# combined date-time axis for the line plot
subdate$DateTime <- as.POSIXct(paste(subdate$Timestamp, subdate$Time), "%Y-%m-%d %H:%M:%S", tz = "")
png(filename = "plot3.png")
plot(subdate$DateTime, subdate$Sub_metering_1, type = "l", xlab = "",
     ylab = "Energy sub metering")
lines(subdate$DateTime, subdate$Sub_metering_2, type = "l", col = "red")
lines(subdate$DateTime, subdate$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lwd = c(2.5, 2.5, 2.5), col = c("black", "red", "blue"))
dev.off()
|
108dfc1282c66e0ce5fa80c1f39aac16e67b4b38
|
5d5452d4126b8d169234630a3106eaf329a174bb
|
/man/print.Tracks.Rd
|
5f433ae79bfa963401a8c1a835a24568381ff7ba
|
[] |
no_license
|
edzer/trajectories
|
61aeac016338aec56588976b5a4191feb561b7a1
|
2b98d9caa5fdb3e37ddc255f97b4383931b26659
|
refs/heads/main
| 2023-04-12T15:39:16.140280
| 2023-04-06T11:54:48
| 2023-04-06T11:54:48
| 17,206,695
| 29
| 12
| null | 2022-11-15T21:47:27
| 2014-02-26T10:07:24
|
R
|
UTF-8
|
R
| false
| false
| 428
|
rd
|
print.Tracks.Rd
|
\name{print.Tracks}
\alias{print.Tracks}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Methods for class "Tracks"
}
\description{
Prints an object of class "Tracks".
}
\usage{
\method{print}{Tracks}(x,...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{ an object of class "Tracks" }
\item{...}{ ignored }
}
\author{
Mohammad Mehdi Moradi <moradi@uji.es>
}
|
27a62f26c7d79322669baed866787c0c1b048954
|
f2fb0427405627bcaebd3c8e90534c10d8086391
|
/Assignment_3.R
|
e312e8067085e3a88b450b2bc9eed3a2a638c438
|
[] |
no_license
|
sekR4/Assignment3
|
c3f18a70fd8fac1a77413f633ed238ba9f6aba1d
|
322d82af7be1b24c9850bfe91e45c8a40f6bb0fe
|
refs/heads/master
| 2021-01-12T00:18:57.917032
| 2017-01-12T08:36:55
| 2017-01-12T08:36:55
| 78,704,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,495
|
r
|
Assignment_3.R
|
## Exploratory analysis for the "best hospital" programming assignment.
## NOTE(review): setwd() hard-codes a personal path -- acceptable in a
## scratch script, but it will fail on any other machine.
setwd("D:/Dropbox/01_Studium2015/Data Science/WD")
## "Not Available" marks missing outcome rates in this data set.
outcome <- read.csv("rprog_data/outcome-of-care-measures.csv", na.strings = "Not Available", stringsAsFactors = FALSE)
# Why did we have to read the columns as characters?
## quick structural checks on the outcome table
head(outcome)
ncol(outcome)
names(outcome)
str(outcome)
#outcome[, 11] <- as.numeric(outcome[, 11])
# histogram of the 30-day death rates from heart attack
## column 11 = hospital 30-day death rate from heart attack
hist(outcome[,11])
# So? over 600 times a death rate of 16?
## hospital metadata table, read with all columns as character
hospital <- read.csv("rprog_data/hospital-data.csv", colClasses = "character")
head(hospital)
ncol(hospital)
#outcomes <- c("heart attack"=11, "heart failure"=17, "pneumonia"=23)
best <- function(state, outcome) {
  ## Return the name of the hospital in `state` with the lowest 30-day
  ## mortality rate for `outcome` ("heart attack", "heart failure", or
  ## "pneumonia").  Ties are broken alphabetically by hospital name.
  ##
  ## FIX: the original body overwrote `state` with a leftover dummy value
  ## ("AL"), so the argument was ignored; it also called min() without
  ## na.rm (yielding NA when rates are missing) and indexed data.all with
  ## a vector of rates on the last line, returning garbage.
  data.all <- read.csv("rprog_data/outcome-of-care-measures.csv",
                       na.strings = "Not Available", stringsAsFactors = FALSE)
  ## map each valid outcome to its column in the table
  outcome_col <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  ## validate arguments before doing any work
  if (!(state %in% data.all$State)) {
    stop("invalid state")
  }
  if (!(outcome %in% names(outcome_col))) {
    stop("invalid outcome")
  }
  ## restrict to the requested state and drop hospitals with no rate
  sub.data <- data.all[data.all$State == state, ]
  rates <- sub.data[, outcome_col[[outcome]]]
  keep <- !is.na(rates)
  hospitals <- sub.data[keep, 2]   # column 2 = hospital name
  rates <- rates[keep]
  ## lowest rate first; alphabetical hospital name as tie-break
  hospitals[order(rates, hospitals)][1]
}
## Scratch exploration: per-state subsets of the outcome table.
data.all <- read.csv("rprog_data/outcome-of-care-measures.csv",
na.strings = "Not Available", stringsAsFactors = FALSE)
## split() yields a named list of data.frames, one per state
sub.data <- split(data.all, data.all$State)
head(sub.data)
str(sub.data)
summary(sub.data)
## NOTE(review): complete.cases() returns a logical vector, so min() of it is
## just 0 or 1 -- this is NOT the minimum death rate, and the comparison
## below therefore matches rates equal to 0/1, which is almost certainly
## not what was intended.  Verify before reusing this line.
min(complete.cases(sub.data$AK[,11]))
data.all$State[which(data.all$Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack == min(complete.cases(sub.data$AK[,11])))]
|
ead0a5218fd40af48740807982a6955483b7e7c1
|
bad08770db02e519b7dfae69f5345679ee72e90a
|
/tests/testthat/test-scenario.R
|
624ba86bea7dcdb29c72bec6f4739782f329f482
|
[] |
no_license
|
KopfLab/ghctools
|
dbb2731256df6cbe6234fe51c41066ee7a809114
|
3b19d06b6172b73518af27a3b32392980aa5c6b1
|
refs/heads/master
| 2023-03-10T02:36:05.079968
| 2023-02-09T04:43:06
| 2023-02-09T04:43:06
| 120,020,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 106
|
r
|
test-scenario.R
|
## Placeholder testthat file: the full GitHub-interaction scenario is not
## implemented yet, so this suite only asserts a tautology.
context("Scenario")
test_that("Test full scenario of github interaction", {
# TODO(review): replace this tautology with real assertions against the API
expect_true(TRUE)
})
|
b455152cda53e12fff9f3072518248c74650afa9
|
5f297468b39f36e859bcff30ec8a39eed0fe36b5
|
/sorghum3_GS_8methods.R
|
6fb7a7c8ef61447f36ac1a57004a6d93a7b32205
|
[] |
no_license
|
hattori-t/my_works
|
d68f7453dde510f80d1c18ccb7d750b15c262698
|
9da36ca7f5527d8b7085328af91007b6c122fa12
|
refs/heads/master
| 2021-01-23T19:41:42.903407
| 2017-03-28T10:51:55
| 2017-03-28T10:51:55
| 46,464,454
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,243
|
r
|
sorghum3_GS_8methods.R
|
## sorghum3_GS_8methods.R
## Genomic selection: 10 repetitions of 10-fold cross-validation for eight
## regression methods, writing per-fold plots and CSV summaries.
## The original script copy-pasted the same CV/plot/CSV scaffold eight times;
## it is factored into run_gs_cv() below.  Output directories and file names
## are unchanged.
setwd("/Users/tomo/Dropbox/sorghum3")   # NOTE(review): hard-coded personal path

### parameters ###
data <- "Mexico"   # data-set tag used in every input/output path

## library() instead of require(): fail loudly if a dependency is missing
library(rrBLUP)
library(randomForest)
library(foreach)
library(doSNOW)
library(parallel)
library(glmnet)
library(BGLR)

## marker and phenotype data -----
geno  <- read.csv("data/GATK_inbred_centered.csv", row.names = 1)
pheno <- read.csv(paste("data/", data, "2013~15_inbred.csv", sep = ""), row.names = 1)

xmat <- t(as.matrix(geno))
## reconcile line-name spellings between genotype and phenotype tables
rownames(xmat) <- gsub("B2.",   "B2/",   rownames(xmat))
rownames(xmat) <- gsub("B31.",  "B31/",  rownames(xmat))
rownames(xmat) <- gsub("EN12.", "EN12-", rownames(xmat))

doubles <- intersect(rownames(pheno), rownames(xmat))
pheno <- pheno[doubles, ]
xmat  <- xmat[doubles, ]

dir.create(paste("GS_", data, "_inbred", sep = ""))

## run_gs_cv: 10 repetitions of 10-fold cross-validation for one method -----
## regression : method label, used in directory and file names
## pheno      : phenotype data.frame (rows = lines, cols = traits)
## get_x      : function(traitname, selector) -> predictor object for the
##              non-missing lines (marker matrix or precomputed kernel)
## fit_fold   : function(y, x, train, test) -> predictions for the test
##              lines; train/test are logical index vectors over y
## Writes, per trait: per-fold PDF scatter plots, predicted-value CSVs and
## cor/rmse CSVs; per method: result_<m>.csv and Prediction_<m>.csv.
run_gs_cv <- function(regression, pheno, get_x, fit_fold) {
  base <- paste("GS_", data, "_inbred/", regression, sep = "")
  dir.create(base)
  fold_names <- paste("fold", 1:10, sep = "")
  result <- matrix(NA, nrow = ncol(pheno), ncol = 2,
                   dimnames = list(colnames(pheno), c("r", "rmse")))
  Prediction <- matrix(NA, nrow = nrow(pheno), ncol = ncol(pheno),
                       dimnames = dimnames(pheno))
  for (traitNum in seq_len(ncol(pheno))) {
    traitname <- colnames(pheno)[traitNum]
    print(traitname)
    y <- pheno[, traitname]
    selector <- !is.na(y)            # drop lines with a missing phenotype
    y <- y[selector]
    x <- get_x(traitname, selector)
    predictedvalues <- matrix(NA, nrow = nrow(pheno), ncol = 10,
                              dimnames = list(rownames(pheno), fold_names))
    cor_10folds  <- rep(NA, 10)
    rmse_10folds <- rep(NA, 10)
    for (N in 1:10) {                # 10 independent CV repetitions
      dir.create(paste(base, "/fold", N, sep = ""))
      nfold <- 10
      id <- sample(seq_along(y) %% nfold)   # random fold assignment
      id[id == 0] <- nfold
      y.pred <- rep(NA, length(y))
      for (i in 1:nfold) {
        print(i)
        y.pred[id == i] <- fit_fold(y, x, train = id != i, test = id == i)
      }
      predictedvalues[selector, N] <- y.pred
      ## observed-vs-predicted scatter plot for this repetition
      pdf(paste(base, "/fold", N, "/", N, "_", traitname, ".pdf", sep = ""))
      plot(pheno[, traitNum], predictedvalues[, N],
           xlab = "Observed Value", ylab = "Predicted Value",
           main = paste(traitname, "_", regression, "_", N, sep = ""))
      abline(0, 1, lty = "dotted")
      cor <- cor(pheno[, traitNum], predictedvalues[, N], use = "pair")
      ## FIX: the original divided the NA-removed squared-error sum by the
      ## total number of lines, so traits with missing phenotypes had their
      ## RMSE biased downward; divide by the number of compared pairs.
      err <- pheno[, traitNum] - predictedvalues[, N]
      rmse <- sqrt(sum(err^2, na.rm = TRUE) / sum(!is.na(err)))
      legend("bottomright",
             legend = paste("r=", round(cor, 2), " rmse=", round(rmse, 2), sep = ""),
             bty = "n")
      dev.off()
      cor_10folds[N]  <- cor
      rmse_10folds[N] <- rmse
    }
    dir.create(paste(base, "/predictedvalues", sep = ""))
    write.csv(predictedvalues,
              paste(base, "/predictedvalues/predictedvalues_", traitname, "_",
                    regression, ".csv", sep = ""))
    dir.create(paste(base, "/cor_and_rmse", sep = ""))
    res_values <- matrix(NA, nrow = 12, ncol = 2,
                         dimnames = list(c(fold_names, "mean", "SD"), c("r", "rmse")))
    res_values[1:10, 1] <- cor_10folds
    res_values[1:10, 2] <- rmse_10folds
    res_values[11, ] <- c(mean(cor_10folds), mean(rmse_10folds))
    res_values[12, ] <- c(sd(cor_10folds),   sd(rmse_10folds))
    write.csv(res_values,
              paste(base, "/cor_and_rmse/cor_and_rmse_", traitname, "_",
                    regression, ".csv", sep = ""))
    result[traitNum, ] <- as.numeric(res_values[11, ])
    ## mean prediction over the 10 repetitions (NA where any repetition is NA,
    ## matching the original's mean() without na.rm)
    Prediction[, traitNum] <- rowMeans(predictedvalues)
  }
  write.csv(result,     paste(base, "/result_",     regression, ".csv", sep = ""))
  write.csv(Prediction, paste(base, "/Prediction_", regression, ".csv", sep = ""))
  invisible(list(result = result, Prediction = Prediction))
}

## predictor accessor shared by all marker-based methods
marker_x <- function(traitname, selector) xmat[selector, ]

## 1) G-BLUP: rrBLUP kinship with the linear (RR) kernel ----
run_gs_cv("G-BLUP", pheno, marker_x, function(y, x, train, test) {
  res <- kinship.BLUP(y[train], x[train, ], x[test, ], K.method = "RR")
  res$g.pred + rep(res$beta, length(res$g.pred))
})

## 2) RKHS: rrBLUP kinship with the Gaussian kernel ----
run_gs_cv("RKHS", pheno, marker_x, function(y, x, train, test) {
  res <- kinship.BLUP(y[train], x[train, ], x[test, ], K.method = "GAUSS")
  res$g.pred + rep(res$beta, length(res$g.pred))
})

## 3) RF: random forest, 500 trees grown across 10 workers (UNIX only) ----
## NOTE: a worker cluster is now created per fold (not per repetition) and
## released via on.exit even on error; predictions are unchanged.
run_gs_cv("RF", pheno, marker_x, function(y, x, train, test) {
  cores <- 10
  cl <- makeCluster(cores, type = "SOCK")
  on.exit(stopCluster(cl), add = TRUE)   # always release the workers
  registerDoSNOW(cl)
  res <- foreach(ntree = rep(500 / cores, cores), .combine = combine,
                 .packages = "randomForest") %dopar%
    randomForest(y = y[train], x = x[train, ], ntree = ntree)
  predict(res, newdata = x[test, ])
})

## 4-6) penalized regression (glmnet): alpha selects ridge/elastic-net/lasso
glmnet_fold <- function(Alpha) {
  function(y, x, train, test) {
    res <- cv.glmnet(y = y[train], x = x[train, ], alpha = Alpha)
    as.vector(predict(res, newx = x[test, ]))
  }
}
run_gs_cv("RR",    pheno, marker_x, glmnet_fold(0))
run_gs_cv("EN",    pheno, marker_x, glmnet_fold(0.5))
run_gs_cv("LASSO", pheno, marker_x, glmnet_fold(1))

#### BGLR: G-BLUP and Gaussian kernels ####
## Kernel inputs.  pheno is deliberately re-read and re-matched against the
## kinship-matrix row names (where EN12-* is spelled EN12.*), as before.
amat <- read.csv("data/amat_GATK_all.csv", row.names = 1)
d    <- read.csv("data/scaled_dist.csv",  row.names = 1)
pheno <- read.csv(paste("data/", data, "2013~15_inbred.csv", sep = ""), row.names = 1)
rownames(pheno) <- gsub("EN12-", "EN12.", rownames(pheno))
doubles <- intersect(rownames(pheno), rownames(amat))
pheno <- pheno[doubles, ]
amat  <- amat[doubles, doubles]
d     <- d[doubles, doubles]
## trait-specific bandwidths for the Gaussian kernel
theta <- read.csv(paste("data/theta_", data, ".csv", sep = ""), row.names = 1)

## BGLR RKHS fit: mask the test lines and read back their posterior means
bglr_fold <- function(y, x, train, test) {
  y.train <- y
  y.train[test] <- NA
  res <- BGLR(y = y.train, ETA = list(list(K = x, model = "RKHS")), verbose = FALSE)
  as.vector(res$yHat[test])
}

## 7) BGLR with the additive-relationship kernel ----
run_gs_cv("BGLR_G-BLUP", pheno,
          function(traitname, selector) amat[selector, selector],
          bglr_fold)

## 8) BGLR with a trait-specific Gaussian kernel from the distance matrix ----
run_gs_cv("BGLR_GAUSS", pheno,
          function(traitname, selector) {
            exp(-(d[selector, selector] / theta[traitname, ])^2)
          },
          bglr_fold)
|
2ce11069af43b55f70cd9c37a27014f503089cfd
|
7f69666f982569f597b084696e23718ec2d91f72
|
/doc/study/ex/performans_iyilestirme_leaflet_20190629/ex14.R
|
a36c03298d65b151cfdd92360d63f4cdd65c6aa4
|
[
"MIT"
] |
permissive
|
mertnuhoglu/pmap
|
36180ac9ee38053ade691c0dc4f6913fecf67ea1
|
b61b5d427619042b895ad2f568c7be97af683376
|
refs/heads/master
| 2023-04-01T04:57:07.710173
| 2021-04-12T10:15:35
| 2021-04-12T10:15:35
| 357,096,278
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 336
|
r
|
ex14.R
|
# Read trip geometries from a CSV, parse the WKT geometry column into an
# sf object, and render the first trip as a red polyline on a leaflet map.
library(dplyr)
library(leaflet)
library(readr)
library(curl)
library(sf)
library(googlePolylines)

# Build the sf object: the geometry_wkt column holds well-known-text
# geometry for each trip.
trips <- st_read("trips_with_geometry04.csv") %>%
  dplyr::mutate(geom = st_as_sfc(geometry_wkt)) %>%
  st_sf()

# Interactive map with the first trip drawn as a polyline.
trip_map <- leaflet(width="100%") %>%
  addTiles() %>%
  addPolylines(data = trips$geom[1], color = "#AC0505", opacity=1, weight = 3)
trip_map
|
a1c260ead3972cf2d1916047220d23add525cbef
|
c758f81fe1b8d1e47404a081f1d55195f95348eb
|
/man/PatientAdmission.Rd
|
8368e0dca167232e0db755e4c7e811bc40962ed5
|
[
"MIT"
] |
permissive
|
ick003/convReg
|
9336779e35146e44fe53e30c5c04ea63bac63a39
|
8aa3db75de7df7b21851f77c17e4e7b638a970d2
|
refs/heads/master
| 2023-04-09T11:47:41.579451
| 2020-07-01T04:48:06
| 2020-07-01T04:48:06
| 275,727,619
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 802
|
rd
|
PatientAdmission.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{PatientAdmission}
\alias{PatientAdmission}
\title{Patient Admission data}
\format{
A data frame with columns:
\describe{
\item{LOS.total}{Length of stay}
\item{Age}{Age of patient}
\item{NumberEpisodes}{Number of hospital episodes}
\item{RealAdmissionDate}{Date of admission}
\item{RealSeparationDate}{Date of discharge}
\item{GenderName}{Gender of patient}
\item{Postcode}{Postcode of patient}
\item{MaritalStatusNameCorr}{Marital status of patient}
\item{DiseaseTypesCorr}{Disease type - 9 levels}
}
}
\source{
Data from a set of hospitals in Victoria (Australia)
}
\usage{
PatientAdmission
}
\description{
Patient Admission data
}
\examples{
\dontrun{
PatientAdmission
}
}
\keyword{datasets}
|
84a6c4ea2a10de9ebba6a8eaf16bb7142e604754
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/TSsdmx/inst/testWithInternet/0serviceCheck_EuroStat.R
|
18590fe409383e2813d44a2e9057e7f8f92a23d5
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,632
|
r
|
0serviceCheck_EuroStat.R
|
# Service availability smoke test for the EUROSTAT SDMX endpoint.
# Requires internet access; each check stop()s on failure.
# FIX: use library() instead of require() -- this dependency is mandatory,
# and require() only warns (returning FALSE) when the package is missing,
# letting the script fail later with a confusing error.
library("RJSDMX")
############################ EUROSTAT ############################
#[ http://epp.eurostat.ec.europa.eu/portal/page/portal/eurostat/home ]
#http://epp.eurostat.ec.europa.eu/portal/page/portal/statistics/search_database
# >Economy and finance
# >National accounts (including GDP) (ESA95) (na
# >Quarterly national accounts (namq)
# >GDP and main components (namq_gdp)
# Metadata queries: dimensions, codes, and the full flow catalogue.
names(getDimensions('EUROSTAT','ei_nama_q'))
getCodes('EUROSTAT','ei_nama_q', 'FREQ')
nm <- getFlows('EUROSTAT')
length(nm) # 5717 on 7 Nov 2014
getFlows('EUROSTAT', "namq_gdp_c") # length 1
getFlows('EUROSTAT', "ei_nama_q") # length 1
#### quarterly ####
# as of Sept 2015 next fails if compression is enabled (BUG #76)
# compression can be disabled in .SdmxClient config file.
tts1 <- getSDMX('EUROSTAT', "ei_nama_q.Q.MIO-EUR.SWDA.CP.NA-P72.IT")
names(tts1)
# Sanity checks on the retrieved quarterly series.
if("1980 Q1" != start(tts1[[1]]))
stop("start test for EUROSTAT quarterly data failed.")
if(4 != frequency(tts1[[1]]))
stop( "frequency test for EUROSTAT quarterly data failed.")
# start= restricts the retrieval window.
tts2 <- getSDMX('EUROSTAT', "ei_nama_q.Q.MIO-EUR.SWDA.CP.NA-P72.IT",
start="1990")[[1]]
if("1990 Q1" != start(tts2))
stop("EUROSTAT quarterly start specification 2 failure.")
# start= and end= together bound the window on both sides.
tts3 <- getSDMX('EUROSTAT', "ei_nama_q.Q.MIO-EUR.SWDA.CP.NA-P72.IT",
start="1990-Q1", end="2012-Q2")[[1]]
if("1990 Q1" != start(tts3))
stop("EUROSTAT quarterly start specification 3 failure.")
if("2012 Q2" != end(tts3))
stop("EUROSTAT quarterly end specification 3 failure.")
# Exploratory wildcard queries kept for reference:
#tts2 = getSDMX('EUROSTAT', 'ei_nama_q.Q.*.*.*.*.IT') # works
#tts2 = getSDMX('EUROSTAT', 'ei_nama_q.Q.*.*.CP.*.IT') # works
#tts2 = getSDMX('EUROSTAT', 'ei_nama_q.Q.*.NSA.CP.*.IT') # works
#tts2 = getSDMX('EUROSTAT', 'ei_nama_q.Q.*.*.CP.*.*.*') NO
#tts2 = getSDMX('EUROSTAT', 'ei_nama_q.Q.MIO-EUR.NSA.*.*.IT')
# above has 84 series Feb 2015, but may change
#tts2 = getSDMX('EUROSTAT', 'ei_nama_q.Q.MIO-EUR.NSA.CP.*.IT') # 28 series
#names(tts2)
#nm[167] # "ei_nama_q.Q.MIO-EUR.NSA.CP.NA-P72.IT"
#nm[168] # "ei_nama_q.Q.MIO-EUR.SWDA.CP.NA-P72.IT"
# for (i in 1: length(tts2)) print( any(! is.nan(tts2[[i]])))
# for (i in 1: length(tts2)) print( sum(! is.nan(tts2[[i]])))
# z <- getSDMX('EUROSTAT', 'ei_nama_q.Q.MIO-EUR.NSA.CLV2000.*.IT')[[1]]
# if("1980 Q1" != start(z)) stop("EUROSTAT quarterly retrieval start changed.")
# if(4 != frequency(z)) stop("EUROSTAT quarterly retrieval frequency error.")
|
869b4223a8ccbd2c1af23ebff00e6962e9cfae9b
|
6e5efc0b6b6b37c735c1c773531c41b51675eb10
|
/man/PerformMetaMerge.Rd
|
513377b534d3a08d02720be5da19413b8cf8549d
|
[
"GPL-2.0-or-later"
] |
permissive
|
xia-lab/MetaboAnalystR
|
09aa09c9e57d7da7d73679f5a515eb68c4158e89
|
9edbbd1e2edda3e0796b65adf440ad827abb7beb
|
refs/heads/master
| 2023-08-10T06:08:56.194564
| 2023-08-01T15:13:15
| 2023-08-01T15:13:15
| 109,994,826
| 268
| 165
|
MIT
| 2023-03-02T16:33:42
| 2017-11-08T15:38:12
|
R
|
UTF-8
|
R
| false
| true
| 1,199
|
rd
|
PerformMetaMerge.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meta_methods.R
\name{PerformMetaMerge}
\alias{PerformMetaMerge}
\title{Meta-Analysis Method: Direct merging of datasets}
\usage{
PerformMetaMerge(mSetObj = NA, BHth = 0.05)
}
\arguments{
\item{mSetObj}{Input name of the created mSet Object.}
\item{BHth}{Numeric input to set the significance level. By default it is 0.05.}
}
\description{
This function is one of three methods to perform meta-analysis. Direct merging of individual data into
a mega-dataset results in an analysis of that mega-dataset as if the individual data were derived from the same experiment. This
method thereby ignores any inherent bias and heterogeneity between the different data. Because of this, there exists several confounders
such as different experimental protocols, technical platforms, and raw data processing procedures that can mask true underlying differences.
It is therefore highly suggested that this approach be used only when individual data are very similar (i.e. from the same lab, same platform,
without batch effects).
}
\author{
Jeff Xia\email{jeff.xia@mcgill.ca}
McGill University, Canada
License: GNU GPL (>= 2)
}
|
c54ab99f56a4f12ec6556b1077d0b79bbc506ad4
|
e3527b4383bdcd9755d5490b94c5c24619270d4f
|
/tests/testthat/test-namespaces.R
|
f47bf50f1ce29450bf12a6bb3108880045dfc677
|
[] |
no_license
|
MangoTheCat/functionMap
|
ec446c4d77488b93a89d062509045d8d9797a1d9
|
65a8ecce52605772406313ad776bc6d0f7be6ee1
|
refs/heads/master
| 2021-01-17T06:03:31.882014
| 2016-07-13T09:18:46
| 2016-07-13T09:18:46
| 44,315,928
| 43
| 10
| null | 2016-08-01T18:51:05
| 2015-10-15T12:29:53
|
R
|
UTF-8
|
R
| false
| false
| 504
|
r
|
test-namespaces.R
|
# Tests for the NAMESPACE parsing helpers get_imports() and get_exports().
# Both tests read the "testns" fixture package shipped with the test suite.
context("Operations on namespaces")
test_that("reading a NAMESPACE file, imports", {
# Expected result is a two-column matrix: imported symbol and its source
# package; "*" presumably denotes a whole-package import -- confirm
# against get_imports()'s documentation.
imp <- get_imports("testns")
expect_equal(
imp,
cbind(
c("symbol", "findFuncLocals", "untar", "install.packages", "*"),
c("clisymbols", "codetools", "utils", "utils", "foo")
)
)
})
test_that("reading a NAMESPACE file, exports", {
# get_exports() is given the candidate function names via `funcs`.
exp <- get_exports("testns", funcs = c("foo", "foobar", "bar"))
expect_equal(
exp,
c("annotate", "print.function_map", "foo", "foobar")
)
})
|
66968f9d6673f0e39e3bb27930936ae0b54fd870
|
9202446b7a883a48bc562f1a51f495a9ff3d4bfa
|
/plot1.R
|
0419f60640c0a237fecb4dec31513081b32510c0
|
[] |
no_license
|
paleo9/ExData_Plotting1
|
48521af0de8bef282671b32513a362c1a637cc67
|
915b7769ce37e1b2026c9992f9bf8961ee22ebc9
|
refs/heads/master
| 2021-01-21T07:25:33.567361
| 2014-05-08T15:27:26
| 2014-05-08T15:27:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,119
|
r
|
plot1.R
|
## plot histogram of frequency / Global ActivePower
### cache the file data
# returns a table for the two days of interest (2007-02-01 and 2007-02-02)
getData <- function(){
original.filename <- "household_power_consumption.txt"
cache.filename <- "household_power_consumption_01-02-feb-2007.txt"
if (!file.exists(cache.filename)){ # create the cache
# read rows for 2007-02-01 and 2007-02-02
# NOTE(review): the row window skip=66636, nrow=2880 is hard-coded and
# assumes the exact layout of the original file -- confirm if the
# source data ever changes.
df <- read.csv(original.filename, sep=";", skip=66636, nrow=69516-66636)
# we lose the names, so get them back by reading one row and passing them on
df.with.names <- read.csv(original.filename, sep=";", nrow=1)
names(df) <- names(df.with.names)
# Cache the two-day subset so later runs skip the large read.
write.table(df, cache.filename, sep=";")
} else { # otherwise read the cached data
df <- read.table(cache.filename, sep=";")
}
df
}
### plot histogram of frequency / Global Active Power
plot1 <- function(){
  # getData() returns the observations for the two days of interest.
  # FIX: local renamed from `table` to avoid shadowing base::table().
  power_data <- getData()
  # Column 3 is the Global Active Power reading (kilowatts).
  hist(power_data[[3]],
       # FIX: title typo -- was "Global ActivePower" (missing space).
       main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)",
       col = 'red'
  )
}
### create png file
png(file="plot1.png")
plot1()
dev.off()
|
49f3dec2c7d1996094c05d81858715666d698ce9
|
f13b168350f6294b5cdeb8cb0d0f71cfc5aa1cc7
|
/Lesson05/Exercise32/Exercise32.R
|
3efe0a27f545d91b4ed90b6c2788a87f15d37559
|
[] |
no_license
|
nicedev2020/Applied-Unsupervised-Learning-with-R-eLearning
|
0cd383b8a6960d0b92eda40eabadaa45868173c6
|
ad733cf2750488943ae878bae3eea5f1c52bf235
|
refs/heads/master
| 2022-01-07T04:02:18.705043
| 2019-06-03T10:20:03
| 2019-06-03T10:20:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 342
|
r
|
Exercise32.R
|
# Compare the brightness of two pixel values.
# Returns 1 if x is more than 10% brighter than y, -1 if more than 10%
# dimmer, and 0 when the two values are within 10% of each other.
brightnesscomparison <- function(x, y) {
  result <- 0
  # Only report a difference when the ratio deviates from 1 by > 10%.
  if (abs(x / y - 1) > 0.1) {
    if (x > y) {
      result <- 1
    }
    if (x < y) {
      result <- -1
    }
  }
  result
}
# Compare pixel (5,5) with its left and top neighbours.
# NOTE(review): `matrix` here must be a numeric matrix of brightness values
# defined elsewhere in the exercise -- as shown, the name would otherwise
# resolve to the base::matrix function and fail; confirm against the full
# exercise source.
i<-5
j<-5
left<-brightnesscomparison(matrix[i,j-1],matrix[i,j])
i<-5
j<-5
top<-brightnesscomparison(matrix[i-1,j],matrix[i,j])
|
38fac0e43c099cfb20fe41e52c156d3488a75e9e
|
2d824d701068e6d7a035a63903c9e03db99099a3
|
/week1/q3.R
|
b25803e97036fdd46d7a479150e983db0e9aaa48
|
[] |
no_license
|
QingmuDeng/statrethinking_winter2019
|
bd383e4d3986f3d252bc3a0e897d761c83c836f1
|
91e90e3a9d71b126ef61ee308892a1ea6a94bdd4
|
refs/heads/master
| 2023-01-02T00:18:20.229380
| 2020-10-26T21:13:09
| 2020-10-26T21:13:09
| 283,236,534
| 0
| 0
| null | 2020-07-28T14:28:06
| 2020-07-28T14:28:05
| null |
UTF-8
|
R
| false
| false
| 2,211
|
r
|
q3.R
|
# This problem is more open-ended than the others. Feel free to collabo-
# rate on the solution. Suppose you want to estimate the Earth’s proportion of
# water very precisely. Specifically, you want the 99% percentile interval of the
# posterior distribution of p to be only 0.05 wide. This means the distance be-
# tween the upper and lower bound of the interval should be 0.05. How many
# times will you have to toss the globe to do this? I won’t require a precise
# answer. I’m honestly more interested in your approach.
library(rethinking)
# First pass (deterministic): grow the number of tosses until the spread
# of the binomial likelihood over the grid drops below 0.05.
p_grid <- seq(from=0, to=1, length.out = 1000)
likelihood <- dbinom(1, size = 10, prob=p_grid)
diff <- 1
size <- 2
while(diff>0.05){
size <- size + 2
# NOTE(review): this always "observes" `size` successes out of `size`
# tosses, and the quantiles below are taken over the likelihood *values*
# on the grid rather than over samples of p -- the author's own comments
# further down acknowledge this is only a rough deterministic sketch.
likelihood <- dbinom(size, size = size, prob=p_grid)
interval <- quantile(likelihood, c(0.005, 0.995))
diff <- interval[2] - interval[1]
# PI99 <- PI( likelihood, 0.99 )
# diff = as.numeric( PI99[2] - PI99[1] )
}
(size)
(diff)
# A numerical calculation suggest it should take at least 250 draws before our 99%
# percentile interval, when the actual probability is around the center.
# When the actual probability is heavily skewed toward the left or right, it takes
# significantly more draws, from 600 to 700 to almost 1000, to reduce the range of
# uncertainty.
# The above is a less accurate description because it is deterministic. The proportions
# were strictly calculated from binomial distribution which ignores the uncertainty in
# inference making from the globe tossing process. That is, the binomial distribution
# directly always give the best case scenario.
# Simulation-based answer: width of the posterior 99% percentile interval
# for a single simulated experiment of N tosses.
f <- function(N){
# True water proportion used to simulate the tosses.
p_true <- 0.01
W <- rbinom(1, size=N, prob=p_true)
# Grid-approximate posterior under a flat prior.
p_grid <- seq(from = 0, to = 1, length.out = 1000)
prior <- rep(1, 1e3)
prob_data <- dbinom(W, size = N, prob = p_grid)
posterior <- prior * prob_data
posterior <- posterior / sum(posterior)
# Draw posterior samples; PI() (rethinking package) gives the 99% interval.
samples <- sample(p_grid, size = 1e4, prob = posterior, replace = TRUE)
PI99 <- PI(samples, 0.99)
as.numeric(PI99[2]-PI99[1])
}
# 100 replicates at each sample size; plot interval width against N with
# the 0.05 target width marked in red.
Nlist <- c(20, 50, 100, 200, 400, 800, 1600, 2000, 3000)
Nlist <- rep(Nlist, each=100)
width <- sapply(Nlist, f)
plot(Nlist, width)
abline(h=0.05, col='red')
|
7c992748b66097d21bf917d91397db7fe3fa2810
|
09eb0741b8da791fab4b3c3e9cdb8d67e9fa8e18
|
/backup/10_16_2018/reg_cces.R
|
724b3e1794c751b87b09185ff1c64cbb8fa46833
|
[] |
no_license
|
eastnile/proj_010_trump
|
6824038749f129ad241a72207aae6acd759f79f1
|
8959c7167b20fc9e3818c80d90c028a62aedf555
|
refs/heads/master
| 2020-08-17T10:00:26.627049
| 2019-10-16T21:47:30
| 2019-10-16T21:47:30
| 215,649,422
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,331
|
r
|
reg_cces.R
|
# CCES regressions: for each outcome listed in v.y, fit OLS, probit, and
# logit models on the covariate set v.x plus state dummies.
# NOTE(review): setproj(), loadmain(), reg.guide, and ccesplus are created
# elsewhere in the project -- this script cannot run standalone; confirm
# against the project entry point.
setproj(10)
loadmain()
# Get regression variables (data.table row-filters on the reg.guide codebook)
v.x.suffer = reg.guide[include=='suffer']$varname.raw
v.x.relig = reg.guide[include=='relig']$varname.raw
v.x.race = reg.guide[include=='race']$varname.raw
v.x.mig = reg.guide[include=='mig']$varname.raw
v.x.econ = reg.guide[include=='econ']$varname.raw
v.x.control = reg.guide[include=='control']$varname.raw
v.x.xnoctrl = reg.guide[include!='control']$varname.raw
v.x = reg.guide[include!='y']$varname.raw
v.y = reg.guide[include=='y']$varname.raw
#Set up Dummies: one indicator column per state, first level dropped
require(fastDummies)
stdum = dummy_cols(ccesplus$stateabr,remove_first_dummy=T)
ccesplus = as.data.table(cbind(ccesplus,stdum))
v.x.dum = names(select(stdum,matches('.data_')))
v.x = c(v.x,v.x.dum)
# Generate regression equations -- mod stores formulas (f) and fits (m)
# per model family.
mod = list()
mod$ols$f = list()
mod$ols$m = list()
mod$ols$r = list()
mod$ols$stata = list()
mod$probit$f = list()
mod$probit$m = list()
mod$logit$f = list()
mod$logit$m = list()
# Run regressions: one OLS, one probit, and one logit fit per outcome
for (var in v.y) {
#var = 'cc.TrumpGEVote'
mod$ols$f[[var]] = as.formula(paste(var,"~",paste(v.x,collapse='+')))
# mod$ols$stata[[var]] = paste(var,' ',paste(v.x,collapse=''))
# Dummy line to get stargazer to properly display dependent variable names
formula = mod$ols$f[[var]]
#mod$ols$m[[var]] = plm(formula,main,model='within',index='statefipsn')
mod$ols$m[[var]] = lm(formula,ccesplus)
#mod$ols$m[[var]] = lm(formula,main)
mod$probit$f[[var]] = as.formula(paste(var,"~",paste(v.x,collapse='+')))
formula = mod$probit$f[[var]]
mod$probit$m[[var]] = glm(formula = formula,data=ccesplus,family = binomial(link='probit'))
mod$logit$f[[var]] = as.formula(paste(var,"~",paste(v.x,collapse='+')))
formula = mod$logit$f[[var]]
mod$logit$m[[var]] = glm(formula = formula,data=ccesplus,family = binomial(link='logit'))
}
# Earlier fixed-effects (plm) variant kept for reference:
# require(plm)
# for (var in v.y) {
# mod$ols$f[[var]] = as.formula(paste(var,"~",paste(v.x,collapse='+')))
# # Dummy line to get stargazer to properly display dependent variable names
# formula = mod$ols$f[[var]]
# #mod$ols$m[[var]] = plm(formula,main,model='within',index='statefipsn')
# mod$ols$m[[var]] = plm(formula,ccesplus,model='within',index='statefipsn')
# #mod$ols$m[[var]] = lm(formula,main)
# }
#summary(mod$ols$m$cc.TrumpGEVote)
# t.text = 'hello world'
|
ca0d6acc53059f88448a0790568a86f62da3f342
|
db6e1efe62ca5ed1c9f529d3300a75577157321d
|
/proteome/纳入所有的蛋白质.R
|
b5f71deb064f04826534a9cf382908df7b27db69
|
[] |
no_license
|
pomnoob/lipidG
|
2590562bfab9fcd197a69dd96e39203e8ebaf109
|
8fae606103efd0b8755b86e02cfe9bc32c639c9a
|
refs/heads/master
| 2023-02-19T04:10:07.763133
| 2021-01-21T13:10:26
| 2021-01-21T13:10:26
| 320,478,341
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,192
|
r
|
纳入所有的蛋白质.R
|
library(tidyverse)
library(zscorer)
# Import the previously prepared proteomics data.
# Harbin and Zhengzhou data are still missing because sample ids from
# those two cities cannot be matched one-to-one with the assay data.
# Keep all proteins.
pomic_all <- read.csv("data/pomics_all_t.csv",stringsAsFactors = F)
pomic_all <- pomic_all %>%
select(-group) %>%
rename(id=sample)
# Import questionnaire data.
ques <- read.csv(file = "data/Metadata of breastmilk questionnaire.csv",stringsAsFactors = F)
# Compute maternal and infant ages etc. from the questionnaire.
ques$birthMon <- as.Date(paste(ques$A2A,ques$A2B,
ques$A2C,sep="-"))# mother's date of birth
ques$birthBaby<- as.Date(paste(ques$A5A,
ques$A5B,ques$A5C,sep="-"))
ques$sampleDate <- as.Date(ques$N1,
format="%d-%m-%Y")# sample collection date
# Mother's age (years) and infant's age (days) at sample collection.
ques <- ques %>%
mutate(ageMon=(sampleDate-birthMon)/365,
ageBaby=(sampleDate-birthBaby))
ques$ageBaby <- as.numeric(ques$ageBaby)
ques$ageMon <- as.numeric(ques$ageMon)
ques$id <- as.numeric(ques$id)
# Meaning of the questionnaire variable codes:
# A4:infant gender; TB3: infant body weight; TB1:infant length
# TB6: infant head circumference; A3: parity; M1: maternal education
# M3: family annual income; M4: prepregnant maternal body weight
# M5: maternal body weight postpartum; M7: delivery mode
# TA1: maternal height; TA3: maternal body weight on site; B401: infant allergy; B5: maternal allergy history
# B6: father allergy history;B1: birth weight;B2:birth length;B3: preterm or not
quesSel <- ques %>%
select(id, city, A4, TB3, TB1, TB6, A3, M1, M3, M4,
M5, M7, TA1, B401, B5, B6, ageBaby, ageMon,B1,B2,B3,TA3,B12) %>%
rename(sex=A4,babyWeight=TB3,babyLength=TB1,
babyHead=TB6,parity=A3,edu=M1, income=M3,
preMonWeight=M4,postMonWeight=M5,delivery=M7,monHeight=TA1,
monWeight=TA3,allergy=B401,MonAllergy=B5,FatAllergy=B6,
birthWeight=B1,birthLength=B2,preterm=B3,mix=B12)
# Merge the proteomics data with the questionnaire data.
pomic <- inner_join(pomic_all,quesSel,by="id")
write.csv(pomic,file = "proteome for growth.csv",
row.names = F)
# Infant data.
# Check for samples with abnormal length/weight.
boxplot(pomic$babyWeight)
# Two samples have weight > 60; set them to NA.
pomic$babyWeight[pomic$babyWeight > 60] <- NA
# No outliers in infant length.
boxplot(pomic$babyLength)
# Maternal data.
summary(pomic$monHeight)
boxplot(pomic$monHeight)
pomic$monHeight[pomic$monHeight<100] <- NA
# Inspect pre-pregnancy weight distribution and remove outliers.
summary(pomic$preMonWeight)
boxplot(pomic$preMonWeight)
pomic$preMonWeight[pomic$preMonWeight>=120] <- NA
# Postpartum weight.
summary(pomic$postMonWeight)
boxplot(pomic$postMonWeight)
pomic$postMonWeight[pomic$postMonWeight>=120] <- NA
# Weight at sampling.
summary(pomic$monWeight)
boxplot(pomic$monWeight)
# Maternal BMI from weight (kg) and height (cm -> m).
pomic$monBMI <- pomic$monWeight/(pomic$monHeight/100)^2
summary(pomic$monBMI)
# Compute WHO growth z-scores: weight-for-age (waz) and height-for-age (haz).
pomic <- addWGSR(data = pomic, sex = "sex", firstPart = "babyWeight",
secondPart = "ageBaby", index = "wfa",output = "waz")
pomic <- addWGSR(data = pomic, sex = "sex", firstPart = "babyLength",
secondPart = "ageBaby", index = "hfa",output = "haz")
# Categorize maternal BMI: 1 = >27.5, 2 = 18.5-23, 3 = (23, 27.5].
# NOTE(review): BMI < 18.5 falls in no category and becomes NA -- confirm.
pomic <- pomic %>%
mutate(nBMI=case_when(monBMI>27.5~1,
monBMI>=18.5 & monBMI<=23~2,
monBMI>23 & monBMI<=27.5~3))
table(pomic$nBMI)
# Sample filtering.
pomicSel <- pomic %>%
# keep infants older than 60 days and not preterm
# NOTE(review): the original note said "age not less than 3 months" but the
# code filters on ageBaby > 60 days -- confirm which threshold is intended.
filter(ageBaby > 60 & preterm == 1)
# Exclude preterm infants only.
pomic.p <- pomic %>%
filter(preterm==1)
# WAZ tertiles.
pomic.p$twaz <- ntile(pomic.p$waz,3)
table(pomic.p$twaz)
# Strategy 1: categorize waz as >1 (1), <-1 (3), or in between (2).
pomic.p <- pomic.p %>%
mutate(nwaz=case_when(waz > 1~1,
waz < -1~3,
waz <= 1 & waz >= -1~2))
table(pomic.p$nwaz)
# Select the growth-delayed and normal groups.
# NOTE(review): this keeps nwaz 1 (waz > 1) and 2 (normal); growth delay
# would be nwaz == 3 (waz < -1) -- confirm the intended contrast.
waz1 <- pomic.p %>%
filter(nwaz == 1 | nwaz == 2)
# Remove mixed-feeding samples (mix == 2); missing mix treated as 1.
pomic.p$mix[is.na(pomic.p$mix)] <- 1
table(pomic.p$mix)
pomic.pm <- pomic.p %>%
filter(mix != 2)
waz1$mix[is.na(waz1$mix)] <- 1
table(waz1$mix)
waz1.m <- waz1 %>%
filter(mix != 2)
table(waz1.m$nwaz)
# Export the waz 1 / -1 grouping (columns 2:473 are the protein features).
pomic.expm <- pomic.pm %>%
select(id,nwaz,2:473) %>%
rename(group=nwaz)
# Export the tertile grouping.
pomic.expmt <- pomic.pm %>%
select(id,twaz,2:473) %>%
rename(group=twaz)
# Export tertiles T1 and T3 only.
wazT <- pomic.expmt %>%
filter(group ==1 | group ==3)
pomic.exp <- pomic.p %>%
select(id,nwaz,2:473) %>%
rename(group=nwaz)
write.csv(pomic.expmt,file = "data/WAZ tertile all proteins no preterm not mixed feeding.csv",row.names = F)
write.csv(wazT,file = "data/WAZ tertile 1&3 all proteins no preterm not mixed feeding.csv",row.names = F)
write.csv(pomic.expm,file = "data/WAZ all proteins no preterm not mixed feeding.csv",row.names = F)
write.csv(pomic.exp,file = "data/WAZ all proteins no preterm.csv",row.names = F)
####### Allergy
table(pomic.p$allergy)
# Recode allergy: 1 (no allergy) -> 0, everything else -> 1.
pomic.p$allergy <- ifelse(pomic.p$allergy==1,0,1)
p.allergy <- pomic.p %>%
select(id,allergy,2:473) %>%
rename(group=allergy)
write.csv(p.allergy,file = "data/allergy all proteins no preterm 1 and -1.csv",row.names = F)
|
a68b02489c87f8ffbf31a5e4afacddd67d3af863
|
143573c86cf4fd60d7e9868cf4a5c7af6b0b41bf
|
/man/write.xarf.Rd
|
4d0d16e47064151afb97f887adf29eccf3c2540a
|
[
"MIT"
] |
permissive
|
AlekseyBuzmakov/XARF
|
fc18dad500c1a356ef946ddb9708672c03b9fe3d
|
0d1b85213a91719cb23c14ac0dba0bed99188d54
|
refs/heads/master
| 2020-03-22T03:40:38.355542
| 2018-07-03T14:33:42
| 2018-07-03T14:33:42
| 139,445,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,542
|
rd
|
write.xarf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/xarf.R
\name{write.xarf}
\alias{write.xarf}
\title{Data Output to XARF}
\usage{
write.xarf(x, dbID, file = "", dbDescription = NULL,
attrDescription = NULL)
}
\arguments{
\item{x}{the data.frame to be written}
\item{dbID}{a unique identifier of the dataset}
\item{file}{either a character string naming a file or a connection open for writing. "" indicates output to the console.}
\item{dbDescription}{a list specifying the description of the dataset}
\item{attrDescription}{a vector of descriptions for every attribute}
}
\description{
Function saves a data.frame-like object to XARF format.
More details on format can be found in \url{https://bitbucket.org/realKD/realkd/wiki/model/data/xarf}.
}
\details{
The function takes the dataset and writes it to file \code{file}.
The names of the rows are written as the name attribute.
The attributes are written in the order given in \code{attrDescription}.
If an attribute is missing from \code{attrDescription}, it is written at the end of the dataset.
}
\examples{
write.xarf(mtcars, file="",
"mtcars",
dbDescription=list(
title="Motor Trend Car Road Tests",
version="1.0.0",
source="Basic R dataset 'mtcars'"),
attrDescription=list(
list(id="mpg",name="Miles/(US) gallon",type="real",description="The distance in miles a car can go with a single gallon of petrolium"),
list(id="cyl",name="Number of cylinders",type="integer"),
list(id="disp",name="Displacement (cu.in.)")
))
}
|
32b8e2b9f3ae70196c8667cca1a26ea41a38597f
|
ba0116945c4526fa79fda1015b04900cc9d02e8c
|
/sagar/kmean.R
|
38fb03ce7af236d7ff1bb99650f604678d01c8f5
|
[] |
no_license
|
sayanm-10/storm-data-insights
|
25767cd8ec8fda4d484232b119932203121a17b9
|
67115954828ec5ee924b685504712f9e25e80797
|
refs/heads/master
| 2020-03-13T08:45:26.070924
| 2018-05-09T14:24:46
| 2018-05-09T14:24:46
| 131,050,012
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,487
|
r
|
kmean.R
|
#################################################
# Company : Stevens Tech
# Project : Group Project
# Purpose : k-means clustering of storm data
# First Name : Sagar
# Last Name : Jain
# Id : 10429097
# Date : 04/20/2018
#################################################

# Clear the environment.
# NOTE(review): rm(list = ls()) in a script is generally discouraged;
# kept here to preserve the original workflow.
rm(list = ls())

# Load the data (hard-coded absolute path; adjust for your machine).
storm <- read.csv("/Users/sagarjain/Desktop/kddm/project/sagar/storm.csv")

# Remove unwanted columns (original columns 1 and 13).
storm <- storm[, -c(1, 13)]

# Recode the categorical outcomes as integer-labelled factors.
storm$INJURIES <- factor(storm$INJURIES, levels = c("No", "Yes"), labels = c(1, 2))
storm$DEATHS <- factor(storm$DEATHS, levels = c("No", "Yes"), labels = c(1, 2))
storm$DAMAGE_PROPERTY <- factor(storm$DAMAGE_PROPERTY, levels = c("Low", "Medium", "High"), labels = c(1, 2, 3))
storm$DAMAGE_CROPS <- factor(storm$DAMAGE_CROPS, levels = c("Low", "Medium", "High"), labels = c(1, 2, 3))

# k-means for INJURIES (column 8): cluster on all other columns, then
# cross-tabulate the cluster assignment against the true label.
kmeans_injuries <- kmeans(storm[, -c(8)], 2, nstart = 10)
kmeans_injuries
kmeans_injuries$cluster
table(kmeans_injuries$cluster, storm[, 8])

# k-means for DEATHS (column 9).
# BUG FIX: the cross-tabulation previously compared the death clusters
# against column 8 (INJURIES) instead of column 9 (DEATHS).
kmeans_death <- kmeans(storm[, -c(9)], 2, nstart = 10)
kmeans_death$cluster
table(kmeans_death$cluster, storm[, 9])

# k-means for DAMAGE_PROPERTY (column 10), 3 clusters for Low/Medium/High.
kmeans_property <- kmeans(storm[, -c(10)], 3, nstart = 10)
kmeans_property$cluster
table(kmeans_property$cluster, storm[, 10])

# k-means for DAMAGE_CROPS (column 11).
kmeans_crops <- kmeans(storm[, -c(11)], 3, nstart = 10)
kmeans_crops$cluster
table(kmeans_crops$cluster, storm[, 11])
|
6a87517274fea8157993283c78b7fbe787a5b33f
|
cf8622557c2d10b6424b17e694da9fa2b13b47ec
|
/sim_axis/simulate_axis_results_87.R
|
6b7ff40cd266eb3302eb9da64502eb12837a88de
|
[
"MIT"
] |
permissive
|
silastittes/lasthenia_curves
|
0c0c7cb645abe27a2b58aa8e7aa01405e630dc58
|
d7bed64203f23a028e1ce737b395cecbc228b50d
|
refs/heads/master
| 2021-09-26T22:31:51.831345
| 2018-11-03T20:50:49
| 2018-11-03T20:50:49
| 140,039,078
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 36,492
|
r
|
simulate_axis_results_87.R
|
run_x <-
structure(list(lm_df = structure(list(term = c("maxima_sc", "stretch_sc",
"x_max_sc", "x_min_sc"), coef = c(-0.0835441254689772, 0.0352677425055282,
0.0486455684267596, -0.0733756920586411), p_value = c(0.0518997678042226,
0.0235679439589644, 0.135178263583774, 0.00430657139136283),
rand_var = c(3.46288656126394, 3.46288656126394, 3.46288656126394,
3.46288656126394), mean_rand = c(-0.419643221034459, -0.419643221034459,
-0.419643221034459, -0.419643221034459), sim_n = c(87L, 87L,
87L, 87L)), .Names = c("term", "coef", "p_value", "rand_var",
"mean_rand", "sim_n"), class = c("tbl_df", "tbl", "data.frame"
), row.names = c(NA, -4L)), lasso_df = structure(list(var = c("maxima",
"stretch", "x_max", "x_min"), mean = c(-0.0691944288105645, -0.00717418397616598,
0.182033292846807, -0.242815506890829), prop = c(0.0925, 0.1075,
0.1725, 0.0075), rand_var = c(3.46288656126394, 3.46288656126394,
3.46288656126394, 3.46288656126394), mean_rand = c(-0.419643221034459,
-0.419643221034459, -0.419643221034459, -0.419643221034459),
sim_n = c(87L, 87L, 87L, 87L)), .Names = c("var", "mean",
"prop", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), summ_stan_df = structure(list(
shape1 = 3.64113343292168, shape2 = 2.91383010244913, stretch = 4.57988031536012,
x_min = -0.527963994587279, x_max = 4.69405612765934, maxima = 3.19728692234639,
rep = 87L), .Names = c("shape1", "shape2", "stretch", "x_min",
"x_max", "maxima", "rep"), class = c("tbl_df", "tbl", "data.frame"
), row.names = c(NA, -1L))), .Names = c("lm_df", "lasso_df",
"summ_stan_df"))
run_x <-
structure(list(lm_df = structure(list(term = c("maxima_sc", "stretch_sc",
"x_max_sc", "x_min_sc"), coef = c(-0.0835441254689772, 0.0352677425055282,
0.0486455684267596, -0.0733756920586411), p_value = c(0.0518997678042226,
0.0235679439589644, 0.135178263583774, 0.00430657139136283),
rand_var = c(3.46288656126394, 3.46288656126394, 3.46288656126394,
3.46288656126394), mean_rand = c(-0.419643221034459, -0.419643221034459,
-0.419643221034459, -0.419643221034459), sim_n = c(87L, 87L,
87L, 87L)), .Names = c("term", "coef", "p_value", "rand_var",
"mean_rand", "sim_n"), class = c("tbl_df", "tbl", "data.frame"
), row.names = c(NA, -4L)), lasso_df = structure(list(var = c("maxima",
"stretch", "x_max", "x_min"), mean = c(-0.0691944288105645, -0.00717418397616598,
0.182033292846807, -0.242815506890829), prop = c(0.0925, 0.1075,
0.1725, 0.0075), rand_var = c(3.46288656126394, 3.46288656126394,
3.46288656126394, 3.46288656126394), mean_rand = c(-0.419643221034459,
-0.419643221034459, -0.419643221034459, -0.419643221034459),
sim_n = c(87L, 87L, 87L, 87L)), .Names = c("var", "mean",
"prop", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), summ_stan_df = structure(list(
shape1 = 3.64113343292168, shape2 = 2.91383010244913, stretch = 4.57988031536012,
x_min = -0.527963994587279, x_max = 4.69405612765934, maxima = 3.19728692234639,
rep = 87L), .Names = c("shape1", "shape2", "stretch", "x_min",
"x_max", "maxima", "rep"), class = c("tbl_df", "tbl", "data.frame"
), row.names = c(NA, -1L))), .Names = c("lm_df", "lasso_df",
"summ_stan_df"))
run_x <-
structure(list(lasso_df = structure(list(var = c("maxima", "stretch",
"x_max", "x_min"), mean = c(0.106205923797183, 0.108839706242091,
0.0795315905677505, -0.115092097396361), prop = c(0.1875, 0.1025,
0.18, 0.01), rand_var = c(3.46288656126394, 3.46288656126394,
3.46288656126394, 3.46288656126394), mean_rand = c(-0.419643221034459,
-0.419643221034459, -0.419643221034459, -0.419643221034459),
sim_n = c(87L, 87L, 87L, 87L)), .Names = c("var", "mean",
"prop", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), lasso_depth = structure(list(
draw = 1:400, `(Intercept)` = c(-0.0680069191809077, -0.0525779819455739,
-0.0666130952380953, -0.0604239163331304, -0.0666130952380953,
-0.0666130952380953, -0.0584546976795232, -0.0666130952380953,
-0.0666130952380953, -0.0666130952380953, -0.0666130952380953,
-0.0538512593792992, -0.0666130952380953, -0.0212913611217475,
-0.0666130952380953, -0.0666130952380953, -0.0588305782659702,
-0.0654888482573592, -0.0627982929259626, -0.0666130952380953,
-0.0666130952380953, -0.0487010493287212, -0.0492024485233575,
-0.0666130952380953, -0.101127915894368, -0.0523030586109589,
-0.0666130952380953, -0.0666130952380953, -0.0978307944382482,
-0.074178830368722, -0.0581267988180302, -0.0666130952380953,
-0.0437501818013041, 0.00374195774727024, -0.0666130952380953,
-0.0382762850301985, -0.0666130952380953, -0.0666130952380953,
-0.0926663856204514, -0.0817791499121405, -0.0636439827788836,
-0.0693813055674814, -0.0666130952380953, -0.0666130952380953,
-0.0341085425476664, -0.0454202690105636, -0.00681256587256069,
-0.0530387902580315, -0.0433561006786133, -0.064142834983586,
-0.0588704006638073, -0.0547595622166364, -0.0634058591287103,
-0.0737499030517818, -0.0351818883111884, -0.0744589126022688,
-0.0479100726666247, -0.00379490500776646, -0.0292595676839448,
-0.0655719077125164, -0.0666130952380953, -0.117567189828654,
-0.0666130952380953, -0.0666130952380953, -0.0666130952380953,
-0.0270505578706547, -0.0666130952380953, -0.0787787137892682,
-0.0655656428427561, -0.0697541283243264, -0.0723229660356339,
-0.0614967245481939, -0.061541643725161, -0.0588379908876136,
-0.0239545774163583, -0.0460912053668645, -0.0514160954597438,
-0.0767792052300888, -0.0593699171829306, -0.0666130952380953,
-0.0666130952380953, -0.0793026743578104, 0.099403667740772,
-0.0343845953723881, -0.0666130952380953, -0.076436752957336,
-0.0666130952380953, -0.0666130952380953, -0.0752783496370758,
-0.0609388690448738, -0.0666130952380953, -0.0551104357483787,
-0.0603673714197941, -0.0479228310528571, -0.0661510978825487,
-0.0702660182925083, -0.0398822570637138, -0.0666130952380953,
-0.0623305274648305, -0.0654202512038361, -0.0501603285861884,
-0.0557675473093856, -0.0642287636627331, -0.0292671735464498,
-0.0666130952380953, -0.0630660476781669, -0.0666130952380953,
-0.0601012424350901, -0.0523965045547127, -0.0314483721979581,
-0.0524179633709794, -0.0727334766724062, -0.063050478547931,
-0.0570197136854183, -0.0475742364504763, -0.0666130952380953,
-0.0652413098833836, -0.0597504216716291, -0.0604245562733517,
-0.0666130952380953, -0.0666130952380953, -0.0666130952380953,
-0.0666130952380953, -0.0579376904307004, -0.0666130952380953,
-0.131728493637553, -0.0646239260028795, -0.0650417056733952,
-0.0666130952380953, -0.0666130952380953, -0.0666130952380953,
-0.0613056691821499, -0.0492395886776436, -0.0364485836559159,
-0.0437281928123868, -0.0666130952380953, -0.0597717576139179,
-0.0336390755390114, -0.060521914431699, -0.0662687049289203,
-0.0361234574100402, -0.0666130952380953, -0.0689843109301144,
-0.0666130952380953, -0.0663837059710371, -0.0737651745218816,
-0.0447274963261424, -0.0606180492933174, -0.111531069735521,
-0.0718005021884339, -0.0245213777587742, -0.0599449202085318,
-0.0538601876594991, -0.0733150839622052, -0.1015656499102,
-0.00684300759996156, -0.0587676232146741, -0.0614309486689104,
-0.0536260712317133, -0.0464086455759759, -0.0666130952380953,
-0.0666130952380953, -0.0666130952380953, -0.0310752236790739,
-0.0604599114201671, -0.0666130952380953, -0.0666130952380953,
-0.0684457116428743, -0.0172407690959876, -0.069625181801978,
-0.0666130952380953, -0.0238637903603282, -0.0666130952380953,
-0.0422183356791487, -0.0484710377591112, -0.0750092834817554,
-0.0577190154882847, -0.0733511867203564, -0.110382928671877,
-0.0565504616820692, -0.0523824669129135, -0.0615769353886407,
-0.0325330052450611, -0.0666130952380953, -0.0666130952380953,
-0.0618025749846246, -0.0972081630751926, -0.0660957401196496,
-0.0666130952380953, -0.0426123035461864, -0.0585303046794149,
-0.0498556812784727, -0.0564686862230523, -0.0770672326266426,
-0.0605286479196564, -0.0601639031636981, -0.0572044676140316,
-0.0601402413041661, -0.0666130952380953, -0.150142947914556,
-0.0397654788377745, -0.0666130952380953, -0.0666130952380953,
-0.0666130952380953, -0.0609759693472137, -0.0770457452443529,
-0.0666130952380953, -0.066310439011508, -0.0633107751718981,
-0.0666130952380953, -0.0480016139807024, -0.0589301257207596,
-0.030180501945054, -0.0666130952380953, -0.0607735978010731,
-0.0698155625516758, -0.0666130952380953, -0.0716726280386986,
-0.0494734891817157, -0.0666130952380953, -0.0651842654069508,
-0.0666130952380953, -0.0666130952380953, -0.0666130952380953,
-0.042887714918332, -0.0666130952380953, -0.0701638263414902,
-0.0817631661144659, -0.0666130952380953, -0.00937158664001314,
-0.0575840886122575, -0.0666130952380953, -0.0666130952380953,
-0.0598591494213735, -0.0557038320033084, -0.0666130952380953,
-0.0590220649644606, -0.0528362168855462, -0.0426935850217279,
-0.0318183346762392, -0.0508572429320736, -0.0641616738802795,
-0.0666130952380953, -0.0207446424046446, -0.0666130952380953,
-0.0741577959529574, -0.0546720559720767, -0.0492880347925692,
-0.0666130952380953, -0.086149030244666, -0.0666130952380953,
-0.0662310806650532, -0.0666130952380953, -0.0915934663176024,
-0.0395240548859927, -0.0456365508143606, -0.0666130952380953,
-0.0172579722440983, -0.0691360173251981, -0.0441554101113419,
-0.0666130952380953, 0.0142018379317258, -0.0447018378049303,
-0.0666130952380953, -0.0760075887723022, -0.0850611637292269,
-0.0736469600930926, -0.0666130952380953, -0.0666130952380953,
-0.0666130952380953, -0.0541192268202119, -0.0536837514379698,
-0.0477417939628071, -0.058512477133855, -0.0666130952380953,
-0.0295882472264979, -0.065673530100199, -0.0666130952380953,
-0.0616779261590031, -0.0666130952380953, -0.053057665904927,
-0.0666130952380953, -0.0666130952380953, -0.0311382033645125,
-0.0666130952380953, -0.0298366530882728, -0.0745623608499884,
-0.091780222674061, -0.0666130952380953, -0.0666130952380953,
-0.0748749234872021, -0.0666130952380953, -0.0468513461171135,
-0.0559713833266192, -0.0629645780385528, -0.0666130952380953,
-0.0367876480990122, -0.0666130952380953, -0.045251824577524,
-0.0371427299300226, -0.0446894574208021, -0.0592797740825014,
-0.0666130952380953, -0.0666130952380953, -0.0426578811881878,
-0.0651753611079087, -0.0417693347464228, -0.0666130952380953,
-0.0431615506834774, -0.0568156342048831, -0.0364224821917102,
-0.0596457137371548, -0.0527007510815286, -0.0379708544120393,
-0.0666130952380953, -0.0666130952380953, -0.0666130952380953,
-0.0477472109870405, -0.0649816982447936, -0.084608535889807,
-0.0612563920853877, -0.0666130952380953, -0.0666130952380953,
-0.0787489119171483, -0.0554347848423182, -0.0666130952380953,
-0.0666130952380953, -0.0666130952380953, -0.0666130952380953,
-0.0666130952380953, -0.0666130952380953, -0.0793823205327554,
-0.0666130952380953, -0.0226178982751914, -0.0848547973031335,
-0.0704522025950701, -0.0602465243616286, -0.0658987230179029,
-0.0666130952380953, -0.0666130952380953, -0.0647066008170651,
-0.0444064058820672, -0.0666130952380953, -0.0666130952380953,
-0.0708140488468511, -0.0666130952380953, -0.0666130952380953,
-0.0450755910260887, 0.00240692388114627, -0.059701927212519,
-0.0666130952380953, -0.0666130952380953, -0.0785393038412623,
-0.0702450889331666, -0.0625339533640636, -0.0666130952380953,
-0.0600381437865471, -0.0324600515405212, -0.0510360062617457,
-0.0525001215338692, -0.0666130952380953, -0.0608614223565599,
-0.0700392468546845, -0.0666130952380953, -0.0546951177824598,
-0.0666130952380953, -0.0648626547111476, -0.0666130952380953,
-0.0527718753975726, -0.0604437175509632, -0.0441706397836825,
-0.0666130952380953, -0.0538857057689086, -0.0666130952380953,
-0.07744341580101, -0.0325407567282728, -0.0666130952380953,
-0.0676626588794269, -0.0666130952380953, -0.0362834585304165,
-0.0666130952380953, -0.0666130952380953, -0.0743658352598106,
-0.0656135579891761, -0.0666130952380953, -0.0666130952380953,
-0.0397766333551095, -0.0547507889724661, -0.0577983379684555,
-0.0666130952380953, -0.0807684841400622, -0.0666130952380953,
-0.0666130952380953, -0.0666130952380953, -0.0666130952380953,
-0.0421889593450021, -0.0046432752729088, -0.0666130952380953,
-0.0666130952380953, -0.0589502853593406), stretch_sc = c(0.0213546472302334,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0782075347905746, 0, 0.0105597638524953,
0, 0, 0, 0, 0, 0, 0, 0.0122985922817906, 0.0431838861106742,
0, 0.0306500572944704, 0, 0, 0, 0.0333818937842072, 0.0258171107344038,
0.00821591858956543, 0, 0, 0.0118154465931337, 0, 0.0380034436113531,
0, 0, 0.0353159960488084, 0.0679418360369808, 0, 0, 0, 0,
0.053078585740234, 0, 0.018392336397189, 0.0312923623553551,
0.0077944223526142, 0, 0, 0.0163507888235917, 0, 0.000507389661685978,
0.0621528959867679, 0.0335994433456686, 0.00588279223228634,
0.038907051679227, -0.00111595727306711, 0, 0, 0.0510734640067122,
0, 0, 0, 0.00629427886077594, 0, 0.0356151309747397, 0.0367227485286787,
0, 0.0186414217593499, 0.0457213083982528, 0, 0, -0.0760821240208864,
0, -0.0568504713691807, -0.0101605840987422, 0.0313423654637691,
0, 0, 0.0153264896985072, -0.0499307470687317, -0.0325929572197761,
0, 0.0361888731984225, 0, 0, 0.0321021940630417, 0, 0, 0.0345462629592249,
0.0507067739982562, 0, 0.0105576287560699, 0, 0, 0, 0.00197825246996031,
0, 0.0159368598852146, 0.0689775733305701, 0, 0.0370704470141431,
0, 0, 0, 0.0176158847785572, 0.0176741732962327, 0.00543422400900112,
0, 0.0327191824908173, 0, 0.0413494533680188, 0, 0, 0.043942007712094,
0, 0.0413630234640725, 0, 0, 0, 0, 0, 0, 0.0696972413201581,
0, 0, 0, 0, 0, 0, 0.0113881298977518, 0, 0, 0, 0, 0, 0, 0.0746246175264124,
0.0496176224131823, 0, 0, 0, 0, 0.0244180860441373, 0.0172159656948204,
0, 0.0544253582076241, 0.0264755151200747, 0.045979194566166,
0, 0, 0.00223628483758589, 0.0251726256231141, 0.0139217341247475,
-0.00680428565703541, 0, 0, 0.0412617472943413, 0, 0, 0,
0.0247815127572229, 0.0046806953915934, 0, 0, 0, 0.0454843039923968,
0.0256473394871719, 0, 0, 0, 0.0482099806217423, 0.00822231108738782,
0, 0, 0.0274677679556307, 0.0940250173631695, 0, 0.0287412111472497,
0, 0, 0, 0, 0, 0.0843569894838108, 0.0227314938739312, 0,
0.0433159693235582, 0, 0, 0, 0.0195163488020371, 0, 0.0117057653016849,
0, 0, 0, 0.0680526461757134, 0.0190071176552177, 0, 0, 0,
0.0262553934402669, 0.0172217383283043, 0, 0.0220607963750974,
0.0147778125004214, 0, 0.0437791335624697, 0, 0.0350508724507245,
0, 0, 0.0163659571032953, 0, 0.0347919590605872, 0.0264582085451993,
0, 0.0162907778897589, 0, 0, 0, 0.0543834290308378, 0, -0.0206487668773617,
0.00664277823553612, 0, 0.0644640919925148, 0.0553320624834839,
0, 0, 0, 0.00999253718095009, 0, 0, 0.0356437673491373, 0.0141447179907039,
0.0479873766474914, 0.0466837992625525, 0.00667523070686181,
0, 0.0406048861844932, 0, 0, 0.0138990368737238, 0.0344823428348119,
0, 0, 0, 0, 0, 0.0138185294879379, 0.0473010151860488, 0.016920984420083,
0, 0.0440095525344126, 0.0439615295237087, 0.0406868798094353,
0, 0.11431893907717, 0.0354595120264165, 0, 0, 0.0279961318736022,
0.0239752488087428, 0, 0, 0, 0.00727768690967668, 0.013927077878362,
0.0248606474711813, 0.0256115054969005, 0, -0.012674690742121,
0, 0, 0.0246823542607023, 0, 0.0264368372947173, 0, 0, 0.0525731073819534,
0, 0.0307143979125612, 0.0661027339148527, 0.0804101679938427,
0, 0, 0.0275352723152288, 0, 0.0242818640640519, 0.0244426587639165,
0.0155070152086104, 0, 0.0200958111330707, 0, 0.0583348820979376,
0.0397478644709504, 0, 0, 0, 0, 0.077490917682339, 0, 0.0467193866299883,
0, 0.054572017933992, 0.0220303617685204, -0.0109506347460411,
0, 0, 0.0553282443485292, 0, 0, 0, 0.0405838489233216, 0,
0.051339287562605, 0, 0, 0, 0.0527388247090429, 0, 0, 0,
0, 0, 0, 0, 0.0532685385265411, 0, 0.0202624805730131, 0.0564832865869019,
0.00795549081326069, 0, 0, 0, 0, 0, 0.0137772435810718, 0,
0, 0.0301290625969906, 0, 0, 0.0440637812213186, 0.0662992819338787,
0.0129850438361935, 0, 0, 0.0266600962804208, 0.0505054868840197,
0, 0, 0, 0.0560380213540731, 0.0317565316537403, 0, 0, 0.0043569990058871,
0.0330306364797615, 0, 0, 0, 0.0472918784776929, 0, 0.0209119201089168,
0.0385898668435822, 0.0393007441984195, 0, 0.0895682578089564,
0, 0.0367936627635798, 0.0454240473825874, 0, 0.00309732638072131,
0, 0.0417883999404238, 0, 0, 0.0459053817844359, 0, 0, 0,
0.0279767174328075, 0, 0, 0, 0.0443713086601673, 0, 0, 0,
0, 0.0159834113164315, 0.092678815367198, 0, 0, 0), maxima_sc = c(0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.200556637002534, 0,
0, 0, 0, 0, 0, 0, -0.0190447397430233, 0, 0, 0, 0, 0, 0,
0, -0.0359711657154592, 0, 0, 0, -0.151273116939408, 0, -0.139330119020552,
0, 0, -0.0296158473355692, 0, 0, 0, 0, 0, -0.0417179076092917,
-0.0161069974622325, -0.0749542481580482, 0, -0.0887634861184027,
0, 0, 0, 0, -0.0416966198309481, -0.0613672208416581, 0,
0, -0.337815450789827, -0.148027214930148, 0, 0, -0.104615403286479,
0, 0, 0, -0.106346537038896, 0, 0, 0, 0, 0, 0, 0, 0, -0.233748503976384,
0, -0.0134916185706073, 0, 0, 0, 0, -0.138168143261987, -0.173984168164317,
-0.0386001206439953, 0, -0.0810955719638333, 0, 0, 0, 0,
0, 0, -0.100847317451419, 0, 0, 0, 0, 0, 0, 0, 0, 0.0632184588989326,
0, -0.309751414109214, 0, 0, 0, 0, 0, -0.097333146334677,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0550378711239535,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.116325639524691,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.129208456363197,
-0.179245039217553, 0, 0, -0.171806676278009, 0, 0, 0, 0,
0, 0, 0, 0, -0.0336439176656599, 0, 0, 0, 0, -0.162927833773197,
0, 0, 0, 0, -0.0961357290639194, 0, 0, 0, 0, 0, 0, 0, -0.0765163258679632,
0, 0, -0.129284288167959, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0.0057134877988027, 0, 0, 0.0102471994271524,
0, 0, 0.0162042665487743, 0.00719441332064075, 0, 0.00900879456108547,
-0.0385619311844307, 0, 0.0180714074877232, 0, 0, -0.0424841305469968,
0, 0, 0, 0, 0, 0.0142639376355293, 0, 0, 0.0177121282621066,
-0.045809385109604, 0, 0, 0.0141989667366524, 0.000662338211113475,
0, -0.0358953699191424, 0, 0, 0.0175023558375995, 0, 0.000590451921075793,
0, 0.005788198782746, 0, 0, 0, -0.0321724473516533, 0, 0,
0, 0, 0, -0.0554496091268746, 0.00632601632015868, 0, 0,
0.012712471671865, 0.0154618678082636, 0.00825009652981887,
0, 0.021408320022951, 0, 0, 0, -0.0248702067882746, 0.0317325153325458,
0, 0, 0, 0, 0.00753125789167563, 0, 0.0283923400956219, 0,
-0.0797745496257212, 0, 0, 0, 0, 0, 0, 0, -0.129509921279577,
0, -0.0708677974264562, 0.00359481250453923, 0, 0, 0, -0.0528240419908927,
0, 0.00672062080229202, 0, 0.00997730392746862, 0, 0.00118250350101174,
0, 0, 0, 0, 0, 0, 0, -0.171727909796498, 0.00378920676304837,
0, 0, -0.262091343588638, 0, -0.172697988549104, 0.0124475488721332,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00672846783674653, 0,
0, 0, 0, 0, 0, 0, -0.114153821696557, 0, -0.178449542723685,
-0.20757077324404, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.0911374322246784,
0, 0, 0, -0.0619067392877401, 0, 0, 0, -0.073393318809089,
0, 0, 0, 0, 0, 0, 0.0152922332657052, 0, 0, 0.00854020723240683,
0, 0.00426688996561378, 0, 0, 0, 0.0107357406723146, 0, 0,
0, 0.0137203973681666, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.00239037231952961,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0196446877864873, 0,
0, 0), x_max_sc = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.121742851453408, 0, 0, 0, 0, 0, 0, 0, -0.0356713466242046,
-0.0265934358203086, 0, 0, 0, 0, 0, 0.00551129790101666,
-0.0408789266348747, 0, 0, -0.0178002119469577, -0.0753214277163844,
0, 0.0865110540660369, 0, 0, -0.0130091941440353, 0.0191190831946547,
0, 0, 0, 0, 0, -0.0123353332512219, -0.0504651862935457,
-0.0157442718690318, 0, 0, 0, 0, 0, -0.0154816429742862,
0, 0, 0.000182971871178654, 0.161727952661905, 0.0450798572727341,
0, 0, 0.0594258553278637, 0, 0, 0, 0.0212800208657947, 0,
0, 0.0402694841982105, 0, 0, 0, -0.00583075011987247, 0,
0.0819792685200424, 0, -0.0623467663225429, -0.0659407841965443,
0.00130200275445809, 0, 0, 0.0935007203630625, -0.253259321069567,
-0.154265194051883, 0, 0.0315156647079132, 0, 0, 0, 0, 0,
0, 0, -0.0376663636997422, 0, 0, 0, 0, -0.0460837848226699,
0, 0.00718467519254593, 0.0897055243101885, 0, 0.210966152010732,
0, 0, 0, 0, 0.0208138331932129, 0.0345759537296373, 0, 0,
0.0382878224014585, 0, -0.0232382473316953, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -0.026230209312896, 0, 0.00847823513267838,
-0.0176973376064043, 0, 0, 0, 0, 0.144474917780754, -0.0414082257752962,
0, 0, 0, 0.0234518108388105, 0, 0.0394733106740863, 0, 0,
0.00305700535714775, -0.0436813889216206, 0, 0, 0.0215460943440699,
0.0690076894334068, 0.0632684960064022, 0.085395714313035,
0, 0, 0.0839682070180933, 0, 0, 0, 0, 0, 0, 0, 0, -0.0500324858485464,
0, 0, 0, 0, 0.115151615856595, 0, 0, 0, 0, 0.0639452318074097,
0, -0.0798713515358508, 0, 0, 0, 0, 0, 0.0374171043834073,
0, 0, 0.0920048778024359, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0158080092179486,
0, 0, 0, 0.0317123099596297, 0, 0, 0, 0.0103109942760036,
0, -0.0156175154777943, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, -0.0227994928031389, 0, 0, -0.00118131465634652,
0, 0, 0, 0.0106789745041685, 0, 0, 0.028936645272221, 0.02208475394458,
0, -0.0399064777467966, 0.0337080683154963, 0, 0, 0, 0, 0.0273355008391142,
-0.00319943947756276, 0, 0, 0, 0.0155668053452587, 0, 0,
0, -0.018781553148025, 0, 0, 0.00552241176009076, 0, 0, 0.0347733018815195,
0, 0, 0, 0, -0.0222174978175335, 0, 0, 0, 0.0224381922951927,
0, -0.021955884746776, 0.00144003877399278, 0, 0.0234060585875118,
0, 0, -0.0609171062487514, 0, 0, 0, 0, 0.105691375825273,
0, 0, 0.029352688780721, 0.0132636458665752, 0, 0, -0.021801474376867,
0, 0.000104785231711009, 0.0200189274811031, 0, 0, 0.0145479853032333,
0, 0, 0.0251250877118424, 0, 0, 0, 0, 0.185577853228089,
0, 0, 0, 0.225817674807354, 0.0153172625518291, 0.170259904159936,
0, 0, 0.0224744421528872, 0, 0, 0, 0.0215102994605086, 0,
0.0137518942659391, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.122738181962601,
0, 0.149546970269863, 0.149842987774064, 0, 0, 0, 0, 0, 0,
0.00598607747314934, 0, 0, 0.0569500938646281, 0, 0, 0.00566674701959121,
0.0826216716122193, 0, 0, 0, 0.0506390466144339, 0.0124439511532164,
0.00209477413097196, 0, 0, 0, 0, 0, 0, 0.00134414865222856,
0, 0, 0, 0, 0.0396229926422484, 0, 0, 0.0147849164297124,
0.0197150043261554, 0, 0.0233663609262777, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0.00871203272246896, 0, 0, 0, 0.0133903524094804,
0.0235888206492377, 0, 0, 0.0117679105377624, 0, 0, 0, 0,
0.00371800860245391, 0.0212828129770899, 0, 0, 0.000144711795410012
), x_min_sc = c(-0.0433977978111986, -0.037022333909806,
0, -0.0126980521360969, 0, 0, -0.0324499365419276, 0, 0,
0, 0, 0, 0, -0.0884417917733361, 0, 0, -0.0276113615861828,
-0.0165792390720809, -0.0149611204584604, 0, 0, -0.118005034353122,
-0.0547753315935704, 0, -0.0342966317509926, -0.0385535047133658,
0, 0, -0.0242910368543919, -0.0830110891110079, -0.032168294845918,
0, -0.057039496047047, -0.114205231609559, 0, -0.101239600811682,
0, 0, -0.0626866134325487, -0.0378410653079622, -0.0166252703659279,
-0.0118737962216659, 0, 0, -0.0710993411558749, -0.0562027711583185,
-0.11787872176971, -0.0425452956904461, -0.109607663061347,
-0.0186730000328438, -0.0352736134685927, -0.0290930157114081,
-0.0206679678416502, -0.0529576279833907, -0.0844991711903495,
-0.0479039628492655, -0.0407655398288681, -0.13908507063625,
-0.117119880881184, -0.0273712658568516, 0, -0.073254432036207,
0, 0, 0, -0.105466946435514, 0, -0.019058643547029, -0.0196949303325041,
-0.0189466341848053, -0.0170100493910313, -0.0267302461187671,
-0.03039943653335, -0.0332083968592728, -0.0834318965784096,
-0.0403995110364155, -0.0457058623091914, -0.0663350750820877,
-0.0299980504868031, 0, 0, -0.0593770354779633, -0.0685015337295176,
-0.0879782166359669, 0, -0.0773578228070954, 0, 0, -0.0389967922648104,
-0.0228300498409603, 0, -0.0494271060517062, -0.0787221128167931,
-0.0559877552547482, -0.0216469540553648, -0.0200910779685323,
-0.0509148464496529, 0, -0.0585480485955023, -0.0282269437492589,
-0.0445353214530601, -0.0323521408756787, -0.0179501310658199,
-0.108110017697109, 0, -0.0053822141828333, 0, -0.0229497727179725,
-0.0421658049390137, -0.0982497510173035, -0.0406674306375489,
-0.024442283078203, -0.0350595760377154, -0.027487313045333,
-0.0446538548926882, 0, -0.0283755092106293, -0.0282772978978828,
-0.0346017501950722, 0, 0, 0, 0, -0.0347035580031419, 0,
0, -0.0446140769542635, -0.0205130342871637, 0, 0, 0, -0.0408923926334249,
-0.0361242632747472, -0.0563382042223051, -0.0405363793042339,
0, -0.0369222499333306, -0.0452672274143006, -0.0137660293349386,
-0.0549106439906691, -0.0490919647129289, 0, -0.0295820747102043,
0, -0.0325695398026911, -0.0354409925420791, -0.0517401901582949,
-0.0158630847315773, -0.00315155034453143, -0.00936203358181622,
-0.0414236232584262, -0.035927707507295, -0.0296113811423056,
-0.0095847476758757, -0.0242140894250012, -0.139346529796289,
-0.109265114374473, -0.0196756695316025, -0.033809738371823,
-0.112737799537232, 0, 0, 0, -0.0568552974627711, -0.0208266304016953,
0, 0, -0.0279193579113713, -0.0702708630688188, -0.0202933545180519,
0, -0.0557273560381436, 0, -0.142927201010256, -0.0377160453705698,
-0.0232333813266858, -0.0187431273561933, -0.0214793671013568,
-0.0557301282769586, -0.0316447146008915, -0.106294593325588,
-0.0187586667633117, -0.053661544344068, 0, 0, -0.0173502455801449,
-0.0572913978070236, -0.0295561988098118, 0, -0.0739760465906372,
-0.0322367129762376, -0.0298638267826958, -0.0326256774475542,
-0.0283373334890878, -0.0326515246816049, -0.0212277697551489,
-0.0207273838681294, -0.0378932245449321, 0, -0.0225428520910576,
0, 0, 0, 0, -0.0317190342903547, 0, 0, -0.0272127350488671,
-0.00810288309906322, 0, -0.0241453790247292, 0, -0.0154657813233795,
0, 0, -0.0573778081482544, 0, -0.0249373018340001, -0.00894103605425922,
0, -0.0723538880946444, 0, 0, 0, -0.0166145008052554, 0,
-0.0233680103706337, -0.0454740874248569, 0, 0.0011271392145662,
-0.0543630562976568, 0, 0, 0, 0, 0, -0.0616264092171402,
-0.0292927570977314, 0, 0.0139271657000048, -0.0757856936067436,
-0.0227368913539256, 0, -0.0078646350998936, 0, -0.0257960763217383,
-0.0287852826981817, -0.0343790695116755, 0, -0.0229070862379536,
0, 0, 0, -0.0587235968084139, -0.0186049084149524, -0.063398099630419,
0, 0, -0.0370154031082641, -0.0177419950294842, 0, -0.0201246554797852,
0, 0, -0.0230334785792435, -0.0450227030104788, -0.0200441287067618,
0, 0, 0, 0, 0, -0.0503704310575096, -0.0190489892283018,
0, -0.100427794158883, -0.0187341754292917, 0, -0.0688672034737189,
0, -0.0165123745256843, 0, 0, -0.0343212619295638, 0, -0.071519568795201,
-0.00701710726803736, -0.0207116940706478, 0, 0, -0.0815087747501751,
0, 0, -0.0324621870397958, 0, 0, 0, 0, 0, -0.0223787771155636,
-0.0396960656630579, -0.0370813412054863, 0, 0, -0.0821522459668853,
-5.01908489854484e-05, -0.00378867419078883, 0, -0.154335264679118,
-0.0124645023323722, -0.106839161566875, -0.00252509963485653,
-0.0403811796076733, 0, 0, 0, 0, -0.060751377941934, -0.0405620109174947,
-0.0275301972729166, -0.0188766987358542, 0, 0, -0.0364448016131874,
-0.0201592447017298, 0, 0, 0, 0, 0, 0, -0.0705557687737699,
0, -0.0468638166731219, -0.118272055524433, -0.006361707161286,
-0.0237083020213555, -0.0183141726844018, 0, 0, -0.0226780623214973,
-0.0392589170133786, 0, 0, -0.0732475883167977, 0, 0, -0.0340608716135162,
-0.0294278278070295, -0.0290532000842291, 0, 0, -0.069803336531426,
-0.0326331450390884, -0.0273240843826592, 0, -0.0201140534092084,
-0.0215368787344871, -0.0273683576068298, -0.0227751541806039,
0, -0.0283389984861146, -0.00453560560570672, 0, -0.0334463515047625,
0, -0.0445670276808785, 0, -0.0397905389564646, -0.0237122231456569,
-0.0384612974804523, 0, -0.0109039316311105, 0, -0.0167681297193736,
-0.0209909676940585, 0, 0, 0, -0.0453725575680251, 0, 0,
-0.0202480069780526, 0, 0, 0, -0.0360282914309926, -0.0257750511826921,
-0.0342116061561723, 0, -0.0276476899566191, 0, 0, 0, 0,
-0.0476147908507956, -0.0343799038266395, 0, 0, -0.0421117667514446
)), row.names = c(NA, -400L), class = c("grouped_df", "tbl_df",
"tbl", "data.frame"), .Names = c("draw", "(Intercept)", "stretch_sc",
"maxima_sc", "x_max_sc", "x_min_sc"), vars = "draw", drop = TRUE, indices = list(
0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L, 12L, 13L,
14L, 15L, 16L, 17L, 18L, 19L, 20L, 21L, 22L, 23L, 24L, 25L,
26L, 27L, 28L, 29L, 30L, 31L, 32L, 33L, 34L, 35L, 36L, 37L,
38L, 39L, 40L, 41L, 42L, 43L, 44L, 45L, 46L, 47L, 48L, 49L,
50L, 51L, 52L, 53L, 54L, 55L, 56L, 57L, 58L, 59L, 60L, 61L,
62L, 63L, 64L, 65L, 66L, 67L, 68L, 69L, 70L, 71L, 72L, 73L,
74L, 75L, 76L, 77L, 78L, 79L, 80L, 81L, 82L, 83L, 84L, 85L,
86L, 87L, 88L, 89L, 90L, 91L, 92L, 93L, 94L, 95L, 96L, 97L,
98L, 99L, 100L, 101L, 102L, 103L, 104L, 105L, 106L, 107L,
108L, 109L, 110L, 111L, 112L, 113L, 114L, 115L, 116L, 117L,
118L, 119L, 120L, 121L, 122L, 123L, 124L, 125L, 126L, 127L,
128L, 129L, 130L, 131L, 132L, 133L, 134L, 135L, 136L, 137L,
138L, 139L, 140L, 141L, 142L, 143L, 144L, 145L, 146L, 147L,
148L, 149L, 150L, 151L, 152L, 153L, 154L, 155L, 156L, 157L,
158L, 159L, 160L, 161L, 162L, 163L, 164L, 165L, 166L, 167L,
168L, 169L, 170L, 171L, 172L, 173L, 174L, 175L, 176L, 177L,
178L, 179L, 180L, 181L, 182L, 183L, 184L, 185L, 186L, 187L,
188L, 189L, 190L, 191L, 192L, 193L, 194L, 195L, 196L, 197L,
198L, 199L, 200L, 201L, 202L, 203L, 204L, 205L, 206L, 207L,
208L, 209L, 210L, 211L, 212L, 213L, 214L, 215L, 216L, 217L,
218L, 219L, 220L, 221L, 222L, 223L, 224L, 225L, 226L, 227L,
228L, 229L, 230L, 231L, 232L, 233L, 234L, 235L, 236L, 237L,
238L, 239L, 240L, 241L, 242L, 243L, 244L, 245L, 246L, 247L,
248L, 249L, 250L, 251L, 252L, 253L, 254L, 255L, 256L, 257L,
258L, 259L, 260L, 261L, 262L, 263L, 264L, 265L, 266L, 267L,
268L, 269L, 270L, 271L, 272L, 273L, 274L, 275L, 276L, 277L,
278L, 279L, 280L, 281L, 282L, 283L, 284L, 285L, 286L, 287L,
288L, 289L, 290L, 291L, 292L, 293L, 294L, 295L, 296L, 297L,
298L, 299L, 300L, 301L, 302L, 303L, 304L, 305L, 306L, 307L,
308L, 309L, 310L, 311L, 312L, 313L, 314L, 315L, 316L, 317L,
318L, 319L, 320L, 321L, 322L, 323L, 324L, 325L, 326L, 327L,
328L, 329L, 330L, 331L, 332L, 333L, 334L, 335L, 336L, 337L,
338L, 339L, 340L, 341L, 342L, 343L, 344L, 345L, 346L, 347L,
348L, 349L, 350L, 351L, 352L, 353L, 354L, 355L, 356L, 357L,
358L, 359L, 360L, 361L, 362L, 363L, 364L, 365L, 366L, 367L,
368L, 369L, 370L, 371L, 372L, 373L, 374L, 375L, 376L, 377L,
378L, 379L, 380L, 381L, 382L, 383L, 384L, 385L, 386L, 387L,
388L, 389L, 390L, 391L, 392L, 393L, 394L, 395L, 396L, 397L,
398L, 399L), group_sizes = c(1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L), biggest_group_size = 1L, labels = structure(list(
draw = 1:400), row.names = c(NA, -400L), class = "data.frame", vars = "draw", drop = TRUE, .Names = "draw")),
summ_stan_df = structure(list(shape1 = 3.6358787682407, shape2 = 2.94700878295665,
stretch = 4.68608397140926, x_min = -0.538709853860826,
x_max = 4.68994099660417, maxima = 3.17172654840587,
rep = 87L), .Names = c("shape1", "shape2", "stretch",
"x_min", "x_max", "maxima", "rep"), class = c("tbl_df", "tbl",
"data.frame"), row.names = c(NA, -1L))), .Names = c("lasso_df",
"lasso_depth", "summ_stan_df"))
run_x <-
structure(list(lasso_df = structure(list(var = c("maxima", "stretch",
"x_max", "x_min"), mean = c(0.106205923797183, 0.108839706242091,
0.0795315905677505, -0.115092097396361), prop = c(0.1875, 0.1025,
0.18, 0.01), rand_var = c(3.46288656126394, 3.46288656126394,
3.46288656126394, 3.46288656126394), mean_rand = c(-0.419643221034459,
-0.419643221034459, -0.419643221034459, -0.419643221034459),
sim_n = c(87L, 87L, 87L, 87L)), .Names = c("var", "mean",
"prop", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), lasso_depth = structure(list(
var = c("maxima", "stretch", "x_max", "x_min"), mean = c(-0.0132860118186162,
0.0133030227868278, 0.00637148969674952, -0.0249576408196899
), prop = c(0.0925, 0.4075, 0.21, 0.005), rand_var = c(3.46288656126394,
3.46288656126394, 3.46288656126394, 3.46288656126394), mean_rand = c(-0.419643221034459,
-0.419643221034459, -0.419643221034459, -0.419643221034459
), sim_n = c(87L, 87L, 87L, 87L)), .Names = c("var", "mean",
"prop", "rand_var", "mean_rand", "sim_n"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -4L)), summ_stan_df = structure(list(
shape1 = 3.6358787682407, shape2 = 2.94700878295665, stretch = 4.68608397140926,
x_min = -0.538709853860826, x_max = 4.68994099660417, maxima = 3.17172654840587,
rep = 87L), .Names = c("shape1", "shape2", "stretch", "x_min",
"x_max", "maxima", "rep"), class = c("tbl_df", "tbl", "data.frame"
), row.names = c(NA, -1L))), .Names = c("lasso_df", "lasso_depth",
"summ_stan_df"))
|
8ef4c987a69d419af73318b273353415ea412821
|
3356b120292c623e49a699dc86a762ae2c8fd15b
|
/R/refit.R
|
316cdf8e140ecd0365a91983adee788a1632cdf2
|
[] |
no_license
|
curso-r/ggilberto
|
7643bda025153e5d9ebaaac74294b8d709f76519
|
b1bb97b8344ebb5013b77b88d36f8998729f5e29
|
refs/heads/master
| 2020-12-30T15:29:19.692576
| 2017-06-16T16:32:55
| 2017-06-16T16:32:55
| 91,142,328
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 308
|
r
|
refit.R
|
#' Refit a model with a new response
#'
#' Rebuilds the fitted model's design matrix as a data frame and refits the
#' same model against a replacement response variable.
#'
#' @param model A fitted model object; currently works for \code{lm} and
#'   \code{glm} fits.
#' @param y The new response variable (same length as the fitted rows).
#' @return A model of the same class as \code{model}, refitted to \code{y}.
refit <- function(model, y) {
  design_df <- as.data.frame(stats::model.matrix(model))
  design_df[[".y"]] <- y
  stats::update(model, .y ~ ., data = design_df)
}
|
4716043bba41495921258b829c0a81fcb2745dcd
|
7316adbd7eadd5da898e1782f5789bde955ce281
|
/man/pairs.boot.Rd
|
aaafa37487a0fb7dc16cb1838af9bf7a4e2e2a85
|
[] |
no_license
|
rdpeng/simpleboot
|
916cd502f2a46c23796a0e74c5bcc4eadda1cf8e
|
675b343014a8e54b94fa131b56e7cd1f0d65cde8
|
refs/heads/master
| 2021-01-10T20:59:22.322915
| 2019-01-29T16:28:14
| 2019-01-29T16:28:14
| 121,568
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,048
|
rd
|
pairs.boot.Rd
|
\name{pairs_boot}
\alias{pairs_boot}
\title{
Two sample bootstrap.
}
\usage{
pairs_boot(x, y = NULL, FUN, R, student = FALSE, M, weights = NULL, ...)
}
\description{
\code{pairs_boot} is used to bootstrap a statistic which operates on
two samples and returns a single value. An example of such a
statistic is the correlation coefficient (i.e. \code{cor}).
Resampling is done pairwise, so \code{x} and \code{y} must have the
same length (and be ordered correctly). One can alternatively pass a
two-column matrix to \code{x}.
}
\arguments{
\item{x}{Either a vector of numbers representing the first sample or a
two column matrix containing both samples.}
\item{y}{If NULL it is assumed that \code{x} is a two-column matrix.
Otherwise, \code{y} is the second sample.}
\item{FUN}{The statistic to bootstrap. If \code{x} and \code{y} are
separate vectors then \code{FUN} should operate on separate
vectors. Similarly, if \code{x} is a matrix, then \code{FUN} should
operate on two-column matrices. \code{FUN} can be either a quoted
string or a function name.}
\item{R}{The number of bootstrap replicates.}
\item{student}{Should we do a studentized bootstrap? This requires a
double bootstrap so it might take longer.}
\item{M}{If \code{student} is set to \code{TRUE}, then \code{M} is the
number of internal bootstrap replications to do.}
\item{weights}{Resampling weights.}
\item{...}{Other (named) arguments that should be passed to \code{FUN}.}
}
\value{
An object of class \code{"simpleboot"}, which is almost identical to the
regular \code{"boot"} object. For example, the \code{boot.ci}
function can be used on this object.
}
\examples{
library(boot)
set.seed(1)
x <- rnorm(100)
y <- 2 * x + rnorm(100)
boot.cor <- pairs_boot(x, y, FUN = cor, R = 1000)
boot.ci(boot.cor)
## With weighting
set.seed(20)
w <- (100:1)^2
bw <- pairs_boot(x, y, FUN = cor, R = 5000, weights = w)
boot.ci(bw, type = c("norm", "basic", "perc"))
}
\keyword{univar}
\author{Roger D. Peng}
|
74cda106b83e7a6695463d932ae3c2f44eb4821c
|
b1a12b171097fcb0b2a6f7a10e0ab7afdf41aac1
|
/R/sharedGenerics.R
|
4cd19159495b39913509972d5f792cbb9591cc68
|
[] |
no_license
|
myndworkz/rAmCharts
|
7e1d66002cbca9ef63e1d2af6b4e49a1ac7cd3c3
|
6ea352cab2c9bc5f647447e5e7d902d9cbec0931
|
refs/heads/master
| 2021-01-14T13:06:28.947936
| 2015-07-29T12:34:13
| 2015-07-29T12:34:13
| 39,955,321
| 1
| 0
| null | 2015-07-30T14:37:43
| 2015-07-30T14:37:43
| null |
UTF-8
|
R
| false
| false
| 1,425
|
r
|
sharedGenerics.R
|
# S4 generics shared by more than one class in the package.  Each setGeneric()
# call only declares the generic; class-specific methods are defined elsewhere.

# Shared by AmGraph and DataSet
#' @exportMethod setDataProvider
setGeneric(
  name = "setDataProvider",
  def = function(.Object, dataProvider, keepNA = TRUE) {
    standardGeneric("setDataProvider")
  }
)

# Shared by AmGraph and ValueAxis
#' @exportMethod setTitle
setGeneric(
  name = "setTitle",
  def = function(.Object, title) {
    standardGeneric("setTitle")
  }
)

# Shared by AmGraph and AmChart
#' @exportMethod setType
setGeneric(
  name = "setType",
  def = function(.Object, type) {
    standardGeneric("setType")
  }
)

# Shared by AmChart and ChartScrollbar
#' @exportMethod setGraph
setGeneric(
  name = "setGraph",
  def = function(.Object, graph = NULL, ...) {
    standardGeneric("setGraph")
  }
)

# Shared by AxisBase and AmChart
#' @exportMethod addGuide
setGeneric(
  name = "addGuide",
  def = function(.Object, guide = NULL, ...) {
    standardGeneric("addGuide")
  }
)

# Shared by Title and Label
#' @exportMethod setText
setGeneric(
  name = "setText",
  def = function(.Object, text) {
    standardGeneric("setText")
  }
)

# Shared by AmChart(type = "gantt"), TrendLine and Guide
#' @exportMethod setValueAxis
setGeneric(
  name = "setValueAxis",
  def = function(.Object, valueAxis = NULL, ...) {
    standardGeneric("setValueAxis")
  }
)

# Shared by TrendLine and Guide
#' @exportMethod addValueAxis
setGeneric(
  name = "addValueAxis",
  def = function(.Object, valueAxis = NULL, ...) {
    standardGeneric("addValueAxis")
  }
)
|
a87174f6bfdbb802c85972ced2223f7bcf2d3075
|
14b4279e536da585ebe7bb6cd0660f3fe6d66d5d
|
/R/npphen-package.R
|
de59d45d62f148d8cf871fc4eb25c14bd627e3d9
|
[] |
no_license
|
cran/npphen
|
52a5f14e00f408d0332875636965fd40e500a515
|
b6ebb410a5fe1080d3487843a4692699b4f5a3cb
|
refs/heads/master
| 2022-06-14T00:01:01.440633
| 2022-06-03T20:50:02
| 2022-06-03T20:50:02
| 101,988,150
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,610
|
r
|
npphen-package.R
|
#' npphen
#' @name npphen
#' @docType package
#' @encoding UTF-8
#' @description The functions in this package estimate the expected annual phenological cycle from time series or raster stack of vegetation (greenness) indexes. The algorithm to estimate the annual phenological cycle (used by the functions in npphen) uses a bivariate kernel density estimator in the index-time space. In this space, the x-axis corresponds to days of the growing season (1-365) and the y-axis to the vegetation index values, which range's values are set using the rge argument (see each function's vignette for details). The expected value of the index for each day of the growing season (the expected phenology) is approximated by the maximum value of the kernel at that day. Anomalies are calculated as deviations from the expected values at given days. The package implements basic and high-level functions for manipulating vector data (numerical series) and raster data (satellite derived products). Processing of very large raster files is supported. For methodological details of kernel density estimation see Wand & Jones (1994).
#' @author
#' Roberto O. Chávez \email{roberto.chavez.o@pucv.cl}
#' \cr
#' Sergio A. Estay \email{sergio.estay@uach.cl}
#' \cr
#' José A. Lastra \email{jose.lastra@pucv.cl}
#' \cr
#' Carlos G. Riquelme \email{carlosriquelmemv@gmail.com}
#'
#'@references Wand, M.P. & Jones, M.C. (1994) Kernel smoothing. Crc Press.
#'
#' @seealso
#' \code{\link{Phen}}, \code{\link{PhenMap}}, \code{\link{ExtremeAnom}}, \code{\link{ExtremeAnoMap}}, \code{\link{PhenKplot}}
#'
NULL
|
c29b21405324741e517a33ac9c93ffd710e13b3c
|
e58bbbd41dccc180fe5fe106db2e7b517934d045
|
/man/geno2proteo-package.Rd
|
7fe38e9eea7b3e6e5cec78d0c15e1d94993446b8
|
[] |
no_license
|
cran/geno2proteo
|
2735a2f5c11a095d2fed61038d057aed69600c30
|
6df760f7f0884901db97efcde1ac61054a3b012f
|
refs/heads/master
| 2022-07-03T08:21:11.861686
| 2022-06-13T09:40:02
| 2022-06-13T09:40:02
| 114,025,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,909
|
rd
|
geno2proteo-package.Rd
|
\name{geno2proteo-package}
\alias{geno2proteo-package}
\alias{geno2proteo}
\docType{package}
\title{
\packageTitle{geno2proteo}
}
\description{
\packageDescription{geno2proteo}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{geno2proteo}
\packageIndices{geno2proteo}
The following is an overview of how to use the package and its most important functions.
The package needs three data files. One contains the genetic table, another
one contains the DNA sequences of a certain genome, and the third one contains
the gene annotations in the same format as the GTF file used in ENSEMBL.
The standard genetic table was provided in this package. The other two data
files can be downloaded from ENSEMBL web site. For details about those data
files and how to obtain them, see the introduction document of this package.
Of course you can create your own data files, as long as you observe the format
of the files which are specified in the introduction document as well.
The package also needs Perl installed and being available for use in order to
run some of the functions.
Once you have the three data files, you need to run the function
\code{generatingCDSaaFile} to generate another data file, which
will be used by some of the functions in this package.
Four main functions were implemented in this package so far.
The function \code{genomicLocToProteinSequence} will find the protein
sequences and DNA sequences of the coding regions within a list of genomic
loci given as input. \code{genomicLocToWholeDNASequence} will obtain the
whole DNA sequences of any genomic loci given in the input data.
\code{proteinLocsToGenomic} will find the genomic coordinates for a list of
sections in proteins as input. \code{proteinLocsToProteinSeq} will find the
the protein sequences of a list of the protein sections.
}
\author{
\packageAuthor{geno2proteo}
Maintainer: \packageMaintainer{geno2proteo}
}
|
1eea237eb9167b3faf35b77a5207b0912db14786
|
7c95033415669a0812a5c275547113eabd024db0
|
/R/rmnorm.R
|
c967a88fa7c65f004c15041d2923c6bc3bb8ebbd
|
[] |
no_license
|
cran/bifurcatingr
|
293d6a7e39e3fd5bbdb6713436f04dd4051e14bd
|
90a6596c19ed6f47c158d7587f2d12986d000287
|
refs/heads/master
| 2023-07-11T02:05:05.328474
| 2023-06-22T01:10:02
| 2023-06-22T01:10:02
| 340,015,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 805
|
r
|
rmnorm.R
|
#' Multivariate Normal Generator
#'
#' Draws multivariate normal errors used when generating the bifurcating
#' autoregressive tree. Sampling applies the symmetric (eigen-based) square
#' root of the covariance matrix to i.i.d. standard normal noise.
#'
#' @param n sample size
#' @param d dimension. Defaults to 2 for bivariate normal errors.
#' @param mu mean vector. Defaults to the zero vector.
#' @param sigma variance-covariance matrix. Defaults to the \code{d} by \code{d}
#' identity matrix, where d is the dimension.
#' @return An \code{n} by \code{d} multivariate normal matrix.
#' @export
#' @examples
#' rmnorm(10)
#' rmnorm(10, 3)
rmnorm <- function(n, d = 2, mu = rep(0, d), sigma = diag(d)) {
  # Symmetric square root of sigma: V * diag(sqrt(values)) * V'
  eig <- eigen(sigma)
  root <- eig$vectors %*% (diag(eig$values)^0.5) %*% t(eig$vectors)
  # d x n matrix of standard normal noise; one column per draw
  noise <- matrix(stats::rnorm(n * d, 0, 1), nrow = d, ncol = n)
  # Shift each column by mu (recycled along rows), then transpose to n x d
  draws <- root %*% noise + mu
  t(draws)
}
|
20b13b53a1e984448cc52a0ba99e521a60bc0ffe
|
ec37153a0e1dfab0fb070d6524cdf941cf9fbabd
|
/align.R
|
19cf9eeea9ab20092da91dbf5263f58221aac8a3
|
[] |
no_license
|
agaye/1958BC_Merge
|
ebdc3695c215644a3b555ecbf589faf0b704b499
|
d0d798a654303b92c8940c823266a3c249a17763
|
refs/heads/master
| 2016-09-05T22:44:33.632270
| 2014-09-28T15:32:08
| 2014-09-28T15:32:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,818
|
r
|
align.R
|
#
# Amadou Gaye - 13 September 2014
# This function processes two file prior to merging: it ensure the two files have only their overlapping SNPs
# included and aligns the alleles information using the alleles information of the first file.
# The arguments to the functions are the names of the input files, the path that indicates their location
# and the output of the ouptu files.
#
# Align two PLINK binary datasets prior to merging: restrict both files to
# their overlapping SNPs and update the allele coding of the second file to
# match the first.  Shells out to an external `p-link` binary (and `rm`),
# so both must be available on the PATH.
#
# Args:
#   file1, file2 - basenames of the PLINK .bed/.bim/.fam file sets
#   indir        - input directory; concatenated directly with the basenames,
#                  so it must end with a path separator
#   outdir       - output directory; same trailing-separator requirement
align <- function(file1, file2, indir, outdir){
# load the .bim files (variant tables) of the two datasets
cat(" Loading .bim files\n")
t1 <- read.table(paste0(indir,file1,".bim"), header=F, sep="\t")
t2 <- read.table(paste0(indir,file2,".bim"), header=F, sep="\t")
# merge the 2 files by rsname, chromosome and position:
# rsnames with different chromosomal or physical positions are excluded
colnames(t1) <- c("chr", "name", "gd", "pos", "allele1", "allele2")
colnames(t2) <- c("chr", "name", "gd", "pos", "allele1", "allele2")
mergedtable <- as.matrix(merge(t1,t2,by=c("name","chr","pos")))
# write the overlapping rsnames to a file used as plink's --extract argument
argfile1 <- paste0(outdir, file1, "_", file2, ".arg.txt")
write.table(mergedtable[,1], file=argfile1, quote=F, row.names=F, col.names=F)
# generate plink files from both datasets containing only the overlapping rsnames
infile1 <- paste0(indir,file1)
infile2 <- paste0(indir,file2)
outfile1 <- paste0(outdir, file1)
tempfile2 <- paste0(outdir, file2,"_temp")
cat(" Generating the subset plink files with common markers only\n")
system(paste0("p-link --silent --bfile ", infile1," --extract ", argfile1, " --make-bed --out ", outfile1))
system(paste0("p-link --silent --bfile ", infile2," --extract ", argfile1, " --make-bed --out ", tempfile2))
# build the --update-alleles argument file: rsname, file2's alleles (cols 8-9
# of the merged table), file1's alleles (cols 5-6)
argfile2 <- paste0(outdir, file2,"_arg.txt")
mm <- cbind(mergedtable[,1], mergedtable[,c(8,9)], mergedtable[,c(5,6)])
# drop exact duplicate rows, otherwise the aligning step will fail
mm <- unique(mm)
# remove remaining duplicated rsnames with differing allele coding
# (those are not caught by the 'unique' call above); keep the first occurrence
duprs <- as.character(mm[duplicated(mm[,1]),1])
if(length(duprs) > 0){
for(i in 1:length(duprs)){
xx <- which(mm[,1] %in% duprs[i])
mm <- mm[-xx[2:length(xx)],]
}
}
write.table(mm, file=argfile2, quote=F, row.names=F, col.names=F, sep=" ")
# align the second dataset's allele info to that of the first dataset
outfile2 <- paste0(outdir,file2)
cat(" Aligning the allele information\n")
system(paste0("p-link --silent --bfile ", tempfile2," --update-alleles ", argfile2, " --make-bed --out ", outfile2))
# delete the temporary file set to spare disk space
system(paste0("rm ", tempfile2, ".*"))
}
|
cead02cad3914b93807bc4869b1d47d5b4fd3277
|
efdd6cacaa1c4f75778b0f6511cfad2a0f579081
|
/man/theme_gr.Rd
|
071299ca82a68d84af1f56a1d4200fe3d14d04ee
|
[] |
no_license
|
gragusa/grthemes
|
54ac150fe385e3f7b9ffb3ed5525d37d14d83dd0
|
1ea07ccef8886bc845bb9e2c0a417dc6a7e2e4b2
|
refs/heads/master
| 2021-01-19T01:16:27.419911
| 2016-02-15T00:02:00
| 2016-02-15T00:02:00
| 24,543,241
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 417
|
rd
|
theme_gr.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{theme_gr}
\alias{theme_gr}
\title{ggplot2 grtheme}
\usage{
theme_gr(base_size = 12, base_family = "sans")
}
\arguments{
\item{base_size}{base font size}
\item{base_family}{base font family}
}
\description{
Themes set the general aspect of the plot such as the colour of the
background, gridlines, the size and colour of fonts.
}
\author{
Giuseppe Ragusa
}
|
043d6973c3ecd48cbc88785e7008315e9a4efb01
|
8c4c2a54b9cbc7be94209246ea915121fe863201
|
/plot3.R
|
694e535d01e0f79d63b6bf8d6a5955177641a989
|
[] |
no_license
|
tommaschera/ExData_Plotting1
|
8a46c8624579f37e6f0c5bc738d2edb4a26e6fb7
|
b082ccf2483613c80572d41479d41da2819d9957
|
refs/heads/master
| 2020-03-11T16:41:03.773231
| 2018-04-19T13:10:40
| 2018-04-19T13:10:40
| 130,123,816
| 0
| 0
| null | 2018-04-18T21:26:39
| 2018-04-18T21:26:38
| null |
UTF-8
|
R
| false
| false
| 988
|
r
|
plot3.R
|
library("dplyr")
library("lubridate")
# Reading data
data <- read.table("household_power_consumption.txt", header = T, sep = ";")
data$Date <- dmy(data$Date)
data$Datetime <- strptime(paste(data$Date, data$Time, sep = " "), format = "%d/%m/%Y %H:%M:%S")
# Subsetting data
lower_bound <- dmy("01/02/2007")
upper_bound <- dmy("02/02/2007")
subsetdata <- data[data$Date == lower_bound | data$Date == upper_bound,]
# Converting to numeric
for(i in c(3:9)){
subsetdata[,i] <- as.numeric(as.character(subsetdata[,i]))
}
# Plot 3
png(filename = "plot3.png")
plot(subsetdata$Datetime,
subsetdata$Sub_metering_1,
type = "l",
ylab = "Energy sub metering",
xlab = ""
)
lines(subsetdata$Datetime,
subsetdata$Sub_metering_2,
col = "red")
lines(subsetdata$Datetime,
subsetdata$Sub_metering_3,
col = "blue")
legend(x = "topright",
lwd = c(1,1,1),
col = c("black", "red", "blue"),
legend = colnames(subsetdata[,7:9]))
dev.off()
|
3d1bf2a7add2caff4d08641239fc6a21ce2bb2f0
|
2d66c0e4f8a006e32f58a031057a397b45ecb3e5
|
/man/detect_flash.Rd
|
05577c381903f2f9fa784eb8104c6181091807f6
|
[] |
no_license
|
tkatsuki/FlyceptionR
|
8537531c02f6e10082b4d1085add0bf2d31689c9
|
478a4e39cd6c59aeb9d183d3b3bb3a081727448c
|
refs/heads/master
| 2020-05-21T19:05:11.359209
| 2020-01-25T21:33:09
| 2020-01-25T21:33:09
| 65,296,814
| 0
| 1
| null | 2020-01-25T21:33:11
| 2016-08-09T13:22:06
|
R
|
UTF-8
|
R
| false
| true
| 443
|
rd
|
detect_flash.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/detect_flash.R
\name{detect_flash}
\alias{detect_flash}
\title{Detect flash}
\usage{
detect_flash(input, output, type = c("fluo", "fly", "arena"), flash_thresh,
reuse = F)
}
\arguments{
\item{input}{Input source to scan for the flash.}
\item{output}{Destination for the detection output.}
\item{type}{Type of recording; one of \code{"fluo"}, \code{"fly"}, or \code{"arena"}.}
\item{flash_thresh}{Threshold used to detect the flash.}
\item{reuse}{If \code{TRUE}, reuse previously computed results; defaults to \code{FALSE}.}
}
\description{
Detect flash
}
\examples{
detect_flash()
}
|
beae8e2900dff23738c79308a3d3fec37d6ffafa
|
a4e57b6e4bfd13cb326cdacf014c2bd57d583513
|
/Voeding aanzet per omw Fn.R
|
bba89b8d9a87ef393a073c516bab138adac34d0c
|
[] |
no_license
|
Dennitizer/CNC
|
2da37e1e5083e5e5426229166abcb083db217c6e
|
34e309bdbc7f887a86e0996aed27febeeefe9eb7
|
refs/heads/master
| 2021-06-29T20:18:40.491045
| 2020-09-28T13:23:24
| 2020-09-28T13:23:24
| 158,672,085
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
Voeding aanzet per omw Fn.R
|
# Feed per revolution (Fn): feed rate divided by spindle speed.
Vf <- 2400  # feed rate ("voedingsnelheid / aanzet")
N <- 16000  # spindle speed ("toerental")
Fn <- Vf / N
Fn  # mm per revolution
|
936fa288f2d5b752b07dfe7cde859cbb0b371f6e
|
d4d6cd1edbb3b2bb5022a2946f3a6e2b00a2b743
|
/plot1.r
|
5780e364714edf5867628fa4cb15dd757708ab63
|
[] |
no_license
|
benscoble/ExData_Plotting1
|
c9e45c7bc441fa9dc8746f22d1113d99dfb88544
|
df2055aadc57d7f0a05461d8d59596992f7be5e9
|
refs/heads/master
| 2020-12-11T06:09:06.086409
| 2014-05-11T20:37:37
| 2014-05-11T20:37:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 540
|
r
|
plot1.r
|
# Draw a histogram of Global_active_power and save it to plot1.png.
# Reads a fixed 2880-row window of the household power consumption file
# starting after `skip = 66638` lines — presumably the rows for
# 1-2 Feb 2007 (2 days x 1440 minutes); TODO confirm against the data file.
# No na.strings is set, so any "?" sentinel values in the selected window
# would make the column non-numeric; the chosen window apparently avoids them.
plot1 <- function() {
# Column names are re-applied manually because the header line is skipped.
pdata <- read.table(file = "household_power_consumption.txt", sep = ";", skip = 66638,nrows = 2880)
colnames(pdata) <- c("Date", "Time", "Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# Open the PNG device, draw the histogram, then close the device.
png(filename = "plot1.png", width = 480, height = 480, units = "px", pointsize = 12, bg = "white")
hist(pdata$Global_active_power, main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency", col="red")
dev.off()
}
|
f466ec7ab259a75e642ddc7278ec91b6a744f955
|
c7edb0b105f5920f29ccd3d783050f2511b57f3d
|
/Samplers/pSTMori.R
|
f89219ed59630106cc376e7f1af9f6acaa6c1110
|
[] |
no_license
|
rache011857/pMTM-for-VS
|
cc38938d3321ccaec74c5fb76e31ddb663f13259
|
38b493cec5431c101be26650175addfeb9b41960
|
refs/heads/master
| 2021-01-19T04:28:28.382798
| 2016-08-12T18:33:54
| 2016-08-12T18:33:54
| 63,552,539
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,091
|
r
|
pSTMori.R
|
library(plyr)

# Single-try Metropolis-Hastings sampler for Bayesian variable selection.
# Each iteration proposes either a single flip (add or remove one predictor)
# or a swap (exchange an included predictor for an excluded one) on the
# inclusion set `gamma`, and accepts via the marginal-likelihood ratio from
# logMl() plus the model-size prior from logPrior() (both defined elsewhere
# in this project, as is modelSplit()).
#
# Args:
#   X      - n x p design matrix of candidate predictors
#   Y      - response vector of length n
#   s0     - maximum allowed model size
#   g      - g-prior scale; defaults to nrow(X)
#   n.iter - total number of MCMC iterations
#   burnin - initial iterations discarded before summarising
#   prior  - model prior specification forwarded to logPrior()
#
# Returns a list with the visited models sorted by posterior probability,
# the median-probability model (MPM), the highest-probability model (HPM),
# proposal/acceptance counts, elapsed time, and model-size summaries.
pSTMori <- function(X, Y, s0, g = nrow(X), n.iter = 1e4, burnin = 2000, prior){
  tic <- Sys.time()  # FIX: tic/toc were never defined, so the original
                     # errored at return when computing time.spend
  n <- nrow(X)
  p <- ncol(X)
  x <- scale(X)      # standardise predictors
  y <- Y - mean(Y)   # centre the response
  y.norm <- sum(y^2)
  gamma.full <- 1:p
  gamma <- integer(0)          # start from the empty model
  n.acpt <- rep(0, 3)          # acceptances: remove / add / swap
  n.prop <- rep(0, 3)          # proposals:   remove / add / swap
  gamma.abs <- length(gamma)
  gamma.store <- vector('list', n.iter)
  model.size <- rep(NA, n.iter)
  log.ml.cur <- logMl(gamma = gamma, y = y, x = x, y.norm = y.norm, g = g)

  for (iter in 1:n.iter){
    move.type <- rbinom(1, 1, 0.5)
    if (move.type == 0){ ## single flip (add or remove)
      flip.ix <- sample.int(p, 1)
      if (flip.ix %in% gamma){ # remove
        n.prop[1] <- n.prop[1] + 1
        gamma.prime <- gamma[gamma != flip.ix]
        log.ml.prop <- logMl(gamma = gamma.prime, y = y, x = x, y.norm = y.norm, g = g)
        acpt.rate <- log.ml.prop - log.ml.cur + logPrior(prior = prior, move.type = -1, p = p, gamma.abs = gamma.abs)
        if (log(runif(1)) < acpt.rate){
          gamma <- gamma.prime
          n.acpt[1] <- n.acpt[1] + 1
          log.ml.cur <- log.ml.prop
          gamma.abs <- gamma.abs - 1
        }
      } else { # add (only if the size cap s0 is not exceeded)
        n.prop[2] <- n.prop[2] + 1
        if (gamma.abs <= s0 - 1){
          gamma.prime <- c(gamma, flip.ix)
          log.ml.prop <- logMl(gamma = gamma.prime, y = y, x = x, y.norm = y.norm, g = g)
          acpt.rate <- log.ml.prop - log.ml.cur + logPrior(prior = prior, move.type = 1, p = p, gamma.abs = gamma.abs)
          if (log(runif(1)) < acpt.rate){
            gamma <- sort(gamma.prime)
            n.acpt[2] <- n.acpt[2] + 1
            log.ml.cur <- log.ml.prop
            gamma.abs <- gamma.abs + 1
          }
        }
      }
    } else { # swap one included predictor for one excluded predictor
      n.prop[3] <- n.prop[3] + 1
      if (gamma.abs > 0){
        # sample(gamma, 1) would misbehave when gamma has length 1
        # (sample.int semantics), hence the explicit special case
        flip.ix.rem <- 'if'(gamma.abs == 1, gamma, sample(gamma, 1))
        flip.ix.add <- sample(gamma.full[!gamma.full %in% gamma], 1)
        gamma.prime <- c(gamma[gamma != flip.ix.rem], flip.ix.add)
        log.ml.prop <- logMl(gamma = gamma.prime, y = y, x = x, y.norm = y.norm, g = g)
        acpt.rate <- log.ml.prop - log.ml.cur  # swap keeps model size: prior ratio is 1
        if (log(runif(1)) < acpt.rate){
          gamma <- sort(gamma.prime)
          n.acpt[3] <- n.acpt[3] + 1
          log.ml.cur <- log.ml.prop
        }
      }
    }
    gamma.store[[iter]] <- gamma
    model.size[iter] <- gamma.abs
  }

  # Post-burnin summaries: inclusion frequencies per predictor, and the
  # frequency of each distinct visited model
  inclusion <- as.matrix(count(unlist(gamma.store[-(1:burnin)])))
  MPM <- inclusion[which(inclusion[,2] >= (n.iter - burnin)/2), 1]  # median-probability model
  model.count <- count(sapply(gamma.store[-(1:burnin)], paste, collapse = " "))
  # FIX: extract the frequency column as a vector ([[2]]) before order();
  # order() on a one-column data frame is not reliable
  model.sort <- model.count[order(-model.count[[2]]), ]
  post.prob <- model.sort[[2]]/(n.iter - burnin)
  model <- lapply(model.sort$x, modelSplit)
  HPM <- model[[1]]  # highest posterior-probability model (most visited)
  model.size.avg <- mean(model.size[-(1:burnin)])
  # FIX: these two assignments were swapped in the original
  # (model.size.MPM was length(HPM) and vice versa)
  model.size.MPM <- length(MPM)
  model.size.HPM <- length(HPM)
  toc <- Sys.time()
  return(list(model = model, post.prob = post.prob, time.spend = toc - tic,
              n.prop = n.prop, n.acpt = n.acpt, n.iter = n.iter, MPM = MPM,
              HPM = HPM, burnin = burnin, model.size.avg = model.size.avg,
              model.size.MPM = model.size.MPM, model.size.HPM = model.size.HPM))
}
|
024a273465f0c64f217190a28a0ac3b20e2c8c4b
|
3945388ee0fef9e4f99b2c0b4cd49e4fc58082b5
|
/StreamNetworkTools/man/net_segid.Rd
|
620994d9a94f8e6b29c28c086b2543c248171201
|
[
"MIT"
] |
permissive
|
dkopp3/StreamNetworkTools
|
7ea52d4c917bbcf02314a603a3f9b6d5c5b926b5
|
7c693f3edc975493be946d400642bd99c1d9d809
|
refs/heads/master
| 2023-06-23T09:58:49.678987
| 2023-06-09T18:44:18
| 2023-06-09T18:44:18
| 140,187,794
| 3
| 1
|
MIT
| 2021-01-22T18:59:07
| 2018-07-08T17:19:10
|
R
|
UTF-8
|
R
| false
| true
| 991
|
rd
|
net_segid.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/net_delin.r
\name{net_segid}
\alias{net_segid}
\title{Identify Network Segments (Deprecated)}
\usage{
net_segid(netdelin, nhdplus_path, vpu)
}
\arguments{
\item{netdelin}{output from \code{\link{net_delin}}}
\item{nhdplus_path}{nhdplus_path directory for downloaded NHDPlus files
(\code{\link{net_nhdplus}})}
\item{vpu}{vector processing unit}
}
\value{
modifies \code{link{net_delin}} with additional field \code{seg.id}.
}
\description{
modifies \code{\link{net_delin}} by creating an index (\code{seg.id}) for
multiple comids occuring between confluences
}
\details{
\code{seg.id} are arbitrarily assigned to network segments (i.e. there is no
up/down stream ordering to seg.id with a network)
requires /NHDPlusAttributes directory (see \code{\link{net_nhdplus}})
optional with respect to other functions within StreamNetworkTools
}
\examples{
net_segid(netdelin = b, nhdplus_path = getwd(), vpu = "01")
}
|
d6e51b663d4bf644f53142d8244a0f73239681fc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ecd/examples/lamp.qsl_fit_config.Rd.R
|
b1077f9998877daf1d439d5fd798233972483c5f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 212
|
r
|
lamp.qsl_fit_config.Rd.R
|
library(ecd)
### Name: lamp.qsl_fit_config
### Title: Read QSLD fit config
### Aliases: lamp.qsl_fit_config lamp.qsl_fit_config_xtable
### Keywords: data sample

### ** Examples

# Auto-extracted example: load the packaged QSLD fit configuration table.
# NOTE(review): assigning to `c` shadows base::c for the rest of this script.
c <- lamp.qsl_fit_config()
|
62b6ea533c367ad1285d41429700b675cbb8ce9e
|
402e0c46eb8eaedfce09f0e2056ee3e49a347213
|
/summarizeMiRNA_new.R
|
45344484779e97ac06bfcf335cfa8f124272e449
|
[] |
no_license
|
ctoste/smRNA-seq
|
f044b3e9e200eb072349c42939b4384bf6bd1b53
|
96cca47bf1d541c0741dadf85887f378aa75a1f9
|
refs/heads/master
| 2021-06-28T01:03:18.874265
| 2017-09-05T17:16:46
| 2017-09-05T17:16:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,379
|
r
|
summarizeMiRNA_new.R
|
#summarize_mature_miRNA <- function(data, miRNA_all_loc, genome, chr_length=NULL, offset=0, verbose=F) {
# counts <- numeric()
# leftover <- AlignedRead()
# leftover@alignData@varMetadata <- data@alignData@varMetadata
#
# miRNA_all_loc <- miRNA_all_loc[order(miRNA_all_loc[,1], miRNA_all_loc[,2], miRNA_all_loc[,3]),]
# read_range <- RangedData(IRanges(start=position(data), width=width(data)), space=chromosome(data))
# dum <- findOverlaps(read_range, ann_range, type="within")
# counts <- table(as.matrix(dum)[,2])
# names <- miRNA_all_loc[as.numeric(names(counts)),"name"]
# counts_new <- aggregate(as.numeric(counts), list(as.character(names)), sum)
#
# summary_count <- array(0, dim=length(levels(miRNA_all_loc$name)))
# names(summary_count) <- levels(miRNA_all_loc$name)
# ind <- match(counts_new[,1], names(summary_count))
# summary_count[ind] <- counts_new[,2]
#
## if(length(unique(as.matrix(dum)[,1])) >0) {
# leftover <- data[-unique(as.matrix(dum)[,1])]
# } else {
# leftover <- data
# }
# gc()
# return(list(summary_count, leftover))
#}#
#}#
# Count aligned reads falling entirely within annotated mature miRNA loci,
# aggregate the counts per miRNA name, and return both the per-miRNA count
# vector and the reads that did not overlap any locus ("leftover").
# Relies on Bioconductor classes/functions (IRanges, RangedData,
# findOverlaps, queryHits, subjectHits) being attached by the caller.
#
# Args:
#   data_new     - aligned reads as a ranges-compatible object (per the
#                  comments below, a GappedAlignments object rather than
#                  a ShortRead AlignedRead -- TODO confirm)
#   miRNA_all_loc - data frame of miRNA loci with columns including
#                  chr, start, end and a factor column `name`
#   genome       - unused in this function body (kept for interface
#                  compatibility -- NOTE(review): candidate for removal)
#   chr_length   - unused in this function body
#   offset       - number of bases by which each annotated locus is widened
#                  on both sides before overlap testing
#   verbose      - unused in this function body
#
# Returns: list(summary_count, leftover) where summary_count is a named
# numeric array with one entry per level of miRNA_all_loc$name.
summarize_mature_miRNA<- function(data_new, miRNA_all_loc, genome, chr_length=NULL, offset=0, verbose=F) {
counts <- numeric()
#leftover <- AlignedRead()
#leftover@alignData@varMetadata <- data_new@alignData@varMetadata
#browser()
# sort loci by chromosome, then start, then end (columns 1-3)
miRNA_all_loc <- miRNA_all_loc[order(miRNA_all_loc[,1], miRNA_all_loc[,2], miRNA_all_loc[,3]),]
#read_range <- RangedData(IRanges(start=start(data_new), width=width(data_new)), space=as.character(rname(data_new)))
#Now using GappedAligment instead of shortread
# build the annotation ranges, widened by `offset` on both sides
ann_range <- RangedData(IRanges(start=miRNA_all_loc$start - offset, end=miRNA_all_loc$end + offset), space=miRNA_all_loc$chr, name=miRNA_all_loc$name)
# a hit requires the read to lie fully within the annotated locus
dum <- findOverlaps(data_new, ann_range, type="within")
# per-locus hit counts, indexed by row number in the sorted annotation
counts <- table(subjectHits(dum))
names <- miRNA_all_loc[as.numeric(names(counts)),"name"]
# sum counts over loci sharing the same miRNA name
counts_new <- aggregate(as.numeric(counts), list(as.character(names)), sum)
# initialise a zero count for every known miRNA name, then fill in hits
summary_count <- array(0, dim=length(levels(miRNA_all_loc$name)))
names(summary_count) <- levels(miRNA_all_loc$name)
ind <- match(counts_new[,1], names(summary_count))
summary_count[ind] <- counts_new[,2]
# leftover = reads that matched no locus; returned for downstream processing
if(length(unique(queryHits(dum))) >0) {
leftover <- data_new[-unique(queryHits(dum)),]
} else {
leftover <- data_new
}
gc()
return(list(summary_count, leftover))
}
|
7463a398f67d91fb7be763dfe2e0ecac56cdf4c4
|
98e35c12223a91da629901de4e7ad15ad2936863
|
/PGLS.R
|
720d01edff9a66558a6598985b006de85437d56b
|
[] |
no_license
|
rmissagia/insectivory-Akodontini
|
826b6a4dafe740c4886e823ba8c2bdb4ba6eaff1
|
d9a966039d0298b8a6dcef1e4740332a3957230a
|
refs/heads/main
| 2023-01-14T14:45:45.016476
| 2020-11-22T20:11:14
| 2020-11-22T20:11:14
| 314,570,460
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,186
|
r
|
PGLS.R
|
#directory and libraries
library(phytools)
library(caper)
library(geiger)
library(ape)
#Allometry analysis
#read_tree
tree <- read.tree("ako_tree.nwk")
#read_data
data <- read.csv("alldata6.csv", header = T)
data
#comparative_data_file
cd <- comparative.data(data=data, phy=tree, names.col=X, vcv=TRUE, vcv.dim=3, warn.dropped = TRUE)
?comparative.data
#Table 1
#models
m1<-pgls(BF_log~logCS, cd, lambda="ML", param.CI = 0.95)
summary(m1)
plot.pgls(m1)
#check for outliers
res<- residuals(m1, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m1$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m2<-pgls(MADMinc~logCS, cd, lambda="ML", param.CI = 0.95)
summary(m2)
plot.pgls(m2)
#check for outliers
res<- residuals(m2, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m2$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m3<-pgls(MASMinc~logCS, cd, lambda="ML", param.CI = 0.95)
summary(m3)
plot.pgls(m3)
#check for outliers
res<- residuals(m3, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m3$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m4<-pgls(MATMinc~logCS, cd, lambda="ML", param.CI = 0.95)
summary(m4)
plot.pgls(m4)
#check for outliers
res<- residuals(m4, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m4$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
#Table 2
# PGLS models (caper::pgls) testing whether each response variable differs by
# Diet; the branch-length transformation lambda is estimated by maximum
# likelihood on the comparative.data object `cd` (built earlier in the script).
# NOTE(review): throughout this script the outlier check indexes with
# abs(res) > 3 (raw phylogenetic residuals) even though res1 holds the
# standardised residuals; presumably abs(res1) > 3 was intended -- confirm.
#models
m1<-pgls(BF_log~Diet, cd, lambda="ML", param.CI = 0.95)
summary(m1)
plot.pgls(m1)
#check for outliers
res<- residuals(m1, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m1$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m2<-pgls(MADMinc~Diet, cd, lambda="ML", param.CI = 0.95)
summary(m2)
plot.pgls(m2)
#check for outliers
res<- residuals(m2, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m2$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m3<-pgls(MASMinc~Diet, cd, lambda="ML", param.CI = 0.95)
summary(m3)
plot.pgls(m3)
#check for outliers
res<- residuals(m3, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m3$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m4<-pgls(MATMinc~Diet, cd, lambda="ML", param.CI = 0.95)
summary(m4)
plot.pgls(m4)
#check for outliers
res<- residuals(m4, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m4$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m5<-pgls(logCS~Diet, cd, lambda="ML", param.CI = 0.95)
summary(m5)
plot.pgls(m5)
#check for outliers
res<- residuals(m5, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m5$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
#Table 3
# Candidate PGLS models for log bite force (BF_log) as a function of the
# muscle variables (MADMinc, MASMinc, MATMinc), size (logCS) and Diet;
# the full candidate set is ranked by AICc further below.
#models
m1<-pgls(BF_log~logCS, cd, lambda="ML", param.CI = 0.95)
summary(m1)
plot.pgls(m1)
#check for outliers
res<- residuals(m1, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m1$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m2<-pgls(BF_log~MADMinc, cd, lambda="ML", param.CI = 0.95)
summary(m2)
plot.pgls(m2)
#check for outliers
res<- residuals(m2, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m2$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m3<-pgls(BF_log~MADMinc+logCS, cd, lambda="ML", param.CI = 0.95)
summary(m3)
plot.pgls(m3)
#check for outliers
res<- residuals(m3, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m3$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m4<-pgls(BF_log~MADMinc:logCS, cd, lambda="ML", param.CI = 0.95)
summary(m4)
plot.pgls(m4)
#check for outliers
res<- residuals(m4, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m4$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m5<-pgls(BF_log~MADMinc*logCS, cd, lambda="ML", param.CI = 0.95)
summary(m5)
plot.pgls(m5)
#check for outliers
res<- residuals(m5, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m5$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m6<-pgls(BF_log~MASMinc, cd, lambda="ML", param.CI = 0.95)
summary(m6)
plot.pgls(m6)
#check for outliers
res<- residuals(m6, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m6$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m7<-pgls(BF_log~MASMinc+logCS, cd, lambda="ML", param.CI = 0.95)
summary(m7)
plot.pgls(m7)
#check for outliers
res<- residuals(m7, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m7$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m8<-pgls(BF_log~MASMinc:logCS, cd, lambda="ML", param.CI = 0.95)
summary(m8)
plot.pgls(m8)
#check for outliers
res<- residuals(m8, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m8$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m9<-pgls(BF_log~MASMinc*logCS, cd, lambda="ML", param.CI = 0.95)
summary(m9)
plot.pgls(m9)
#check for outliers
res<- residuals(m9, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m9$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m10<-pgls(BF_log~MATMinc, cd, lambda="ML", param.CI = 0.95)
summary(m10)
plot.pgls(m10)
#check for outliers
res<- residuals(m10, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m10$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m11<-pgls(BF_log~MATMinc+logCS, cd, lambda="ML", param.CI = 0.95)
summary(m11)
plot.pgls(m11)
#check for outliers
res<- residuals(m11, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m11$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m12<-pgls(BF_log~MATMinc:logCS, cd, lambda="ML", param.CI = 0.95)
summary(m12)
plot.pgls(m12)
#check for outliers
# FIX: was residuals(m11, ...), a copy-paste slip that re-checked the
# previous model's residuals instead of m12's.
res<- residuals(m12, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m12$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m13<-pgls(BF_log~MATMinc*logCS, cd, lambda="ML", param.CI = 0.95)
summary(m13)
plot.pgls(m13)
#check for outliers
res<- residuals(m13, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m13$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m14<-pgls(BF_log~MATMinc+MADMinc+MASMinc, cd, lambda="ML", param.CI = 0.95)
summary(m14)
plot.pgls(m14)
#check for outliers
res<- residuals(m14, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m14$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m15<-pgls(BF_log~MATMinc:MADMinc:MASMinc, cd, lambda="ML", param.CI = 0.95)
summary(m15)
plot.pgls(m15)
#check for outliers
res<- residuals(m15, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m15$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m16<-pgls(BF_log~MATMinc*MADMinc*MASMinc, cd, lambda="ML", param.CI = 0.95)
summary(m16)
plot.pgls(m16)
#check for outliers
res<- residuals(m16, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m16$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m17<-pgls(BF_log~MATMinc+MADMinc+MASMinc+logCS, cd, lambda="ML", param.CI = 0.95)
summary(m17)
plot.pgls(m17)
#check for outliers
res<- residuals(m17, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m17$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
# NOTE(review): m18/m19 list MASMinc twice (MADMinc:MASMinc:MASMinc:logCS);
# MATMinc:MADMinc:MASMinc:logCS (cf. m17) may have been intended -- confirm.
m18<-pgls(BF_log~MADMinc:MASMinc:MASMinc:logCS, cd, lambda="ML", param.CI = 0.95)
summary(m18)
plot.pgls(m18)
#check for outliers
res<- residuals(m18, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m18$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m19<-pgls(BF_log~MADMinc*MASMinc*MASMinc*logCS, cd, lambda="ML", param.CI = 0.95)
summary(m19)
plot.pgls(m19)
#check for outliers
res<- residuals(m19, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m19$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m20<- pgls(BF_log~Diet, cd, lambda="ML", param.CI = 0.95)
summary(m20)
plot.pgls(m20)
#check for outliers
res<- residuals(m20, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m20$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m21<- pgls(BF_log~logCS+Diet, cd, lambda="ML", param.CI = 0.95)
summary(m21)
plot.pgls(m21)
#check for outliers
res<- residuals(m21, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m21$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m22<- pgls(BF_log~logCS:Diet, cd, lambda="ML", param.CI = 0.95)
summary(m22)
plot.pgls(m22)
#check for outliers
res<- residuals(m22, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m22$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
m23<- pgls(BF_log~logCS*Diet, cd, lambda="ML", param.CI = 0.95)
summary(m23)
plot.pgls(m23)
#check for outliers
res<- residuals(m23, phylo = TRUE)
res1<- res/sqrt(var(res))[1] #standardises residuals by sqrt of their variance
rownames(res1)<-rownames(m23$residuals) #matches the residuals up with the species names
rownames(res1)[(abs(res)>3)]#gives the names of the outliers
# Intercept-only null model (no outlier check needed)
m24<- pgls(BF_log~1, cd, lambda="ML", param.CI = 0.95)
summary(m24)
plot.pgls(m24)
# Rank all 24 candidate models by small-sample AIC (AICc) and Akaike weights
AICc_values<-c(m1$aicc, m2$aicc, m3$aicc, m4$aicc, m5$aicc, m6$aicc, m7$aicc, m8$aicc, m9$aicc, m10$aicc, m11$aicc, m12$aicc, m13$aicc, m14$aicc, m15$aicc, m16$aicc, m17$aicc, m18$aicc, m19$aicc, m20$aicc, m21$aicc, m22$aicc, m23$aicc, m24$aicc)
aicw(AICc_values)->AICc
models<-c("m1", "m2", "m3", "m4", "m5", "m6", "m7", "m8","m9", "m10", "m11", "m12","m13","m14", "m15", "m16", "m17","m18","m19", "m20", "m21", "m22", "m23", "m24")
# NOTE(review): `summary` here masks base::summary for the rest of the session
summary<-data.frame(models, AICc)
# Sort the model table by delta-AICc (best-supported model first)
summary[order(summary$delta),]->summary
summary
#R code for PGLS regressions on procrustes coordinates (adapted from Navalon et al. 2018)
library(geomorph)
#Read Procrustes coordinates and rebuild the landmark array (17 landmarks, 2D)
ako_coords <- read.csv("Proc.coords.csv", sep=",",row.names=1)
ako_coords1 <-(arrayspecs(ako_coords[,1:ncol(ako_coords)], 17, 2))
ako_coords2 <- two.d.array(ako_coords1)
#Compute the mean (consensus) shape and write it out for later use
ako_mean <- mshape(ako_coords2)
v.ako_mean <- t(as.vector(ako_mean))
write.csv(v.ako_mean, "ako_mean.csv")
#Read logCS data
log.CS <- read.csv("logCS.csv", sep=",",row.names=1)
log.CS1 <- as.matrix(log.CS)
#Read diet group data
diet.data <- read.csv("UBF_groups.csv", sep=",",row.names=1)
diet.data1 <- as.matrix(diet.data)
diet.data1 <- as.factor(diet.data1)
log.CS2 <- as.numeric(log.CS1)
#Set specific geomorph environment (assumes `tree` was loaded earlier in the script)
ako_data <- geomorph.data.frame(coords = ako_coords2, diet = diet.data1, CS = log.CS2, phy = tree)
#PGLS regressions of size and diet on procrustes data
PGLS.coords_CS <- procD.pgls(coords ~ CS, data = ako_data, phy = phy, iter = 999, print.progress = TRUE, SS.type = "II")
summary(PGLS.coords_CS)
# FIX: the diet model previously overwrote PGLS.coords_CS under the same
# (misleading) name; keep each fit under its own object.
PGLS.coords_diet <- procD.pgls(coords ~ diet, data = ako_data, phy = phy, iter = 999, print.progress = TRUE, SS.type = "II")
summary(PGLS.coords_diet)
|
b2cefd2c240f2fafdb35d421631e7fa894d7662e
|
524c60ff871cb5ad3e1da91b9d6bbf63e5936dca
|
/man/single_ligand_activity_score_regression.Rd
|
09b2bd53efdba134c5a7fc1644df6cace1df0bad
|
[] |
no_license
|
saeyslab/nichenetr
|
855eae9667ea33563b4fc4eb79e601ab3bc96100
|
0e14cbe118f96160fd26fc7b9d947c6ee55b1158
|
refs/heads/master
| 2023-08-16T18:12:08.413347
| 2023-08-10T11:31:07
| 2023-08-10T11:31:07
| 120,286,519
| 380
| 139
| null | 2023-09-14T14:10:30
| 2018-02-05T09:58:45
|
R
|
UTF-8
|
R
| false
| true
| 2,087
|
rd
|
single_ligand_activity_score_regression.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/application_prediction.R
\name{single_ligand_activity_score_regression}
\alias{single_ligand_activity_score_regression}
\title{Perform a correlation and regression analysis between cells' ligand activities and property scores of interest}
\usage{
single_ligand_activity_score_regression(ligand_activities, scores_tbl)
}
\arguments{
\item{ligand_activities}{Output from the function `normalize_single_cell_ligand_activities`.}
\item{scores_tbl}{a tibble containing scores for every cell (columns: $cell and $score). The score should correspond to the property of interest}
}
\value{
A tibble giving for every ligand, the correlation/regression coefficients giving information about the relation between its activity and the property of interest.
}
\description{
\code{single_ligand_activity_score_regression} Performs a correlation and regression analysis between cells' ligand activities and property scores of interest.
}
\examples{
\dontrun{
weighted_networks = construct_weighted_networks(lr_network, sig_network, gr_network,source_weights_df)
ligands = list("TNF","BMP2","IL4")
ligand_target_matrix = construct_ligand_target_matrix(weighted_networks, ligands, ltf_cutoff = 0, algorithm = "PPR", damping_factor = 0.5, secondary_targets = FALSE)
potential_ligands = c("TNF","BMP2","IL4")
genes = c("SOCS2","SOCS3","IRF1","ICAM1","ID1","ID2","ID3")
cell_ids = c("cell1","cell2")
expression_scaled = matrix(rnorm(length(genes)*2, sd = 0.5, mean = 0.5), nrow = 2)
rownames(expression_scaled) = cell_ids
colnames(expression_scaled) = genes
ligand_activities = predict_single_cell_ligand_activities(cell_ids = cell_ids, expression_scaled = expression_scaled, ligand_target_matrix = ligand_target_matrix, potential_ligands = potential_ligands)
normalized_ligand_activities = normalize_single_cell_ligand_activities(ligand_activities)
cell_scores_tbl = tibble(cell = cell_ids, score = c(1,4))
regression_analysis_output = single_ligand_activity_score_regression(normalized_ligand_activities,cell_scores_tbl)
}
}
|
4208ade36178ef32612307821f06e8cebf6f7f8d
|
17f00c96c6ba3e95bff273d4cf34c9f234ee302a
|
/anRpackage/R/linmod.R
|
e4e249346fa29eae14c0002d9a140e299aa601c7
|
[] |
no_license
|
saafdk/Rpackage
|
a3aa3135121c75c4f0e53d5ee17b17f7a1c3c0be
|
556ae37212aa38a2bfe8be78b1e614d8ac6301bd
|
refs/heads/master
| 2021-04-13T18:59:13.888065
| 2020-03-22T19:42:15
| 2020-03-22T19:42:15
| 249,180,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 47
|
r
|
linmod.R
|
# S3 generic for "linmod" (linear model); dispatches on the class of `x`.
# Concrete behaviour is provided by class-specific methods (e.g. a
# linmod.default defined elsewhere in the package).
linmod <-
function(x, ...) UseMethod("linmod")
|
3d4d3f306ee30e63e53bc87b090cd7b2dc25c4fc
|
0e78d8bd40a089158d6e5c71a1cee60288804907
|
/R/generalFunctions.R
|
f07903d894a4d26cf921b74cf1ecc82f58f24122
|
[] |
no_license
|
GranderLab/acidAdaptedRNAseq
|
d16b54a57620097adef2901cf0a85355fc2d0d82
|
3d6ec07a1a71df7cba85e3f68205e955f63a1683
|
refs/heads/master
| 2021-03-27T16:31:23.290663
| 2018-10-25T15:39:59
| 2018-10-25T15:39:59
| 93,744,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,104
|
r
|
generalFunctions.R
|
#' namedListToTibble
#'
#' Converts a named list to a long data frame with one row per element;
#' inner element names, when present, are kept in an `inner.names` column.
#'
#' @name namedListToTibble
#' @rdname namedListToTibble
#' @author Jason T. Serviss
#' @param l List. The list to be converted.
#' @keywords namedListToTibble
#' @examples
#'
#' l <- list(a=LETTERS[1:10], b=letters[1:5])
#' namedListToTibble(l)
#'
#' @export
#' @importFrom tibble tibble
namedListToTibble <- function(l) {
  if (length(names(l)) != length(l)) {
    stop("The list you submitted might not be named.")
  }
  flat <- unlist(l)
  # Repeat each outer name once per element of the corresponding list entry
  outer.names <- rep(names(l), lengths(l))
  if (!is.null(names(l[[1]]))) {
    # unlist() prefixes inner names with "<outer>."; strip everything up to
    # and including the last dot to recover the inner name
    inner.names <- gsub(".*\\.(.*)$", "\\1", names(flat))
    tibble(
      names = outer.names,
      inner.names = inner.names,
      variables = unname(flat)
    )
  } else {
    tibble(
      names = outer.names,
      variables = unname(flat)
    )
  }
}
#' getGO.db
#'
#' Get annotation from \link[GO.db]{GO.db}.
#'
#' @name getGO.db
#' @rdname getGO.db
#' @author Jason T. Serviss
#' @keywords getGO.db
#' @examples
#' getGO.db()
#'
#' @export
#' @importFrom AnnotationDbi keys
#' @import GO.db
getGO.db <- function() {
# Fetch TERM and ONTOLOGY for every GOID in the GO.db annotation database.
# AnnotationDbi::select is fully qualified to avoid masking (e.g. by dplyr::select).
AnnotationDbi::select(GO.db, keys(GO.db, "GOID"), c("TERM", "ONTOLOGY"))
}
#' col64
#'
#' A large qualitative palette of visually distinct hex colours for
#' categorical plotting (returned as a character vector of hex codes).
#'
#' @name col64
#' @rdname col64
#' @author Jason T. Serviss
#' @keywords col64
#' @examples
#' col64()
#'
#' @export
col64 <- function() {
c(
"#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6",
"#A30059", "#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43",
"#8FB0FF", "#997D87", "#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601",
"#3B5DFF", "#4A3B53", "#FF2F80", "#61615A", "#BA0900", "#6B7900", "#00C2A0",
"#FFAA92", "#FF90C9", "#B903AA", "#D16100", "#DDEFFF", "#000035", "#7B4F4B",
"#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F", "#372101", "#FFB500",
"#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09", "#00489C",
"#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66",
"#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED",
"#886F4C", "#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9",
"#FF913F", "#938A81", "#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700",
"#04F757", "#C8A1A1", "#1E6E00", "#7900D7", "#A77500", "#6367A9", "#A05837",
"#6B002C", "#772600", "#D790FF", "#9B9700", "#549E79", "#FFF69F", "#201625",
"#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329", "#5B4534", "#FDE8DC",
"#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C"
)
}
#' Get ggplot legend.
#'
#' Extracts the legend grob ("guide-box") from a ggplot so it can be drawn or
#' arranged independently of the plot panel.
#'
#' @name g_legend
#' @rdname g_legend
#' @author Jason T. Serviss
#' @keywords g_legend
#' @param a.ggplot A ggplot.
#' @examples
#' #no example yet
#'
#' @export
#' @importFrom ggplot2 ggplot_gtable ggplot_build
g_legend <- function(a.ggplot){
# Build the full gtable for the plot, then pull out the grob named "guide-box"
tmp <- ggplot_gtable(ggplot_build(a.ggplot))
leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
legend <- tmp$grobs[[leg]]
return(legend)
}
|
963acc0f464d988178561bdb43849191e994c7a6
|
1e843b4addebc76b4335a9111160193f1a902f4c
|
/stats/R/steepness.R
|
79bcc0d0e4705f77befad9185ea513feb00746d8
|
[] |
no_license
|
seannyD/ILMTurk_public
|
9bb4d4054fa34c334e8bd733c9f0e6795e138189
|
1288318a987146fe684cf4832b2156a48a3e2667
|
refs/heads/master
| 2021-04-28T06:15:17.860219
| 2018-02-20T12:47:55
| 2018-02-20T12:47:55
| 122,197,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,442
|
r
|
steepness.R
|
# Mixed-effects analysis of mean signal steepness as a function of trajectory
# curvature and generation, with random intercepts for chain and meaning.
library(lme4)
library(infotheo)
library(lattice)
# NOTE(review): hard-coded setwd() makes the script machine-specific;
# consider project-relative paths instead.
setwd("/Library/WebServer/Documents/ILMTurk/stats")
source("R/graphCode.R")
# Load signal data for the three experiment runs and tag each with its run
signals_Nij = read.csv("Data/Signals_Nij.csv",stringsAsFactors=F)
signals_MT = read.csv("Data/Signals_MT.csv",stringsAsFactors=F)
signals_SONA = read.csv("Data/Signals_SONA.csv",stringsAsFactors=F)
signals_MT$run = "MT"
signals_Nij$run = "Nij"
signals_SONA$run = "SONA"
library(gplots)
dx = signals_MT
# Plot group means of steepness by curvature for the MT run
pdf("graphs/summary/Steepness_MT.pdf")
plotmeans(steepness.sig.mean~curvature, data=dx, ylab="Mean signal steepness", xlab='Curvature')
dev.off()
#dx = signals_MT[signals_MT$meaning==2,]
#plotmeans(steepness.sig.mean~curvature, data=dx)
# Restrict the models to signals with positive curvature
dx = signals_MT[signals_MT$curvature>0,]
# Null model
m0= lmer(steepness.sig.mean~1 + (1|chain2) + (1|meaning), data = dx)
# add curvature
m1= lmer(steepness.sig.mean~curvature + (1|chain2) + (1|meaning), data = dx)
# add generation
m2= lmer(steepness.sig.mean~curvature+gen + (1|chain2) + (1|meaning), data = dx)
# add interaction between curvature and generation
m3= lmer(steepness.sig.mean~curvature*gen + (1|chain2) + (1|meaning), data = dx)
# Quadratic term of curvature
m4= lmer(steepness.sig.mean~ I(curvature^2) + curvature*gen + (1|chain2) + (1|meaning), data = dx)
# Interaction between quadratic term of curvature and generation
m5= lmer(steepness.sig.mean~ I(curvature^2)*gen + curvature*gen + (1|chain2) + (1|meaning), data = dx)
# Likelihood-ratio comparison of the nested model sequence
anova(m0,m1,m2,m3,m4,m5)
|
1ccaf523644421fa4bbedfc8613ce69f513dbcac
|
c85ab5fc908a443eac6e96f6818857842346a6e7
|
/code/sandbox/play_glmnet_relaxo.R
|
e6e97fb65fa1387553f87ed01ea0663798e45a97
|
[] |
no_license
|
erflynn/sl_label
|
34d4df22f651a44317a9fb987970dfed6e1731a7
|
6e81605f4c336e0c1baa07abc168c72c9a9eaceb
|
refs/heads/master
| 2023-04-01T01:57:48.862307
| 2021-03-30T18:27:36
| 2021-03-30T18:27:36
| 244,698,848
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,251
|
r
|
play_glmnet_relaxo.R
|
# Sandbox script: exploring glmnet's relaxed-lasso options (relax= in
# cv.glmnet, and relax.glmnet) on the built-in BinomialExample data.
# Exploratory only; results are plotted and inspected interactively.
# NOTE(review): require() returns FALSE instead of erroring when the package
# is missing; library(glmnet) would fail fast here.
require('glmnet')
# try BinomialExample
data(BinomialExample)
res <- cv.glmnet(x, y, family="binomial", trace=TRUE,
relax=TRUE, path=TRUE)
plot(res)
# this still looks like a mess except gamma=1... hmm
res2 <-cv.glmnet(x, y, family="binomial", trace=TRUE)
my_lambda <- res2$lambda.1se
fit <- glmnet(x,y, family="binomial", lambda=my_lambda)
relaxed_res2 <- relax.glmnet(fit, x=x, y=y,
family="binomial", path=TRUE)
# Compare relaxed fits at three gamma mixing values side by side
par(mfrow=c(1,3))
plot(relaxed_res2, gamma=1)# default
plot(relaxed_res2, gamma=0.5)
plot(relaxed_res2, gamma=0)
relaxed_res2 <- relax.glmnet(fit, x=x, y=y, family="binomial", path=TRUE)
relaxed_res_gamma0 <- relax.glmnet(fit, x=x, y=y, gamma=0, family="binomial", path=TRUE)
relaxed_res_gamma0.5 <- relax.glmnet(fit, x=x, y=y, gamma=0.5, family="binomial", path=TRUE)
relaxed_res_gamma1 <- relax.glmnet(fit, x=x, y=y, gamma=1, family="binomial", path=TRUE)
# why aren't these the same? gamma = 1 should be lambda
relaxed_res_gamma0 <- relax.glmnet(fit, x=x, y=y, gamma=c(0, 0.25, 0.5, 0.75, 1), family="binomial", path=TRUE)
relaxed_res_gamma0.5 <- relax.glmnet(fit, x=x, y=y, gamma=0.5, family="binomial", path=TRUE)
pred <- predict(res, newx=x)
assess.glmnet(res, newx=x, newy=y) # results using lambda.1se
# grab some sex labeling data and look at it
# if (relax){
#  fit <- glmnet(X_train2[,rand_genes],Y_train2, family="binomial", lambda=my.lambda, alpha=my.alpha)
#  relaxo <- relax.glmnet(fit, x=X_train2[,rand_genes], y=Y_train2, gamma=0, family="binomial", path=TRUE, trace=TRUE)
#  relax0.5 <- relax.glmnet(fit, x=X_train2[,rand_genes], y=Y_train2, gamma=0.5, family="binomial", path=TRUE, trace=TRUE)
#  relax1 <- relax.glmnet(fit, x=X_train2[,rand_genes], y=Y_train2, gamma=1, family="binomial", path=TRUE, trace=TRUE)
#
#  mat_coef <- coef(relaxo) %>% as.matrix()
#  nonzero_coef <- mat_coef[mat_coef[,1]!=0,]
#  coef_df <- data.frame(cbind("gene"=names(nonzero_coef), coef=nonzero_coef))
#
#  print(nrow(coef_df))
#  mat_coef <- coef(relaxo) %>% as.matrix()
#  nonzero_coef2 <- data.frame(mat_coef[mat_coef[,1]!=0,])
#  print(nrow(nonzero_coef2))
#
# }
# do we get better performance if we relax our results??
# IDK.
|
0292fecccb0b080fe6914ec26c52e8a74b1d2b7a
|
b34ce46b2047f1eaadb23497c0efa30947552d5a
|
/content/courses/estat_2/aulas/reg.R
|
5fe28f9873b8dcf6d2da7b58910458ee02e71fa1
|
[] |
no_license
|
rbstern/site
|
b0a6746f5c15fb18cc3953e90c1b2e7cc8a0f740
|
0834620b3e21935625fc56ca2283b2ce7d26175b
|
refs/heads/master
| 2023-04-28T00:12:50.423294
| 2023-04-24T01:24:35
| 2023-04-24T01:24:35
| 118,627,534
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 881
|
r
|
reg.R
|
library(glmnet)
# Simulation: n observations, d predictors, only the first two with
# non-zero true coefficients (sparse signal)
n = 1000
d = 900
beta = rep(0, d)
beta[1] = 6
beta[2] = 3
X <- rnorm(n*d)
dim(X) = c(n, d)
Y = X %*% beta + rnorm(n)
meus_dados = data.frame(Y = Y, X)
# Fit the regression by ordinary least squares (ill-posed: d close to n)
aux = lm(Y ~ ., data = meus_dados)
summary(aux)
# Fit the regression by lasso, choosing lambda by cross-validation
aux = cv.glmnet(X, Y)
aux$lambda.min
aux_2 = glmnet(X, Y, lambda = aux$lambda.min)
aux_2$beta
## Polynomial regression: fit a degree-16 polynomial to a quadratic signal
d = 16
n = 30
x <- rnorm(n)
y = 2*x^2 + x + 3 + rnorm(n)
# Build the design matrix of powers x, x^2, ..., x^d
x_star = NULL
for(j in 1:d) x_star = cbind(x_star, x^j)
meus_dados = data.frame(y = y, x_star)
aux = lm(y~., data = meus_dados)
# beta holds intercept plus d polynomial coefficients (length d + 1)
beta = aux$coefficients
# True generating function, for comparison against the fit
g = function(x) 2*x^2 + x + 3
# Evaluate the fitted polynomial at each element of x, using the global
# coefficient vector `beta` (intercept first, then increasing powers).
g_fit = function(x)
{
  # FIX: the powers must match length(beta). The original hard-coded 0:20
  # (21 powers) against 17 coefficients, so beta was recycled with a warning
  # and the evaluated values were wrong.
  pows = 0:(length(beta) - 1)
  resp = rep(NA_real_, length(x))
  for (ii in seq_along(x)) {
    resp[ii] = sum(beta * (x[ii]^pows))
  }
  resp
}
# Overlay the true quadratic and the (overfit) degree-16 polynomial fit
curve(g, from = -5, to = 5)
curve(g_fit, from = -5, to = 5, add = TRUE)
|
57f9db5fed79e48ff89b089491238b565fa40461
|
64efc9a1e80e4274c1f73f09711daf710ebc8c13
|
/run_analysis.R
|
3f8a113eb3b87aff9f9e58d048dbcad8c4f828ec
|
[] |
no_license
|
dksingh29/GettingandCleaningDataweek4project
|
ffdeed1132711e39688084bbb77f5e2e2ad41225
|
eea93a3459e2c49ed9b70dc418004c1bb6f4a617
|
refs/heads/master
| 2021-07-17T00:18:45.244369
| 2017-10-24T19:15:14
| 2017-10-24T19:15:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,088
|
r
|
run_analysis.R
|
library(dplyr)
#set working directory and download the Smartphone data from the provided url
setwd("insert your wd location")
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, destfile = "smartphone_activity_data.zip")
#unzip the file
unzip("smartphone_activity_data.zip", exdir="smartphone_activity_data")
#pull together the separate test data text files into one data frame called test_data
# (columns: subject id, activity code, then the 561 feature measurements)
test_data_x <- read.table("smartphone_activity_data/UCI HAR Dataset/test/X_test.txt")
test_data_y <- read.table("smartphone_activity_data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("smartphone_activity_data/UCI HAR Dataset/test/subject_test.txt")
test_data <- cbind(subject_test, test_data_y, test_data_x)
colnames(test_data)[1] <- "subject"
colnames(test_data)[2] <- "activity"
#pull together the separate train data text files into one data frame called train_data
train_data_x <- read.table("smartphone_activity_data/UCI HAR Dataset/train/X_train.txt")
train_data_y <- read.table("smartphone_activity_data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("smartphone_activity_data/UCI HAR Dataset/train/subject_train.txt")
train_data <- cbind(subject_train, train_data_y, train_data_x)
colnames(train_data)[1] <- "subject"
colnames(train_data)[2] <- "activity"
#merge the test data and train data into one data frame
all_data <- rbind(test_data, train_data)
#pull in the features text file, this is the column names for columns 3:563 in the
#test_data, train_data and all_data data.frames
features <- read.table("smartphone_activity_data/UCI HAR Dataset/features.txt")
features <- as.vector(features[,2])
#change col names for all_data
for(i in 1:length(features)){
colnames(all_data)[i+2] <- features[i]
}
#select only the columns in all_data that are mean or standard deviations measurements
mean_std_data <- all_data[,grep("[Mm]ean|[Ss]td|activity|subject", colnames(all_data))]
#pull in the activity labels
# FIX: the original line also assigned into `features`
# (activity_labels <- features <- read.table(...)), silently clobbering the
# feature-name vector built above; assign only to activity_labels.
activity_labels <- read.table("smartphone_activity_data/UCI HAR Dataset/activity_labels.txt")
colnames(activity_labels)[1] <- "activity"
colnames(activity_labels)[2] <- "description"
#change the values in the activity column to match the description in the
#activity_labels data frame
mean_std_data$activity <- activity_labels[match(mean_std_data$activity,activity_labels$activity),2]
#Add descriptive variable names
# NOTE(review): sub() replaces only the FIRST occurrence per name; if a column
# name contains the pattern more than once (e.g. two "-"), later occurrences
# remain -- gsub() may have been intended. Confirm against the desired codebook.
colnames(mean_std_data) <- tolower(colnames(mean_std_data))
colnames(mean_std_data) <- sub("()-", "", colnames(mean_std_data), fixed=TRUE)
colnames(mean_std_data) <- sub("-", "", colnames(mean_std_data), fixed=TRUE)
colnames(mean_std_data) <- sub("()", "", colnames(mean_std_data), fixed=TRUE)
colnames(mean_std_data) <- sub(",", "", colnames(mean_std_data), fixed=TRUE)
#summarise the data with the average of each variable for each activity and each subject
summary_data <- mean_std_data %>%
group_by(subject, activity) %>%
summarise_all(funs(mean))
write.table(summary_data, file="Run_Analysis_Tidy_Data.txt", row.names=FALSE)
|
417a1d244c16552d67612be082ab68776a66a1f3
|
3cecba2d09c39746890c635c74ad33edcad24e84
|
/code/ode_data.R
|
605d7d212505bf7eca206fc6714de5928ae54a5e
|
[] |
no_license
|
Suvixx/Novel-recurrent-neural-network-for-modelling-biological-networksOscillatory-p53-interaction-dynamics
|
0fe0107c6829c5896748229234c595e9f3c924e3
|
63ad2d4abf1eb52403b3de8c8f8be7c51f3b2ca3
|
refs/heads/master
| 2022-11-24T09:55:04.621771
| 2020-08-06T02:59:25
| 2020-08-06T02:59:25
| 285,154,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 855
|
r
|
ode_data.R
|
library(deSolve)
# Integration grid: one output point per time unit from 0 to 1000
time <- seq(from=0, to=1000, by = 1)
# Rate parameters of the three-variable system
parameters <- c(gma = 2, alxy = 3.7, bitx = 1.5 , alzero = 1.1 , ay = 0.9)
# Initial conditions
state <- c(x = 1, y0 = 0, y1 = 0.54)
# Right-hand side of the ODE system in the form deSolve::ode expects:
# returns a list whose first element is the vector of derivatives
odernn <- function(t, state, parameters){
with(as.list(c(state, parameters)), {
dx = gma * x - alxy * x * y1
dy0 = bitx * x - alzero * y0
dy1 = alzero * y0 - ay * y1
return(list(c(dx, dy0, dy1)))
})
}
# Solve the system and convert the output matrix to a data frame
out <- ode(y = state, times = time, func = odernn, parms = parameters)
outdf <- as.data.frame(out)
library(reshape2)
# Reshape to long format (one row per time/variable pair) for plotting
outm <- melt(outdf, id.vars='time')
library(ggplot2)
p <- ggplot(outm, aes(time, value, color = variable)) +
#geom_point()+
geom_line(size = 0.5, linetype = "solid")
print(p)
write.csv(outdf, "D:/extended_ara/iiit-D/ode-data.csv", sep = ",")
write.csv(outm, "D:/extended_ara/iiit-D/ode-meltdata.csv", sep = ",")
|
0a1a07e45d8c68eaf7463ffef15f1b1c34c461c0
|
1ba5b7c213871eb2b9aa5d194fa403f87d728193
|
/R/getLabel.R
|
34efacf5206f660e1c569ab9afdb2b953f67cf32
|
[
"MIT"
] |
permissive
|
noelnamai/RNeo4j
|
6a0c42ffe5a6f3f9ffc19d15ad25453696ea3760
|
4af57a9b00593109155e9f2c55108fe8b94c8f0b
|
refs/heads/master
| 2020-04-01T23:02:26.894316
| 2015-04-17T18:03:26
| 2015-04-17T18:03:26
| 34,324,382
| 1
| 0
| null | 2015-04-21T12:02:52
| 2015-04-21T12:02:51
|
R
|
UTF-8
|
R
| false
| false
| 764
|
r
|
getLabel.R
|
# Generic: fetch the labels associated with a graph or node object.
getLabel = function(object) UseMethod("getLabel")

# Fallback for unsupported classes: fail with a clear message.
# Fix: argument renamed from `x` to `object` so the method signature matches
# the generic (S3 methods must use the generic's argument names; R CMD check
# warns on the mismatch).
getLabel.default = function(object) {
  stop("Invalid object. Must supply a graph or node object.")
}
# Retrieve every node label present in the graph via the REST endpoint
# stored in the graph's "node_labels" attribute. Returns the parsed label
# vector, or invisible NULL (after a message) when the graph has none.
getLabel.graph = function(graph) {
  labels_url = attr(graph, "node_labels")
  resp = http_request(labels_url,
                      "GET",
                      "OK",
                      httpheader = setHeaders(graph))
  labels = fromJSON(resp)
  if (length(labels) > 0) {
    return(labels)
  }
  message("No labels in the graph.")
  invisible(NULL)
}
# Retrieve the labels attached to a single node via the REST endpoint stored
# in the node's "labels" attribute. Returns the parsed label vector, or
# invisible NULL (after a message) when the node carries none.
getLabel.node = function(node) {
  labels_url = attr(node, "labels")
  resp = http_request(labels_url,
                      "GET",
                      "OK",
                      httpheader = setHeaders(node))
  labels = fromJSON(resp)
  if (length(labels) > 0) {
    return(labels)
  }
  message("No labels on the node.")
  invisible(NULL)
}
|
be28c811de805f118ce686af64ad60df3658d049
|
4164b00d5a99f4ff19fd1cfef7f3e8d0ebb62d1b
|
/DataMining-Benchmark-Conversion/benchmark_resultsConversion.R
|
67a243509f5d9669ef5311320a1f442a787381cc
|
[] |
no_license
|
mutual-ai/IBE_Benchmark-OpenML
|
c99c6d3744b94c2eb5d201bc9fab3bb343ec1b85
|
35a43c38969bb57c4a4f589bfe93a66c5eca3d8b
|
refs/heads/master
| 2020-07-11T08:49:37.818896
| 2016-11-11T16:21:22
| 2016-11-11T16:21:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,146
|
r
|
benchmark_resultsConversion.R
|
# Assemble the RF-vs-logistic-regression benchmark dataset: load raw
# benchmark results and per-dataset meta-data, then aggregate permutation-
# importance differences between the two models.
# NOTE(review): rm(list = ls()) wipes the whole workspace — acceptable for a
# stand-alone script, but do not source() this from a live session.
rm(list = ls())
OS = "win"
library(mlr)
library(gridExtra)
library(ggplot2)
library(cowplot)
library(reshape2)
source(file = "DataMining-Benchmark-Conversion/benchmark_defs.R")
################################################################################################################################
# Creation of the dataset
################################################################################################################################
## Load and convert the results to a data frame ----
# Loads `result` (benchmark runs) and `clas_used` (dataset meta-data).
load(file = "Data/Results/benchmark_results_snow_small-medium-allLearners_strat_All.RData")
load(file = "Data/Results/clas_time.RData")
leaner.id.lr = "classif.logreg"
learner.id.randomForest = "classif.randomForest"
unwantedLearners = c( "classif.cvglmnet..elasticnet", "classif.penalized.ridge", "classif.penalized.lasso",
"classif.multinom", "classif.cvglmnet.ridge", "classif.cvglmnet.lasso.vanilla", "classif.cvglmnet.lasso")
unwantedMeasures = c("mmce.test.mean")
# Model-difference inputs: importance lists, weighted PDP differences and
# target sigma (loaded from .RData, objects defined by those files).
load(file = "Data/Research/DifferenceModel/ImportanceResults-all.RData")
load(file = "Data/Research/DifferenceModel/pdp.weigheddifferenceAll.RData")
load(file = "Data/Research/Target/target.sigma.0.5.RData")
# Keep one meta-data row per benchmark result.
clas_used = clas_used[c(1:length(result)),]
# Aggregate the RF-vs-LR permutation-importance gap per dataset:
# mean absolute difference (L1), scaled Euclidean difference (L2), and mean
# absolute rank difference.
importance.list.aggr.l1 = sapply(importance.list, function(x) sum(abs(x$rf.permutation.imp-x$lr.permutation.imp))/length(x$rf.permutation.imp))
importance.list.aggr.l2 = sapply(importance.list, function(x) sqrt(sum((x$rf.permutation.imp-x$lr.permutation.imp)^2))/length(x$rf.permutation.imp))
importance.list.aggr.rank = sapply(importance.list, function(x) sum(abs(rank(x$rf.permutation.imp)-rank(x$lr.permutation.imp)))/length(x$rf.permutation.imp))
importance.df = data.frame(l1 = importance.list.aggr.l1,
l2 = importance.list.aggr.l2,
rank = importance.list.aggr.rank)
# Drop pdp entries that errored (anything that is not a list).
res.errorMessages.pdp = which(!sapply(pdp.weigheddifference, function(x) typeof(x)=="list"))
# Fix: guard the negative indexing — x[-integer(0)] selects *nothing*, so an
# unconditional removal would silently wipe all data whenever no entry
# matches (the analogous NA filter later in this script already guards).
if (length(res.errorMessages.pdp) > 0) {
  result = result[-res.errorMessages.pdp]
  clas_used = clas_used[-res.errorMessages.pdp,]
  importance.df = importance.df[-res.errorMessages.pdp,]
  pdp.weigheddifference = pdp.weigheddifference[-res.errorMessages.pdp]
  target.sigma = target.sigma[-res.errorMessages.pdp]
}
# Stack the per-dataset PDP difference rows into one data frame.
pdp.df = do.call("rbind", pdp.weigheddifference)
# Drop high-dimensional datasets (more features than observations).
res.highdimension = which(clas_used$NumberOfFeatures>clas_used$NumberOfInstances)
if (length(res.highdimension) > 0) {
  result = result[-res.highdimension]
  clas_used = clas_used[-res.highdimension,]
  importance.df = importance.df[-res.highdimension,]
  pdp.df = pdp.df[-res.highdimension,]
  target.sigma = target.sigma[-res.highdimension]
}
# Drop benchmark entries that errored (anything that is not a list).
res.errorMessages = which(!sapply(result, function(x) typeof(x)=="list"))
if (length(res.errorMessages) > 0) {
  result = result[-res.errorMessages]
  clas_used = clas_used[-res.errorMessages,]
  importance.df = importance.df[-res.errorMessages,]
  pdp.df = pdp.df[-res.errorMessages,]
  target.sigma = target.sigma[-res.errorMessages]
}
# Aggregate each benchmark run to one performance row per learner, then
# compute RF-minus-LR performance differences per measure.
res.perfs = lapply(result, function(x) getBMRAggrPerformances(x, as.df=TRUE))
# Remove unwanted learners.
res.perfs = lapply(res.perfs, function(x) subset(x,!(learner.id %in% unwantedLearners)))
# Remove unwanted measure columns (positions taken from the first element —
# assumes every element has the same column layout).
if (length(unwantedMeasures)>0) {
index.unwantedMeasures = which(names(res.perfs[[1]]) %in% unwantedMeasures)
res.perfs = lapply(res.perfs, function(x) x[,-index.unwantedMeasures])
}
# Detect and remove entries containing NAs (guarded so an empty index does
# not wipe the data).
res.perfs.nas = which(sapply(res.perfs, function(x) any(is.na(x))))
if (!(identical(integer(0), res.perfs.nas))) {
res.perfs = res.perfs[-res.perfs.nas]
clas_used = clas_used[-res.perfs.nas,]
importance.df = importance.df[-res.perfs.nas,]
pdp.df = pdp.df[-res.perfs.nas,]
target.sigma = target.sigma[-res.perfs.nas]
}
# Stack into one data frame (one row per task/learner).
res.perfs.df = do.call("rbind", res.perfs)
# Performance difference RF - LR, skipping the first two id columns.
# NOTE(review): assumes the RF and LR subsets are row-aligned by task —
# verify the benchmark always contains both learners for every task.
perfsAggr.LR = subset(res.perfs.df, learner.id == leaner.id.lr)
perfsAggr.RF = subset(res.perfs.df, learner.id == learner.id.randomForest)
perfsAggr.diff = perfsAggr.RF[,3:ncol(perfsAggr.RF)]-perfsAggr.LR[,3:ncol(perfsAggr.LR)]
perfsAggr.diff.melted = melt(perfsAggr.diff)
# reshape2 is unloaded here; later code calls it via reshape2:: explicitly.
detach(package:reshape2, unload = TRUE)
## Compute the dataset of differences together with dataset meta-features ----
# number of features
p = clas_used$NumberOfFeatures
# number of numeric attributes
pnum = clas_used$NumberOfNumericFeatures
# number of categorical attributes
psymbolic = clas_used$NumberOfSymbolicFeatures
# number of samples
n = clas_used$NumberOfInstances
# p/n (features per observation; original comment said "n/p" but the code
# computes p divided by n)
psurn = p/n
# Numerical attributes rate
pnumrate = pnum/p
# Nominal attributes rate
psymbolicrate = psymbolic/p
# %Cmin Percentage of elements of the minority class
Cmin = clas_used$MinorityClassSize/n
# %Cmax Percentage of elements of the majority class
Cmax = clas_used$MajorityClassSize/n
# One row per dataset: performance differences plus log-scaled meta-features
# and the LR Brier score under several transforms.
df.bmr.diff = data.frame(perfsAggr.diff,
logp = log(clas_used$NumberOfFeatures),
logn = log(clas_used$NumberOfInstances),
logdimension = log(clas_used$dimension),
logpsurn = log(clas_used$NumberOfFeatures/clas_used$NumberOfInstances),
logdimensionsurn = log(clas_used$dimension/clas_used$NumberOfInstances),
lograpportMajorityMinorityClass = log(clas_used$MajorityClassSize/clas_used$MinorityClassSize),
pnum, psymbolic, pnumrate, psymbolicrate, Cmin, Cmax,
brierlogreg = perfsAggr.LR$brier.test.mean,
logbrierlogreg = log(perfsAggr.LR$brier.test.mean),
sqrtbrierlogreg = sqrt(perfsAggr.LR$brier.test.mean),
target.sigma
)
# Sanity check, auto-printed when run at top level.
paste("Is there any nas ? :", any(is.na(df.bmr.diff)))
# Split column names into measure columns and feature columns.
measures.names = sapply(MEASURES, function(x) paste0(x$id,".test.mean"))
features.names = names(df.bmr.diff)[which(!(names(df.bmr.diff) %in% measures.names))]
## Compute the ranks
# Build a learner-by-task matrix of ranks for one performance measure.
#
# bmr.all     stacked per-task/per-learner performance data frame; must
#             contain task.id, learner.id and "<measure$id>.test.mean".
# measure     mlr measure object; $minimize decides the rank direction.
# ties.method forwarded to rank().
#
# Returns a matrix with one row per learner and one column per task.
convertModifiedBMRToRankMatrix <- function(bmr.all, measure = NULL, ties.method = "average") {
  measure.name <- paste0(measure$id, ".test.mean")
  # Mean performance for every (task, learner) pair.
  perf <- aggregate(
    bmr.all[[measure.name]],
    by = list(task.id = bmr.all$task.id, learner.id = bmr.all$learner.id),
    FUN = mean
  )
  # Flip the sign for maximized measures so smaller always means better.
  if (!measure$minimize) {
    perf$x <- -perf$x
  }
  # Rank the learners within each task.
  perf <- plyr::ddply(perf, "task.id", function(d) {
    d$alg.rank <- rank(d$x, ties.method = ties.method)
    d
  })
  # Reshape to wide form: one row per learner, one column per task.
  wide <- reshape2::dcast(
    reshape2::melt(perf, c("task.id", "learner.id"), "alg.rank"),
    learner.id ~ task.id
  )
  task.id.names <- setdiff(colnames(wide), "learner.id")
  rank.mat <- as.matrix(wide[, task.id.names])
  rownames(rank.mat) <- wide$learner.id
  colnames(rank.mat) <- task.id.names
  rank.mat
}
# Save it ----
# Persist the assembled objects (and the rank helper) for the analysis step.
save(df.bmr.diff, res.perfs.df, convertModifiedBMRToRankMatrix, perfsAggr.diff.melted, perfsAggr.diff, pdp.df, clas_used, file = "Data/Results/df.bmr.RData")
|
e42da885cd9b4311363319adf5ceab5de9cfcfcc
|
3a5ae60a34608840ef484a901b61a363b1167756
|
/man/alignHSLength.Rd
|
22113c02bca03562e128dfb0382b54e6d9d10598
|
[] |
no_license
|
SWS-Methodology/hsfclmap
|
3da8ca59a1ceb90564ec70a448a6f0340ca86420
|
eb2bc552fcce321b3dd7bc8655b092bc7a428e1e
|
refs/heads/master
| 2021-01-17T17:35:59.484994
| 2016-12-19T17:47:53
| 2016-12-19T17:47:53
| 70,464,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,081
|
rd
|
alignHSLength.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/alignHSLength.R
\name{alignHSLength}
\alias{alignHSLength}
\title{Prepares for numerical comparison vector of hs codes from
trade dataset with hs codes range from mapping table.}
\usage{
alignHSLength(hs, mapdataset)
}
\arguments{
\item{hs}{Vector of original hs codes from trade data set.}
\item{mapdataset}{Data frame with mapping table containing
at least columns area, flow, fromcode, tocode}
}
\value{
A list with two components:
* hs - vector with extended hs codes
* mapdataset - data frame with extended fromcode and to code
columns.
}
\description{
Prepares for numerical comparison vector of hs codes from
trade dataset with hs codes range from mapping table.
}
\details{
Aligns the length of hs codes in three places: vector from trade
dataset, fromcode and tocode columns of mapping dataset.
Maximum length is determined and all shorter codes are extended
on right-hand side: trade data hs code and mapping
fromcode are extended by 0, mapping tocode is extended by 9.
}
|
939c37317aedc2d3faf5c0640c214394cf9ca083
|
1b9504a60eef0e9bb371a77ffbb1dfd240074a9c
|
/scripts/time_buy_histogram.R
|
90038e6af654da6179c733bc729da60059d3571c
|
[] |
no_license
|
mohamedabolfadl/ML_Pipe
|
3784fe0097ba9f2be3b84bbcaa3e52845420f892
|
34a4da544b9bedc034c62c8dddd4868cbcde8686
|
refs/heads/master
| 2021-09-12T14:43:11.507177
| 2018-04-17T19:04:15
| 2018-04-17T19:04:15
| 106,018,697
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 729
|
r
|
time_buy_histogram.R
|
# Histograms of time-to-outcome for buy trades: positive buy_time = bars
# until take-profit (profitable), negative buy_time = bars until stop-loss.
library(ggplot2)
# Fix: read_csv() is a readr function; the script previously called it
# without loading readr, which fails unless readr happened to be attached.
library(readr)
df_all <- read_csv(file="C:/Users/m00760171/Desktop/Templates/trunk/FX/H1_PF_2_TR_15_2001-2016/H1_all_targets.csv")
# Keep only the buy_time column as a one-column data frame.
df <- as.data.frame(df_all$buy_time)
names(df) <- "buy_time"
#df<-as.data.frame(TP_time[1:2000])
# Profitable buys: TP reached within (0, 2000) bars.
df_prof <- as.data.frame(df[df$buy_time > 0 & df$buy_time < 2000, ])
names(df_prof) <- "TP_time"
# Reference the column by name inside aes() (idiomatic ggplot2; same plot).
ggplot(data = df_prof, aes(TP_time)) + geom_histogram(bins = 100)
summary(df_prof)
#qplot(df_prof$TP_time, geom="histogram")
# Losing buys: SL reached within (-2000, 0) bars.
df_loss <- as.data.frame(df[df$buy_time > -2000 & df$buy_time < 0, ])
names(df_loss) <- "TP_time"
ggplot(data = df_loss, aes(TP_time)) + geom_histogram(bins = 50)
summary(df_loss)
#qplot(df_loss$TP_time, geom="histogram")
#ggplot(data=df, aes(df$TP_time)) + geom_histogram(bins = 1)
|
40e262a6af0620d40fc138fccf2f66e5e8d5d59d
|
9430e5cd40071a7a0a5e92a3a17ee4706538f0d3
|
/man/getDiseaseListSim.Rd
|
8370a6b62edd87e21fe7a888a795d715e6d2dece
|
[] |
no_license
|
MoudFassad/HPOSim
|
63314cf60d420dc402fb42411f9557af82687f61
|
03a2559b5d0cedc8db6a6e207236a1cd220a6763
|
refs/heads/master
| 2022-03-30T07:25:05.146874
| 2020-01-09T12:38:10
| 2020-01-09T12:38:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,078
|
rd
|
getDiseaseListSim.Rd
|
\name{getDiseaseListSim}
\alias{getDiseaseListSim}
\title{ Pairwise Similarity for a List of Diseases}
\description{
Given a list of diseases, the function calculates the pairwise similarities for any two diseases in the list using different strategies.
}
\usage{
getDiseaseListSim(diseaselist,combinemethod="funSimMax",
method="Resnik",ontology="PA",normalization=FALSE,
normalizationmethod="Lin",verbose=FALSE)
}
\arguments{
\item{diseaselist}{ character vector of disease OMIM IDs }
\item{combinemethod}{ method to calculate the similarity between diseases, one of "max", "mean", "funSimMax", "funSimAvg" and "BMA" }
\item{method}{ method to compute the similarity of HPO terms, one of "Resnik", "JiangConrath", "Lin", "simIC", "relevance" and "Wang" }
\item{ontology}{ the ontology used for similarity calculation }
\item{normalization}{ normalize similarities yes/no}
\item{normalizationmethod}{ one of "sqrt", "Lin" and "Tanimoto"}
\item{verbose}{ print out some information }
}
\details{
The combine method to calculate the pairwise disease similarity between disease can either be:
\describe{
\item{"max"}{the maximum similarity between any two HPO terms}
\item{"mean"}{the average similarity between any two HPO terms [1]}
\item{funSimMax}{ the average of best matching HPO term similarities. Take the maximum of the scores achieved by assignments of HPO terms from disease 1 to disease 2 and vice versa. [2]}
\item{funSimAvg}{ the average of best matching HPO term similarities. Take the average of the scores achieved by assignments of HPO terms from disease 1 to disease 2 and vice versa. [2]}
\item{"BMA"}{best match average approach [3]}
}
}
\value{
n*n similarity matrix (n = number of diseases)
}
\author{
Yue Deng<anfdeng@163.com>
}
\seealso{
\code{\link{getDiseaseSim}}
}
\examples{
list<-c("OMIM:101900","OMIM:102000","OMIM:143470")
getDiseaseListSim(list,combinemethod="funSimAvg")
}
\keyword{ manip }
|
1a40d841d0b6491e792192beb6451ac28f6d4747
|
aeacd6ee9ca56233afcda3405314d6db86baee95
|
/SupplementMaterials/WA_Data_Analysis_ANZJS/RCode/CreateTable9.R
|
450bbcd59f7f8f410d8f900aa3145f3eaa00bc61
|
[] |
no_license
|
rakstats/VarSelectOnLinnet
|
76b378f3c31f84ed7b0786c07fd62ad3f576b2c2
|
49d25a8c148df5ebd561249d607177484a342a47
|
refs/heads/master
| 2021-08-21T20:50:18.959662
| 2020-09-24T01:30:22
| 2020-09-24T01:30:22
| 225,319,963
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,235
|
r
|
CreateTable9.R
|
##################
# Load packages #
#################
library(sp)
library(spatstat)
library(dplyr)
library(maptools)
library(glmnet)
library(stringr)
library(here)
library(doParallel)
library(xtable)
############################
# Load the useful functions#
############################
source(here::here("RCode", "VarSelectUtils.R"))
#############################
#####################################################
# Load Cross-validation and regularized fit results #
#####################################################
#################################################
# 1. Load lasso cross-validation and lasso fit #
#################################################
# 1a. Lasso CV and fit results for the Berman-Turner (BT) approximation.
lassoMarkedBTCV <- readRDS(here::here("RegularizedRegResults", "lassoMarkedBT.rds"))
lassoMarkedBTFit <- readRDS(here::here("RegularizedRegResults", "lassoMarkedBTFit.rds"))
print(lassoMarkedBTCV)
print(lassoMarkedBTFit)
# Coefficients at the two standard glmnet penalties.
coef(lassoMarkedBTCV, s="lambda.min")
coef(lassoMarkedBTCV, s="lambda.1se")
# Candidate penalties between lambda.min and lambda.1se: arithmetic and
# geometric means.
lambdaMin <- lassoMarkedBTCV$lambda.min
lambda1Se <- lassoMarkedBTCV$lambda.1se
lambdaAvg <- mean(c(lambdaMin,lambda1Se))
lambdaGmean <- sqrt(lambdaMin*lambda1Se)
log(lambdaGmean)
# Model size (non-zero coefficients) at each candidate penalty.
number_of_nonzero_coef(coef(lassoMarkedBTCV, s=lambdaMin))
number_of_nonzero_coef(coef(lassoMarkedBTCV, s=lambda1Se))
number_of_nonzero_coef(coef(lassoMarkedBTCV, s=lambdaAvg))
number_of_nonzero_coef(coef(lassoMarkedBTCV, s=lambdaGmean))
#coef(lassoMarkedBTCV, s=lambdaAvg)
# Final lasso coefficients (geometric-mean penalty) as a two-column data
# frame (Variable, estimate), split into the table's two column halves.
lassoBTCoef00 <- coef(lassoMarkedBTCV, s=lambdaGmean) # Selects 16 variables
lassoBTCoef0 <- as.data.frame(as.matrix(lassoBTCoef00))
names(lassoBTCoef0) <- c("B-T (lasso)")
lassoBTCoef0$Variable <- rownames(lassoBTCoef0)
lassoBTCoef <- lassoBTCoef0[c(2,1)]
Col1 <- lassoBTCoef[1:45,]
Col4 <- lassoBTCoef[46:90, ]
###############################################################################
# 2. Ridge cross-validation results for the Berman-Turner approximation.
ridgeMarkedBTCV <- readRDS(here::here("RegularizedRegResults", "ridgeMarkedBT.rds"))
plot(ridgeMarkedBTCV)
# Candidate penalties between lambda.min and lambda.1se.
lambdaRidgeMin <- ridgeMarkedBTCV$lambda.min
lambdaRidge1Se <- ridgeMarkedBTCV$lambda.1se
lambdaRidgeAvg <- mean(c(lambdaRidgeMin, lambdaRidge1Se))
lambdaRidgeGmean <- sqrt(lambdaRidgeMin*lambdaRidge1Se)
get_max_deviance(ridgeMarkedBTCV)
# NOTE(review): options(scipen=999) changes number printing globally for the
# rest of the session; consider restoring the old value afterwards.
options(scipen=999)
# Fraction of deviance explained at each candidate penalty.
sapply(c(lambdaRidgeMin, lambdaRidge1Se, lambdaRidgeAvg, lambdaRidgeGmean),
FUN = function(z){get_fraction_deviance_explained(X=ridgeMarkedBTCV, gamma=z)})
# Ridge coefficients (lambda.min) as a (Variable, estimate) data frame,
# split into the table's two column halves.
ridgeBTCoef00 <- coef(ridgeMarkedBTCV, s=lambdaRidgeMin)
ridgeBTCoef0 <- as.data.frame(as.matrix(ridgeBTCoef00))
names(ridgeBTCoef0) <- c("B-T (ridge)")
ridgeBTCoef0$Variable <- rownames(ridgeBTCoef0)
ridgeBTCoef <- ridgeBTCoef0[c(2,1)]
Col2 <- ridgeBTCoef[1:45,]
Col5 <- ridgeBTCoef[46:90, ]
###################################################################################
# 3. Elastic-net cross-validation results for the Berman-Turner approximation.
enetMarkedBTCV <- readRDS(here::here("RegularizedRegResults", "enetMarkedBT.rds"))
plot(enetMarkedBTCV)
# Candidate penalties between lambda.min and lambda.1se.
lambdaenetMin <- enetMarkedBTCV$lambda.min
lambdaenet1Se <- enetMarkedBTCV$lambda.1se
lambdaenetAvg <- mean(c(lambdaenetMin,lambdaenet1Se))
lambdaenetGmean <- sqrt(lambdaenetMin*lambdaenet1Se)
log(lambdaenetGmean)
# Model size (non-zero coefficients) at each candidate penalty.
number_of_nonzero_coef(coef(enetMarkedBTCV, s=lambdaenetMin))
number_of_nonzero_coef(coef(enetMarkedBTCV, s=lambdaenet1Se))
number_of_nonzero_coef(coef(enetMarkedBTCV, s=lambdaenetAvg))
number_of_nonzero_coef(coef(enetMarkedBTCV, s=lambdaenetGmean))
# Elastic-net coefficients (geometric-mean penalty) as a (Variable, estimate)
# data frame, split into the table's two column halves.
enetBTCoef0 <- as.data.frame(as.matrix(coef(enetMarkedBTCV, s=lambdaenetGmean)))
names(enetBTCoef0) <- c("B-T (e-net)")
enetBTCoef0$Variable <- rownames(enetBTCoef0)
enetBTCoef <- enetBTCoef0[c(2,1)]
Col3 <- enetBTCoef[1:45,]
Col6 <- enetBTCoef[46:90, ]
###################################################################################
# Join the three estimator columns by variable name into the two table halves.
Table8a <- Col1 %>% left_join(Col2, by = "Variable") %>% left_join(Col3, by="Variable")
Table8b <- Col4 %>% left_join(Col5, by = "Variable") %>% left_join(Col6, by="Variable")
###################################################################################
# Abbreviate the covariate names in Table8a for the LaTeX output.
# A single named lookup vector replaces the original wall of 41 repeated
# element-wise assignments; names with no entry in the map are left
# untouched, exactly as before.
table8a.abbrev <- c(
  SPLI_SPEED = "SPD_LIM", HOAL_CURVE = "H_CURVE", TOTAL_PAVE = "TOT_P",
  TOTAL_SEAL = "TOT_S", TRAFFICABL = "TRFABL", NO_OF_LANE = "N_LANE",
  SPLI_SPEED2 = "SPD_LIM2", HOAL_CURVE2 = "H_CURVE2", TOTAL_PAVE2 = "TOT_P2",
  TOTAL_SEAL2 = "TOT_S2", TRAFFICABL2 = "TRFABL2", NO_OF_LANE2 = "N_LANE2",
  SHOULDER_S = "SHLDR", FLOODWAY = "FLDWY", BRIDGE = "BRDG",
  SPLI_SPEEDxKERB_L = "SPD_LIMxKERB_L", HOAL_CURVExKERB_L = "H_CURVExKERB_L",
  TOTAL_PAVExKERB_L = "TOT_PxKERB_L", TOTAL_SEALxKERB_L = "TOT_SxKERB_L",
  TRAFFICABLxKERB_L = "TRFABLxKERB_L", NO_OF_LANExKERB_L = "N_LANExKERB_L",
  SHOULDER_SxKERB_L = "SHLDRxKERB_L", FLOODWAYxKERB_L = "FLDWYxKERB_L",
  BRIDGExKERB_L = "BRDGxKERB_L",
  SPLI_SPEEDxSHOULDER_S = "SPD_LIMxSHLDR", HOAL_CURVExSHOULDER_S = "H_CURVExSHLDR",
  TOTAL_PAVExSHOULDER_S = "TOT_PxSHLDR", TOTAL_SEALxSHOULDER_S = "TOT_SxSHLDR",
  TRAFFICABLxSHOULDER_S = "TRFABLxSHLDR", NO_OF_LANExSHOULDER_S = "N_LANExSHLDR",
  KERB_RxSHOULDER_S = "KERB_RxSHLDR", FLOODWAYxSHOULDER_S = "FLDWYxSHLDR",
  BRIDGExSHOULDER_S = "BRDGxSHLDR",
  SPLI_SPEEDxKERB_R = "SPD_LIMxKERB_R", HOAL_CURVExKERB_R = "H_CURVExKERB_R",
  TOTAL_PAVExKERB_R = "TOT_PxKERB_R", TOTAL_SEALxKERB_R = "TOT_SxKERB_R",
  TRAFFICABLxKERB_R = "TRFABLxKERB_R", NO_OF_LANExKERB_R = "N_LANExKERB_R",
  FLOODWAYxKERB_R = "FLDWYxKERB_R", BRIDGExKERB_R = "BRDGxKERB_R"
)
table8a.hit <- Table8a$Variable %in% names(table8a.abbrev)
Table8a$Variable[table8a.hit] <- unname(table8a.abbrev[Table8a$Variable[table8a.hit]])
##############################################################################
# Table8b
##############################################################################
# Abbreviate the mark-interaction covariate names in Table8b. Every original
# rename follows one pattern: "MARKSx<name>" -> "Markx<abbrev>", where
# <name>/<abbrev> is the same main-effect abbreviation used elsewhere, so
# the full map is derived from a base table instead of 41 hand-written
# assignments. Names with no entry are left untouched, exactly as before.
table8b.base <- c(
  SPLI_SPEED = "SPD_LIM", HOAL_CURVE = "H_CURVE", TOTAL_PAVE = "TOT_P",
  TOTAL_SEAL = "TOT_S", TRAFFICABL = "TRFABL", NO_OF_LANE = "N_LANE",
  SPLI_SPEED2 = "SPD_LIM2", HOAL_CURVE2 = "H_CURVE2", TOTAL_PAVE2 = "TOT_P2",
  TOTAL_SEAL2 = "TOT_S2", TRAFFICABL2 = "TRFABL2", NO_OF_LANE2 = "N_LANE2",
  SHOULDER_S = "SHLDR", FLOODWAY = "FLDWY", BRIDGE = "BRDG",
  SPLI_SPEEDxKERB_L = "SPD_LIMxKERB_L", HOAL_CURVExKERB_L = "H_CURVExKERB_L",
  TOTAL_PAVExKERB_L = "TOT_PxKERB_L", TOTAL_SEALxKERB_L = "TOT_SxKERB_L",
  TRAFFICABLxKERB_L = "TRFABLxKERB_L", NO_OF_LANExKERB_L = "N_LANExKERB_L",
  SHOULDER_SxKERB_L = "SHLDRxKERB_L", FLOODWAYxKERB_L = "FLDWYxKERB_L",
  BRIDGExKERB_L = "BRDGxKERB_L",
  SPLI_SPEEDxSHOULDER_S = "SPD_LIMxSHLDR", HOAL_CURVExSHOULDER_S = "H_CURVExSHLDR",
  TOTAL_PAVExSHOULDER_S = "TOT_PxSHLDR", TOTAL_SEALxSHOULDER_S = "TOT_SxSHLDR",
  TRAFFICABLxSHOULDER_S = "TRFABLxSHLDR", NO_OF_LANExSHOULDER_S = "N_LANExSHLDR",
  KERB_RxSHOULDER_S = "KERB_RxSHLDR", FLOODWAYxSHOULDER_S = "FLDWYxSHLDR",
  BRIDGExSHOULDER_S = "BRDGxSHLDR",
  SPLI_SPEEDxKERB_R = "SPD_LIMxKERB_R", HOAL_CURVExKERB_R = "H_CURVExKERB_R",
  TOTAL_PAVExKERB_R = "TOT_PxKERB_R", TOTAL_SEALxKERB_R = "TOT_SxKERB_R",
  TRAFFICABLxKERB_R = "TRFABLxKERB_R", NO_OF_LANExKERB_R = "N_LANExKERB_R",
  FLOODWAYxKERB_R = "FLDWYxKERB_R", BRIDGExKERB_R = "BRDGxKERB_R"
)
table8b.abbrev <- setNames(paste0("Markx", table8b.base),
                           paste0("MARKSx", names(table8b.base)))
table8b.hit <- Table8b$Variable %in% names(table8b.abbrev)
Table8b$Variable[table8b.hit] <- unname(table8b.abbrev[Table8b$Variable[table8b.hit]])
#######################################################################################
# Combine the two halves side by side and emit the LaTeX table.
Table8 <- cbind(Table8a, Table8b)
#######################################################################################
print(xtable(Table8, digits = 3), include.rownames = FALSE)
|
1749b3a8a5d819507e3b89265d48e32e63978aef
|
4129370548d7fab3ce00528da7d05cf5c41839fc
|
/man/get_league_averages.Rd
|
b405a5800adfee73950f5140fc963d2f6942beae
|
[
"MIT"
] |
permissive
|
m-clark/five38clubrankings
|
c8ec6e7b90744cd3cb9a775bca5f7d31f62eb780
|
901ef6721fbe780431b0c7ae3eed615f38b0d610
|
refs/heads/master
| 2021-08-06T17:00:19.568274
| 2018-09-18T23:10:29
| 2018-09-18T23:10:29
| 135,958,695
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 741
|
rd
|
get_league_averages.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_league_averages.R
\name{get_league_averages}
\alias{get_league_averages}
\title{Get league averages}
\usage{
get_league_averages(rankings, drop_leagues = TRUE)
}
\arguments{
\item{rankings}{Rankings created by \code{get_club_rankings}}
\item{drop_leagues}{Drop some miscellaneous teams that made it to Champions
or Europa League but whose domestic league is not included. Default is
TRUE.}
}
\value{
A data frame of league rankings
}
\description{
Get average rankings per league
}
\details{
Takes the rankings you create with \code{get_club_rankings} and gets
league averages.
}
\examples{
library(five38clubrankings)
get_league_averages(rankings_2018)
}
|
82e4c864f4d0d9f5a311156ec850d76a8d88f8a9
|
b579d9aa0ac8f35170bc532e2e0b8b91d6f9c1ab
|
/plot3.R
|
3ea85e20502504a7d038aa60e7cf76b77e86cabe
|
[] |
no_license
|
data2knowledge/ExData_Plotting1
|
bb6545038a52d6cce02868d41efa6a45eab66424
|
d1e041827ebed8394ec2fc9343e1a7b2c39a3aee
|
refs/heads/master
| 2021-01-18T02:12:57.195285
| 2014-09-05T03:43:40
| 2014-09-05T03:43:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,289
|
r
|
plot3.R
|
# plot3: overlaid time-series of the three energy sub-metering channels for
# 1-2 Feb 2007, written to plot3.png.
# Assumes "household_power_consumption.txt" is in the working directory.
library(sqldf)
# Load only the required power consumption rows for 1 Feb and 2 Feb 2007.
power_consumption_dataset<-read.csv.sql(file='household_power_consumption.txt',
                                        sep=";",
                                        sql="select * from file where Date = '1/2/2007' or Date = '2/2/2007'",
                                        header=TRUE)
# Combine the date and time strings and parse them into POSIX timestamps.
date_and_time_string<-paste(power_consumption_dataset$Date,power_consumption_dataset$Time)
newdate<-strptime(date_and_time_string,"%d/%m/%Y %H:%M:%S",tz="EST5EDT")
# Open a PNG graphics device.
png(file="plot3.png", width = 480,height = 480)
# Construct and display the plot.
par(mar=c(5,5,2,2))
# Fix: axis and legend labels previously read "sub meeting"/"Sub_meeting_*";
# they now match the actual series names (Sub_metering_*).
plot(newdate,power_consumption_dataset$Sub_metering_1,type="n",ylab="Energy sub metering",xlab="")
points(newdate,power_consumption_dataset$Sub_metering_1,type="l",col="black")
points(newdate,power_consumption_dataset$Sub_metering_2,type="l",col="red")
points(newdate,power_consumption_dataset$Sub_metering_3,type="l",col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       col=c("black","red","blue"),cex=1,lty =c("solid","solid","solid"),lwd=1)
# Close the PNG graphics device.
dev.off()
|
bddea9914fdc336f9172165a48a73792efef70bb
|
b26855cd7c18444ba2ea1089e4946eebc28f3e62
|
/plot2.R
|
11cf1a31e83c3c65e765fabbbb28529ffcc104b2
|
[] |
no_license
|
rhcarver/Coursera_Exp_Data_Project-1
|
7b2c86c1af3eea6b0633fe59b0eb36dd88b6cf30
|
6d0cf20dca2dad154ff9e8f01ed8ca7bf4946f77
|
refs/heads/master
| 2021-01-10T05:11:26.931007
| 2015-10-10T21:24:18
| 2015-10-10T21:24:18
| 44,028,645
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,083
|
r
|
plot2.R
|
# plot2: Coursera Exploratory Data Analysis, Project 1 -- plot 2.
# Reads household electric power consumption for 1-2 Feb 2007 and draws a
# time-series line plot of Global Active Power, copied to plot2.png.
#
####################
#
# Set working directory
setwd("C:/Users/rcarver/Dropbox/In process/Coursera/Exploratory Data analysis/Project 1")
#
# Load libraries
library(utils)
library(lattice)
# Read the full data set; "?" and empty strings mark missing values
data_file <- "household_power_consumption.txt"
na_tokens <- c("?", "")
power_df <- read.table(data_file, header = TRUE, sep = ";", na.strings = na_tokens)
# Keep only the two days of interest (1 and 2 Feb 2007)
power_df <- subset(power_df, Date == "1/2/2007" | Date == "2/2/2007")
# Convert dates and times (approach from course discussion help)
power_df$Date <- as.Date(power_df$Date, format = "%d/%m/%Y")
power_df$timetemp <- paste(power_df$Date, power_df$Time)
power_df$Time <- strptime(power_df$timetemp, format = "%Y-%m-%d %H:%M:%S")
# Draw plot 2 on a new device, then copy it to a PNG file
dev.new(width = 480, height = 480)
plot(power_df$Time, power_df$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot2.png")
dev.off()
|
32debedbecc25b33389ec2d0d26530fed1d85ba4
|
297dd29c203c3fb3847df230b88297c31bc8314a
|
/rabea/Rabea.R
|
9c2064beedc61760cae2cfb9ea5d78d88a89d96d
|
[
"MIT"
] |
permissive
|
rehanzfr/R_Codes
|
f5bc876213fc0793ebc9c5c48f782f61de87fe24
|
b4460a5516660963d5f988598656de13f77a3d54
|
refs/heads/master
| 2023-05-11T08:48:36.062784
| 2023-05-01T17:20:17
| 2023-05-01T17:20:17
| 585,261,050
| 0
| 0
|
MIT
| 2023-01-04T18:13:35
| 2023-01-04T18:13:34
| null |
UTF-8
|
R
| false
| false
| 5,030
|
r
|
Rabea.R
|
# Setup: packages, working folder and input workbook list.
# NOTE(review): install.packages() runs on every execution — consider
# installing once manually or guarding with requireNamespace().
install.packages("tidyverse")
install.packages("readxl")
install.packages("ggplot2")
install.packages("gridExtra")
library(readxl)
library(tidyverse)
library(ggplot2) # for creating graphs
library(gridExtra)
# Folder in which excel files are placed
folder <- "C:/Users/hanza/OneDrive/Desktop/rabea_contigs"
setwd(folder)
#### QueryCover: For all files in a folder
# NOTE(review): pattern is a regex, so "." matches any character; use
# "\\.xlsx$" to match only the extension (as done later in this script).
files = list.files(path = folder, full.names = TRUE, pattern = ".xlsx")
print(files)
# Build per-sheet frequency histograms of one numeric column for every Excel
# workbook in `files`, writing one multi-panel PDF per workbook.
#
# The original script contained two near-identical 29-line loops (one for
# "Query Cover", one for "Per. ident"); they are factored into this helper,
# called twice below with the loop-specific settings. Output is unchanged.
#
# files       character vector of workbook paths (read with readxl)
# value_col   name of the column to bin (e.g. "Query Cover")
# breaks      bin boundaries, passed to cut()
# bin_labels  bin labels, passed to cut()
# fill        bar fill colour
# file_prefix prefix of the output PDF name
# axis_label  x-axis caption on the combined page
#
# NOTE(review): output names use paste(), which inserts spaces around the
# basename (e.g. "QueryCover_ sample .pdf") — kept for backward compatibility.
make_frequency_pdfs <- function(files, value_col, breaks, bin_labels,
                                fill, file_prefix, axis_label) {
  for (i in seq_along(files)) {
    sheet_names <- excel_sheets(files[i])
    plot_list <- vector("list", length(sheet_names))  # one panel per sheet
    for (j in seq_along(sheet_names)) {
      df <- read_excel(files[i], sheet = j)
      # Bin the values and tabulate the bin frequencies, ascending by count.
      x_cut <- cut(df[[value_col]], breaks = breaks, labels = bin_labels,
                   include.lowest = TRUE)
      freq <- table(x_cut)
      sorted_freq <- freq[order(freq)]
      df_freq <- data.frame(value = names(sorted_freq),
                            frequency = as.numeric(sorted_freq))
      plot_list[[j]] <- ggplot(df_freq, aes(x = value, y = frequency)) +
        geom_bar(stat = "identity", fill = fill) +
        geom_text(aes(label = frequency), vjust = 0) +
        ggtitle(label = sheet_names[j]) +
        theme(plot.title = element_text(size = 8, hjust = 0.5,
                                        face = "italic", color = "black")) +
        theme(axis.text.x = element_text(size = 7, angle = 45, hjust = 1))
    }
    pdf(file = paste(file_prefix, tools::file_path_sans_ext(basename(files[i])), ".pdf"),
        width = 12,  # The width of the plot in inches
        height = 8)  # The height of the plot in inches
    grid.arrange(grobs = plot_list, ncol = 5, left = "Frequency",
                 bottom = axis_label, top = "Frequency Plots")
    print(paste(tools::file_path_sans_ext(basename(files[i]))))
    dev.off()
  }
}

# Query-cover histograms: proportions binned in steps of 0.1.
make_frequency_pdfs(
  files, "Query Cover",
  breaks = c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1),
  bin_labels = c("0-0.1", "0.1-0.2", "0.2-0.3", "0.3-0.4", "0.4-0.5",
                 "0.5-0.6", "0.6-0.7", "0.7-0.8", "0.8-0.9", "0.9-1"),
  fill = "steelblue", file_prefix = "QueryCover_", axis_label = "Query Cover"
)

#### PerIdentity: For all files in a folder
# Percent-identity histograms: percentages binned in steps of 10.
make_frequency_pdfs(
  files, "Per. ident",
  breaks = c(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100),
  bin_labels = c("0-10", "10-20", "20-30", "30-40", "40-50",
                 "50-60", "60-70", "70-80", "80-90", "90-100"),
  fill = "darkorange1", file_prefix = "PerIden_", axis_label = "PerIdentity"
)
#### All data into one Dataframe
library(readxl)
library(purrr)
# Get a list of all Excel files in directory
excel_files <- list.files(pattern = "\\.xlsx$")
sheets_list <- lapply(excel_files, function(file) {
sheets <- excel_sheets(file)
set_names(sheets, sheets) %>% lapply(function(sheet) {
read_excel(file, sheet)
})
})
# Combine all sheets into a single data frame
combined_data <- bind_rows(do.call(c, sheets_list), .id = "file_sheet")
R_species<- data.frame(combined_data$`Scientific Name`)
freq_all <- R_species %>% group_by_all() %>% count %>% rename("Occurances"="n")
freq_greater_than_20 <- R_species %>% group_by_all() %>% count %>% filter(n >= 20)
# Filter Combine based on Query Coverage and Percent Identity
filteredData<- combined_data %>% filter(`Query Cover`>0.4 & `Per. ident`>80)
Our_species<- data.frame(filteredData$`Scientific Name`)
freq_all_our_species <- Our_species %>% group_by_all() %>% count %>% rename("Occurances"="n")
freq_greater_than_20_our_species <- Our_species %>% group_by_all() %>% count %>% filter(n >= 4)
|
5d2180694ed506f316ddac6ea7334bda262ae238
|
8f9fea74327fb383b19bdc95b1b1cf703136f433
|
/R/plotter.R
|
cbd20d422a553d86015deee4a5b02290f585c575
|
[
"Apache-2.0"
] |
permissive
|
rtlemos/rcsurplus1d
|
a4b98c7ca5e7e16c74506f954de8e4a5059260a3
|
69ef6212b0df416f2ab15ffb147dcd5cc7e93e56
|
refs/heads/master
| 2021-04-30T16:20:27.719003
| 2020-06-19T01:19:07
| 2020-06-19T01:19:07
| 56,413,138
| 1
| 0
| null | 2016-04-17T02:24:12
| 2016-04-17T01:16:24
| null |
UTF-8
|
R
| false
| false
| 9,292
|
r
|
plotter.R
|
#' rcplotter: reference class that plots results of surplus model fits
#'
#' Plots are staged in a row-by-column \code{buffer} (a list-matrix) and
#' rendered together on a \pkg{grid} layout; colours come from
#' user-configurable \code{palettes}, whose defaults are colour-blind
#' friendly.
#'
#' @field buffer matrix. Staging area for ggplot objects.
#' @field palettes list. Active colour palettes.
#' @field default_palettes list. Shipped palette defaults.
#'
#' @details Requires \pkg{ggplot2} to be attached or imported by the
#'   package; only \pkg{rcvirtual} and \pkg{grid} are imported here.
#'
#' @import rcvirtual
#' @import grid
#'
# #' @export rcplotter
# #' @exportClass rcplotter
rcplotter <- setRefClass(
  Class = 'rcplotter',
  contains = 'rcvirtual.plotter',
  fields = list(buffer = 'matrix', palettes = 'list',
                default_palettes = 'list'),
  methods = list(
    initialize = function(){
      "Initializes the printer object"
      # Default palettes are colour-blind friendly
      dfp <- c("#000000", "#E69F00", "#56B4E9", "#009E73",
               "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
      .self$default_palettes <- list(
        line = dfp,
        fill = dfp,
        CI = c(0.8, 0.7),  # grey level and alpha for CI ribbons
        neg_zero_pos = c('blue', 'white', 'red'),
        zero_pos = c('white', 'black'))
      # Active palettes start out as a copy of the defaults
      .self$palettes <- list(
        line = .self$default_palettes$line,
        fill = .self$default_palettes$fill,
        CI = .self$default_palettes$CI,
        neg_zero_pos = .self$default_palettes$neg_zero_pos,
        zero_pos = .self$default_palettes$zero_pos
      )
    },
    set_buffer_size = function(nr, nc){
      "Sets up plotter's buffer size (number of rows and columns)"
      .self$buffer <- array(list(), c(nr, nc))
    },
    set_palette = function(argument, value){
      "Sets the printer's colour palettes"
      if (argument == 'all') {
        nm <- names(.self$palettes)
        if (value[1] == 'default') {
          # Reset every palette to its shipped default
          lapply(seq_along(nm), function(i) {
            .self$set_palette(nm[i], .self$default_palettes[[i]])
          })
        } else {
          # Assign the same user-provided value to every palette
          lapply(seq_along(nm), function(i) {
            .self$set_palette(nm[i], value)
          })
        }
      } else {
        pos <- which(names(.self$palettes) == argument)
        if (length(pos) == 0) {
          stop('Palette not found: ', argument)
        } else if (value[1] == 'default') {
          .self$palettes[[pos]] <- .self$default_palettes[[pos]]
        } else {
          .self$palettes[[pos]] <- value
        }
      }
    },
    get_palette = function(type){
      "Prints the requested palette(s); stops if any name is unknown"
      if (type == 'all') type <- names(.self$palettes)
      # seq_along() is safe for zero-length input (1:length would not be);
      # vapply() guarantees a character result for the sanity check below.
      out <- vapply(seq_along(type), FUN.VALUE = character(1),
                    FUN = function(i) {
        pos <- which(names(.self$palettes) == type[i])
        if (length(pos) == 0) {
          return(type[i])  # flag the unknown palette name
        } else {
          print(paste0(type[i], ' = c("',
            paste0(.self$palettes[[pos]], collapse = '", "'), '")'
          ), quote = FALSE)
          return('found')
        }
      })
      if (any(out != 'found')) {
        stop('Palette(s) not found: ', out[out != 'found'])
      }
    },
    get_buffer_plot = function(){
      'Prints the plots in the buffer on an nr x nc grid layout'
      vplayout <- function(x, y) {
        viewport(layout.pos.row = x, layout.pos.col = y)
      }
      nr <- nrow(.self$buffer)
      nc <- ncol(.self$buffer)
      grid::grid.newpage()
      grid::pushViewport(grid::viewport(layout = grid::grid.layout(nr, nc)))
      for (ii in seq_len(nr)) for (jj in seq_len(nc)) {
        myplot <- .self$buffer[ii, jj][[1]]
        if (!is.null(myplot)) {
          print(myplot, vp = vplayout(ii, jj))
        }
      }
    },
    get_one_density_plot = function(out, mytitle, xpos=1, ypos=1){
      'Plots the posterior density of one hyperparameter'
      df <- data.frame(x = out)
      myplot <- ggplot(data = df, aes(x = x)) + geom_density() +
        xlab(mytitle) + ylab('density') +
        scale_colour_manual(values = .self$palettes$line, guide = FALSE)
      .self$set_in_buffer(myplot, xpos, ypos)
    },
    get_density_plot = function(out, mytitle, dolegend = TRUE,
                                xpos = 1, ypos = 1){
      'Plots the posterior density of >= 1 hyperparameters'
      auxplot <- ggplot(data = out, aes(x = x, fill = model)) +
        geom_density(alpha = 0.2) +
        xlab(mytitle) + ylab('density')
      # Legend only on request (e.g. suppressed for multi-panel figures)
      if (dolegend) {
        myplot <- auxplot +
          scale_fill_manual(values = .self$palettes$fill) +
          theme(legend.position = "top")
      } else {
        myplot <- auxplot +
          scale_fill_manual(values = .self$palettes$fill,
                            guide = FALSE)
      }
      .self$set_in_buffer(myplot, xpos, ypos)
    },
    get_scatterplot = function(out, mytitle, xpos = 1, ypos = 1){
      'Plots a scatterplot of two hyperparameters'
      myplot <- ggplot(data = out, aes(x = x, y = y, colour = model)) +
        geom_point(cex = 2, alpha = 0.2) +
        xlab(mytitle[1]) + ylab(mytitle[2]) +
        scale_colour_manual(values = .self$palettes$fill, guide = FALSE)
      .self$set_in_buffer(myplot, xpos, ypos)
    },
    get_ts_fit_plot = function(out, mytitle, mylabs, xpos = 1, ypos = 1){
      'Plots a time series plot with observed and fit (median plus 95 CI)'
      myplot <- ggplot(data = out, aes(x = year, y = obs, z = model)) +
        geom_ribbon(data = out,
                    aes(x = year, ymin = low95, ymax = high95),
                    fill = grey(.self$palettes$CI[1]),
                    alpha = .self$palettes$CI[2]) +
        geom_line(data = out,
                  aes(x = year, y = median, color = model)) +
        geom_point() +
        scale_color_manual(values = .self$palettes$line) +
        xlab(mylabs[1]) + ylab(mylabs[2]) + labs(title = mytitle)
      .self$set_in_buffer(myplot, xpos, ypos)
    },
    get_data_plot = function(dt, do_catch = TRUE, do_effort = FALSE,
                             do_cpue = FALSE, one_row = TRUE){
      'Plots catch, effort and/or CPUE time series on one page'
      nplots <- sum(c(do_catch, do_effort, do_cpue))
      if (nplots == 0) return()
      # Arrange panels in a single row or a single column
      nr <- if (one_row) 1 else nplots
      nc <- if (one_row) nplots else 1
      bufr <- array(list(), c(nr, nc))
      ridx <- 1
      cidx <- 1
      # Titles/labels indexed as catch (1), effort (2), CPUE (3)
      mtitle <- c('Time series of catch',
                  'Time series of effort',
                  'Time series of CPUE')
      # NOTE(review): the CPUE unit is only appended in one-row layouts;
      # this reproduces the original behaviour.
      yl <- if (one_row) c('catch', 'effort', 'CPUE [ton/h]'
                           ) else c('catch', 'effort', 'CPUE')
      if (do_catch) {
        catch_plot <- ggplot(dt, aes(x = year, y = catch)) +
          geom_line() +
          geom_point() +
          ylab(yl[1]) +
          ggtitle(mtitle[1])
        bufr[ridx, cidx] <- list(catch_plot)
        if (one_row) cidx <- cidx + 1 else ridx <- ridx + 1
      }
      if (do_effort) {
        effort_plot <- ggplot(dt, aes(x = year, y = effort)) +
          geom_line() +
          geom_point() +
          ylab(yl[2]) +
          ggtitle(mtitle[2])
        bufr[ridx, cidx] <- list(effort_plot)
        if (one_row) cidx <- cidx + 1 else ridx <- ridx + 1
      }
      if (do_cpue) {
        cpue_plot <- ggplot(dt, aes(x = year, y = catch / effort)) +
          geom_line() +
          geom_point() +
          ylab(yl[3]) +
          ggtitle(mtitle[3])
        bufr[ridx, cidx] <- list(cpue_plot)
        if (one_row) cidx <- cidx + 1 else ridx <- ridx + 1
      }
      # Render the staged panels on a grid layout
      vplayout <- function(x, y) {
        grid::viewport(layout.pos.row = x, layout.pos.col = y)
      }
      grid::grid.newpage()
      grid::pushViewport(grid::viewport(layout = grid::grid.layout(nr, nc)))
      for (ii in seq_len(nr)) for (jj in seq_len(nc)) {
        myplot <- bufr[ii, jj][[1]]
        if (!is.null(myplot)) {
          print(myplot, vp = vplayout(ii, jj))
        }
      }
    }
  )
)
|
e54cd99901adea1ceb7d14ef14ced1ccb41b3301
|
9ee587651e82c3efdf58036364c197829ffa57e1
|
/Chapter3_EcosystemComparison/10.05.2022_autoregressivemodels.R
|
7f1eae1b5081aeb9e5e3d12d01d561d0441d6472
|
[
"Apache-2.0"
] |
permissive
|
QutEcoacoustics/spatial-acoustics
|
7f0fd2af6663200ab529a2f8979eec56a0bf2e40
|
5e8eaba29576a59f85220c8013d0b083ddb70592
|
refs/heads/master
| 2023-04-15T09:50:44.063038
| 2023-03-14T23:36:36
| 2023-03-14T23:36:36
| 222,621,976
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,102
|
r
|
10.05.2022_autoregressivemodels.R
|
# Autoregressive (AR/MA) exploration of acoustic-index time series for a
# single sound class (set.group), grouped by site/point/month/period.
# NOTE(review): this script calls dplyr verbs (%>%, mutate_at, filter,
# group_by, ...) and coeftest() but only loads lubridate; it appears to
# rely on dplyr/tidyverse and lmtest being attached by the surrounding
# session -- confirm and add the missing library() calls.
library(lubridate)
#Autoregressive models
rm(list = ls())
set.seed(123)
set.group <- "bird"
# Helper: build absolute paths under the project data folder
getDataPath <- function (...) {
  return(file.path("C:/Users/n10393021/OneDrive - Queensland University of Technology/Documents/PhD/Project/Chapter3_SoundscapeEcosystemComparation", ...))
}
# Read the data, z-scale selected numeric columns, keep only the chosen
# class, count rows per site/point/month/class/period group, and drop
# bookkeeping/unused columns
data_og <- read.csv(getDataPath("09.05.2022_data.csv")) %>%
  mutate_at(c(3:6,47,48), ~(scale(.) %>% as.vector(.))) %>%
  filter(RFclass == set.group) %>%
  group_by(site, point, month, RFclass, period) %>%
  mutate(n = n()) %>%
  dplyr::select(everything(), -c(X, X.1, Recording_time, avg_rain_previous_months, future_ndvi, avg_temp_previous_months)) %>%
  distinct()
# Derive calendar components from the recording date
data_og$date_r <- ymd(data_og$date_r)
data_og$day <- day(data_og$date_r)
data_og$week <- week(data_og$date_r)
data_og$month <- month(data_og$date_r)
data_og$year <- year(data_og$date_r)
# Multivariate time series of the acoustic-index columns (10:33).
# NOTE(review): the name `ts` shadows stats::ts() once assigned.
ts <- ts(data_og[,10:33])
acf(ts)
# NOTE(review): ar.ols() is given the full (grouped, mixed-type) data
# frame rather than the numeric series `ts` built above -- presumably
# `ts` was intended; verify before trusting these estimates.
ar <- ar.ols(data_og, order.max = 1, demean = F, intercept = T)
print(ar)
coeftest(ar)
# Fit an MA(1) model and overlay the fitted values on the observed series
ma <- arima(ts, order = c(0,0,1))
print(ma)
ts.plot(ts)
MA_fit <- ts - resid(ma)
points(MA_fit, type = "l", col = 2, lty = 2)
|
51a4caf80f24775fb3f9a05865c904fe37c87ec0
|
8c66b045e8f6c7e1b9fff3deb33a39d8b58d3bef
|
/man/label_ref_snp.Rd
|
fe30d227c72900b33252e472c2f12241ee971304
|
[
"MIT"
] |
permissive
|
fboehm/countalleles
|
1f10c7bd824ce8a2901edf1690c2e8966b3eec35
|
fa59eb7dbace10372d7886f8cdfeb2a03aee020c
|
refs/heads/master
| 2016-09-06T00:13:06.828895
| 2015-05-20T16:39:49
| 2015-05-20T16:39:49
| 35,918,384
| 0
| 1
| null | 2015-05-20T16:36:16
| 2015-05-20T01:28:08
|
R
|
UTF-8
|
R
| false
| false
| 643
|
rd
|
label_ref_snp.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/make_ref_table.R
\name{label_ref_snp}
\alias{label_ref_snp}
\title{Label one allele (A/C/T/G) as reference and the other as other for use in determining numeric count genotypes.}
\usage{
label_ref_snp(gv_actg)
}
\arguments{
\item{gv_actg}{A genotype vector (coded as A/C/T/G) for a single SNP locus.}
}
\value{
a character vector of length 2 with names reference and other
}
\description{
Label one allele (A/C/T/G) as reference and the other as other for use in determining numeric count genotypes.
}
\examples{
label_ref_snp(c("TT", "CT", "CC", "TC"))
}
|
89a1d1dead909ede19e27da02eb5ba58c7443fd7
|
9c53f6a0e7c059f46c9e446e1396ede06d4a0958
|
/Week3/Code/DataWrangTidy.R
|
dfa8eaba61a28c225f494c228b2594ef3a101491
|
[] |
no_license
|
tisssu/CMEECourseWork
|
9f5dd832b7d227fccd85ea27199953858428d2ae
|
31482f38cb0fe0a60025ce864f59a1372e583f32
|
refs/heads/master
| 2020-03-30T19:31:48.387316
| 2019-08-29T13:02:41
| 2019-08-29T13:02:41
| 151,547,136
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,025
|
r
|
DataWrangTidy.R
|
# Wrangle the Pound Hill dataset from wide survey-sheet format into a
# tidy long data frame (one row per plot-metadata/species combination),
# using dplyr/tidyr.
library("dplyr")
library("tidyr")
# Input the datasets: the raw data have no header row; metadata use ';'
MyData <- as.matrix(read.csv("../Data/PoundHillData.csv", header = F, stringsAsFactors = F))
MyMetaData <- read.csv("../Data/PoundHillMetaData.csv", header = T, sep = ";", stringsAsFactors = F)
class(MyData)
# Quick look at the data (as_tibble replaces the deprecated tbl_df)
dplyr::as_tibble(MyData)
# Replace empty cells with "0" (MyData is a character matrix, so the 0 is
# stored as the string "0"; it is converted to integer counts below)
MyData[MyData == ""] <- 0
# Transpose so species become columns and observations become rows
MyData <- t(MyData)
colnames(MyData)
# First row holds the column names; strip it off into a data frame
TempData <- as.data.frame(MyData[-1, ], stringsAsFactors = F)
colnames(TempData) <- MyData[1, ] # assign column names from original data
rownames(TempData) <- NULL
# Reshape wide -> long. pivot_longer() supersedes gather(); the original
# call also relied on partial matching of the abbreviated `k=` for `key=`.
MyWrangledData <- tidyr::pivot_longer(TempData,
  cols = !c(Cultivation, Block, Plot, Quadrat),
  names_to = "Species", values_to = "Count")
head(TempData)
# Convert metadata columns to factors and counts to integers
for (col in c("Cultivation", "Block", "Plot", "Quadrat")) {
  MyWrangledData[[col]] <- as.factor(MyWrangledData[[col]])
}
MyWrangledData[["Count"]] <- as.integer(MyWrangledData[["Count"]])
str(MyWrangledData)
|
a361b4e389aaf02ce8a863ff0347503804bb26e6
|
933125137583b8683b765a94003779012b0020c2
|
/inst/doc/pace.R
|
a1083887c55c88c30e26aa0ec7ec4441bbf4461f
|
[] |
no_license
|
cran/activatr
|
cc0508650ae46bb6380976942fbdde33f5076811
|
64cf23cd3a23476890aa284fd99393fdf74dae01
|
refs/heads/master
| 2023-05-26T11:17:01.271304
| 2023-05-01T21:00:02
| 2023-05-01T21:00:02
| 334,079,713
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,779
|
r
|
pace.R
|
# Vignette code extracted with knitr::purl() from the activatr "pace"
# vignette: parse a GPX trace, derive speed and mile pace, and plot pace
# over time with and without lead/lag smoothing. Chunk-delimiter comments
# (## ----name----) are preserved from the source vignette.
## ----setup, include = FALSE---------------------------------------------------
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## ----parse--------------------------------------------------------------------
library(activatr)
# Get the running_example.gpx file included with this package.
filename <- system.file(
  "extdata",
  "running_example.gpx.gz",
  package = "activatr"
)
df <- parse_gpx(filename)
## ----table, echo=FALSE, results='asis'----------------------------------------
knitr::kable(head(df, 5))
## ----speed--------------------------------------------------------------------
# Add a speed column derived from consecutive track points
df <- mutate_with_speed(df)
## ----speedtable, echo=FALSE, results='asis'-----------------------------------
knitr::kable(head(df, 5))
## ----pace---------------------------------------------------------------------
# Convert speed to mile pace
df$pace <- speed_to_mile_pace(df$speed)
## ----pacetable, echo=FALSE, results='asis'------------------------------------
knitr::kable(head(df, 5))
## ----paceformatter, warning = FALSE, message = FALSE, fig.show = "hold"-------
library(ggplot2)
library(dplyr)
library(lubridate)
# Pace plotted raw and then with pace_formatter for readable axis labels;
# paces of 1200 or slower (pauses/stops) are filtered out of both plots.
ggplot(filter(df, as.numeric(pace) < 1200)) +
  geom_line(aes(x = time, y = as.numeric(pace)), color = "blue") +
  scale_y_reverse() +
  ylab("pace")
ggplot(filter(df, as.numeric(pace) < 1200)) +
  geom_line(aes(x = time, y = as.numeric(pace)), color = "blue") +
  scale_y_reverse(label = pace_formatter) +
  ylab("pace")
## ----leadlag, warning = FALSE, message = FALSE--------------------------------
# Smoother pace curve: recompute speed over a 10-point lead/lag window
df <- mutate_with_speed(df, lead = 10, lag = 10)
df$pace <- speed_to_mile_pace(df$speed)
ggplot(filter(df, as.numeric(pace) < 1200)) +
  geom_line(aes(x = time, y = as.numeric(pace)), color = "blue") +
  scale_y_reverse(label = pace_formatter) +
  ylab("pace")
|
aed5a8edfe33d842f765b911d4aab68990617d32
|
de8d9db7e76f391e849705c01e863f52edfc7260
|
/profile.R
|
add1cf7a1eef10aaa554e9b2edd235c4af0021f1
|
[] |
no_license
|
pmur002/gggrid-report
|
a9101c231d9dadc06f65e879238c3f45bb290a89
|
3ad7aa77e12c145f8a3872af6f6e18f5ac6284cf
|
refs/heads/main
| 2023-04-19T18:54:32.235675
| 2021-05-31T02:51:43
| 2021-05-31T02:51:43
| 370,555,766
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 814
|
r
|
profile.R
|
# Profile dvir::grid.latex() rendering LaTeX text into an existing grid
# viewport, and save an interactive profvis report. This is an
# interactive workflow: the session was killed mid-run and the profiling
# output hand-repaired (see comments below).
library(profvis)
# Draw the base plot, then re-echo it as grid graphics so we can navigate
# into its viewport
source("rahlf-plot.R")
latex <- readLines("rahlf-text.tex")
library(gridGraphics)
grid.echo()
downViewport("graphics-plot-1")
# Start collecting Rprof samples, then run the call being profiled
Rprof("dvir-prof.out")
library(dvir)
grid.latex(latex, preamble="", postamble="", engine=luatexEngine, tinytex=FALSE,
           x=unit(1, "cm"), y=unit(1, "npc") - unit(1, "cm"), just=c("left", "top"))
## Had to "kill" the session, then hand modify the end of "dvir-prof.out"
## to remove an incomplete line
## Then skip the next line ...
Rprof(NULL)
## ... and, within a new R session, generate profvis report
p <- profvis(prof_input="dvir-prof.out")
## Full profiling output can be quite large
## awk -e 'NR < 20000 { print }' dvir-prof.out > dvir-prof-sample.out
## p <- profvis(prof_input="dvir-prof-sample.out")
htmlwidgets::saveWidget(p, "dvir-prof.html")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.