blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
219679f82bfe04d24816e215f25a8748b9908e29
|
bcc47704e21f119f2c5910c46d7e44d207177ffd
|
/feature_eng.r
|
647507f5e3f3e681993bd1eee55df9be51a8c115
|
[] |
no_license
|
Allisterh/NeuralNetworks
|
a960bc5a18b53c450ee1ad87017a6db88cea2d28
|
0f3a72c3e155d17f75047628450c65a264fa52c4
|
refs/heads/master
| 2022-04-07T17:32:10.518153
| 2020-02-11T15:01:39
| 2020-02-11T15:01:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,066
|
r
|
feature_eng.r
|
# feature_eng.r — feature-engineering demo: a logistic regression and a
# decision tree are fit to a radially-separable 2-D class, first on raw
# (x, y) coordinates, then again after adding the engineered
# distance-from-origin feature d = sqrt(x^2 + y^2).
# NOTE(review): clears the whole workspace on source — standalone classroom
# script only.
rm(list = ls())
set.seed(2018)
# Regular 100 x 100 grid over [-10, 10]^2.
x <- seq(-10, 10, length.out = 100)
y <- seq(-10, 10, length.out = 100)
MyData <- expand.grid(x = x, y = y)
# True class: outside a circle of radius 5 centred at the origin.
MyData$z <-as.factor(sqrt(MyData$x^2+MyData$y^2)>5)
# Inject label noise: 5% of rows forced FALSE, then 5% forced TRUE
# (the two samples may overlap).
Index_Temp=sample(nrow(MyData), 0.05*nrow(MyData))
MyData$z[Index_Temp]<-FALSE
Index_Temp=sample(nrow(MyData), 0.05*nrow(MyData))
MyData$z[Index_Temp]<-TRUE
library(ggplot2)
# Visualize the noisy labels on the full grid.
ggplot(MyData, aes(x, y)) + geom_raster(aes(fill = z))+theme_bw()
# Train on a 10% random subset of the grid.
trainIndex<-sample(nrow(MyData), 0.1 *nrow(MyData))
MyDataTrain=MyData[trainIndex,]
ggplot(MyDataTrain, aes(x, y))+geom_point(aes(colour = factor(z)))+ theme_bw()
# --- Logistic regression on raw coordinates -------------------------------
GLM_Model<-glm(z~.,data=MyDataTrain, family = "binomial")
MyData$Predicted_z_glm<-predict(GLM_Model,MyData, type="response")
summary(MyData$Predicted_z_glm)
ggplot(MyData, aes(x, y)) + geom_raster(aes(fill = Predicted_z_glm>0.5))+theme_bw()
library(pROC)
roc(MyData$z, MyData$Predicted_z_glm)
MyData$Predicted_z_glm<-NULL
# --- Decision tree on raw coordinates -------------------------------------
library(rpart)
Tree_Model<-rpart (z~.,data=MyDataTrain, method='class')
library(rattle)
fancyRpartPlot(Tree_Model)
# NOTE(review): T shadows the TRUE shorthand here; column 2 of the rpart
# class prediction holds the probability of the second factor level.
T=predict(Tree_Model,MyData)
MyData$Predicted_z_tree=T[,2]
ggplot(MyData, aes(x, y)) + geom_raster(aes(fill = Predicted_z_tree>0.5))+theme_bw()
roc(MyData$z, T[,2])
MyData$Predicted_z_tree<-NULL
# --- Both models again with the engineered distance feature d -------------
MyData$d=sqrt(MyData$x^2+MyData$y^2)
MyDataTrain=MyData[trainIndex,]
GLM_Model<-glm(z~.,data=MyDataTrain, family = "binomial")
MyData$Predicted_z_glm<-predict(GLM_Model,MyData, type="response")
ggplot(MyData, aes(x, y)) + geom_raster(aes(fill = Predicted_z_glm>0.5))+theme_bw()
roc(MyData$z, MyData$Predicted_z_glm)
MyData$Predicted_z_glm<-NULL
Tree_Model<-rpart (z~.,data=MyDataTrain, method='class')
fancyRpartPlot(Tree_Model)
T=predict(Tree_Model,MyData)
MyData$Predicted_z_tree=T[,2]
ggplot(MyData, aes(x, y)) + geom_raster(aes(fill = Predicted_z_tree>0.5))+theme_bw()
roc(MyData$z, T[,2])
MyData$d<-NULL
MyData$Predicted_z_tree<-NULL
#####################################################################################################
# Second experiment: the true class is now an annulus (5 < distance < 8);
# the whole raw-vs-engineered comparison is repeated below.
MyData$T=sqrt(MyData$x^2+MyData$y^2)
MyData$z1 <-(MyData$T>5)
MyData$z2 <- (MyData$T <8)
MyData$z<-MyData$z2& MyData$z1
MyData$z1<-NULL
MyData$z2<-NULL
MyData$T<-NULL
# Label noise, as above.
Index_Temp=sample(nrow(MyData), 0.05*nrow(MyData))
MyData$z[Index_Temp]<-FALSE
Index_Temp=sample(nrow(MyData), 0.05*nrow(MyData))
MyData$z[Index_Temp]<-TRUE
library(ggplot2)
ggplot(MyData, aes(x, y)) + geom_raster(aes(fill = z))+theme_bw()
trainIndex<-sample(nrow(MyData), 0.1 *nrow(MyData))
MyDataTrain=MyData[trainIndex,]
ggplot(MyDataTrain, aes(x, y))+geom_point(aes(colour = factor(z)))+ theme_bw()
# Logistic regression on raw coordinates.
GLM_Model<-glm(z~.,data=MyDataTrain, family = "binomial")
MyData$Predicted_z_glm<-predict(GLM_Model,MyData, type="response")
summary(MyData$Predicted_z_glm)
ggplot(MyData, aes(x, y)) + geom_raster(aes(fill = Predicted_z_glm>0.5))+theme_bw()
roc(MyData$z, MyData$Predicted_z_glm)
MyData$Predicted_z_glm<-NULL
# Decision tree on raw coordinates.
Tree_Model<-rpart (z~.,data=MyDataTrain, method='class')
fancyRpartPlot(Tree_Model)
T=predict(Tree_Model,MyData)
MyData$Predicted_z_tree=T[,2]
ggplot(MyData, aes(x, y)) + geom_raster(aes(fill = Predicted_z_tree>0.5))+theme_bw()
roc(MyData$z, T[,2])
MyData$Predicted_z_tree<-NULL
# Both models again with the engineered distance feature.
MyData$d=sqrt(MyData$x^2+MyData$y^2)
MyDataTrain=MyData[trainIndex,]
GLM_Model<-glm(z~.,data=MyDataTrain, family = "binomial")
MyData$Predicted_z_glm<-predict(GLM_Model,MyData, type="response")
ggplot(MyData, aes(x, y)) + geom_raster(aes(fill = Predicted_z_glm>0.5))+theme_bw()
roc(MyData$z, MyData$Predicted_z_glm)
MyData$Predicted_z_glm<-NULL
Tree_Model<-rpart (z~.,data=MyDataTrain, method='class')
fancyRpartPlot(Tree_Model)
T=predict(Tree_Model,MyData)
MyData$Predicted_z_tree=T[,2]
ggplot(MyData, aes(x, y)) + geom_raster(aes(fill = Predicted_z_tree>0.5))+theme_bw()
roc(MyData$z, T[,2])
MyData$d<-NULL
MyData$Predicted_z_tree<-NULL
#######################################################################################
# NOTE(review): z2 was already removed above — this final line is a no-op leftover.
MyData$z2<-NULL
|
61a08f6f8f03a51ae945c3eeb4fc7dd94ce8d957
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/FarmTest/examples/plot.farm.scree.Rd.R
|
f6c05ba16ab56eff22c134f4b6fdbe3f07f4b12b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 429
|
r
|
plot.farm.scree.Rd.R
|
# Extracted example code for FarmTest::plot.farm.scree (auto-extracted Rd example).
library(FarmTest)
### Name: plot.farm.scree
### Title: Diagnostic plots from factor-finding
### Aliases: plot.farm.scree
### ** Examples
# Simulate data from a 3-factor model: X = F B' + E,
# with n = 20 observations of p = 100 variables.
set.seed(100)
p = 100
n = 20
epsilon = matrix(rnorm( p*n, 0,1), nrow = n)
B = matrix(rnorm(p*3,0,1), nrow=p)
fx = matrix(rnorm(3*n, 0,1), nrow = n)
X = fx%*%t(B)+ epsilon
# Scree diagnostics without cross-validation; default and customized plots.
output = farm.scree(X, cv=FALSE)
plot(output)
plot(output, scree.plot=FALSE, col="blue", main="Customized plot")
|
c92f0e76ca4d4ca0cb61a1bd68f9a732c27a2e86
|
2e2d340f2d59d1e0e4260305e69e794476874bbe
|
/utils/load_packages.R
|
b204144a4a4db81f2c9c5f702d81be6ef5bdb15c
|
[] |
no_license
|
aedobbyn/twitch
|
6fb5f4a15e707f8ec85ad65457aa47420a82798a
|
7cbeb0a8db49e418e4c9e901bf6d54b9af0d150a
|
refs/heads/master
| 2020-03-08T13:15:04.109072
| 2018-04-05T04:01:48
| 2018-04-05T04:01:48
| 128,153,109
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 80
|
r
|
load_packages.R
|
# load_packages.R — attach the packages used across this project.
library(tidyverse)  # data wrangling + ggplot2
library(here)       # project-root-relative file paths
library(feather)    # fast on-disk data frames
library(RMySQL)     # MySQL database access
library(glue)       # string interpolation
|
afe236664e3ad3cca50bfcf64639188db64ef2a5
|
5e1159311fa252b2dba45129c8335cee2a1b652e
|
/R/as.plantuml.logical.R
|
d30304a13214175a71295e6a61f7e674481f44d9
|
[] |
no_license
|
ShigiDono/plantuml
|
780c19df90cbf2ef81a6d95520433769eeb5ff2f
|
821852bba9ee2f8958910712e0ef58569124e7d2
|
refs/heads/master
| 2020-03-15T19:45:23.417049
| 2018-05-06T07:55:39
| 2018-05-06T07:55:39
| 132,316,737
| 0
| 0
| null | 2018-05-06T07:35:16
| 2018-05-06T07:35:16
| null |
UTF-8
|
R
| false
| false
| 634
|
r
|
as.plantuml.logical.R
|
#' Convert a logical vector to a \code{plantuml} object
#'
#' Convert a \code{logical} to a \code{plantuml} object.
#' This can be plotted.
#'
#' @param x logical vector to be converted to plantuml code.
#' @param complete passed through to \code{as.plantuml.vma}. Default: FALSE.
#' @param nm passed through to \code{as.plantuml.vma}. Default: NULL.
#'
#' @return object of class \code{plantuml} which can be plotted.
#' @export
#'
#' @examples
#' \dontrun{
#' x <- '
#' @startuml --> "First Activity" -->[You can put also labels] "Second Activity"
#' -->
#' @enduml
#' '
#' x <- plantuml( x )
#' plot( x ) }
#'
as.plantuml.logical <- function(
  x,
  complete = FALSE,
  nm = NULL
) {
  # Delegate to the shared vector/matrix/array converter.
  return(
    as.plantuml.vma(
      x = x,
      complete = complete,
      nm = nm
    )
  )
}
|
b38899277db193343503f82e8baf1d564cd021fb
|
d11508807942f8c4f2aa85e8a51cb2d45b0c8d4d
|
/man/plot.glide.Rd
|
eb712490a771310f4b4f12cedd7368cdd76a1975
|
[] |
no_license
|
cran/GLIDE
|
37056ac28b2c11f2c63393bab61deefeb5007609
|
2410bfd3cb405eea74686ed93c5983a943f3d591
|
refs/heads/master
| 2022-06-22T11:31:41.697096
| 2022-05-25T22:00:02
| 2022-05-25T22:00:02
| 87,986,624
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,266
|
rd
|
plot.glide.Rd
|
\name{plot.glide}
\alias{plot.glide}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
A function used to draw the q-q plot
}
\description{
It is used to draw the q-q plot
}
\usage{
\method{plot}{glide}(x,qcutoff=0.2,xlab="Expected null p-values (log base 10)",
ylab="Observed p-values (log base 10)",...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
x is a data frame: the output of the \code{glide} function.
}
\item{qcutoff}{
qcutoff is the q-value cutoff.
}
\item{xlab}{
xlab is the default x label.
}
\item{ylab}{
ylab is the default y label.
}
\item{...}{
the rest plot arguments.
}
}
\value{No return value, q-q plot will be generated.}
\author{
James Y. Dai and X. Wang
}
\examples{
\donttest{
##first generate output using the glide function
#load an example data
data(simdata)
simdat=simdata$simdat
coeff=simdata$coeff
formula=as.formula("outcome~age+sex+pc1+pc2+pc3")
genotype_columns=which(grepl("^SNP",colnames(simdat)))
#run glide
out=glide(formula=formula,exposure_coeff=coeff,genotype_columns,data=simdat,np=100000,
qcutoff=0.2,parallel=TRUE,corenumber=1,verbose=TRUE)
##draw the plot
plot.glide(out)
}
}
|
64e5ede7d06dff2c0cc5a69fa6d23d777c3bee1e
|
df0497b741e158c5aa53ee9e2659d538dbb801e7
|
/package_load.R
|
e565087f9d74c45dbe3631a25ab272f42242559b
|
[
"MIT"
] |
permissive
|
allisonbertie5/tamu_datathon_2020
|
5980066e06dab5e711da9c8c94eb57deb576e536
|
4a3e9b7aefb473cb4053f53b9fcc96eb3049fce8
|
refs/heads/main
| 2022-12-29T19:58:25.461352
| 2020-10-18T16:09:59
| 2020-10-18T16:09:59
| 305,138,171
| 1
| 0
|
MIT
| 2020-10-18T15:51:12
| 2020-10-18T15:51:11
| null |
UTF-8
|
R
| false
| false
| 109
|
r
|
package_load.R
|
# package_load.R — attach the packages used by this project.
library(tidycensus)     # US Census data access
library(googledrive)    # Google Drive API
library(tidyverse)
library(stringr)        # also attached by tidyverse; kept explicit
library(googlesheets4)  # Google Sheets API
|
e15956cd51c575f7454e0037c22df9249bcffd7d
|
ffb90a8ff6c0ce42877da20850a5c7a488424733
|
/QuantStrat/03-nXema.r
|
cc4fc5d35a32fc462e68729fd9dbfd1a43220e2e
|
[] |
no_license
|
UTexas80/splGoldenDeathX
|
78f008298642b6fd3aa3bb910c488d9e14a3f608
|
f4367ff357ecdd8e0132888d6768e8c600850132
|
refs/heads/master
| 2023-05-02T18:29:58.171278
| 2023-04-25T16:41:56
| 2023-04-25T16:41:56
| 191,051,469
| 2
| 1
| null | 2019-10-14T00:19:59
| 2019-06-09T20:18:54
|
R
|
UTF-8
|
R
| false
| false
| 5,306
|
r
|
03-nXema.r
|
# 03-nXema.r — quantstrat-style trading-strategy script: wires up EMA
# indicators, death-cross signals, short entry/exit rules and position
# limits, then runs and evaluates the strategy.
# NOTE(review): relies on project helpers (setup, indicators, AddSignals,
# rules, positionLimits, Strategy, evaluation, report, ApplyIndicators,
# ApplySignals) and globals (nXema, crossEMA, deathX, deathXno, maxpos,
# minpos) defined elsewhere in the repository — confirm before reuse.
################################################################################
# 1.0 Setup
################################################################################
setup(nXema)
################################################################################
# 3.0 Indicators
################################################################################
# browser()
# x <- list("EMA", 4, 20, "020")
# z<-paste("'",as.character(x),"'",collapse=", ",sep="")
# indicators(x)
# ------------------------------------------------------------------------------
# indicators(EMA, 4, 20, "020")
# indicators(EMA, 4, 50, "050")
# indicators(EMA, 4, 100, "100")
# indicators(EMA, 4, 200, "200")
# indEMA <- cbind(EMA,4,t(cross(20,50,100,200)),sprintf("%03d",t(cross(20,50,100,200))))
# apply(crossEMA, 1, function(x)do.call(indicators, as.list(x)))
# Register one EMA indicator per row of crossEMA; apply() coerces each row
# to character, hence the as.integer() conversions for the numeric columns.
apply(crossEMA, 1, function(x)
  indicators(
    x[1],
    as.integer(x[2]),
    as.integer(x[3]),
    x[4]))
str(getStrategy(nXema)$indicators)
# ------------------------------------------------------------------------------
# browser()
ApplyIndicators(nXema) # apply indicators
# browser()
# nXema_mktdata_ind <- applyIndicators(
#  strategy = strategy.st,
#  mktdata = SPL.AX)
################################################################################
# 4.0 Signals
################################################################################
# browser()
# Death-cross entry and its negation as exit, built on the four EMA columns.
AddSignals("sigFormula",c("ema.020","ema.050","ema.100","ema.200"), deathX, "trigger", TRUE , nXema, "shortEntry")
AddSignals("sigFormula",c("ema.020","ema.050","ema.100","ema.200"), deathXno, "trigger", TRUE , nXema, "shortExit")
# add.signal(strategy.st,
#  name = "sigFormula",
#  arguments = list(
#    columns = c("EMA.020","EMA.050","EMA.100", "EMA.200"),
#    formula = deathX,
#    label = "trigger",
#    cross = TRUE),
#  label = paste(nXema, "shortEntry", sep = "_"))
# ------------------------------------------------------------------------------
#add.signal(strategy.st,
#  name = "sigFormula",
#  arguments = list
#  (columns = c("EMA.020","EMA.050","EMA.100", "EMA.200"),
#    formula = deathXno,
#    label = "trigger",
#    cross = TRUE),
#  label = paste(nXema, "shortExit", sep = "_"))
# ------------------------------------------------------------------------------
str(getStrategy(nXema)$signals)
ApplySignals(nXema)
################################################################################
# 5.0 Rules https://tinyurl.com/y93kc22r
################################################################################
# Market orders at next open: sell 1000 short on entry, buy 1000 back on exit.
rules(paste(nXema, "shortEntry", sep = "_"), TRUE, -1000, "short", "market", "Open", "market", 0, "enter")
rules(paste(nXema, "shortExit", sep = "_"), TRUE, 1000, "short", "market", "Open", "market", 0, "exit")
# rules("dXema_shortEntry", 1:10, xlab="My x axis", ylab="My y axis")
# add.rule(strategy.st,
#  name = "ruleSignal",
#  arguments = list(
#    sigcol = "dXema_shortEntry",
#    sigval = TRUE,
#    orderqty = -1000,
#    orderside = "short",
#    ordertype = "market",
#    prefer = "Open",
#    pricemethod = "market",
#    TxnFees = 0),
#    osFUN = osMaxPos),
#  type = "enter",
#  path.dep = TRUE)
# ------------------------------------------------------------------------------
# add.rule(strategy.st,
#  name = "ruleSignal",
#  arguments = list(
#    sigcol = "dXema_shortExit",
#    sigval = TRUE,
#    orderqty = "all",
#    orderside = "short",
#    ordertype = "market",
#    prefer = "Open",
#    pricemethod = "market",
#    TxnFees = 0),
#  type = "exit",
#  path.dep = TRUE)
################################################################################
# 6.0 Position Limits
################################################################################
positionLimits(maxpos, minpos)
################################################################################
# 7.0 Strategy
################################################################################
Strategy(nXema)
################################################################################
# 8.0 Evaluation - update P&L and generate transactional history
################################################################################
evaluation()
################################################################################
# 9.0 Trend - create dashboard dataset
################################################################################
report(nXema)
# ------------------------------------------------------------------------------
|
06942082b4a4b4f144f6f217d79318e2aec84fe8
|
f78121fe0d58d63c1f537077fc434bb112e8a565
|
/4-Exploratory Data Analysis/Programming Assignment 2/plot5.R
|
4a469d348fcf3c3803141b945ceb2e9a7699909a
|
[] |
no_license
|
ManmohitRekhi/-datasciencecoursera
|
4fb237d5caa63b7006dc806d2073c66255a1a187
|
13ad2c7c12505f3c6db5edd9c02bbbe8307f0f67
|
refs/heads/master
| 2021-01-18T21:32:01.358620
| 2014-06-22T19:09:11
| 2014-06-22T19:09:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 948
|
r
|
plot5.R
|
# plot5.R — How have PM2.5 emissions from motor-vehicle sources changed
# 1999-2008 in Baltimore City (fips == "24510")?
# Inputs:  summarySCC_PM25.rds (NEI), Source_Classification_Code.rds (SCC)
# Output:  plot5.png (total emissions per year, points joined by a line)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
png(filename = "plot5.png", height = 720, width = 720)
par(bg = "transparent")
# SCC codes for on-road motor-vehicle sources (diesel/gasoline x heavy/light
# duty); %in% replaces the original chain of `==` comparisons joined by `|`.
vehicle_sectors <- c(
  "Mobile - On-Road Diesel Heavy Duty Vehicles",
  "Mobile - On-Road Diesel Light Duty Vehicles",
  "Mobile - On-Road Gasoline Heavy Duty Vehicles",
  "Mobile - On-Road Gasoline Light Duty Vehicles"
)
scccopy <- SCC[SCC$EI.Sector %in% vehicle_sectors, 1]
# Restrict to Baltimore City, then to motor-vehicle SCC codes.
NEI <- NEI[NEI$fips == "24510", ]
NEI1 <- NEI[NEI$SCC %in% scccopy, ]
# Total emissions per year (column 4 = Emissions). Years are sorted so the
# connecting line is drawn left to right, and the aggregation works for
# whatever years are present rather than four hard-coded ones.
x <- sort(unique(NEI1$year))
y <- vapply(x, function(yr) sum(NEI1[NEI1$year == yr, 4]), numeric(1))
plot(x, y, pch = 19, type = "b", lty = 2, col = "blue",
     xlab = "Years", ylab = "PM2.5 (Tons / Year)")
title("BALTIMORE CITY PM2.5 EMISSION FROM MOTOR VEHICLE SOURCES")
text(x, y, round(y, 2), cex = 1, adj = c(0.5, -0.7))
dev.off()
|
e4b3de80e71c9784eb037e90b4922e4c83613ab3
|
6713b68c912af377c741b26fe31db0fe6f6194d4
|
/2nd Term/Visualisation/Sample Codes/Lecture_4.R
|
8a663694094ce0c4f316c18d708660bb3d142709
|
[] |
no_license
|
Lanottez/IC_BA_2020
|
820e8d9c1dbb473ed28520450ec702f00c6684ed
|
8abd40c6a5720e75337c20fa6ea89ce4588016af
|
refs/heads/master
| 2023-08-25T05:52:08.259239
| 2021-11-03T07:27:11
| 2021-11-03T07:27:11
| 298,837,917
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 647
|
r
|
Lecture_4.R
|
# Lecture_4.R — ggplot2 basics: aesthetic mappings vs. fixed colours,
# and faceting, using the built-in mpg data set.
library(tidyverse)
# Look up documentation on mpg
?mpg
# "blue" inside aes() is mapped as a data value, not used as a colour —
# a classic gotcha; compare with the call below.
ggplot(data = mpg) +
  geom_point(mapping = aes(x = displ, y = hwy, color ="blue"))
# color outside of aes() function
ggplot(data = mpg) +
  geom_point(mapping = aes(x = displ, y = hwy), color ="blue")
# Look up the function details
?facet_grid
# midwest is another data frame that comes with ggplot2
# you can use View() to see the table in RStudio
midwest %>% View()
ggplot(data = mpg)+
  geom_point(mapping = aes(x = displ, y = hwy))
# facet technique
ggplot(data = mpg)+
  geom_point(mapping = aes(x = displ, y = hwy)) +
  facet_grid(vars(drv), vars(cyl), labeller = label_both)
|
a3a0b57e9fbd16121f6d6f2bd8dc9d84f9404964
|
8ff090148036ff72039a086f69a142dfa9a6bc99
|
/R/plateAdjust.R
|
4402a22f21d5a3a8955f53a2175c335ffaeed9ad
|
[
"MIT"
] |
permissive
|
schyen/somefxns
|
d01483414bf3c5e913403bb2a296533349dcdb38
|
6ac702bf6fa8b81348710ee3be2deed949c269bb
|
refs/heads/master
| 2021-06-25T03:19:45.079807
| 2019-08-06T13:15:08
| 2019-08-06T13:15:08
| 146,625,419
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,359
|
r
|
plateAdjust.R
|
#' plateAdjust
#'
#' normalized absorbance values by adjusting to blank
#'
#' @param plateDF dataframe. first spreadsheet of victor file
#' @param metadata dataframe. well metadata. must have columns:
#'   Well, platerow, platecol, strain, curveID, welltype, media, abx, wellconc
#' @param well_include string or vector of string. Default NULL. Which wells to
#'   include in analysis
#' @param blank_by \code{c('row','col', 'T0')}. Default \code{'row'}.
#'   Sets orientation of blanks.
#'   \code{'row'} means blanks are in the same row as corresponding wells
#'   \code{'col'} means blanks are in the same column as corresponding wells
#'   \code{'T0'} means blank by reading at time 0 (aka repeat 1)
#' @param blank_label string. Default \code{'blank'}.
#'   How are blanks identified in metadata
#'
#' @import dplyr
#' @return
#' full dataframe with metadata, absorbance, adjusted absorbance in one dataframe
#' @export
plateAdjust <- function(plateDF, metadata, well_include=NULL, blank_by = 'row',
                        blank_label = 'blank') {
  if(!blank_by %in% c('row','col','T0')) {
    stop("blank_by must be either 'row', 'col' or 'T0'")
  }
  # Translate the user-facing values to the metadata column names used below.
  if (blank_by == 'row') blank_by <- 'platerow'
  if (blank_by== 'col') blank_by <- 'platecol'
  # Rename the last column of the plate sheet to 'abs' (the raw reading).
  newcolname = colnames(plateDF)
  newcolname[length(newcolname)] <- 'abs'
  colnames(plateDF) <- newcolname
  # adding metadata
  full <- merge(plateDF, metadata, 'Well')
  # converting time to minute
  # NOTE(review): assumes one reading ("Repeat") every 10 minutes — confirm.
  full$minute <- (full$Repeat - 1) * 10
  full$hour <- full$minute/60
  # remove wells
  if(!is.null(well_include)) {
    full <- full[full$Well %in% well_include,]
  }
  full <- rename(full, plateno = Plate)
  if(blank_by != 'T0') {
    # Subtract the blank well's reading from every well in the same
    # row/column, separately for each timepoint (Repeat).
    absAdjust <- function(d) {
      blankval <- d$abs[d$welltype == blank_label]
      d$adj <- d$abs - blankval
      return(d)
    }
    # adjust absorbance using blank
    full <- full %>%
      group_by(!! sym(blank_by), Repeat) %>%
      do(absAdjust(.)) %>%
      as.data.frame()
  }
  else {
    # Blank each well against its own first (time 0) reading.
    absAdjust <- function(d) {
      blankval <- d$abs[d$Repeat == 1]
      d$adj <- d$abs - blankval
      return(d)
    }
    full <- full %>%
      group_by(Well) %>%
      arrange(Repeat) %>%
      do(absAdjust(.)) %>%
      as.data.frame()
    # remove timepoint 0
    full <- filter(full, Repeat != 1)
  }
  return(full)
}
|
f5ec6fa5259d33d1ad45f59f33965bfa6b43f3a3
|
a51f14302c8e4a2a0a48dc636d035c4e6669f686
|
/R/finalizeSkript.R
|
83527790d0698b35012b6e6dd1e511d00a0e6c7f
|
[] |
no_license
|
holgerman/toolboxH
|
b135c6033c015ac0c4906392f613945f1d2763ad
|
fb8a98ee4629dc5fef14b88f2272d559d5d40f30
|
refs/heads/master
| 2022-07-07T22:26:39.857012
| 2022-06-23T14:56:23
| 2022-06-23T14:56:23
| 100,366,781
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,845
|
r
|
finalizeSkript.R
|
### end up a skript
#' @title Finalize a script run
#' @description Prints a run summary at the end of an analysis script:
#'   the warnings accumulated so far, \code{sessionInfo()}, optionally the
#'   \code{str()} of every non-function object in the global environment,
#'   and optionally saves the workspace image to \code{obj/<name>.RData}.
#' @param myfilename basename used for the saved image, Default: filename
#' @param saveTheImage save the workspace image? Default: FALSE
#' @param dostr print str() of all non-function objects in the global
#'   environment? Default: FALSE
#' @param mypathwd directory to switch to before saving, Default: pathwd
#' @return invisible NULL; called for its side effects (messages, printing,
#'   optionally saving an image)
#' @details If a global \code{time0} exists, the total elapsed time since it
#'   is printed at the end.
#' @examples
#' \dontrun{
#' if(interactive()){
#'  #EXAMPLE1
#'  }
#' }
#' @rdname finalizeSkript
#' @export
finalizeSkript <- function(myfilename=filename, saveTheImage=FALSE, dostr=FALSE, mypathwd=pathwd){
  # 130418: removed the "do pdf" variant, since one can use Compile Notebook in RStudio
  # The defaults are promises referring to the globals `filename`/`pathwd`;
  # fall back gracefully when neither the argument nor the global exists.
  # (Bug fixed: the old guards `exists('myfilename')`/`exists('mypathwd')`
  # were always TRUE inside the function, and the fallback assigned to
  # `pathwd` instead of `mypathwd`.)
  if (missing(myfilename) && !exists("filename")) myfilename <- "string_myfilenme_not_defined"
  if (missing(mypathwd) && !exists("pathwd")) mypathwd <- getwd()
  message("==================================================================================")
  message("\n\nWarnings found so far:\n\n")
  print(table(names(warnings())))
  message("==================================================================================")
  message("\n\nSession Info::\n\n")
  print(sessionInfo())
  if (dostr == TRUE) {
    message("==================================================================================")
    message("\n\nInfos about R-object included in R Session:\n\n")
    # Inspect the *global* environment (bug fixed: a bare ls()/get() here
    # listed this function's local frame, not the session objects).
    for (i in ls(envir = globalenv())) {
      obj <- get(i, envir = globalenv())
      if (mode(obj) != "function") {
        print("_________________________________")
        print(i)
        str(obj)
      }
    }
  }
  if (saveTheImage == TRUE) {
    setwd(mypathwd)
    save_filename <- paste0("obj/", myfilename, ".RData")
    message(paste("image saved under :\n", save_filename))
    save.image(file = save_filename)
    ## for testing purposes: load(
  }
  message("==================================================================================")
  message("\n\nTotal Time:\n\n")
  if (exists("time0")) print(Sys.time() - time0)
  invisible(NULL)
}
|
ffdd92d0b756ea2f9c90021f81674e1e43fa5655
|
b15d2a39498cad4bdbfcc55a55f4f3d34f4221e4
|
/R/exaggerate_img_control.R
|
20493821cea5aa92ebc92dd5a685ab176f334be4
|
[
"MIT"
] |
permissive
|
srvanderplas/ShoeScrubR
|
a9465d28ecea4286b27a617bfa5a6b2b2fe8c5b1
|
972bb2a0092c5bf62bbe5521b54f689f2ad8465e
|
refs/heads/master
| 2020-07-23T02:20:37.169159
| 2019-11-25T22:33:23
| 2019-11-25T22:33:23
| 207,416,080
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,832
|
r
|
exaggerate_img_control.R
|
#' Exaggerate an image to a mask-like appearance with control of parameters
#'
#' Pipeline: blur, invert and threshold the image (\code{clean_initial_img}),
#' then despeckle and exaggerate the result (\code{img_open_close}).
#'
#' @param img Image
#' @param gaussian_d diameter of brush to use for gaussian blur
#' @param threshold_val threshold value to use on normalized, inverted, blurred
#' image
#' @param opening_d diameter to use for image opening (despeckling)
#' @param closing_d diameter to use for image closing (exaggeration)
#' @return the cleaned, opened and closed image (a thresholded binary mask),
#'   with the processing steps recorded in its "operation" attribute
#' @export
exaggerate_img_control <- function(img, gaussian_d = 125, threshold_val = .125,
                                   opening_d = 7, closing_d = 301) {
  tmp <- clean_initial_img(img, gaussian_d = gaussian_d,
                           threshold_val = threshold_val)
  tmp <- img_open_close(tmp, opening_d = opening_d, closing_d = closing_d)
  tmp
}
# Morphologically open (despeckle) then close (exaggerate) a binary image
# using disc brushes of the given diameters. A list of images is handled
# element-wise. The step and its parameters are appended to the image's
# "operation" attribute so the processing history can be reconstructed.
img_open_close <- function(img, opening_d, closing_d, opening_shape = "disc", closing_shape = "disc") {
  if (is.list(img)) {
    return(lapply(img, img_open_close, opening_d = opening_d, closing_d = closing_d,
                  opening_shape = opening_shape, closing_shape = closing_shape))
  }
  tmp <- img %>%
    EBImage::opening(EBImage::makeBrush(opening_d, shape = opening_shape)) %>%
    EBImage::closing(EBImage::makeBrush(closing_d, shape = closing_shape))
  # Carry the processing log forward from the input image.
  attr(tmp, "operation") <- append(attr(img, "operation"),
                                   list(list(type = "exaggerate",
                                             opening_d = opening_d,
                                             closing_d = closing_d)))
  tmp
}
#' Invert, smooth, and threshold an image
#'
#' @param img Image
#' @param gaussian_d diameter of brush to use for gaussian blur
#' @param threshold_val threshold value to use on normalized, inverted, blurred
#' image. If threshold is 0, it will be automatically determined using
#' a heuristic approach.
#' @return binary image, with the step recorded in the "operation" attribute
#' @export
clean_initial_img <- function(img, gaussian_d = 25, threshold_val = .15) {
  # Dummy binding for the magrittr placeholder `.` (keeps R CMD check quiet).
  . <- NULL
  # A list of images is handled element-wise.
  if (is.list(img)) {
    return(lapply(img, clean_initial_img, gaussian_d = gaussian_d, threshold_val = threshold_val))
  }
  # Gaussian blur, normalize, then invert (1 - x).
  tmp <- img %>%
    EBImage::filter2(EBImage::makeBrush(gaussian_d, "gaussian")) %>%
    EBImage::normalize() %>%
    magrittr::subtract(1, .)
  if (threshold_val == 0) {
    # Heuristic: scan thresholds 0.05..0.50 and keep the one whose mask
    # covers closest to 5.5% of the image.
    thresholds <- seq(.05, .5, .01)
    t_mean <- sapply(thresholds, function(x) mean(tmp > x))
    threshold_val <- thresholds[which.min(abs(t_mean - 0.055))]
    message("Image cleaned using a threshold of ", threshold_val)
  }
  # Binarize.
  tmp <- tmp %>%
    (function(.) . > threshold_val)
  # Carry the processing log forward from the input image.
  attr(tmp, "operation") <- append(attr(img, "operation"),
                                   list(list(type = "clean",
                                             gaussian_d = gaussian_d,
                                             threshold_val = threshold_val)))
  tmp
}
#' Get the center of a binary image
#'
#' @description Calculate the center of mass of a binary image, with or
#'   without trimming 5\% from each side of the image. A list of images is
#'   handled element-wise.
#' @param img image/matrix (any non-zero entry counts as "on")
#' @param trim Trim 5\% from each side of the image? (Useful for removing
#'   page boundary issues)
#' @return numeric vector of length 2: the rounded (row, column) center of mass
#'
#' @export
binary_center <- function(img, trim = TRUE) {
  if (is.list(img)) {
    return(lapply(img, binary_center, trim = trim))
  }
  # Center of mass along one margin: given per-row (or per-column) counts of
  # non-zero pixels, optionally zero out the outer 5% on each end, normalize
  # to weights, and take the weighted mean of the indices.
  # (Refactor: this logic was previously duplicated for rows and columns.)
  margin_center <- function(counts) {
    if (trim) {
      keep <- rep(0, length(counts))
      keep[ceiling(.05 * length(counts)):floor(.95 * length(counts))] <- 1
    } else {
      keep <- 1
    }
    counts <- counts * keep
    counts <- counts / sum(counts)
    sum(seq_along(counts) * counts)
  }
  d1 <- margin_center(apply(img != 0, 1, sum))
  d2 <- margin_center(apply(img != 0, 2, sum))
  round(c(d1, d2))
}
|
f1b8d7302abad4a6e52c8de9d8c8d25ca86de525
|
0bc0bab452751f75ce118b9970a7e61efeee0e7d
|
/Simulation_testing.R
|
3eebc7ae22c311c2b12517a4065331758481f546
|
[] |
no_license
|
JuanMatiasBraccini/Git_move.rate_joint.estimation
|
fb2f6f2918df476652e588bf07ab2a9c66f3e719
|
65bc924fadc4bb5d4e2300e1cac505e9553a887d
|
refs/heads/master
| 2021-06-12T22:08:57.085697
| 2021-04-28T05:21:07
| 2021-04-28T05:21:07
| 191,689,902
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,181
|
r
|
Simulation_testing.R
|
#Movement model conditioned on recaptures only (McGarvey line of thought)
#note: individual-based model that compares the probability of occurrying in a particular zone
#      after an given time at liberty with the observed recapture zone.
# NOTE(review): standalone analysis script — clears the workspace on source.
rm(list=ls(all=TRUE))
library(expm)       # matrix power operator %^%
library(ggplot2)
library(reshape)    # melt()
if(!exists('handl_OneDrive')) source('C:/Users/myb/OneDrive - Department of Primary Industries and Regional Development/Matias/Analyses/SOURCE_SCRIPTS/Git_other/handl_OneDrive.R')
source(handl_OneDrive("Analyses/SOURCE_SCRIPTS/Population dynamics/fn.fig.R"))
#----DATA SECTION ----
#number of sharks released per zone in year 1 (for conventional and acoustic tags)
N.sharks_conv=1000
N.sharks_acous=100
#note: all sharks released in year 1
#years with recaptures/detections
Yrs=1:3
Yr.rec_daily=365*Yrs # in days
#number of samples taken (observed recaptures or detections)
n.obs.conv=200
n.obs.acous=250
#----PARAMETERS SECTION ----
#movement probabilities (for time step of 1 day)
# Three zones; each move.from.X vector is one row of the daily transition
# matrix (probabilities of being in zone 1/2/3 tomorrow given zone X today).
From_1_to_1=0.9995
From_1_to_2=1-From_1_to_1
move.from.1=c(From_1_to_1,From_1_to_2,0) #assumption of no movement to non-adjacent zone in 1 day
From_2_to_2=0.9995
From_2_to_1=0.00025
move.from.2=c(From_2_to_1,From_2_to_2,1-(From_2_to_1+From_2_to_2))
From_3_to_3=0.9995
From_3_to_2=1-From_3_to_3
move.from.3=c(0,From_3_to_2,From_3_to_3) #assumption of no movement to non-adjacent zone in 1 day
Nzone=3
# The four free parameters the estimation should recover.
Pin.pars=c(From_1_to_1,From_2_to_1,From_2_to_2,From_3_to_3)
names(Pin.pars)=c("P11","P21","P22","P33")
#recapture/detection rate
#note: this captures mortality and recapture/detection probabilities
see.rate_conv=.7 # rate of recapturing shark with conventional tags
see.rate_acous=.9 #lower rate for acoustic due to just mortality
#number of simulations
n.sims=1000
#----PROCEDURE SECTION ----
#population movement transition matrix, daily time step, i.e. probability of movement in 1 day (rows sum to 1)
Mov.Mat=matrix(c(move.from.1,move.from.2,move.from.3),ncol=Nzone,nrow=Nzone,byrow=T)
#Functions
# Logit transform: map a probability in (0, 1) onto the real line.
fn.logit <- function(p) {
  odds <- p / (1 - p)
  log(odds)
}
# Inverse logit: map a real number back to a probability in (0, 1).
fn.inv.logit <- function(z) {
  e <- exp(z)
  e / (1 + e)
}
# Keep a quantity positive for the optimizer: values at or above eps pass
# through unchanged; values below eps are mapped smoothly into (0, eps] and
# a quadratic penalty is accumulated in the global `fpen`.
posfun <- function(x, eps) {
  if (x >= eps) {
    return(x)
  }
  fpen <<- fpen + 0.01 * (x - eps)^2
  eps / (2 - (x / eps))
}
#simulate sampling (recaptures / detections)
# D:    named list of zone-by-zone count matrices (rows = release zone,
#       cols = recapture zone); list names are days at liberty.
# nObs: number of observed recaptures/detections to draw.
# Expands each matrix into one row per tagged animal (Days.liberty, From, To)
# and returns a simple random sample of nObs of those rows.
fn.sample=function(D,nObs)
{
  From=1:nrow(D[[1]])
  To=1:ncol(D[[1]])
  # All (from, to) zone pairs ordered by release zone, as "from_to" labels.
  From_to=expand.grid(From,To)
  From_to=From_to[order(From_to$Var1),]
  From_to=paste(From_to$Var1,From_to$Var2,sep="_")
  Dat=D
  for(i in 1:length(D))
  {
    x=D[[i]]
    # c(t(x)) flattens the matrix row-major so it lines up with From_to.
    Obs=c(t(x))
    names(Obs)=From_to
    # One label repetition per animal counted in that cell.
    Obs=rep(names(Obs),Obs)
    # NOTE(review): substr(.,1,1)/substr(.,3,3) assumes single-digit zone ids.
    Dat[[i]]=data.frame(Days.liberty=as.numeric(names(D)[i]),From=substr(Obs,1,1),To=substr(Obs,3,3))
  }
  Dat=do.call(rbind,Dat)
  index=sample(1:nrow(Dat),nObs) #simulate sampling of tagged population
  return(Dat[index,])
}
#likelihood
# Negative log-likelihood of the observed (From, To, Days.liberty) records
# given a daily movement matrix MAT. MAT %^% d (matrix power, package expm)
# gives the d-day transition probabilities; each observation contributes
# -log of the probability of its release-to-recapture zone transition.
fn.like=function(MAT,DATA)
{
  NLL=0
  for(i in 1:nrow(DATA))
  {
    d=DATA[i,]
    Move=MAT %^% d$Days.liberty
    id.rel=which(rownames(Move)==d$From)
    id.rec=which(colnames(Move)==d$To)
    Pred.Prob=Move[id.rel,id.rec]
    NLL=NLL-log(Pred.Prob)
  }
  return(NLL)
}
#individual-based model
# Objective function for optim(): `pars` holds the four free movement
# probabilities (P11, P21, P22, P33) on the logit scale.
# NOTE(review): reads the observation data frames Data_conv and Data_acous
# from the calling environment (set inside the simulation loop below).
fn.move=function(pars)
{
  #Put movement pars in matrix
  P.1=fn.inv.logit(pars[1]) #inverse logit
  P.2=fn.inv.logit(pars[2:3])
  P.3=fn.inv.logit(pars[4])
  #get missing parameter (rows must sum to 1; zones 1 and 3 cannot reach the
  #non-adjacent zone in one day, hence the structural zeros)
  P.1=c(P.1,1-P.1,0)
  P.2=c(P.2,1-sum(P.2))
  P.3=c(0,1-P.3,P.3)
  #put pars in matrix
  Mov.mat=as.matrix(rbind(P.1,P.2,P.3))
  colnames(Mov.mat)=rownames(Mov.mat)=1:nrow(Mov.mat)
  #Calculate likelihood of conventional tagging observations
  neg.LL.conv=0
  neg.LL.conv=fn.like(MAT=Mov.mat,DATA=Data_conv)
  #Calculate likelihood of acoustic tagging observations
  neg.LL.acous=0
  neg.LL.acous=fn.like(MAT=Mov.mat,DATA=Data_acous)
  #total neg log like
  neg.LL=neg.LL.conv+neg.LL.acous
  return(neg.LL)
}
# ---- Simulation testing -----------------------------------------------------
# For each of n.sims replicates: (1) simulate recaptures (conventional tags)
# and detections (acoustic tags) from the true movement matrix Mov.Mat,
# (2) draw observation records from those expected numbers, (3) re-estimate
# the movement parameters with optim(), and (4) store the estimates.
# NOTE(review): %^% is assumed to be the matrix-power operator (e.g. from the
# expm package) loaded earlier in the file -- confirm.
#Run simulation testing
Store.sims=vector('list',n.sims)
system.time(for(n in 1:n.sims) #takes 37 seconds per iteration
{
  #1. Generate recaptures/detections based on movement matrix and number of sharks released
  #conventional tagging
  Rec.list=vector('list',length(Yr.rec_daily))
  names(Rec.list)=Yr.rec_daily
  Rec.list[[1]]=floor(N.sharks_conv*(Mov.Mat%^% Yr.rec_daily[1])*see.rate_conv) #release (in day 1) and move sharks in year 1
  for(i in 2:length(Yr.rec_daily)) #subsequent years
  {
    Move=Mov.Mat %^% 365 #move shark one more year
    Rec.list[[i]]=floor((Rec.list[[i-1]]%*%Move)*see.rate_conv)
  }
  #acoustic tagging (same projection, different release numbers and detection rate)
  Detec.list=vector('list',length(Yr.rec_daily))
  names(Detec.list)=Yr.rec_daily
  Detec.list[[1]]=floor(N.sharks_acous*(Mov.Mat%^% Yr.rec_daily[1])*see.rate_acous)
  for(i in 2:length(Yr.rec_daily))
  {
    Move=Mov.Mat %^% 365 #move sharks one more year
    Detec.list[[i]]=floor((Detec.list[[i-1]]%*%Move)*see.rate_acous)
  }
  #2. Create observations by sampling from the expected recapture/detection numbers
  #conventional tagging
  Data_conv=fn.sample(D=Rec.list,nObs=n.obs.conv)
  #acoustic tagging
  Data_acous=fn.sample(D=Detec.list,nObs=n.obs.acous)
  #Data_acous$From_to=with(Data_acous,paste(From,To,sep="_")); with(Data_acous,table(Days.liberty,From_to))
  #3. Estimate parameters
  # Starting values on the probability scale, transformed to the logit scale
  # expected by fn.move
  Pars= c(0.99, 0.005, 0.99, 0.99)
  Pars=fn.logit(Pars)
  fit=optim(Pars,fn.move,method="Nelder-Mead", control = list(trace=T))
  #4. Store pars
  # Back-transform the fitted parameters to the probability scale
  Store.sims[[n]]=fn.inv.logit(fit$par)
})
# ---- Summarise and plot the simulation results ------------------------------
# Bind the per-replicate estimates into an n.sims x n.parameter matrix
Store.sims=do.call(rbind,Store.sims)
#Plot simulations and original par values
# Error = true ("pin") parameter value minus its estimate, one row per replicate
Store.pin.pars=matrix(rep(Pin.pars,n.sims),nrow=n.sims,byrow=T)
ERROR=Store.sims
#ERROR[,match(c("P11","P22","P33"),names(Pin.pars))]=round(ERROR[,match(c("P11","P22","P33"),names(Pin.pars))],6)
#ERROR[,match(c("P21"),names(Pin.pars))]=round(ERROR[,match(c("P21"),names(Pin.pars))],6)
ERROR=Store.pin.pars-ERROR
#ERROR=ERROR/Store.pin.pars
colnames(ERROR)=names(Pin.pars)
# NOTE(review): fn.fig presumably opens a jpeg/tiff graphics device controlled
# by the Do.jpeg / Do.tiff flags below -- confirm against its definition.
setwd(handl_OneDrive("Analyses/Movement rate estimation/Joint.estim_ind.base.mod/Simulation_testing"))
Do.jpeg="YES"
Do.tiff="NO"
# Violin + jitter plot of the estimation error for each parameter
fn.fig("boxplot",2400, 1600)
par(las=1)
p=ggplot(data=melt(ERROR), aes(as.factor(X2), value)) + ylim(-4e-4,4e-4)
p=p+ geom_violin(fill="grey80",adjust=2) + geom_jitter(height = 0,width = 0.15,color="grey20")
p=p+labs(y="Error",x="Estimated parameter")+ geom_hline(aes(yintercept=0),lty=2,colour="grey50")
p+theme(axis.text=element_text(size=12),
        axis.title=element_text(size=14,face="bold"),
        panel.background = element_rect(fill = "white", colour = "grey50"))
dev.off()
# Density of the raw estimates per parameter, true value shown as dashed line
XLI=list(c(.99875,1.000075),c(-3e-4,9e-4),c(.99875,1.000075),c(.99875,1.000075))
fn.fig("density",2400, 2400)
par(mfcol=c(2,2),mai=c(.65,.85,.1,.1),oma=c(.1,.1,.1,.1),las=1,mgp=c(2,.6,0))
for(i in 1:ncol(Store.sims))
{
  plot(density(Store.sims[,i],adjust=10),type='l',lwd=2,ylab="",xlab=names(Pin.pars)[i],main="",
       cex.lab=1.5,cex.axis=1.25,xlim=XLI[[i]])
  abline(v=Pin.pars[i],lwd=2,lty=2,col="grey60")
}
mtext("Density",2,outer=T,las=3,line=-2,cex=2)
dev.off()
|
06ea33f8e159d9cc7319a99714eafbf7b41b9b1e
|
be94025d16afa0b7d02ce9959479bec180a59e06
|
/R/data.R
|
87dfc00e9dd1fe3a61ea572c5eb18155edc3c24b
|
[] |
no_license
|
ouidata/creationPackageLive
|
fb11024fa8a1d064fa80ecc3a7753b753872f867
|
ba8acce21a217ce5d59ed1aa679387d84cf777ac
|
refs/heads/master
| 2020-06-14T17:09:59.098257
| 2019-07-03T14:14:46
| 2019-07-03T14:14:46
| 195,062,531
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 72
|
r
|
data.R
|
#
# smc_with_js: dataset documentation stub.
# smc_with_js contains the following information ...
# TODO(review): the original comment was an unfinished French placeholder
# ("contient les informations suivantes"); describe the dataset's contents.
#
"smc_with_js"
|
8a51f63689e76548bca4aeba585ea8284f822b5a
|
e1cbbf8791b0ac6d40f6d5b397785560105441d9
|
/R/lmomsRCmark.R
|
24bf2cc37af7b0e176cd227813092dd2c18bc51a
|
[] |
no_license
|
wasquith/lmomco
|
96a783dc88b67017a315e51da3326dfc8af0c831
|
8d7cc8497702536f162d7114a4b0a4ad88f72048
|
refs/heads/master
| 2023-09-02T07:48:53.169644
| 2023-08-30T02:40:09
| 2023-08-30T02:40:09
| 108,880,810
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,381
|
r
|
lmomsRCmark.R
|
"lmomsRCmark" <-
function(x, rcmark=NULL, nmom=5, flip=NA, flipfactor=1.1) {
n <- length(x);
if(nmom > n)
stop("More L-moments requested by parameter 'nmom' than data points available in 'x'");
if(length(unique(x)) == 1)
stop("all values are equal--Lmoments can not be computed");
if(is.null(rcmark)) rcmark <- rep(0,n);
if(n != length(rcmark))
stop("sample size != right-censoring marker (rcmark)");
rcmark <- as.numeric(rcmark);
if(! is.na(flip)) {
if(is.logical(flip)) {
if(flip) {
if(flipfactor < 1) {
warning("flipfactor < 1, setting to unity");
flipfactor <- 1;
}
flip <- flipfactor*max(x);
x <- flip - x;
}
} else {
x <- as.numeric(flip) - x;
}
}
ix <- sort(x, index.return=TRUE)$ix;
x <- x[ix]; rcmark <- rcmark[ix];
L <- R <- rep(NA, nmom);
for(i in 1:nmom) {
L[i] <- lmomRCmark(x, rcmark=rcmark, r=i, sort=FALSE);
if(i == 2) R[i] <- L[2]/L[1];
if(i >= 3) R[i] <- L[i]/L[2];
}
z <- list(lambdas=L,
ratios=R,
trim=0,
lefttrim=NULL,
rightrim=NULL,
n=n,
n.cen=sum(rcmark),
flip=flip,
source="lmomsRCmark");
return(z);
}
|
373524e8389ceb68c526bd23962981cd4bd14e8b
|
8084df2f83350ee1886a7d71cee8c89078e0419c
|
/D3.R
|
94919bdfce4bcb4f86fb63c882b0eedf9fa76e1e
|
[] |
no_license
|
bw4sz/NetworkTime
|
30d2e71801914c50dbdf0dc0392130a00bfb6fea
|
27461b3c97402da1683ca89d7177338c7086999d
|
refs/heads/master
| 2021-01-17T09:41:54.782230
| 2017-03-19T18:08:17
| 2017-03-19T18:08:17
| 27,722,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,429
|
r
|
D3.R
|
library(d3Network)
require(chron)
require(bipartite)
require(ggplot2)
require(ape)
require(reshape2)
require(sna)
require(stringr)
require(maptools)
require(taxize)
require(picante)
require(dplyr)
library(scales)
###################
#Source Functions
###################
gitpath<-"C:/Users/Ben/Documents/NetworkTime/"
source(paste(gitpath,"NetworkSource.R",sep=""))
############################################
##############Read In Data##################
############################################
# NOTE(review): `droppath` is used below but never defined in this script;
# it is presumably created by NetworkSource.R -- confirm.
#morphology data
#read in flower morphology data, comes from Nectar.R
fl.morph<-read.csv(paste(droppath,"Thesis/Maquipucuna_SantaLucia/Results/FlowerMorphology.csv",sep=""))
#First row is empty
fl.morph<-fl.morph[-1,]
#Bring in Hummingbird Morphology Dataset, comes from
hum.morph<-read.csv(paste(droppath,"Thesis/Maquipucuna_SantaLucia/Results/HummingbirdMorphology.csv",sep=""))
#bring in clade data (first column dropped; remaining columns renamed)
clades<-read.csv(paste(gitpath,"InputData//CladeList.txt",sep=""),header=FALSE)[,-1]
colnames(clades)<-c("Clade","Genus","Species","double","English")
clades<-clades[,1:5]
#Bring in Phylogenetic Data
trx<-read.tree(paste(gitpath,"InputData\\hum294.tre",sep=""))
#format tips to "Genus.species" and drop duplicated taxa
new<-str_extract(trx$tip.label,"(\\w+).(\\w+)")
#get duplicates
trx<-drop.tip(trx,trx$tip.label[duplicated(new)])
#name tips.
trx$tip.label<-str_extract(trx$tip.label,"(\\w+).(\\w+)")
#Read in trait distance between species, run from Morphology.R
sp.dist<-read.csv("C:/Users/Ben/Dropbox/Thesis/Maquipucuna_SantaLucia/Results/HummingbirdDist.csv",row.names=1)
#Read in plant phylogeny
pco<-read.csv(paste(gitpath,"InputData/PlantRelatedness.csv",sep=""))
#read in interaction data
datf<-read.csv("C:/Users/Ben/Dropbox/Thesis/Maquipucuna_SantaLucia/Results/Network/HummingbirdInteractions.csv",row.names=1)
#remove flower piercer species (not true hummingbirds)
datf<-droplevels(datf[!datf$Hummingbird %in% c("White-lined Flowerpiercer","White-sided Flowerpiercer"),])
#reformatted names: bird names present in the data but missing from the clade list
missing<-levels(datf$Hummingbird)[!levels(datf$Hummingbird) %in% clades$English]
#correct spelling mistakes
levels(datf$Hummingbird)[levels(datf$Hummingbird) %in% "Booted Racketail"]<-"Booted Racket-tail"
levels(datf$Hummingbird)[levels(datf$Hummingbird) %in% "Crowned Woodnymph"]<-"Green-crowned Woodnymph"
levels(datf$Hummingbird)[levels(datf$Hummingbird) %in% "Violet-tailed Slyph"]<-"Violet-tailed Sylph"
#drop records with no plant identification
datf<-droplevels(datf[!datf$Iplant_Double %in% c("",NA),])
#Uncorrected for sampling: plant x hummingbird interaction-count matrix
rawdat<-as.data.frame.array(table(datf$Iplant_Double,datf$Hummingbird))
#dissim
# Pairwise similarity between hummingbird species (1 - "horn" dissimilarity),
# melted to long form: one row per (To, From) species pair
a<-melt(
  as.matrix(
    1-vegdist(
      t(rawdat),method="horn"
    )
  )
)
colnames(a)<-c("To","From","value")
a<-a[a$value>.1,]
d3Network::d3SimpleNetwork(a[a$value<.4,],file="humbipartite.html",charge = -00)
#append id to data
#append to data
# NOTE(review): `Nodes` and the columns a$Flowers / a$Birds are not defined
# anywhere in this script -- the two lines below look like leftovers from an
# older bipartite version and would fail if run as-is; confirm before use.
a$Source<-Nodes[match(a$Flowers,Nodes$Name),"Node"]
a$Target<-Nodes[match(a$Birds,Nodes$Name),"Node"]
#Old script
d3Network::d3ForceNetwork(Links=a,NodeID="Name",Nodes=Nodes,Target="Target",Value="value",Source="Source",file="humbipartiteForce.html",Group="group",charge = -400,d3Script = "http://d3js.org/d3.v3.min.js",opacity=.6,linkDistance = "function(d){return d.value * 5}",zoom=T)
#new script
networkD3::forceNetwork(Links=a,NodeID="Name",Nodes=Nodes,Target="Target",Value="value",Source="Source",Group="group",charge = -500,colourScale = "d3.scale.category10()") %>% networkD3::saveNetwork("humbipartiteForceNew.html",selfcontained=T)
|
bb740490a8255672c3f1c74c6cf6260b03c0918f
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.developer.tools/man/codecommit_create_pull_request_approval_rule.Rd
|
48ccc6db471d21223d90679d8bf25310ddc25dcd
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 2,439
|
rd
|
codecommit_create_pull_request_approval_rule.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codecommit_operations.R
\name{codecommit_create_pull_request_approval_rule}
\alias{codecommit_create_pull_request_approval_rule}
\title{Creates an approval rule for a pull request}
\usage{
codecommit_create_pull_request_approval_rule(
pullRequestId,
approvalRuleName,
approvalRuleContent
)
}
\arguments{
\item{pullRequestId}{[required] The system-generated ID of the pull request for which you want to create
the approval rule.}
\item{approvalRuleName}{[required] The name for the approval rule.}
\item{approvalRuleContent}{[required] The content of the approval rule, including the number of approvals
needed and the structure of an approval pool defined for approvals, if
any. For more information about approval pools, see the AWS CodeCommit
User Guide.
When you create the content of the approval rule, you can specify
approvers in an approval pool in one of two ways:
\itemize{
\item \strong{CodeCommitApprovers}: This option only requires an AWS account
and a resource. It can be used for both IAM users and federated
access users whose name matches the provided resource name. This is
a very powerful option that offers a great deal of flexibility. For
example, if you specify the AWS account \emph{123456789012} and
\emph{Mary_Major}, all of the following would be counted as approvals
coming from that user:
\itemize{
\item An IAM user in the account
(arn:aws:iam::\emph{123456789012}:user/\emph{Mary_Major})
\item A federated user identified in IAM as Mary_Major
(arn:aws:sts::\emph{123456789012}:federated-user/\emph{Mary_Major})
}
This option does not recognize an active session of someone assuming
the role of CodeCommitReview with a role session name of
\emph{Mary_Major}
(arn:aws:sts::\emph{123456789012}:assumed-role/CodeCommitReview/\emph{Mary_Major})
unless you include a wildcard (*Mary_Major).
\item \strong{Fully qualified ARN}: This option allows you to specify the fully
qualified Amazon Resource Name (ARN) of the IAM user or role.
}
For more information about IAM ARNs, wildcards, and formats, see \href{https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html}{IAM Identifiers}
in the \emph{IAM User Guide}.}
}
\description{
Creates an approval rule for a pull request.
See \url{https://www.paws-r-sdk.com/docs/codecommit_create_pull_request_approval_rule/} for full documentation.
}
\keyword{internal}
|
28fb698bd7401d068cdd929d6a2ef120635bc867
|
1bdfacbfb304b3056afe40a259374c19f7b80f50
|
/Result_Estimate.R
|
f585a560e20ee74c4c2223e507c88a914ab7b0ba
|
[] |
no_license
|
M-Atsuhiko/Gausian
|
ed71ee76ae183fa0283ec52b4b0bf62c1aaa2421
|
e84e9895eb8c93b39d5837a8aa20a2caab91a1e4
|
refs/heads/master
| 2020-05-18T13:50:49.084679
| 2015-02-10T09:53:40
| 2015-02-10T09:53:40
| 28,582,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,981
|
r
|
Result_Estimate.R
|
# Assign a fitness score ("Estimate") to every individual of the generation.
# Individuals whose Result is an error code (Mor_E, EPSP_E) receive a penalty
# score; individuals with a usable result (Good_Result / Bad_Result) are
# scored relative to the best values observed in the current generation.
# Relies on globals: N_INDIVIDUAL, Mor_E, EPSP_E, Good_Result, Bad_Result,
# EPSP_PENALTY_MIEW, EPSP_PENALTY_SIGMA, the weights Function_ratio /
# Morphology_ratio / Conductance_ratio, the flags WITH_K / WITH_Ca, and the
# helpers Morpho_penalty() / penalty().
# Returns MULTI_GENERATION with the "Estimate" field filled in.
Result_Estimate <- function(MULTI_GENERATION){
  Good_i <- c() # indices of individuals whose Result is Good_Result (or Bad_Result)
  # Running generation-wide maxima, used for relative scoring in the 2nd pass
  Max_F <- -1
  Max_Mor <- -1
  Max_Ca <- -1
  Max_K <- -1
  # Pass 1: penalise failed individuals, collect maxima over usable ones
  for(i in 1:N_INDIVIDUAL){
    if(MULTI_GENERATION[[i]][["Result"]] == Mor_E)
      MULTI_GENERATION[[i]][["Estimate"]] <- Morpho_penalty(MULTI_GENERATION[[i]][["TREE"]])
    else if(MULTI_GENERATION[[i]][["Result"]] == EPSP_E)
      MULTI_GENERATION[[i]][["Estimate"]] <- penalty(1,EPSP_PENALTY_MIEW,EPSP_PENALTY_SIGMA)
    # else if(MULTI_GENERATION[[i]][["Result"]] == Bad_Result)
    # MULTI_GENERATION[[i]][["Estimate"]] <- (MULTI_GENERATION[[i]][["Ratio"]]^(-1))*-1
    else if(MULTI_GENERATION[[i]][["Result"]] == Good_Result ||
            MULTI_GENERATION[[i]][["Result"]] == Bad_Result){
      Good_i <- c(Good_i,i)
      Max_F <- max(Max_F,MULTI_GENERATION[[i]][["Ratio"]])
      Max_Mor <- max(Max_Mor,MULTI_GENERATION[[i]][["TREE_Volume"]])
      Max_Ca <- max(Max_Ca,MULTI_GENERATION[[i]][["Ca_Amount"]])
      Max_K <- max(Max_K,MULTI_GENERATION[[i]][["K_Amount"]])
    }
  }
  # Pass 2: score the usable individuals
  for(i in Good_i){
    # evaluate every criterion relative to the rest of the generation
    Func_minus <- Function_ratio*(1 - MULTI_GENERATION[[i]][["Ratio"]]/Max_F)
    Morpho_minus <- Morphology_ratio*(MULTI_GENERATION[[i]][["TREE_Volume"]]/Max_Mor)
    K_Ratio <- 0
    Ca_Ratio <- 0
    if(WITH_K || WITH_Ca){
      # conductance amounts that are too small risk underflow or division by zero
      if(WITH_K) K_Ratio <- MULTI_GENERATION[[i]][["K_Amount"]]/Max_K
      if(WITH_Ca) Ca_Ratio <- MULTI_GENERATION[[i]][["Ca_Amount"]]/Max_Ca
      Conductance_minus <- Conductance_ratio*(K_Ratio + Ca_Ratio)/(WITH_K + WITH_Ca)
    }else{
      Conductance_minus <- 0
    }
    # Start from 100 and subtract the weighted deductions
    Estimate_Value <- 100 - Func_minus - Morpho_minus - Conductance_minus
    MULTI_GENERATION[[i]][["Estimate"]] <- Estimate_Value
  }
  return(MULTI_GENERATION)
}
|
112c3ad842fe0a027f6d7b86a7ecc18ad8a69f56
|
e407e8e724356282f85582eb8f9857c9d3d6ee8a
|
/tests/testthat/test-cumulative-coefficients.R
|
13c1f811ebe98aa822052efb5c236f330c0f2aeb
|
[
"MIT"
] |
permissive
|
adibender/pammtools
|
c2022dd4784280881f931e13f172b0057825c5e4
|
ab4caeae41748c395772615c70a0cd5e206ebfe6
|
refs/heads/master
| 2023-08-29T17:30:30.650073
| 2023-07-19T10:30:06
| 2023-07-19T10:30:06
| 106,259,608
| 43
| 14
|
NOASSERTION
| 2023-07-19T10:30:08
| 2017-10-09T08:55:47
|
R
|
UTF-8
|
R
| false
| false
| 741
|
r
|
test-cumulative-coefficients.R
|
context("Test cumulative coefficients functionality")
test_that("Cumulative coefficients work", {
df <- tumor[1:30, c("days", "status", "age")]
df$x1 <- as.factor(rep(letters[1:3], each = nrow(df) / 3L))
## pam
ped <- as_ped(df, formula = Surv(days, status)~ x1 + age)
pam <- mgcv::gam(ped_status ~ s(tend) + x1 + age, data = ped,
family = poisson(), offset = offset)
cumu_coef_pam <- get_cumu_coef(pam, ped, terms = c("age", "x1"), nsim = 20L)
expect_data_frame(cumu_coef_pam, nrows = 36L, ncols = 6L)
expect_equal(unique(cumu_coef_pam$variable), c("age", "x1 (b)", "x1 (c)"))
cumu_coef_pam <- get_cumu_coef(pam, ped, terms = c("(Intercept)", "age"))
expect_data_frame(cumu_coef_pam, nrows = 24L, ncols = 6L)
})
|
47e3f6e2a07efc2356f5b2cefefb13c1e381d953
|
99c84a4732231b83515cb3bf4fba2942dc0b0815
|
/code/01-manage-data.R
|
9c41a7ef2389c3cb3d59d129a4843ee3d7170a48
|
[] |
no_license
|
jfarland/prob-comp-2015
|
761a3382d882c88af47e36e110cf5ea7cff30591
|
9ac11c17a0899c20ccb4258ee8eafb2e51550874
|
refs/heads/master
| 2021-01-10T09:06:09.912251
| 2017-10-04T22:39:28
| 2017-10-04T22:39:28
| 44,548,174
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,796
|
r
|
01-manage-data.R
|
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-#
# Probabilistic Forecasting Competition - Tao Hong's Energy Analytics Course
#
# Prepare and manage data sets used in forecasting
#
# Author: Jon T Farland <jonfarland@gmail.com>
#
# Copyright September 2015
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-#

#plotting and visual libraries
library("ggplot2")
library("lattice")
library("rworldmap")

#data management libraries
library("dplyr")
library("tidyr")
library("gdata")
library("reshape2")
library("lubridate")
library("timeDate")

#modeling and forecast libraries
library("forecast")
library("quantreg")
library("splines")
library("quantregForest")
library("mgcv")

#weather data forecasts
library("weatherData")

#-----------------------------------------------------------------------------#
#
# Setup / Options
#
#-----------------------------------------------------------------------------#

# Current Directory
getwd()

#set the raw data as the current directory
setwd("/home/rstudio/projects/prob-comp-2015/data/rawdat")

#-----------------------------------------------------------------------------#
#
# Load Inputs
#
#-----------------------------------------------------------------------------#

#uncomment the next command to run a Python script to download PJM load data for the last 5 years
#system('python /home/rstudio/projects/comp-2015/data/rawdat/00-pull-historical-load-data.py')

#download just the 2015 data as the competition ensues
#system('python /home/rstudio/projects/comp-2015/data/rawdat/00-pull-2015-load-data.py')
#system('python /home/rstudio/projects/comp-2015/data/rawdat/00-pull-2015-load-data.py')

# One CSV per data release; combined further below
load1 <- read.csv("Release_1.csv")
load2 <- read.csv("Release_2.csv")
load3 <- read.csv("Release_3.csv")
load4 <- read.csv("Release_4.csv")
load5 <- read.csv("Release_5.csv")

# Quick interactive checks of structure
names(load1)
sapply(load1,class)

# FIX: the original plotted against 'load0', which is never defined anywhere
# in this script; the intended object is 'load1'.
plot(load1$T, load1$load)
plot(load1$Hour, load1$load)

#-----------------------------------------------------------------------------#
#
# Processing
#
#-----------------------------------------------------------------------------#

#load.data=rbind(load11, load12, load13, load14, load15)

#go from wide to long: stack all releases, build a timestamp index plus
#calendar covariates, and rename T (temperature) to temp
load.long <- rbind(load1, load2, load3, load4, load5) %>%
  mutate(tindx = mdy_h(paste(Date, Hour))-duration(1,"hours"),
         mindx = month(tindx),
         dow = weekdays(tindx),
         year = year(tindx)) %>%
  rename(dindx = Date, hindx = Hour, temp=T) %>%
  select(tindx, hindx, dindx, mindx, year, load, dow, temp) %>%
  arrange(dindx, hindx)
#shifted to hour beginning rather than hour ending

#quick checks
summary(load.long)

#-----------------------------------------------------------------------------#
#
# Graphics
#
#-----------------------------------------------------------------------------#

# load over time (base plot() returns NULL; assignments kept for parity with
# the original script's structure)
plot1 <- plot(load.long$load ~ load.long$tindx)
plot2 <- plot(load.long$load ~ load.long$hindx)
plot3 <- plot(load.long$load ~ load.long$dindx)

#histograms and conditional histograms
histogram(~load | mindx, data = load.long, xlab="Load (MW)", ylab ="Density", col=c("red"))
histogram(~load | hindx, data = load.long, xlab="Load (MW)", ylab ="Density", col=c("red"))
histogram(~load , data = load.long, xlab="Load (MW)", ylab ="Density", col=c("red"))

#-----------------------------------------------------------------------------#
#
# Outputs
#
#-----------------------------------------------------------------------------#

#save out the data
setwd("/home/rstudio/projects/prob-comp-2015/data")
save(load.long,file="load-long.Rda")
#save(temp_act,file="temp-act.Rda")
#save(temp_fcst,file="temp-fcst.Rda")

# FIX: the original script ended with argument-less calls to write.csv() and
# writeRDS().  write.csv() without arguments is an error, and writeRDS() does
# not exist (the base function is saveRDS()).  The data are already persisted
# with save() above; completed stubs are left here, disabled:
# write.csv(load.long, file = "load-long.csv", row.names = FALSE)
# saveRDS(load.long, file = "load-long.rds")

#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-#
|
ac0cb673bd8d4a1b790868a4d7f13a13e5a3bda0
|
163ceeb94d49b70d43cd707cbc5de03164a1ce50
|
/R/RcppExports.R
|
175cd04c7e70b11bce78a3c20f16e33c8689748b
|
[] |
no_license
|
privefl/bigutilsr
|
e8cce921638d1327a1038f6ac9b237eae9ca87de
|
bb760d109193d2163e869d9d231a8fdcba2ac96e
|
refs/heads/master
| 2022-12-27T01:39:56.076386
| 2022-12-20T14:36:53
| 2022-12-20T14:36:53
| 199,856,656
| 11
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 864
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): auto-generated R-to-C++ bridge.  Each function below is a thin
# wrapper that forwards its arguments unchanged to the registered native
# routine via .Call(); argument semantics live in the C++ sources.  Comments
# added here will be lost on the next compileAttributes() run.

# Graphical-lasso solver (compiled)
glasso <- function(mat, lambda, maxiter_outer, maxiter_lasso, tol, verbose) {
    .Call(`_bigutilsr_glasso`, mat, lambda, maxiter_outer, maxiter_lasso, tol, verbose)
}

# Elementwise sum into a preallocated temporary vector (compiled)
sum_in_temp <- function(x, y, tmp_vec) {
    .Call(`_bigutilsr_sum_in_temp`, x, y, tmp_vec)
}

# Elementwise difference into a preallocated temporary vector (compiled)
sub_in_temp <- function(x, y, tmp_vec) {
    .Call(`_bigutilsr_sub_in_temp`, x, y, tmp_vec)
}

# scaleTau2 robust scale estimate for a vector (compiled)
scaleTau2_vector_rcpp <- function(x, tmp_dev, tmp_med) {
    .Call(`_bigutilsr_scaleTau2_vector_rcpp`, x, tmp_dev, tmp_med)
}

# scaleTau2-based distances for a matrix (compiled)
dist_scaleTau2_matrix_rcpp <- function(Z) {
    .Call(`_bigutilsr_dist_scaleTau2_matrix_rcpp`, Z)
}

# Rolling mean with window w (compiled)
roll_mean <- function(x, w) {
    .Call(`_bigutilsr_roll_mean`, x, w)
}

# Row-wise sums of squares (compiled)
rowSumsSq <- function(source) {
    .Call(`_bigutilsr_rowSumsSq`, source)
}
|
c4afc0a1281e21ce2e86fd78a14b474a69913fee
|
93c0622c71200b59205fede951767246d4add492
|
/Monika Renska.R
|
fdf77a8135be5e12331682a78d4ef0473b991841
|
[] |
no_license
|
AdamJedz/Machine-Learning
|
06c86582c0bf7bb875442dda2aef55b01a538fec
|
d76dd4dbbdd662bafb65e358e0074c97c2032724
|
refs/heads/master
| 2020-04-12T19:24:54.929626
| 2019-09-04T11:41:30
| 2019-09-04T11:41:30
| 162,708,386
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,238
|
r
|
Monika Renska.R
|
library(shiny)
library(ISLR)
library(tidyverse)

# College is one of the data sets bundled with the ISLR package
# (the original comment wrongly referenced Boston from MASS)
college <- College

# UI: a Yes/No radio button filters plot1; a variable selector (any column
# except Private) drives the boxplot comparison in plot2
ui <- fluidPage(
  radioButtons("private",
    "Is private?",
    c("Yes" = "Yes",
      "No" = "No")),
  plotOutput("plot1"),
  selectInput("var", "Variable",
    choices = college %>% select(-Private) %>% colnames()),
  plotOutput("plot2")
)

server <- function(input, output) {
  # Scatter of applications received vs % of faculty with PhDs,
  # restricted to the selected Private status
  output$plot1 <- renderPlot({
    college %>%
      filter(Private == input$private) %>%
      ggplot(aes(x = Apps, y = PhD)) +
      geom_point(color = "brown4") +
      labs(title = "Number of applications received vs percentage of faculties with PhD's") +
      theme(plot.title = element_text(hjust = .5),
        plot.subtitle = element_text(hjust = .5))
  })

  # Boxplot of the chosen variable, split by Private status; aes_string is
  # used because the y variable arrives as a character string from the input
  output$plot2 <- renderPlot({
    college %>%
      ggplot(aes_string(x = "Private", y = input$var)) +
      geom_boxplot(aes(fill = Private)) +
      labs(title = paste0("Comparison of ", input$var, " for private and public colleges.")) +
      theme(plot.title = element_text(hjust = .5),
        plot.subtitle = element_text(hjust = .5))
  })
}

shinyApp(ui = ui, server = server)
|
9c73c83fc3985d534382a3aceede70f88bebf2af
|
e308da2d4deeab2298ec3356d35b0308b43ac02e
|
/R/helpers.R
|
fd69f85ab662997aaaa37f1570efe7d106c3ae23
|
[] |
no_license
|
hejtmy/eyelinkr
|
3d2c93352b6f0044c31581bbd6795dffcd54accf
|
022e8578bed3d2be53e73a7db99cb032cf10ddcf
|
refs/heads/master
| 2020-07-07T20:44:47.208886
| 2019-09-13T17:54:39
| 2019-09-13T17:54:39
| 171,920,561
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 479
|
r
|
helpers.R
|
contains_word <- function(ls, words){
  # Return TRUE if at least one pattern in `words` matches any element of
  # `ls`; FALSE otherwise (including for an empty `words` vector).
  # Patterns are interpreted as regular expressions (grepl() semantics).
  for (word in words) {
    # any() is the idiomatic (and short-circuiting) form of sum(...) > 0
    if (any(grepl(word, ls))) {
      return(TRUE)
    }
  }
  FALSE
}
rename_column <- function(df, old_column, new_column){
  # Rename every column of `df` whose name equals `old_column` to
  # `new_column`; a data frame with no matching column is returned unchanged.
  nms <- colnames(df)
  nms[nms == old_column] <- new_column
  colnames(df) <- nms
  df
}
remove_columns <- function(df, column_names){
  # Drop every column of `df` whose name appears in `column_names`;
  # names not present in `df` are silently ignored.
  keep <- !(colnames(df) %in% column_names)
  df[keep]
}
|
97a30a71476a8af21a58791535bd3f42420daf5a
|
973434feaf2da5e67b1850335e9d7c75b158127f
|
/plot6.R
|
5b139251b7e4db0941e35b50198a54ff0ac20084
|
[] |
no_license
|
eshtee/Cousera-Exploratory-Data-Analysis-Course-Project-02
|
5932d759f7f3eb8fc4f56eae592abc25fdf2b254
|
5b64b24966f5dd2fcf4a92192b006098674b1482
|
refs/heads/master
| 2020-12-26T04:04:25.455040
| 2015-09-27T23:06:00
| 2015-09-27T23:06:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,860
|
r
|
plot6.R
|
#######################################################################################
# #
# Author: Anderson Hitoshi Uyekita #
# Exploratory Data Analysis #
# Course Project 02 - Week 3 - Coursera #
# File: plot6.R #
# #
#######################################################################################

############################### 1. Work Directory #####################################

# Saving the original work directory
root_original <- getwd()

# All manipulation data will start in the root.
setwd("~")

################################ 2. Work Directory ####################################

# Create a project directory if it does not exist yet
if(!file.exists("Project02"))
{
  dir.create("Project02")
}

# Set as Work Directory
setwd("Project02")

################################ 3. Download Data #####################################

library(httr)
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"

# Download the archive only once
if(!file.exists("FNEI_data.zip"))
{
  download.file(url, "FNEI_data.zip")
}

# Removing the URL
rm(url)

# Unzipping the archive only if either RDS file is missing
if(!file.exists("Source_Classification_Code.rds") | !file.exists("summarySCC_PM25.rds"))
{
  unzip("FNEI_data.zip", list = FALSE, overwrite = TRUE)
}

file_unzipped <- c("./Source_Classification_Code.rds","./summarySCC_PM25.rds")

################################ 4. Loading the data ##################################

raw_dataset = list(data.frame(),data.frame())

# FIX: seq_along() instead of 1:length() -- the latter yields c(1, 0) when the
# vector is empty, which would index out of bounds.
for (i in seq_along(file_unzipped))
{
  raw_dataset[[i]] <- readRDS(file_unzipped[i])
}

names(raw_dataset) <- c("SCC","NEI")
rm(file_unzipped)

#################################### 5. Plot 6 #######################################

library(ggplot2)
library(plyr)

SCC <- raw_dataset$SCC
NEI <- raw_dataset$NEI

# Motor-vehicle (ON-ROAD) emissions for Baltimore City (24510) and
# Los Angeles County (06037), totalled per year and county
subset_NEI <- NEI[(NEI$fips=="24510"|NEI$fips=="06037") & NEI$type=="ON-ROAD", ]
data_plot <- aggregate(Emissions ~ year + fips, subset_NEI, sum)

# Replace fips codes with readable labels for the legend
data_plot$fips[data_plot$fips=="24510"] <- "Baltimore, MD"
data_plot$fips[data_plot$fips=="06037"] <- "Los Angeles, CA"

png(filename = "plot6.png", width=640, height=480)
plot <- ggplot(data_plot) + aes(x = factor(year), y = Emissions, group = factor(fips), colour = factor(fips)) + geom_line() + labs(title = expression('Vehicle Emissions of PM'[2.5] ~ ' in Baltimore City and Los Angeles County'), x = "Year", y = expression("Total PM"[2.5] ~ "emission (tons)"), colour = "fips") + theme(plot.title = element_text(size = 8))
print(plot)
dev.off()
|
71d09ca8a50cd8d687d321696345485d4754f154
|
77ecee52e46e7f21a0bcab7cb168f7846a467255
|
/man/patchdistr_sews_predict.Rd
|
e804158c8c524b5a600b74c35f031153d8391990
|
[
"MIT"
] |
permissive
|
spatial-ews/spatialwarnings
|
fc4410f47f0217f1ab79a89b4b2691a574f8ad07
|
607523388e544e2b3993be447c808b116e2c191d
|
refs/heads/master
| 2023-07-06T05:01:57.738005
| 2023-07-03T10:32:56
| 2023-07-03T10:32:56
| 37,706,570
| 15
| 5
|
NOASSERTION
| 2022-03-10T00:13:55
| 2015-06-19T06:50:52
|
R
|
UTF-8
|
R
| false
| true
| 1,564
|
rd
|
patchdistr_sews_predict.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/task_patch_indic_methods.R
\name{patchdistr_sews_predict}
\alias{patchdistr_sews_predict}
\alias{predict.patchdistr_sews_single}
\title{predict method for patchdistr_sews objects}
\usage{
\method{predict}{patchdistr_sews_single}(object, ..., newdata = NULL, best_only = FALSE, xmin_rescale = FALSE)
}
\arguments{
\item{object}{An \code{\link{patchdistr_sews}} object}
\item{...}{Additional arguments (ignored)}
\item{newdata}{A vector of patch sizes at which the fit is returned (default
to 200 regularly-spaced values).}
\item{best_only}{Return values for only the best fit of each element (matrix)
in \code{object}, or return the values for all fitted distribution.}
\item{xmin_rescale}{If the xmin value used for fits is above one, then setting this
to \code{TRUE} will rescale the predicted probabilities so that they align on
the cumulative distribution of the observed patch sizes}
}
\value{
A list with component obs, a data.frame containing the observed
distribution values and pred, a data.frame containing the fitted
values.
}
\description{
Export the observed and fitted patch size distributions
}
\details{
The function \code{\link{patchdistr_sews}} fits competing
distribution models to the observed patch size distributions. This
functions is able to export the observed values and the fitted values
altogether.
}
\examples{
\dontrun{
patch_indics <- patchdistr_sews(forestgap)
predict(patch_indics)
}
}
\seealso{
\code{\link{patchdistr_sews}}
}
|
0b01d3a210542809d20aae90fe402dcaa7efc4df
|
c2ba85305d0db98f9fcda395fc48737592ef6c63
|
/plot2.R
|
4d30f7c76e875f495e372922df3c1b7855a02ff2
|
[] |
no_license
|
surabhin15/ExData_Plotting1
|
d0d1708a1a453327bb06e33f5f13521fe2d1bab2
|
3cb0d8fa4b54645d0fe8805b9e096bce1888438f
|
refs/heads/master
| 2021-06-28T03:48:09.203588
| 2017-09-18T08:01:44
| 2017-09-18T08:01:44
| 103,889,026
| 0
| 0
| null | 2017-09-18T03:53:31
| 2017-09-18T03:53:30
| null |
UTF-8
|
R
| false
| false
| 557
|
r
|
plot2.R
|
# Exploratory Data Week1
# Course Project Quiz 1
# Surabhi Naik

# Loading the data (semicolon-separated text file)
epc <- read.table("household_power_consumption.txt", stringsAsFactors=FALSE, header = TRUE, sep = ";")

# Subsetting the data based on 1/2/2007 and 2/2/2007 dates
SubSetData <- epc[epc$Date %in% c("1/2/2007", "2/2/2007"),]

# Combine the Date and Time columns into POSIXlt timestamps for the x-axis
Date_Time <- strptime(paste(SubSetData$Date, SubSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")

# Plotting the data (plot2.png)
# FIX: the original never opened a png device, so the plot went to the default
# device and dev.off() closed the wrong one; open plot2.png explicitly.
png("plot2.png", width = 480, height = 480)
# FIX: with stringsAsFactors=FALSE the power column is read as character when
# the raw file codes missing values as "?", so coerce to numeric for plotting
# (a no-op if the column is already numeric).
plot(Date_Time, as.numeric(SubSetData$Global_active_power), type = "l", ylab = "Global Active Power (Kilowatts)")
dev.off()
|
0e9f4649ab4e3cf369124bfa7db3eca13d1d164a
|
c409ff3ea8b7c62efd962d37c83793d4fc0dc1bc
|
/man/cq_read_itanal.Rd
|
11cde9457ac975169eee9668b745d37f1fd78458
|
[
"MIT"
] |
permissive
|
markdly/conquestr
|
e225ecb1347957dc025c5c719d46624f56e01207
|
7994b3768e26acf1be4ac20821da66ba7f564deb
|
refs/heads/master
| 2021-04-30T10:41:43.513747
| 2018-09-12T05:46:02
| 2018-09-12T05:46:02
| 121,339,581
| 1
| 0
| null | 2018-09-12T05:46:03
| 2018-02-13T04:43:27
|
R
|
UTF-8
|
R
| false
| true
| 573
|
rd
|
cq_read_itanal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cq_itanal.R
\name{cq_read_itanal}
\alias{cq_read_itanal}
\title{Read in a traditional ConQuest item analysis file as text}
\usage{
cq_read_itanal(fname)
}
\arguments{
\item{fname}{A file path to an existing ConQuest itanal file; any input accepted by \code{readr::read_file} can also be used}
}
\value{
A tibble containing parsed results of the itanal file.
}
\description{
Read in a traditional ConQuest item analysis file as text
}
\examples{
fname <- cq_example(display = FALSE)
df <- cq_read_itanal(fname)
}
|
71a730b68511a8955d661a96967ca97b4a89c9ca
|
e478b48a472bc520e3e9e545b8387a50e7a2669b
|
/Analytics_Edge/Kaggle_Competition/final_03.R.r
|
ecefff11eccecd2a31a1788b99db8b1771c56505
|
[] |
no_license
|
lastworden/Statistics
|
98fcfcd65986f7b85e2aa03b439a169f4be7a807
|
f831be57823b749d2001283b5089d63afe2d160c
|
refs/heads/master
| 2021-01-21T04:46:54.225238
| 2016-07-03T18:48:20
| 2016-07-03T18:48:20
| 55,667,255
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,728
|
r
|
final_03.R.r
|
# Kaggle competition (Analytics Edge): cluster-then-predict model for party
# affiliation. Workflow: normalize features, k-means into 3 clusters, fit a
# CART tree per cluster, write a submission, then a PCA + random-forest
# experiment at the end.
# NOTE(review): no set.seed() is called, so kmeans() and randomForest()
# results are not reproducible between runs.
pollTrain = read.csv("normPollElabTrain.csv")
pollTest = read.csv("normPollElabTest.csv")
# Quick inspection of the available columns.
names(pollTrain)
names(pollTest)
# Keep the outcome (Party) and the test-set ids aside before stripping them
# from the feature matrices.
tr_label = pollTrain$Party
tst_ids = pollTest$USER_ID
Train = pollTrain
Train$Party = NULL
Test = pollTest
Test$USER_ID = NULL
library(rpart)
library(e1071)
library(randomForest)
library(caret)
# Center/scale using statistics from the training set only, then apply the
# same transformation to the test set.
preproc = preProcess(Train)
normTrain = predict(preproc,Train)
normTest = predict(preproc, Test)
# Cluster the normalized training data into 3 groups.
km = kmeans(normTrain, centers = 3)
library(flexclust)
# Convert to a kcca object so cluster membership can be predicted for new
# observations (the test set).
km.kcca = as.kcca(km,normTrain)
clusterTrain = predict(km.kcca)
clusterTest = predict(km.kcca, newdata = normTest)
table(clusterTest)
table(clusterTrain)
# Split train/test into their per-cluster subsets.
cTrain1 = subset(Train, clusterTrain == 1)
cTrain2 = subset(Train, clusterTrain == 2)
cTrain3 = subset(Train, clusterTrain == 3)
cTest1 = subset(Test, clusterTest == 1)
cTest2 = subset(Test, clusterTest == 2)
cTest3 = subset(Test, clusterTest == 3)
# Re-attach ids and labels to the matching cluster subsets.
cTest1$USER_ID = subset(tst_ids, clusterTest == 1)
cTest2$USER_ID = subset(tst_ids, clusterTest == 2)
cTest3$USER_ID = subset(tst_ids, clusterTest == 3)
cTrain1$Party = subset(tr_label, clusterTrain == 1)
cTrain2$Party = subset(tr_label, clusterTrain == 2)
cTrain3$Party = subset(tr_label, clusterTrain == 3)
table(cTrain3$Party)
# One CART tree per cluster on four exponentiated survey-answer indicators.
Tree1 = rpart(Party~exp(Q109244Yes) + exp(Q115611Yes) + exp(Q98197Yes) +
                exp(Q98869Yes), data = cTrain1, method = "class")
Tree2 = rpart(Party~exp(Q109244Yes) + exp(Q115611Yes) + exp(Q98197Yes) +
                exp(Q98869Yes), data = cTrain2, method = "class")
Tree3 = rpart(Party~exp(Q109244Yes) + exp(Q115611Yes) + exp(Q98197Yes) +
                exp(Q98869Yes), data = cTrain3, method = "class")
# In-sample predictions and accuracy (diagonal of each confusion table).
predTree1= predict(Tree1,type="class")
predTree2= predict(Tree2,type="class")
predTree3= predict(Tree3,type="class")
t1 = table(cTrain1$Party,predTree1)
t1
sum(diag(t1))/sum(t1)
t2 = table(cTrain2$Party,predTree2)
t2
sum(diag(t2))/sum(t2)
t3 = table(cTrain3$Party,predTree3)
t3
sum(diag(t3))/sum(t3)
# Out-of-sample predictions per cluster, recombined for the submission file.
predTree1= predict(Tree1,newdata = cTest1,type="class")
predTree2= predict(Tree2,newdata = cTest2,type="class")
predTree3= predict(Tree3,newdata = cTest3,type="class")
result = data.frame(USER_ID = c(cTest1$USER_ID,cTest2$USER_ID,cTest3$USER_ID), Predictions=c(predTree1,predTree2,predTree3))
# c() on factor predictions yields integer level codes, hence the ==1 test;
# level 1 maps to "Democrat", everything else to "Republican".
result$Predictions = ifelse(result$Predictions==1, "Democrat","Republican")
str(result)
table(result$Predictions)
write.csv(result, "Submission_FE_03.csv", row.names=FALSE)
# ---- PCA + random-forest experiment on the normalized training data ----
datpca=prcomp(normTrain, scale. = T)
dim(datpca$x)
summary(datpca)
str(as.data.frame(datpca$x))
pcaDat = as.data.frame(datpca$x)
str(pcaDat)
# Keep the first 50 principal components as features.
pcaDat = pcaDat [,1:50]
str(pcaDat)
# NOTE(review): randomForest() does classification only for a factor
# response -- confirm pollTrain$Party is a factor after read.csv().
pcaDat$Party = pollTrain$Party
str(pcaDat$Party)
Log1 = randomForest(Party~., data = pcaDat)
t = table(pcaDat$Party,predict(Log1))
t
sum(diag(t))/sum(t)
|
3e055044eb9c90f63084f94444c1467d83b8dc67
|
8ec922bbf13640a10aacdd67192ce34585200b22
|
/R/CorrSurface.R
|
224a722d391a966a6964c224628912dca7dfdbe1
|
[] |
no_license
|
OUKUN0705/timeseriesmom
|
93a4c4e73589f7714efaf502de93a9df58f82f65
|
8d8b9d99a27f2cea6cc24c254b2ca22ef1b1d9cb
|
refs/heads/master
| 2021-01-11T06:49:42.116154
| 2017-02-01T08:56:04
| 2017-02-01T08:56:04
| 66,509,396
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,969
|
r
|
CorrSurface.R
|
#' CorrSurface
#'
#' Computes, over a grid of lookback and holding-period lengths, the
#' correlation between past and future returns of a price series; the result
#' can be rendered as a 3-D correlation surface.
#'
#' @param price an object of time series representing a price series.
#' @param lookback_seq a sequence of lookback lengths (positive integers).
#' @param holddays_seq a sequence of holding-period lengths (positive
#'   integers).
#' @param Sign whether the returns or signs of returns are used to calculate
#'   correlation, default is FALSE.
#' @param return_method method of calculating returns, one of "arithmetic"
#'   or "log".
#'
#' @return an object of class "CorrSurface": a list with the per-grid-point
#'   correlation results plus the inputs used to produce them.
#' @export
#'
#' @examples
#' lookback_seq <- seq(from = 1, to = 200, by = 10)
#' holddays_seq <- seq(from = 1, to = 100, by = 10)
#' getSymbols("^GSPC", from = 2010, to = Sys.Date())
#' price <- GSPC[, 6]
#' cs <- CorrSurface(price, lookback_seq = lookback_seq, holddays_seq = holddays_seq,
#'                   Sign = TRUE, return_method = "log")
CorrSurface <- function(price, lookback_seq, holddays_seq, Sign = FALSE,
                        return_method = c("arithmetic", "log")) {
  ## ---- check input arguments ----
  price <- as.xts(price)
  if (sum(class(price) %in% c("zoo", "xts")) == 0)
    stop("Please provide with time series object")
  if (missing(lookback_seq))
    stop("Need to specify lookback sequency.")
  # BUG FIX: the original tested 'lookback_seq != lookback_seq', which is
  # always FALSE; compare against round() to actually reject non-integer
  # lookbacks. (Same fix below for holddays_seq.)
  if (any(lookback_seq <= 0) || any(lookback_seq != round(lookback_seq)))
    stop("lookback should be positive integer.")
  if (missing(holddays_seq))
    stop("Need to specify holddays sequency.")
  if (any(holddays_seq <= 0) || any(holddays_seq != round(holddays_seq)))
    stop("holddays should be positive integer.")
  if (!is.logical(Sign))
    stop("Sign should be logical variable.")
  # BUG FIX: match.arg() both validates the choice and resolves the length-2
  # default vector to a single value; the original
  # 'if (!return_method %in% ...)' produced a length-2 condition whenever
  # the default was used.
  return_method <- match.arg(return_method)

  ## ---- compute correlations on the (lookback, holddays) grid ----
  # Expand the grid so purrr::pmap() can map LookbackHoldCorr over every
  # (lookback, holddays) pair, replacing explicit nested for-loops.
  lookback_grid <- sort(rep(lookback_seq, length(holddays_seq)))
  holddays_grid <- rep(holddays_seq, length(lookback_seq))
  result <- do.call(rbind,
                    pmap(list(lookback_grid, holddays_grid),
                         LookbackHoldCorr,
                         price = price,
                         return_method = return_method,
                         Sign = Sign))

  out <- list(result = result,
              lookback_seq = lookback_seq,
              holddays_seq = holddays_seq,
              Sign = Sign,
              return_method = return_method)
  class(out) <- "CorrSurface"
  return(out)
}
|
07cfe0b0080bcc053cfde75a3fc3753bd49e5025
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/interplot/R/Interplot_plot.R
|
a1d0f6577006f989bf27e8d0594cf62fca713c32
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,222
|
r
|
Interplot_plot.R
|
#' Plot Conditional Coefficients in Models with Interaction Terms
#'
#' Graph based on the data frame of statistics about the conditional effect of an interaciton.
#'
#' @param m A model object including an interaction term, or, alternately, a data frame recording conditional coefficients. This data frame should includes four columns:
#' \itemize{
#' \item fake: The sequence of \code{var1} (the item whose effect will be conditioned on in the interaction);
#' \item coef1: The point estimates of the coefficient of \code{var1} at each break point.
#' \item ub: The upper bound of the simulated 95\% CI.
#' \item lb: The lower bound of the simulated 95\% CI.
#' }
#' @param var1 The name (as a string) of the variable of interest in the interaction term; its conditional coefficient estimates will be plotted.
#' @param var2 The name (as a string) of the other variable in the interaction term.
#' @param plot A logical value indicating whether the output is a plot or a dataframe including the conditional coefficient estimates of var1, their upper and lower bounds, and the corresponding values of var2.
#' @param hist A logical value indicating if there is a histogram of `var2` added at the bottom of the conditional effect plot.
#' @param var2_dt A numerical vector giving the frequency distribution of `var2`. It is only used when `hist == TRUE`. When the object is a model, the default is the distribution of `var2` of the model.
#' @param point A logical value determining the format of plot. By default, the function produces a line plot when var2 takes on ten or more distinct values and a point (dot-and-whisker) plot otherwise; option TRUE forces a point plot.
#' @param sims Number of independent simulation draws used to calculate upper and lower bounds of coefficient estimates: lower values run faster; higher values produce smoother curves.
#' @param xmin A numerical value indicating the minimum value shown of x shown in the graph. Rarely used.
#' @param xmax A numerical value indicating the maximum value shown of x shown in the graph. Rarely used.
#' @param ercolor A character value indicating the outline color of the whisker or ribbon.
#' @param esize A numerical value indicating the size of the whisker or ribbon.
#' @param ralpha A numerical value indicating the transparency of the ribbon.
#' @param rfill A character value indicating the filling color of the ribbon.
#' @param ... Other ggplot aesthetics arguments for points in the dot-whisker plot or lines in the line-ribbon plots. Not currently used.
#'
#' @details \code{interplot.plot} is a S3 method from the \code{interplot}. It generates plots of conditional coefficients.
#'
#' Because the output function is based on \code{\link[ggplot2]{ggplot}}, any additional arguments and layers supported by \code{ggplot2} can be added with the \code{+}.
#'
#' @return The function returns a \code{ggplot} object.
#'
#' @import ggplot2
#' @importFrom graphics hist
#'
#' @export
## S3 method for class 'data.frame'
# S3 method for 'interplot': given a data frame m of conditional-coefficient
# estimates (columns fake, coef1, ub, lb), build a ggplot of the conditional
# effect, optionally overlaying a histogram of the conditioning variable.
# NOTE(review): var1, var2, plot, sims, xmin and xmax are accepted for
# signature compatibility with the other interplot methods but are not used
# in this function body.
interplot.plot <- function(m, var1, var2, plot = TRUE, hist = FALSE, var2_dt = NULL, point = FALSE,
    sims = 5000, xmin = NA, xmax = NA, ercolor = NA, esize = 0.5, ralpha = 0.5, rfill = "grey70",
    ...) {
    # Number of break points of the conditioning variable.
    steps <- nrow(m)
    levels <- sort(unique(m$fake))
    ymin <- ymax <- vector() # to deal with the "no visible binding for global variable" issue
    if (hist == FALSE) {
        # No histogram: dot-and-whisker plot when there are few break points
        # (or point = TRUE is forced), otherwise a line-and-ribbon plot.
        if (steps < 10 | point == T) {
            if (is.na(ercolor))
                {
                ercolor <- "black"
                } # ensure whisker can be drawn
            coef.plot <- ggplot(m, aes_string(x = "fake", y = "coef1")) + geom_point(...) + geom_errorbar(aes_string(ymin = "lb",
                ymax = "ub"), width = 0, color = ercolor, size = esize) + scale_x_continuous(breaks = levels) +
                ylab(NULL) + xlab(NULL)
        } else {
            coef.plot <- ggplot(m, aes_string(x = "fake", y = "coef1")) + geom_line(...) + geom_ribbon(aes_string(ymin = "lb",
                ymax = "ub"), alpha = ralpha, color = ercolor, fill = rfill) + ylab(NULL) + xlab(NULL)
        }
        return(coef.plot)
    } else {
        # With histogram: same two plot shapes, but a scaled histogram of
        # var2_dt is drawn as rectangles below the effect plot. The two
        # branches intentionally duplicate the histogram construction.
        if (steps < 10 | point == T) {
            if (is.na(ercolor))
                {
                ercolor <- "black"
                } # ensure whisker can be drawn
            # y-range includes var2_dt here so the rectangles fit the panel.
            yrange <- c(m$ub, m$lb, var2_dt)
            maxdiff <- (max(yrange) - min(yrange))
            # Cap the number of histogram bins at 100.
            break_var2 <- length(unique(var2_dt))
            if (break_var2 >= 100)
                break_var2 <- 100
            hist.out <- hist(var2_dt, breaks = break_var2, plot = FALSE)
            n.hist <- length(hist.out$mids)
            dist <- hist.out$mids[2] - hist.out$mids[1]
            hist.max <- max(hist.out$counts)
            # Rectangle coordinates: counts rescaled to occupy the bottom
            # 1/5 of the plotting range.
            histX <- data.frame(ymin = rep(min(yrange) - maxdiff/5, n.hist), ymax = hist.out$counts/hist.max *
                maxdiff/5 + min(yrange) - maxdiff/5, xmin = hist.out$mids - dist/2, xmax = hist.out$mids +
                dist/2)
            coef.plot <- ggplot()
            coef.plot <- coef.plot + geom_rect(data = histX, aes(xmin = xmin, xmax = xmax, ymin = ymin,
                ymax = ymax), colour = "gray50", alpha = 0, size = 0.5) # histogram of var2 at the bottom
            coef.plot <- coef.plot + geom_point(data = m, aes_string(x = "fake", y = "coef1")) +
                geom_errorbar(data = m, aes_string(x = "fake", ymin = "lb", ymax = "ub"), width = 0,
                    color = ercolor, size = esize) + scale_x_continuous(breaks = levels) + ylab(NULL) +
                xlab(NULL)
        } else {
            yrange <- c(m$ub, m$lb)
            maxdiff <- (max(yrange) - min(yrange))
            break_var2 <- length(unique(var2_dt))
            if (break_var2 >= 100)
                break_var2 <- 100
            hist.out <- hist(var2_dt, breaks = break_var2, plot = FALSE)
            n.hist <- length(hist.out$mids)
            dist <- hist.out$mids[2] - hist.out$mids[1]
            hist.max <- max(hist.out$counts)
            histX <- data.frame(ymin = rep(min(yrange) - maxdiff/5, n.hist), ymax = hist.out$counts/hist.max *
                maxdiff/5 + min(yrange) - maxdiff/5, xmin = hist.out$mids - dist/2, xmax = hist.out$mids +
                dist/2)
            # interplot.plot(m = coef, var1 = 'cyl', var2 = 'wt')
            coef.plot <- ggplot()
            coef.plot <- coef.plot + geom_rect(data = histX, aes(xmin = xmin, xmax = xmax, ymin = ymin,
                ymax = ymax), colour = "gray50", alpha = 0, size = 0.5)
            coef.plot <- coef.plot + geom_line(data = m, aes_string(x = "fake", y = "coef1")) +
                geom_ribbon(data = m, aes_string(x = "fake", ymin = "lb", ymax = "ub"), alpha = ralpha,
                    color = ercolor, fill = rfill) + ylab(NULL) + xlab(NULL)
        }
        return(coef.plot)
    }
}
|
0df494ea7c0c2b0417c2190992abaa9a50939e35
|
86f9f3fbaf06199bbe26e2aa93947d20b355ae7b
|
/R/analysis/control_only/summarise-control-only-DE-vs-variance.R
|
143ceb2739f29dea905b83512f7e256029019013
|
[] |
no_license
|
phycomlab/DE-analysis
|
7cd6e8d7b78be2a665b403190a9a7aea2c80e3b2
|
167f55a5bff09f23cfd5b535763e13db2bc278b3
|
refs/heads/master
| 2023-06-24T16:11:14.348901
| 2021-08-04T17:18:41
| 2021-08-04T17:18:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,023
|
r
|
summarise-control-only-DE-vs-variance.R
|
# Summarise control-only differential-expression (DE) results against the
# change in pseudobulk variance introduced by shuffling replicates.
# NOTE(review): hard-coded repo path; the script assumes this checkout layout.
setwd("~/git/DE-analysis")
options(stringsAsFactors = F)
library(tidyverse)
library(magrittr)
library(data.table)
# detect_system.R is expected to define `base_dir`, used for all paths below.
args = list(); source("R/functions/detect_system.R")
# list input files
input_dir = file.path(base_dir, "analysis", "expr_summary", "control_only")
input_files = list.files(input_dir, pattern = '*\\.csv\\.gz', full.names = TRUE)
# we don't need to summarize all data, so let's filter here.
# Parse "<dataset>-<label>.csv.gz"-style filenames into dataset/label columns,
# keeping idx so the file list can be subset to the matching rows later.
meta = data.frame(filename = basename(input_files)) %>%
  mutate(idx = row_number()) %>%
  separate(filename, into = c('dataset', 'label'), sep = '-') %>%
  mutate_all(~ gsub("^.*=|\\.csv\\.gz", "", .)) %>%
  type_convert()
# filter to control groups in simple experiments only
# (named vector mapping each dataset to its control-group label)
keep = c('Goldfarbmuren2020' = 'never', ## lung from never smokers
         'Grubman2019' = 'Control', ## ALZ control brains
         'Hrvatin2018' = '0h', ## light-deprived mice
         'Huang2020' = 'control', ## colonic mucosa in healthy children
         'Kang2018' = 'ctrl', ## unstimulated PBMCs
         'Mathys2019' = 'Control', ## ALZ control brains
         'Nagy2020' = 'Control', ## MDD control brains
         'Reyfman2020' = 'Control', ## healthy lungs
         'Rossi2019' = 'control', ## mice on a control diet
         'Sathyamurthy2018' = 'control', ## healthy mouse spinal cord
         'Smillie2019' = 'Healthy', ## healthy colon
         'Tran2019' = 'Ctrl', ## uninjured RGCs
         'Wilk2020' = 'Healthy', ## control PBMCs
         'Wu2017' = 'control' ## control mice
         ) %>%
  data.frame(dataset = names(.), label = .)
# filter metadata/files accordingly
meta0 = inner_join(meta, keep, by = c('dataset', 'label'))
input_files %<>% extract(meta0$idx)
# read all data
dats = map(input_files, fread)
# combine into a single data frame
dat = bind_rows(dats)
# last, we also need to load the DE results
DE = readRDS(file.path(base_dir, "analysis", "summary_data",
                       "control_only.rds"))
n_DE = readRDS("data/analysis/control_only/n_DE_genes.rds")
## outcome 1: write mean delta-variance for each cell type in each dataset
# delta_variance = shuffled-replicate variance minus pseudobulk variance.
delta_vars = dat %>%
  drop_na(pseudobulk_variance, shuffled_variance) %>%
  mutate(delta_variance = shuffled_variance - pseudobulk_variance) %>%
  group_by(dataset, label, cell_type) %>%
  summarise(mean_delta = mean(delta_variance)) %>%
  ungroup()
saveRDS(delta_vars, "data/analysis/control_only/delta_variance.rds")
## outcome 2: number of DE genes in each bin
bins = 10
# NOTE(review): `xy` is never defined in this script -- presumably the DE
# results joined to the per-gene delta-variance data; confirm the missing
# upstream step before running past this point.
xy0 = xy %>%
  mutate(abs_delta_variance = abs(delta_variance))
bin_results = xy0 %>%
  # bin expression levels
  # (genes split into `bins` equal-sized quantile bins of |delta variance|)
  group_by(dataset, label, cell_type, de_test, shuffle_replicates) %>%
  arrange(abs_delta_variance) %>%
  mutate(bin = cut(row_number() / n(),
                   breaks = seq(0, bins) / bins),
         bin = as.integer(bin)) %>%
  ungroup() %>%
  # count DE genes in each bin
  group_by(dataset, label, cell_type, de_test, shuffle_replicates, bin) %>%
  summarise(genes = sum(p_val_adj < 0.05)) %>%
  ungroup()
saveRDS(bin_results, "data/analysis/control_only/genes_per_bin.rds")
|
005ef3bb2ca3537182ae7c79e2829d58685eb346
|
893c52eb5c29634e8b6ea95f5300f656d12960a8
|
/man/Methods_SFI.Rd
|
59bf4026ac1e715c20eab5d0888336213cb621b6
|
[] |
no_license
|
DPCscience/SFSI
|
7eef35cd9b2f29644638442dc5c16c2a2fde9fb3
|
2a584c4ae958dc81ad2eb22501b15d1780fd910d
|
refs/heads/master
| 2022-07-05T16:53:51.389301
| 2020-05-19T21:00:00
| 2020-05-19T21:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,513
|
rd
|
Methods_SFI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/R_utils_wC.R
\name{Methods_SFI}
\alias{summary.SFI}
\alias{plot.SFI}
\alias{coef.SFI}
\alias{fitted.SFI}
\title{SFI methods}
\usage{
\method{coef}{SFI}(object, ...)
\method{fitted}{SFI}(object, ...)
\method{summary}{SFI}(object, ...)
\method{plot}{SFI}(..., title=NULL, py=c("accuracy","MSE"))
}
\arguments{
\item{object}{An object of the class 'SFI'. One or more objects must be passed as \code{...} in the function \code{plot}}
\item{py}{Indicates whether to plot correlation (between observed and predicted) or MSE in y-axis}
\item{title}{Title of the plot}
\item{...}{Arguments to be passed:
\itemize{
\item \code{df}: Degrees of freedom: average number of predictors with non-zero coefficient (for function \code{coef})
\item \code{object}: One or more objects of the class 'SFI' (for function \code{plot})
\item Not needed for \code{summary} and \code{fitted} functions
}
}
}
\description{
\itemize{
\item \code{'coef'}: Retrieves the regression coefficients for each value of lambda for each individual in the testing set.
\item \code{'fitted'}: Returns the predicted values for each value of lambda (in columns) for each individual in the testing set (in rows). When using 'lars2' or 'solveEN' functions, a matrix \code{X} of predictors is needed
\item \code{'summary'}: Returns accuracy, MSE, df, achieved by each value of the vector lambda. Also returns summary for the SFI with maximum accuracy and with minimum MSE.
\item \code{'plot'}: Creates a plot of either accuracy or MSE versus the (average) number of predictors and versus lambda (in negative logarithm).
}
}
\examples{
require(SFSI)
data(wheat.E1)
data(wheat.G) # Genomic relationship matrix
y = tapply(Y[,"YLD"],Y[,"gid"],mean)
index <- intersect(rownames(G),names(y))
G = G[index,index]
y = as.vector(y[index])
fm1 = SFI(y,K=G,tst=1:100,trn=101:length(y))
fm2 = SFI(y,K=G,tst=1:100,trn=101:length(y),alpha=0.5,name="alpha=0.5")
yHat = fitted(fm1) # Predicted values for each SFI
corTST = summary(fm1)$accuracy # Testing set accuracy (correlation cor(y,yHat))
summary(fm1)$optCOR # SFI with maximum accuracy
summary(fm1)$optMSE # SFI with minimum MSE
plot(fm1,title=expression('corr('*y[obs]*','*y[pred]*') vs sparsity'))
plot(fm1,py="MSE",title='Mean Square Error vs sparsity')
plot(fm1,fm2)
}
\author{
Marco Lopez-Cruz (\email{lopezcru@msu.edu}) and Gustavo de los Campos
}
|
a0bc395f8709bc3fb34292abe9df4854b7504f22
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/configr/examples/config.help.Rd.R
|
134837ac155d8517c19e1ebb160e9348100c88ff
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 294
|
r
|
config.help.Rd.R
|
# Auto-extracted example script for configr::config.help; runs the package's
# documented examples (## Not run blocks are kept commented out).
library(configr)
### Name: config.help
### Title: Function to access external helps about configurations format or
###   other related information
### Aliases: config.help
### ** Examples
# Open the external help about configuration formats.
config.help()
## Not run: 
##D config.help(1)
##D config.help('ini_git_search')
## End(Not run)
|
5db51de182ca0529e7fa4fbf5c8248f46936c263
|
b0bd43f8ce57af3cbdf037870c131e75bd412423
|
/class_exercise.R
|
a9215392feb8a745bc0360c0d035fa900f8d29d2
|
[] |
no_license
|
NickAnderson94/class_project
|
0a0d28e2cd01f8f8e76813387ad5f30d592dc6b1
|
373eb817ed5f027d74badbabfb56a4c777942532
|
refs/heads/master
| 2020-07-29T04:54:50.266922
| 2019-09-20T01:16:19
| 2019-09-20T01:16:19
| 209,677,361
| 0
| 0
| null | 2019-09-20T01:14:17
| 2019-09-20T01:14:15
| null |
UTF-8
|
R
| false
| false
| 29
|
r
|
class_exercise.R
|
#Nick Anderson
#Blue
#Royston
|
a559d89281c5209513fce86991c85ba2bb7c00a6
|
0f8fd1ff0c79632bf090b44f853acacfa8961319
|
/R/read_EMSTRANS.R
|
d7b8935b56ed58e05fd0f39f80170bbef209cf12
|
[] |
no_license
|
BAAQMD/CEIDARS
|
1fb5e66f5b703771ac111597e468f028114a3a28
|
8d31a1d68a24cb33b47945e85e9f841553b60f53
|
refs/heads/master
| 2022-08-30T09:28:35.463581
| 2022-08-26T13:41:16
| 2022-08-26T13:41:16
| 25,991,490
| 0
| 0
| null | 2022-08-26T13:41:18
| 2014-10-30T21:59:43
| null |
UTF-8
|
R
| false
| false
| 1,282
|
r
|
read_EMSTRANS.R
|
#' read_EMSTRANS
#'
#' @describeIn read_CEIDARS Read EMSTRANS-formatted file
#'
#' @export
read_EMSTRANS <- function (
  path,
  ...
) {
  # Column specification for the EMSTRANS transaction layout. UEMFACT and
  # EMFACT are parsed as numbers even though the CEIDARS2.5 spec declares
  # them as integers.
  emstrans_spec <- readr::cols(
    TRANS_ID = col_character(),
    CO = col_integer(),
    FACID = col_integer(),
    AB = col_character(),
    DIS = col_character(),
    ACTION = col_character(),
    DEV = col_integer(),
    PROID = col_integer(),
    POL = col_integer(),
    UEMFACT = col_number(),
    CNTL1 = col_integer(),
    CNTL2 = col_integer(),
    CNTLEFF = col_number(),
    EMFACT = col_number(),
    EMORIG = col_integer(),
    EMREL = col_integer(),
    CR_FLAG = col_integer(),
    EMS = col_number(),
    HRMAXEMS = col_number(),
    METH = col_integer(),
    REASCH = col_integer(),
    EXEMS = col_number(),
    UNREMS = col_number(),
    POTENTIAL = col_number(),
    EMS_FORECAST = col_character(),
    EMSUP = col_date(format = "%Y%m%d"),
    MAINTAINED = col_character(),
    MEMO_EMS = col_character(),
    OPERATOR = col_character(),
    TDATE = col_date(format = "%Y%m%d"))

  # Parse the file with the fixed column types; extra arguments are passed
  # straight through to the reader.
  tbltools::read_csv(path, col_types = emstrans_spec, ...)
}
|
5796dbacc1913c0a9cbef2898a7f4363235d087f
|
3f833b9c66b13d8a583f418d6bb4ac90cc73f1b3
|
/R/a11y.R
|
b62ba9b1bb92e6cf7634fac77bf19625c8903e4d
|
[] |
no_license
|
gadenbuie/alix
|
7cd689268ee5648287db3ec0ed8a49b313cc2714
|
66949e5db197f5fe9b02bb64b3b81c968ca53739
|
refs/heads/main
| 2023-09-02T15:52:49.686695
| 2020-06-26T20:46:39
| 2020-06-26T20:46:39
| 275,244,252
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,503
|
r
|
a11y.R
|
#' Test your document or app for accessibility issues.
#'
#' Includes [a11y.css](https://github.com/ffoodd/a11y.css) so that potential
#' accessibility problems in your HTML document or Shiny web app are surfaced
#' as on-page warnings and diagnostics.
#'
#' @param language Your desired language
#' @param level The level of errors:
#'
#'   * `all`: every messages (called `advice` by `a11y.css`)
#'   * `warning`: warnings and errors;
#'   * `obsolete`: obsolete stuff, warnings and errors;
#'   * `error`: only errors.
#'
#' @return An `htmltools` tag list with the `a11y.css` dependency.
#' @export
a11y_css <- function(
  language = c("en", "ar", "fr", "gr", "pt-br", "ru", "zh"),
  level = c("all", "error", "obsolete", "warning")
) {
  # Build the HTML dependency and wrap it in a tag list so it can be
  # dropped directly into UI code.
  dependency <- html_dependency_a11y_css(language, level)
  htmltools::tagList(dependency)
}
#' @describeIn a11y_css The `a11y.css` html dependency alone
#' @export
html_dependency_a11y_css <- function(
  language = c("en", "ar", "fr", "gr", "pt-br", "ru", "zh"),
  level = c("all", "error", "obsolete", "warning")
) {
  # match.arg() validates the user's choice and collapses the default
  # vector to its first element.
  language <- match.arg(language)
  # Consistency fix: the original used `=` here while the sibling line used
  # `<-`.
  level <- match.arg(level)
  # "all" maps to the bare per-language stylesheet; every other level adds a
  # "_<level>" suffix, e.g. "a11y-en_error.css".
  suffix <- if (level == "all") "" else paste0("_", level)
  style_file <- paste0("a11y-", language, suffix, ".css")
  htmltools::htmlDependency(
    name = "a11y.css",
    version = a11y_version(),
    package = "alix",
    src = "css",
    stylesheet = style_file,
    all_files = FALSE
  )
}
# Return the version string of the bundled a11y.css assets, read from the
# ".version" file shipped in the package's css/ directory.
a11y_version <- function() {
  version_path <- system.file("css", ".version", package = "alix")
  readLines(version_path, warn = FALSE)
}
|
bd84818ead8c288fff12878ed2b337bb8b319bab
|
a9ba290ca051038e90a885fbfebc3813816b0bd0
|
/man/msik.Rd
|
e19d5b2bc0ebb1eafbb845c71986351e885004f5
|
[
"MIT"
] |
permissive
|
balachia/pcSoftmaxPack
|
42165ad440263558fc3bb659aa0211c365b97967
|
760f42668dfbae21866328c4f27141c6375754da
|
refs/heads/main
| 2023-06-17T11:31:22.199299
| 2021-07-21T02:47:26
| 2021-07-21T02:47:26
| 385,530,998
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 437
|
rd
|
msik.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ms_interval.R
\name{msik}
\alias{msik}
\title{Market Share - Interval; k (exponentiated) formulation}
\usage{
msik(kd, ki, d, prec = 1024)
}
\arguments{
\item{kd}{value at focal pole of decreasing items}
\item{ki}{value at focal pole of increasing items}
\item{d}{length of interval}
\item{prec}{numerical precision used in the computation (default 1024)}
}
\description{
Market Share - Interval; k (exponentiated) formulation
}
|
c37f79c76822330d624e748b732ceeae0969e810
|
194d2083820452a6d75453f99d12f7eca518a2c7
|
/TCC final/estGAS.R
|
0809f48c445183a052bfbaf2b27e842714dbbeb5
|
[] |
no_license
|
franpallaoro/TCC
|
393affdc487ee476758b90b6e2260b70debf3611
|
89e90cd4a99288d8f7264345a7fe47005baec240
|
refs/heads/master
| 2023-04-18T22:10:00.117907
| 2021-05-11T03:36:48
| 2021-05-11T03:36:48
| 270,170,196
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 583
|
r
|
estGAS.R
|
# Fit a univariate Student-t GAS model with time-varying scale on a rolling
# window and return its PIT-style values, a simulated variance, and a
# one-step-ahead forecast of the conditional moments.
#
# Args:
#   data: matrix/data frame of return series (assets in columns).
#   i, j: first and last row of the estimation window (i:j).
#   k:    column index of the asset to model (1..N).
#   Tns:  length of the simulated path used for the simulated variance.
#   ...:  currently unused; kept for backward compatibility.
# Returns: list(u_i, var_sim, var_for).
estGAS <- function(data, i, j, k, Tns, ...){
  # Fail fast with a clear error instead of installing at run time: the
  # original called install.packages("GAS") inside the function, which
  # mutates the user's library and fails in non-interactive sessions.
  if (!requireNamespace("GAS", quietly = TRUE)) {
    stop("Package 'GAS' is required for estGAS(); please install it.",
         call. = FALSE)
  }
  library(GAS)
  # Student-t GAS with time-varying scale only (location/shape constant).
  GASSpec <- UniGASSpec(Dist = "std", ScalingType = "Identity",
                        GASPar = list(location = FALSE, scale = TRUE,
                                      shape = FALSE))
  fit <- UniGASFit(GASSpec, data[i:j, k])
  # One-step-ahead forecast of the conditional moments.
  forGAS <- UniGASFor(fit, 1)
  var_for <- forGAS@Forecast$Moments
  # Simulate a path of length Tns from the fitted model and take its
  # sample variance.
  sim <- UniGASSim(fit = fit, T.sim = Tns)
  var_sim <- var(sim@Data$vY)
  # vU from the fit -- presumably the PIT values; confirm against GAS docs.
  u_i <- as.vector(fit@Estimates$vU)
  return(list(u_i, var_sim, var_for))
}
|
271b1e46350e2a745f45cbe2bdd5c45dec7ca374
|
e979752c4498c5edf47791d8b7eaafb2730524bf
|
/sim20032009/calc/aod/sumAOD.R
|
0e28c9d98d3bfc15c6df1eeb5a37afebca7a3aee
|
[] |
no_license
|
ClaudiaGEscribano/aod_and_PV
|
0bf4d6c7398351aebdef7b9f9538246c5ee2bd49
|
77eaa4e454ce4ec4ec784795e2e89b8945bc1737
|
refs/heads/master
| 2021-03-27T15:41:09.158241
| 2018-12-10T14:58:39
| 2018-12-10T14:58:39
| 83,782,692
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,815
|
r
|
sumAOD.R
|
# Load monthly AOD component rasters (2003-2009, 84 months) for five aerosol
# species and sum them layer-wise into a total-AOD monthly series.
library(raster)
## Older .grd inputs kept for reference:
##bc <- stack("../data/AOD/aod_bc_monthly20032009.grd")
##su <- stack("../data/AOD/aod_su_monthly20032009.grd")
##ss <- stack("../data/AOD/aod_ss_monthly20032009.grd")
##sd <- stack("../data/AOD/aod_sd_monthly20032009.grd")
##or <- stack("../data/AOD/aod_or_monthly20032009.grd")
bc <- stack("../data/AOD/macc_regcm_20032009_bc.nc", varname='aero')
su <- stack("../data/AOD/macc_regcm_20032009_su.nc", varname='aero')
ss <- stack("../data/AOD/macc_regcm_20032009_ss.nc", varname='aero')
sd <- stack("../data/AOD/macc_regcm_20032009_sd.nc", varname='aero')
# FIX: this previously re-read the *_bc.nc file; load the *_or.nc component,
# matching the commented-out reference line for `or` above.
or <- stack("../data/AOD/macc_regcm_20032009_or.nc", varname='aero')
idx <- seq(as.Date("2003-01-01"), as.Date("2009-12-31"), "month")
idx2 <- seq_len(84)  # 84 months = 7 years x 12 (was the odd seq(1:84))
bc <- setZ(bc, idx2)
su <- setZ(su, idx2)
ss <- setZ(ss, idx2)
sd <- setZ(sd, idx2)
or <- setZ(or, idx2)
aod_all <- stack(bc, su, ss, sd, or)
idx3 <- rep(seq_len(84), 5)  # month index repeated once per species
aod_all <- setZ(aod_all, idx3)
# Sum the five species by month index -> one total-AOD layer per month.
suma_aod <- zApply(aod_all, by=idx3, fun='sum')
## AOD lat/lon grid coordinates (translated: "lat lon de AOD")
aodlat <- raster("../data/AOD/macc_regcm_20032009_bc.nc", varname='lat')
aodlon <- raster("../data/AOD/macc_regcm_20032009_bc.nc", varname='lon')
aodlat <- rasterToPoints(aodlat)
aodlon <- rasterToPoints(aodlon)
# Column 3 of rasterToPoints() output holds the cell value (here the
# true lon/lat of each grid cell).
aodlonlat <- cbind(aodlon[,3], aodlat[,3])
# Specify the lonlat as spatial points with projection as long/lat
aodlonlat <- SpatialPoints(aodlonlat, proj4string = CRS("+proj=longlat +datum=WGS84"))
# Target Lambert conformal conic projection of the RegCM domain.
mycrs <- CRS("+proj=lcc +lat_1=43 +lat_2=43 +lat_0=43 +lon_0=15 +k=0.684241 +units=m +datum=WGS84 +no_defs")
paodlonlat <- spTransform(aodlonlat, CRSobj = mycrs)
paodlonlat
extent(paodlonlat)
# Stamp the projected CRS/extent onto the total-AOD stack.
projection(suma_aod) <- mycrs
extent(suma_aod) <- extent(paodlonlat)
## map (country borders)  (translated: "mapa")
# NOTE(review): map()/map2SpatialPolygons() come from the maps/mapdata/
# maptools family, but no library() call for them is visible here -- confirm
# they are attached elsewhere before this script runs.
crs.lonlat <- CRS("+proj=longlat +datum=WGS84")
ext <- as.vector(extent(projectExtent(suma_aod, crs.lonlat)))
boundaries <- map('worldHires', fill=TRUE, interior=FALSE,exact=FALSE, xlim=ext[1:2], ylim= ext[3:4], plot=FALSE)
# Keep only the country part of "country:region" polygon names.
IDs <- sapply(strsplit(boundaries$names, ":"), function(x) x[1])
boundaries_sp<- map2SpatialPolygons(boundaries, IDs=IDs, proj4string=crs.lonlat)
boundaries_lcc <- spTransform(boundaries_sp, mycrs)
border_aod <- as(boundaries_lcc, 'SpatialLines') ## does not work (original note: "no funciona") -- TODO verify
save(border_aod, file='border_aod.Rdata')
#########################
# Persist the monthly total-AOD stack.
writeRaster(suma_aod, filename='AOD_total_monthly20032009.grd', overwrite=TRUE)
## Total yearly AOD: re-index by date, then average months within each year.
suma_aod <- setZ(suma_aod, idx)
year <- function(x) as.numeric(format(x, '%y'))
suma_aodY <- zApply(suma_aod, by=year, 'mean')
writeRaster(suma_aodY, filename='AOD_total_yearly20032009.grd', overwrite=TRUE)
## Annual cycle of AOD: average each calendar month across the 7 years.
month <- function(x) as.numeric(format(x, '%m'))
suma_aodCiclo <- zApply(suma_aod, by=month, 'mean')
writeRaster(suma_aodCiclo, filename='AOD_total_ciclo20032009.grd', overwrite=TRUE)
## SEASONAL means: DJF, MAM, JJA, SON.
# Layer indices of calendar month k across the 7 years: k, k+12, ..., k+72.
monthIndex <- function(k){
  seq(k, by = 12, length.out = 7)
}
lista <- list()
for (i in 1:12) lista[[i]] <- monthIndex(i)
## One RasterBrick per calendar month (all 7 years of that month).
Meses <- lapply(lista, FUN=function(x) subset(suma_aod, x))
enero <- Meses[[1]]       # January
febrero <- Meses[[2]]     # February
diciembre <- Meses[[12]]  # December
DJF_aod <- stack(enero, febrero, diciembre)
DJF_aod <- mean(DJF_aod)
writeRaster(DJF_aod, filename='DJF_aod.grd')
marzo <- Meses[[3]]   # March
abril <- Meses[[4]]   # April
mayo <- Meses[[5]]    # May
MAM_aod <- stack(marzo, abril, mayo)
MAM_aod <- mean(MAM_aod)
writeRaster(MAM_aod, filename='MAM_aod.grd')
junio <- Meses[[6]]   # June
julio <- Meses[[7]]   # July
agosto <- Meses[[8]]  # August
JJA_aod <- stack(junio, julio, agosto)
JJA_aod <- mean(JJA_aod)
writeRaster(JJA_aod, filename='JJA_aod.grd')
septiembre <- Meses[[9]]   # September
octubre <- Meses[[10]]     # October
noviembre <- Meses[[11]]   # November
# FIX: SON previously stacked December (diciembre) instead of November,
# so the autumn mean was computed from the wrong months.
SON_aod <- stack(septiembre, octubre, noviembre)
SON_aod <- mean(SON_aod)
writeRaster(SON_aod, filename='SON_aod.grd')
|
13a1a4b316bb19c5a2aa18e20b36eb97eab50ad8
|
c5a59ef72d1872a6fb6cf8bde2a7798967c66d5b
|
/R/psi2.r
|
3fba18b078decc903eeefd842dfe6ee3b42e0182
|
[] |
no_license
|
hjanime/hm-splice-pipe
|
9ddcc3aa4e678dca068f125cda67db6f6eb24a45
|
edafa685dd9a079738e635d5d60927a6a7f4981d
|
refs/heads/master
| 2021-01-21T09:11:08.209177
| 2014-07-18T14:23:52
| 2014-07-18T14:23:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,948
|
r
|
psi2.r
|
# Command-line script: density plots and 2-d density/regression plots of
# splice-site PSI-like statistics by biotype, with donor/acceptor splice-site
# strength scores. Reads two TSV inputs plus a strength table; writes a PDF.
suppressPackageStartupMessages(library("ggplot2"))
suppressPackageStartupMessages(library("optparse"))
suppressPackageStartupMessages(library("plyr"))
option_list <- list(make_option(c("-a", "--inp1"), help="tsv input 1"),
                    make_option(c("-b", "--inp2"), help="tsv input 2"),
                    make_option(c("-s", "--sss"), help="strength"),
                    make_option(c("-n", "--name"), help="name"),
                    make_option(c("-m", "--symb"), help="symbol"),
                    make_option(c("-o", "--out"), help="sts output"))
opt <- parse_args(OptionParser(option_list=option_list))
# Project helpers: ff, the_alpha, normal_w/h, logit, factor_biotype,
# factor_species, fsymbol, my_colour_palette1, look_x_* come from these files.
source("R/definitions.r");
source("R/logit.r")
source("R/graphics.r")
set_ggplot_theme()
data1 = read.delim(opt$inp1)
data2 = read.delim(opt$inp2)
rbind(data1, data2) -> data
# Row names look like "<chr>_<donPos>_<accPos>_<strand>"; rebuild donor and
# acceptor keys "<chr>_<pos>_<strand>" from fields 1,2,4 and 1,3,4 -- confirm
# against the upstream file format.
unlist(lapply(strsplit(rownames(data),"_"),function(x){paste(x[1],x[2],x[4],sep="_")})) -> data$don
unlist(lapply(strsplit(rownames(data),"_"),function(x){paste(x[1],x[3],x[4],sep="_")})) -> data$acc
strength = read.delim(opt$sss, col.names=c('chr_pos_str','Boundary','Usage','Biotype','Level','score','cons'))
# Z-score the strength separately within each boundary class ("52", "32").
strength$score[strength$Boundary == "52"] = scale(strength$score[strength$Boundary == "52"])
strength$score[strength$Boundary == "32"] = scale(strength$score[strength$Boundary == "32"])
# Attach donor strength (score.x) then acceptor strength (score.y).
merge(merge(data, strength, by.x='don', by.y='chr_pos_str'), strength[,c('chr_pos_str','score')], by.x='acc', by.y='chr_pos_str') -> df1
# Keep level-1 annotations with non-degenerate means (0 < mean < 1).
df = subset(df1, Biotype!='other' & Level==1 & mean>0 & mean<1)
df$Biotype = factor_biotype(df$Biotype)
name = factor_species(opt$name)
symbol = fsymbol(opt$symb)
pdf(opt$out, onefile=T, width=normal_w, height = normal_h)
# Page 1: density of logit(mean) by biotype.
p <- ggplot(df,aes(x=logit(mean), colour=Biotype)) + xlab(bquote(paste('logit AVG ',.(symbol), ', ', .(as.character(name))))) + ylab("")
p <- p + my_colour_palette1()
p + geom_density(alpha=the_alpha, size=1, adjust=2) #+ look_x_logit() + theme(axis.text.x = element_text(angle=-90,hjust=0))
print(count(df,c('Biotype')))
# Page 2: density of (scaled) log10 variance by biotype.
p <- ggplot(df,aes(x=ff*log10(var), colour=Biotype)) + xlab(bquote(paste('SD ',.(symbol), ', ', .(as.character(name))))) + ylab("")
p <- p + my_colour_palette1()
p + geom_density(alpha=the_alpha, size=1, adjust=2) + look_x_sd()
# Page 3: 2-d density of combined strength vs logit(mean), with linear fit.
q <- ggplot(df,aes(x=logit(mean), y=score.x+score.y)) + xlab(bquote(paste('logit AVG ',.(symbol), ', ', .(as.character(name))))) + ylab(bquote(S[D]+S[A])) #+ look_x_logit() + theme(axis.text.x = element_text(angle=-90,hjust=0))
cf <- coef(lm(score.x+score.y~logit(mean),data=subset(df,mean>0 & mean<1)))
q + stat_density2d(size=1) + geom_abline(intercept=cf[1], slope=cf[2], linetype="dashed",colour='red')
# Page 4: same, against log10 variance (slope rescaled by ff for the x axis).
q <- ggplot(df,aes(x=ff*log10(var), y=score.x+score.y)) + xlab(bquote(paste('SD ',.(symbol), ', ', .(as.character(name))))) + ylab(bquote(S[D]+S[A]))
cf <- coef(lm(score.x+score.y~log10(var),data=subset(df,var>0)))
q + stat_density2d(size=1) + geom_abline(intercept=cf[1], slope=cf[2]/ff, linetype="dashed",colour='red') + look_x_sd()
dev.off()
|
f66b4985f58b564f7d6f535e3600541addb711c4
|
8118a0fa472da408c486d532a45f3515e7dc3ef2
|
/tests/testthat.R
|
b98e99ffa6451acfd3aec123680eb39056e78831
|
[
"MIT"
] |
permissive
|
mdt-ds/slotR
|
7129c762ef611c6bca00eccea2a6efe3606ab710
|
7b23a731abd0aa714c46cee98b61a032cbe8250c
|
refs/heads/master
| 2023-04-28T21:38:41.701400
| 2021-05-23T16:44:45
| 2021-05-23T16:44:45
| 314,540,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Entry point for R CMD check: run the slotR package's testthat suite.
library(testthat)
library(slotR)
test_check("slotR")
|
4fbe69c30cb27019761ef37eafdce40a12dcfab2
|
0368be8d75ccc042e198d800584c2300754a7eb2
|
/R/myApply.R
|
dc2e2a7165b90ab5a8f080551fd8a6ddc65bff88
|
[] |
no_license
|
JWooll/Homework3
|
5eb8460352dcd85d4208a99b2e14f58989121af8
|
3cf19e013cda5aec4b397aa824f167427ac13d3f
|
refs/heads/master
| 2020-04-08T00:51:00.088880
| 2018-03-08T01:22:43
| 2018-03-08T01:22:43
| 124,233,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,019
|
r
|
myApply.R
|
#' A simple homemade apply function
#'
#' Applies a user-supplied function over the rows (MARGIN = 1) or columns
#' (MARGIN = 2) of a 2-d matrix, forwarding any extra arguments, and
#' simplifies the collected results.
#'
#' @param X a matrix (any object with exactly two dimensions)
#' @param MARGIN which dimension to apply over: 1 = rows, 2 = columns
#' @param FUN the function applied to each row/column
#' @param ... additional parameters passed on to \code{FUN}
#' @return the collected results, simplified via \code{simplify2array}
#' @export
myApply <- function(X, MARGIN, FUN, ...)
{
  if (length(dim(X)) != 2)
    stop("matrix is not 2d")
  if (!(MARGIN %in% c(1, 2)))
    stop("Margin is not in 1 or 2")
  f <- match.fun(FUN)
  n <- dim(X)[MARGIN]
  # Preallocate instead of growing the list inside the loop.
  result <- vector("list", n)
  if (MARGIN == 1) {
    # FIX: the original passed X[i] -- the i-th element in column-major
    # order, a single scalar -- instead of the full row X[i, ].
    for (i in seq_len(n))
      result[[i]] <- f(X[i, ], ...)
  } else {
    for (j in seq_len(n))
      result[[j]] <- f(X[, j], ...)
  }
  simplify2array(result)
}
|
14b4e76ff948043972074dfb907c2bd82bb36fae
|
845a4db68eebe70d5c204fbad2dd27cabf1908df
|
/R/compare.R
|
7a223028f98a1ee323c9fc2dff93a63d5268898b
|
[] |
no_license
|
jashu/beset
|
6b1a6d8340b887a3628d0db6563bcdf53b4c709c
|
703e4e7da70185d279c4a60e76207ff2dae91103
|
refs/heads/master
| 2023-05-03T20:22:00.304497
| 2023-04-18T18:23:26
| 2023-04-18T18:23:26
| 49,987,418
| 6
| 0
| null | 2021-04-13T11:36:35
| 2016-01-19T22:24:12
|
R
|
UTF-8
|
R
| false
| false
| 7,308
|
r
|
compare.R
|
#' Compare Predictive Performance of Two Models
#'
#' @param yhat1 A data frame consisting of cross-validated predictions from a
#'  benchmark model, an object containing such a data frame, e.g., a
#'  "cross_valid" object returned by \code{\link{validate}}, or an object that
#'  can be passed to \code{\link{validate}}.
#'
#' @param yhat2 An object of the same type as \code{yhat1} to be compared
#'
#' @param n_rep \code{Integer} giving the number of bootstrap replicates to
#' perform for each repetition of cross-validated predictions. For example,
#' if \code{yhat1} and \code{yhat2} contain 10 columns of predictions, the
#' default value of \code{n_rep = 1000} will result in
#' \eqn{1000 \times 10 = 10,000} replicates total.
#'
#' @param conf Confidence level for the difference between model performance.
#'
#' @inheritParams beset_glm
#'
#' @import parallel
#' @import purrr
#' @export
compare <- function(yhat1, yhat2, n_rep = 1000, conf = 0.95,
                    parallel_type = NULL, n_cores = NULL, cl = NULL,...){
  # S3 generic: dispatches on the class of `yhat1` to one of the methods
  # below (cross_valid, numeric, beset, lm, zeroinfl) or compare.default.
  UseMethod("compare")
}
#' @export
#' @describeIn compare S3 method for class 'cross_valid'
compare.cross_valid <- function(
  yhat1, yhat2, n_rep = 1000, conf = 0.95, parallel_type = NULL, n_cores = NULL,
  cl = NULL, ...
){
  # Both cross-validation objects must have been fit to the same responses.
  y <- yhat1$parameters$y
  if(!all(y == yhat2$parameters$y)){
    stop("Observed responses for `yhat1` and `yhat2` do not match")
  }
  # Point estimates: mean of each CV performance statistic, per model.
  out <- list(Model1 = map_dbl(yhat1$stats, "mean"),
              Model2 = map_dbl(yhat2$stats, "mean"))
  out$Delta <- out$Model2 - out$Model1
  # Delegate the bootstrap of the metric differences to compare.default,
  # forwarding each model's family/theta and zero-inflation mu/phi columns.
  compare.default(
    yhat1 = yhat1$predictions,
    yhat2 = yhat2$predictions,
    y = y, n_rep = n_rep, conf = conf,
    family = c(yhat1$parameters$family, yhat2$parameters$family),
    theta = c(yhat1$parameters$theta, yhat2$parameters$theta),
    mu = list(yhat1$predictions$mu, yhat2$predictions$mu),
    phi = list(yhat1$predictions$phi,yhat2$predictions$phi),
    parallel_type = parallel_type, n_cores = n_cores, cl = cl,
    out = out
  )
}
# Workhorse for all compare() methods: bootstrap the difference in
# cross-validated prediction metrics between two models.
#
# yhat1, yhat2  predictions for the benchmark and comparison models; vectors
#               or data frames of repeated CV predictions (same dimensions)
# n_rep         number of bootstrap replicates
# conf          confidence level for the percentile bootstrap interval
# y             observed responses (same number of rows as the predictions)
# family        distribution family, length 1 or 2 (one per model)
# mu, phi       per-model zero-inflation parameter columns (lists), if any
# theta         dispersion parameter(s) for negative-binomial-type families
# ...           may carry a precomputed `out` list of point estimates
#               (supplied by compare.cross_valid)
# Returns a "predictive_gain" object: per-model metrics, their difference,
# a bootstrap CI on the difference, and the raw replicate differences.
compare.default <- function(
  yhat1, yhat2, n_rep = 1000, conf = 0.95, parallel_type = NULL, n_cores = NULL,
  cl = NULL, y, family = "gaussian", mu = NULL, phi = NULL, theta = NULL, ...
){
  extra_args <- list(...)
  out <- extra_args$out
  dim_yhat1 <- dim(yhat1); dim_yhat2 <- dim(yhat2)
  if(!identical(dim_yhat1, dim_yhat2)){
    stop("`yhat1` and `yhat2` must have the same dimensions")
  }
  if(!(NROW(y) == NROW(yhat1) && NROW(y) == NROW(yhat2) &&
       NROW(yhat1) == NROW(yhat2))){
    stop("`y`, `yhat1`, and `yhat2` must have the same number of observations")
  }
  if(length(family) == 1) family <- rep(family, 2)
  # Hierarchy of families for comparing models fit under different
  # distributions: compare predictions under the distribution with more
  # parameters.
  if(family[1] != family[2]){
    if(any(family == "zinb" | family == "negbin") & any(family == "zip")){
      family[family == "zip"] <- "zinb"
    } else if(any(family == "zinb" | family == "negbin")){
      family[] <- "negbin"
    }
  }
  if(length(theta) == 1) theta <- rep(theta, 2)
  if(is.null(n_cores) || n_cores > 1){
    parallel_control <- setup_parallel(
      parallel_type = parallel_type, n_cores = n_cores, cl = cl)
    have_mc <- parallel_control$have_mc
    n_cores <- parallel_control$n_cores
    cl <- parallel_control$cl
  }
  # Point estimates for each model, unless the caller supplied them.
  if(is.null(out)){
    out <- list(
      Model1 = predict_metrics_(
        y = y, y_hat = yhat1, family = family[1], theta = theta[1],
        mu = mu[[1]], phi = phi[[1]]
      ) %>% as_vector,
      Model2 = predict_metrics_(
        y = y, y_hat = yhat2, family = family[2], theta = theta[2],
        mu = mu[[2]], phi = phi[[2]]
      ) %>% as_vector
    )
    out$Delta <- out$Model2 - out$Model1
  }
  # Bootstrap the metric differences, in parallel where available.
  predictive_gain <- if(n_cores > 1L){
    if(have_mc){
      mclapply(seq_len(n_rep), resample_pred_diff, y = y, yhat1 = yhat1,
               yhat2 = yhat2, family = family, theta = theta, mu = mu,
               phi = phi, mc.cores = n_cores)
    } else {
      # FIX: parLapply was called without its `X` argument (the replicate
      # indices), i.e. parLapply(cl, resample_pred_diff, ...), which errors.
      # The signature is parLapply(cl, X, fun, ...).
      parLapply(cl, seq_len(n_rep), resample_pred_diff, y = y, yhat1 = yhat1,
                yhat2 = yhat2, family = family, theta = theta, mu = mu,
                phi = phi)
    }
  } else {
    lapply(seq_len(n_rep), resample_pred_diff, y = y, yhat1 = yhat1,
           yhat2 = yhat2, family = family, theta = theta, mu = mu, phi = phi)
  }
  predictive_gain <- transpose(predictive_gain) %>% simplify_all
  # Percentile bootstrap CI at the requested confidence level.
  a <- (1 - conf)/2
  a <- c(a, 1 - a)
  ci_gain <- map(predictive_gain, ~ quantile(.x, probs = a, na.rm = TRUE))
  out$`95% CI` <- ci_gain
  out$predictive_gain <- predictive_gain
  # Rename the CI slot to reflect the actual confidence level.
  names(out)[4] <- paste(format(conf * 100, digits = 2), "%", " CI", sep = "")
  structure(out, class = "predictive_gain", family = family[1])
}
# One bootstrap replicate: resample observations (with replacement, seeded by
# the replicate index for reproducibility), recompute the prediction metrics
# for both models on the resampled rows, and return the metric differences
# (model 2 minus model 1).
# `yhat*` may be a single vector of predictions or a data frame of repeated
# CV predictions (one column per repetition); zero-inflated families also
# resample the matching mu/phi columns.
resample_pred_diff <- function(
  seed, y, yhat1, yhat2, family, theta = NULL, mu = NULL, phi = NULL
){
  set.seed(seed)
  i <- sample(seq_along(y), replace = TRUE)
  # needs to be modified to resample mu and phi also, if they are present
  pred1 <- if(is.vector(yhat1)){
    # "^zi" families (zip/zinb) need the zero-inflation mu/phi columns.
    if(grepl("^zi", family[1])){
      predict_metrics_(
        y[i], yhat1[i], family[1], theta[1], mu[[1]][i], phi[[1]][i]
      )
    } else predict_metrics_(y[i], yhat1[i], family[1], theta[1])
  } else {
    # Data-frame case: compute metrics per repetition column, then collect
    # each metric across repetitions into one vector.
    if(grepl("^zi", family[1])){
      pmap(list(yhat1, mu[[1]], phi[[1]]), function(yhat, m, p){
        predict_metrics_(y[i], yhat[i], family[1], theta[1], m[i], p[i])
      }) %>% transpose %>% simplify_all
    } else {
      map(yhat1, ~ predict_metrics_(y[i], .x[i], family[1], theta[1])) %>%
        transpose %>% simplify_all
    }
  }
  # Same computation for the second model.
  pred2 <- if(is.vector(yhat2)){
    if(grepl("^zi", family[2])){
      predict_metrics_(
        y[i], yhat2[i], family[2], theta[2], mu[[2]][i], phi[[2]][i]
      )
    } else predict_metrics_(y[i], yhat2[i], family[2], theta[2])
  } else {
    if(grepl("^zi", family[2])){
      pmap(list(yhat2, mu[[2]], phi[[2]]), function(yhat, m, p){
        predict_metrics_(y[i], yhat[i], family[2], theta[2], m[i], p[i])
      }) %>% transpose %>% simplify_all
    } else {
      map(yhat2, ~ predict_metrics_(y[i], .x[i], family[2], theta[2])) %>%
        transpose %>% simplify_all
    }
  }
  # Difference per metric: model 2 minus model 1.
  map2(pred1, pred2, ~ .y - .x)
}
#' @export
#' @describeIn compare S3 method for class 'numeric'
compare.numeric <- function(
  yhat1, yhat2, n_rep = 1000, conf = 0.95, parallel_type = NULL, n_cores = NULL,
  cl = NULL, y, ...){
  # Plain numeric prediction vectors: forward directly to the default method.
  compare.default(
    yhat1, yhat2, n_rep, conf, parallel_type, n_cores, cl, y, ...
  )
}
#' @export
#' @describeIn compare two 'beset' class models
compare.beset <- function(
  yhat1, yhat2, n_rep = 1000, conf = 0.95, parallel_type = NULL, n_cores = NULL,
  cl = NULL, ...
){
  # Cross-validate both fitted models first, then compare the CV predictions.
  compare.cross_valid(
    validate(yhat1, ...), validate(yhat2, ...), n_rep, conf, parallel_type,
    n_cores, cl, ...
  )
}
#' @export
#' @describeIn compare two 'glm' or 'lm' class models
compare.lm <- function(
  yhat1, yhat2, n_rep = 1000, conf = 0.95, parallel_type = NULL, n_cores = NULL,
  cl = NULL, ...
){
  # Cross-validate both fitted models first, then compare the CV predictions.
  compare.cross_valid(
    validate(yhat1, ...), validate(yhat2, ...), n_rep, conf, parallel_type,
    n_cores, cl, ...
  )
}
#' @export
#' @describeIn compare two 'zeroinfl' class models
compare.zeroinfl <- function(
  yhat1, yhat2, n_rep = 1000, conf = 0.95, parallel_type = NULL, n_cores = NULL,
  cl = NULL, ...
){
  # FIX: the signature previously omitted n_rep/conf/parallel_type/n_cores/cl
  # even though the body references them, so calling this method raised
  # "object not found" errors. The signature now matches the sibling
  # compare.lm / compare.beset methods (defaults unchanged, so existing
  # two-argument calls keep working).
  compare.cross_valid(
    validate(yhat1, ...), validate(yhat2, ...), n_rep, conf, parallel_type,
    n_cores, cl, ...
  )
}
|
4d77e8319f90e812d5c4b5d304ad16f966f85f36
|
b7f920a53b7e4dd49e2cd33de5d86450849a0873
|
/src/global.R
|
d54aff99cce5f60f5394499b994f5dd708571f50
|
[] |
no_license
|
remn123/ggformapp
|
37e69c28137fd5de718c9bde736a171cb5e85f60
|
4cd8f1b4f1a9d5841f1f95fd7f3dfd7abd2930bd
|
refs/heads/master
| 2020-06-07T09:07:05.998460
| 2019-06-20T23:07:01
| 2019-06-20T23:07:01
| 192,983,502
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 257
|
r
|
global.R
|
# Shiny app globals: packages, demo data, and a helper for computed columns.
# NOTE(review): rm(list = ls()) wipes the user's workspace when this file is
# sourced; avoid in shared code.
rm(list=ls())
library(shiny)
library(shinydashboard)
library(ggplot2)
library(ggformula)
data("iris")
# Add a computed column named `new_var` to `df`, where `s` is an R expression
# supplied as a string (e.g. "Sepal.Length / Sepal.Width").
# NOTE(review): built on eval(parse(text = ...)) -- fragile and unsafe on
# untrusted input; also assumes mutate() is on the search path (no dplyr
# library() call is visible here) -- confirm before reuse.
get_df_plot = function(df, s, new_var){
  # Build the text "new_var = <expr>" ...
  s <- paste0(new_var, " = ", s)
  # ... then splice it into the template call `mutate(df, z = s)` by textual
  # substitution and evaluate the result.
  q = quote(mutate(df, z = s))
  eval(parse(text=sub("z = s", s, deparse(q))))
}
|
dc8fa40504d1459bb1b41b486243c22dd19da53a
|
d8c483f808177685eac6f0e3355c903949fa0589
|
/dev/performance/methods/fastreduction-dirs/pr03_globalReduction.R
|
3c1db735753f00a2898cfbeea029d92148f8f1a5
|
[] |
no_license
|
lgarreta/Reduction
|
263d25470302ed33bad3e674d750d74667a6048c
|
ca7a207c05382d2c85898b0cb85ce5e50e06aeb5
|
refs/heads/master
| 2023-06-09T14:57:48.113691
| 2023-06-03T22:43:03
| 2023-06-03T22:43:03
| 164,315,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,434
|
r
|
pr03_globalReduction.R
|
#!/usr/bin/Rscript
#!/home/mmartinez/bin/Rscript
# LOG:
# r2.0 (Aug17): Changed distance matrix calculation, now direct call using proxy::dist
# r1.4 (Aug16): Modified clustering with initial medoids including the last pdb
# r1.3 (Aug13): Fixed error when it gets number of pdbs < K
# r1.2 (Aug3): Extracts K medoids and users TM-score instead RMSD
#----------------------------------------------------------
# Makes a detailed global clustering of protein conformations
# from the representatives resulting from the local clustering
# INPUT: inputDir filename with the protein conformations
# outputDir filename to write the results
#
# OUTPUT: Medoids for each bin cluster and their distance matrix
#----------------------------------------------------------
USAGE="USAGE: global.R <inputDir> <outputDir> <tmpDir> <K> <num cores>\n"
#library (bio3d)
library (parallel)
library (cluster)
source ("pmat-distance-matrix.R") # parallel distance matrix calcPmat
dyn.load ("tmscorelg.so")
options (width=300)
#THRESHOLD = 1.3
#NCORES= 1
#----------------------------------------------------------
# Main function
#----------------------------------------------------------
# Entry point: parse CLI args, then reduce every bin file (<INPUTDIR>/*.pdbs)
# to K medoid conformations in parallel and write the combined result.
# NOTE(review): USAGE advertises 5 arguments including <tmpDir>, but the code
# parses only 4 (K is args[3], cores args[4]) -- the usage string and the
# parsing disagree; confirm the intended CLI.
main <- function () {
  args <- commandArgs (TRUE)
  #args = c("io/out1000/outbins", "io/out1000/outrepr", "1.5", "1")
  if (length (args) < 4){
    cat (USAGE)
    cat (length(args))
    quit (status=1)
  }
  INPUTDIR  = args [1]   # directory holding the per-bin .pdbs list files
  OUTPUTDIR = args [2]   # where pdbsGlobal.pdbs is written
  K         = as.numeric (args [3])   # medoids to keep per bin
  NCORES    = as.numeric (args [4])   # parallel workers for mclapply
  #createDir (OUTPUTDIR)
  cat ("\n\n\n>>>>>>>>>>>>>>>>>>>> Main of Global Reduction...\n")
  cat ("args: ", args, "\n")
  cat ("\n\n\nReading Bin path names...\n")
  listOfBinPaths  = list.files (INPUTDIR, pattern=".pdbs", full.names=T)
  # One reduceGlobal() call per bin file, fanned out across NCORES workers.
  clusteringResults = mclapply (listOfBinPaths, reduceGlobal, K, mc.cores=NCORES)
  writeClusteringResults (clusteringResults, OUTPUTDIR)
}
#----------------------------------------------------------
# Reduction function to reduce a single bin
# Clustering around medoids. Return k medoid for the bin
#----------------------------------------------------------
# Reduce one bin of PDB conformations to (up to) K medoids via PAM
# clustering on a TM-score distance matrix. Returns the medoid file paths.
# NOTE(review): assigns listOfPDBPaths with `<<-` because calculateTmscore
# (the proxy::dist callback) reads it from the enclosing environment; this
# global coupling is safe under mclapply only because each forked worker
# gets its own copy -- confirm before switching to a non-fork backend.
reduceGlobal <- function (inputBinPath, K) {
  cat ("\n>>> Global Reducing ", inputBinPath )
  # Fast clustering for bin, writes representatives to clusDir
  #listOfPDBPaths <<- list.files (inputBinPath, pattern=".pdb", full.names=T)
  # The .pdbs file lists file names relative to the sibling "pdbs" directory.
  pdbsDir = paste (dirname (dirname (inputBinPath)),"/pdbs",sep="")
  listOfPDBPaths <<- paste (pdbsDir, readLines(inputBinPath), sep="/")
  # Clustering around medoids. Degenerate cases: a single pdb is its own
  # medoid; up to K pdbs are all kept.
  nPdbs = length (listOfPDBPaths)
  if (nPdbs < 2)
    medoids = 1
  else if (nPdbs <= K)
    medoids = seq (nPdbs)
  else {
    binDir = inputBinPath
    cat ("\n>>> Calculating distance matrix", inputBinPath,"\n")
    distanceMatrix <- getTMDistanceMatrix (listOfPDBPaths)
    #distanceMatrix <- calcPmat (listOfPDBPaths,2,"")
    # Initial medoids evenly spaced from the last pdb down to the first,
    # so the final conformation is always among the starting medoids.
    split <- -1 * nPdbs / K
    initialMedoids <- round (seq (nPdbs, 1, split))
    pamPDBs <- pam (distanceMatrix, k=K, diss=F, medoids=initialMedoids)
    medoids <- pamPDBs$id.med
  }
  medoidName <- listOfPDBPaths [medoids]
  return (medoidName)
}
#--------------------------------------------------------------
# Calculate pairwise using TM-score distance
#--------------------------------------------------------------
# Pairwise distance matrix over the n pdbs, using TM-score as the metric.
# proxy::dist is fed a column of indices 1..n; calculateTmscore maps each
# index pair back to the file paths in the (global) listOfPDBPaths.
getTMDistanceMatrix <- function (listOfPDBPaths) {
  n = length (listOfPDBPaths)
  mat = matrix (seq (1,n))
  distMat = proxy::dist (mat, method=calculateTmscore)
  return (distMat)
}
#----------------------------------------------------------
# Calculate the TM-scores using a external tool "TMscore"
#----------------------------------------------------------
# TM-score between two pdbs, computed by the external Fortran routine
# "gettmscore" (loaded from tmscorelg.so). Arguments arrive as indices into
# the global listOfPDBPaths (set by reduceGlobal) and are mapped to paths.
# NOTE(review): resTMscore = 0.4 appears to be only the preallocated output
# slot for .Fortran, not a threshold -- confirm against the Fortran source.
calculateTmscore <- function (targetProtein, referenceProtein) {
  targetProtein = listOfPDBPaths [[targetProtein]]
  referenceProtein = listOfPDBPaths [[referenceProtein]]
  results = .Fortran ("gettmscore", pdb1=targetProtein, pdb2=referenceProtein, resTMscore=0.4)
  tmscoreValue = results$resTMscore
  return (tmscoreValue)
}
#----------------------------------------------------------
# Make links of the selected PDBs into the output dir
#----------------------------------------------------------
# Flatten the per-bin medoid lists into one sorted file list and write it,
# prefixed with "pdbs/", to <outputDir>/pdbsGlobal.pdbs (one path per line).
# The commented-out code shows an earlier approach that symlinked each pdb.
writeClusteringResults <- function (clusteringResults, outputDir) {
  listOfPDBs = c ()
  for (binResults in clusteringResults)
    for (pdbPath in binResults) {
      #cmm <- sprintf ("ln -s %s/%s %s/%s", getwd(), pdbPath, outputDir, basename (pdbPath))
      # NOTE(review): grows the vector inside the loop; fine for small
      # result sets, consider unlist(clusteringResults) otherwise.
      listOfPDBs = append (listOfPDBs, pdbPath)
      #cat (paste (">>> ", cmm, "\n"))
      #system (cmm)
    }
  filename = sprintf ("%s/%s", outputDir, "pdbsGlobal.pdbs")
  listOfPDBs = sort (listOfPDBs)
  listOfPDBs = paste ("pdbs/", basename(listOfPDBs),sep="")
  write.table (listOfPDBs, file=filename, sep="\n",col.names=F, row.names=F, quote=F)
}
#----------------------------------------------------------
# Create dir, if it exists the it is renamed old-XXX
#----------------------------------------------------------
#----------------------------------------------------------
# Create a directory; if it already exists it is first renamed to
# "old-<name>" (recursively shifting any previous "old-<name>" aside).
#----------------------------------------------------------
createDir <- function (newDir) {
  # Recursively move an existing directory out of the way as "old-<name>".
  checkOldDir <- function (aDir) {
    name <- basename (aDir)
    path <- dirname (aDir)
    if (dir.exists (aDir)) {           # `== T` dropped; dir.exists is logical
      oldDir <- file.path (path, paste0 ("old-", name))
      if (dir.exists (oldDir)) {
        checkOldDir (oldDir)
      }
      file.rename (aDir, oldDir)
    }
  }
  checkOldDir (newDir)
  # FIX: use dir.create() instead of system("mkdir ...") -- portable across
  # platforms and safe for paths containing spaces or shell metacharacters.
  dir.create (newDir)
}
#--------------------------------------------------------------
#--------------------------------------------------------------
main ()
|
2b27c5666a144f29a62e9e02a66f23615f112aef
|
53e510145d7e0a510ca208b0f893d1abe9683f8b
|
/R/clean_bowling_data.R
|
691bdc83713458fd2f6ab68a1600d2f86403616f
|
[] |
no_license
|
kmaheshkulkarni/cricketdata
|
a8c95bd8ec70a13ef8916d4e66f5af86b09e458f
|
589bb1a6130fa3120be6832281d6cdcb1f5105cc
|
refs/heads/master
| 2020-04-24T08:57:36.597335
| 2019-07-03T09:22:27
| 2019-07-03T09:22:27
| 171,847,423
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,374
|
r
|
clean_bowling_data.R
|
# Function to clean bowling data.
# Works with career or innings data
#
# Takes a raw ESPNcricinfo-style bowling table `x` and returns it with:
#  - readable column names,
#  - columns coerced to proper types,
#  - Average / Economy / StrikeRate recomputed from Runs/Wickets/Balls
#    (avoiding the source's rounding),
#  - career Span split into Start/End years, or (innings data) Date parsed
#    and Opposition cleaned,
#  - Country extracted from "Player (Country)" when present,
#  - columns reordered to a fixed layout.
clean_bowling_data <- function(x)
{
  # Make names easier to interpret
  vars <- colnames(x)
  vars[vars=="Mat"] <- "Matches"
  vars[vars=="Inns"] <- "Innings"
  vars[vars=="Mdns"] <- "Maidens"
  vars[vars=="Wkts"] <- "Wickets"
  vars[vars=="BBI"] <- "BestBowlingInnings"
  vars[vars=="BBM"] <- "BestBowlingMatch"
  vars[vars=="Ave"] <- "Average"
  vars[vars=="Econ"] <- "Economy"
  vars[vars=="SR"] <- "StrikeRate"
  vars[vars=="4"] <- "FourWickets"
  vars[vars=="5"] <- "FiveWickets"
  vars[vars=="10"] <- "TenWickets"
  vars[vars=="Start Date"] <- "Date"
  colnames(x) <- vars
  # Fix classes for all variables
  if("Maidens" %in% vars)
    x$Maidens <- as.integer(x$Maidens)
  if("Balls" %in% vars)
    x$Balls <- as.integer(x$Balls)
  x$Runs <- as.integer(x$Runs)
  x$Wickets <- as.integer(x$Wickets)
  x$Innings <- as.integer(x$Innings)
  # The presence of a Matches column distinguishes career from innings data.
  career <- ("Matches" %in% vars)
  if(career)
  {
    x$Matches <- as.integer(x$Matches)
    # Career span "YYYY-YYYY" -> numeric Start and End years.
    if("Span" %in% vars)
    {
      x$Start <- as.integer(substr(x$Span, 1, 4))
      x$End <- as.integer(substr(x$Span, 6, 9))
    }
    if("FourWickets" %in% vars)
      x$FourWickets <- as.integer(x$FourWickets)
    if("FiveWickets" %in% vars)
      x$FiveWickets <- as.integer(x$FiveWickets)
    if("TenWickets" %in% vars)
      x$TenWickets <- as.integer(x$TenWickets)
  }
  else
  {
    # Add participation column
    # Non-"B" (did not bowl) rows get their Overs blanked out.
    if("Overs" %in% vars)
    {
      x$Participation <- participation_status(x$Overs)
      x$Overs[x$Participation!="B"] <- NA
    }
    x$Date <- lubridate::dmy(x$Date)
    # Strip the leading "v " and women's-team suffixes from opponents.
    x$Opposition <- stringr::str_replace_all(x$Opposition, "v | Women| Wmn", "")
    x$Opposition <- rename_countries(x$Opposition)
  }
  if("Overs" %in% vars)
    x$Overs <- as.numeric(x$Overs)
  # Recompute average to avoid rounding errors
  if("Average" %in% vars)
    x$Average <- x$Runs / x$Wickets
  # Recompute economy rate to avoid rounding errors
  # Cricket overs notation: the fractional digit counts balls (0-5),
  # so overs O.b -> 6*O + b balls.
  if("Balls" %in% vars)
    balls <- x$Balls
  else
    balls <- trunc(x$Overs)*6 + (x$Overs %% 1)*10
  if("Economy" %in% vars)
  {
    ER <- x$Runs / (balls/6)
    # Sanity check against the source's rounded figure before replacing it.
    differ <- round(ER,2) - as.numeric(x$Economy)
    if(any(abs(differ) > 0.05, na.rm=TRUE))
      stop("Economy rate incorrect")
    else
      x$Economy <- ER
  }
  # Recompute strike rate
  if("StrikeRate" %in% vars)
    x$StrikeRate <- balls / x$Wickets
  # Extract country information if it is present
  # This should only be required when multiple countries are included
  career <- career  # (no-op marker removed; see note below)
  country <- (length(grep("\\(", x[1,1])) > 0)
  if(country)
  {
    # NOTE(review): the character class "[a-zA-Z \\-extends]" contains the
    # stray letters "extends" -- redundant (a-zA-Z already covers them) and
    # behaviorally harmless, but presumably an editing accident; compare the
    # clean pattern three lines below.
    x$Country <- stringr::str_extract(x$Player, "\\([a-zA-Z \\-extends]+\\)")
    x$Country <- stringr::str_replace_all(x$Country, "\\(|\\)|-W", "")
    x$Country <- rename_countries(x$Country)
    x$Player <- stringr::str_replace(x$Player, "\\([a-zA-Z \\-]+\\)", "")
  }
  # Re-order and select columns
  vars <- colnames(x)
  if(career)
    varorder <- c("Player","Country","Start","End","Matches","Innings","Overs","Balls","Maidens","Runs","Wickets",
                  "Average","Economy","StrikeRate","BestBowlingInnings","BestBowlingMatch","FourWickets","FiveWickets","TenWickets")
  else
    varorder <- c("Date","Player", "Country", "Overs","Balls","Maidens","Runs","Wickets",
                  "Economy","Innings","Participation", "Opposition","Ground")
  varorder <- varorder[varorder %in% vars]
  return(x[,varorder])
}
|
1a7d1abe6deb5696d2560e21fcd7986f22101679
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.security.identity/man/cloudhsm_describe_luna_client.Rd
|
376756ea6a9f1e41d8e4fc327b85b30cecd82176
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 872
|
rd
|
cloudhsm_describe_luna_client.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudhsm_operations.R
\name{cloudhsm_describe_luna_client}
\alias{cloudhsm_describe_luna_client}
\title{This is documentation for AWS CloudHSM Classic}
\usage{
cloudhsm_describe_luna_client(ClientArn = NULL, CertificateFingerprint = NULL)
}
\arguments{
\item{ClientArn}{The ARN of the client.}
\item{CertificateFingerprint}{The certificate fingerprint.}
}
\description{
This is documentation for \strong{AWS CloudHSM Classic}. For more information, see \href{https://aws.amazon.com/cloudhsm/faqs/}{AWS CloudHSM Classic FAQs}, the AWS CloudHSM Classic User Guide, and the \href{https://docs.aws.amazon.com/cloudhsm/classic/APIReference/}{AWS CloudHSM Classic API Reference}.
See \url{https://www.paws-r-sdk.com/docs/cloudhsm_describe_luna_client/} for full documentation.
}
\keyword{internal}
|
430b622f880c1ff5860e9fa71e35ef292fdc2215
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DiceDesign/examples/coverage.Rd.R
|
67c3244e7fc7261dd50e0d7879b128f5a81ec436
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 204
|
r
|
coverage.Rd.R
|
# Example usage extracted from the DiceDesign::coverage help page.
library(DiceDesign)
### Name: coverage
### Title: Coverage
### Aliases: coverage
### Keywords: design
### ** Examples
dimension <- 2
n <- 40
# A random (uniform) design of 40 points in [0,1]^2, one row per point.
X <- matrix(runif(n*dimension),n,dimension)
# Compute the coverage criterion of the design.
coverage(X)
|
00e26d7296a5ebcb27cbd8580b173efa771b2da9
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/pangoAttrScaleNew.Rd
|
ce4f40729b87b9cf0923957b8fa17dd759477f11
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 498
|
rd
|
pangoAttrScaleNew.Rd
|
\alias{pangoAttrScaleNew}
\name{pangoAttrScaleNew}
\title{pangoAttrScaleNew}
\description{Create a new font size scale attribute. The base font for the
affected text will have its size multiplied by \code{scale.factor}.}
\usage{pangoAttrScaleNew(scale.factor)}
\arguments{\item{\verb{scale.factor}}{[numeric] factor to scale the font}}
\value{[\code{\link{PangoAttribute}}] the newly allocated \code{\link{PangoAttribute}},}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
961f8910583bc36ca8b18e601e9d5e3218633da8
|
a3864f60b8dc3a3b9af5a42547c25470e38f7bf6
|
/man/plot.sr.Rd
|
870c158cfe2f0ff8c71a1f272bb94f27892d9b46
|
[] |
no_license
|
cwmiller21/mvdalab
|
c29ffb52a41a969a1c2701ccab77901afed2fbd9
|
e9f1bea960cdf7dd2d0472581f9fe97c30bd6d4f
|
refs/heads/master
| 2021-01-18T05:24:46.133643
| 2016-02-29T10:54:45
| 2016-02-29T10:54:45
| 52,802,389
| 1
| 0
| null | 2016-02-29T15:38:22
| 2016-02-29T15:38:22
| null |
UTF-8
|
R
| false
| false
| 880
|
rd
|
plot.sr.Rd
|
\name{plot.sr}
\alias{plot.sr}
\title{Plotting function for Selectivity Ratio.}
\description{This function provides the ability to plot an object of class \code{sr}
}
\usage{
\method{plot}{sr}(x, variables = "all", ...)
}
\arguments{
\item{x}{\code{sr} object }
\item{variables}{ the number of variables to include the graph output. }
\item{\dots}{ additional arguments. Currently ignored. }
}
\details{
\code{plot.sr} is used to generates the graph of the selectivity ratio from \code{sr} objects.
}
\value{
The output of \code{plot.sr} is a graph of the selectivity ratio for the specified observation(s).
}
\author{Nelson Lee Afanador (\email{nelson.afanador@mvdalab.com})}
\examples{
data(Penta)
mod1 <- plsFit(log.RAI ~., scale = TRUE, data = Penta[, -1], ncomp = 3, contr = "contr.none",
method = "bidiagpls", validation = "oob")
sr(mod1)
plot(sr(mod1))
}
|
01a7f8aead6dd2b830171a2c8d738e41615d86c4
|
573edac85effdda60291c96f568af4bcf36833a5
|
/man/summary_sema.Rd
|
ffcc474e83f0d5ae848fbcc825131b4be89331bc
|
[] |
no_license
|
L-Ippel/SEMA
|
d71835566c17df707896bcd9ef32960c71b2c43a
|
1d0e3a48c855df704cad18c7ab6bb73d08bd4efa
|
refs/heads/master
| 2021-06-03T18:26:07.776301
| 2018-08-06T11:32:00
| 2018-08-06T11:32:00
| 38,366,963
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,969
|
rd
|
summary_sema.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summary.R
\name{summary_sema}
\alias{summary_sema}
\title{Interpreting sema output}
\usage{
summary_sema(x)
}
\arguments{
\item{x}{A sema model output.}
}
\value{
A list with sample size, number of units, the coefficients of the
fixed effects, the variance of the random effects and the residual
variance.
}
\description{
Returns a list with the current model parameter estimates
}
\details{
The output of the sema_fit functions are usually large, and
difficult to read, lists. In order to interpret the output of the sema_fit
functions, \code{summary_sema} returns a general overview of the model
parameters, the fixed effects coefficients, the random effects variances
and covariances, and the residual variance. The \code{summary_sema}
function is also incorporated within the \code{sema_fit_one} function,
such that with the argument \code{print_every} the user can see a summary
of the updated model parameters every X data points.
}
\examples{
## First we create a dataset, consisting of 2500 observations from 20
## units. The fixed effects have the coefficients 1, 2, 3, 4, and 5. The
## variance of the random effects equals 1, 4, and 9. Lastly the
## residual variance equals 4:
test_data <- build_dataset(n = 2500,
j = 20,
fixed_coef = 1:5,
random_coef_sd = 1:3,
resid_sd = 2)
## Next, we fit a simple model to these data
m1 <- sema_fit_df(formula = y ~ 1 + V3 + V4 + V5 + V6 + (1 + V4 + V5 | id),
data_frame = test_data,
intercept = TRUE)
summary_sema(m1)
}
\seealso{
\code{\link{store_fixed_coef}},
\code{\link{store_random_var}}, \code{\link{store_resid_var}},
\code{\link{ranef}}
}
\keyword{model}
\keyword{sema}
\keyword{summary}
|
8cc2ec0a9ec3550ec4e47a24623044c0d9451296
|
49d51566b41a141a0dcad5119fcf910efcdfd82d
|
/Ex.4_3.R
|
56c50d750ba4f52a016de3adbbf0c0c64a3d6174
|
[] |
no_license
|
himanshu6980/RSolvedProblems
|
460124c286f8d6814ae7f9ba762cb01dcb7e5cc4
|
d9928290279030543572005c84638bcab98c72ac
|
refs/heads/master
| 2020-03-06T18:42:59.866055
| 2018-03-27T16:10:59
| 2018-03-27T16:10:59
| 127,012,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 512
|
r
|
Ex.4_3.R
|
# Sorting and subsetting data frames.
# Reads "yield.txt" (expected to contain columns `death` and `status`) and
# demonstrates row ordering, logical row selection, and removal of rows
# with missing values.
# Fixes: dropped attach() (modifies the search path and invites name
# clashes) in favour of explicit `$` references; header=T -> header=TRUE.
sampledata_file <- read.table("yield.txt", header = TRUE)
# Sort rows by `death`, ascending.
sampledata_file[order(sampledata_file$death), ]
# Reverse (descending) sort.
sampledata_file[rev(order(sampledata_file$death)), ]
# We can also sort on multiple attributes: ties in `death` broken by `status`.
sampledata_file[order(sampledata_file$death, sampledata_file$status), ]
# Logical conditions to select rows from the data frame.
sampledata_file[sampledata_file$status == 1, ]
sampledata_file[sampledata_file$status == 1 | sampledata_file$status == 2, ]
sampledata_file[!(sampledata_file$status == 1), ]
# Omitting rows containing missing values, NA.
na.omit(sampledata_file)
|
f6dd48d53088a680014422add333ca4cf76099c1
|
808f1cd2932824552e8c50e9869a7a77194a510e
|
/utils.R
|
23c627d58a4812626a98ece62b4cd965ebbf9899
|
[] |
no_license
|
ph-hack/3dFR
|
9de0f5fcdc7071d0fe7ca6aa344e327ab2f60bbf
|
32af516a29a691febfe28cf519d7de2dd78a8c17
|
refs/heads/master
| 2021-01-21T12:47:34.554868
| 2016-03-25T18:53:43
| 2016-03-25T18:53:43
| 20,979,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,142
|
r
|
utils.R
|
#' Checks the integrity of the "closest samples" files in `closestDir`.
#' Each file lists the samples selected as closest to one individual; the
#' check passes for a file when at least one of the first `nClosest`
#' entries belongs to the same class as the individual itself.
#' Failures are reported via cat(); nothing is returned.
#' NOTE(review): relies on the project helpers concatenate() and
#' getPersonID(), defined elsewhere.
#' @example
#' The file 'closestDir'/cl_02463d452.txt contains the selected
#' closest for the individual 452 of the class 02463.
#' This function checks if the file contains at least one sample of the
#' class 02463.
checkClosestIntegrity <- function(closestDir, nClosest, isTraining=FALSE){
closest <- dir(closestDir)
N <- length(closest)
count <- 0
for(i in 1:N){
minI <- 1
maxI <- nClosest
prefix <- "cl__"
# Training files use the shorter "cl_" prefix, and their first line is the
# sample itself -- skip it and read one extra line to keep nClosest entries.
if(isTraining){
minI <- 2
maxI <- nClosest + 1
prefix <- "cl_"
}
cl <- readLines(concatenate(c(closestDir, closest[i])))[minI:maxI]
# The sample name is whatever follows the file-name prefix.
name <- strsplit(closest[i], prefix)[[1]][2]
# Fail when none of the closest entries shares the individual's class ID.
if(length(which(getPersonID(cl) == getPersonID(name))) == 0){
cat(name, " has failed!\n")
count <- count + 1
}
}
cat("done!", count, "has failed!\n")
}
#' Scans a log file and counts reported classification outcomes.
#' A relevant line starts with the token "-"; its second token is either
#' "found" (sample correctly classified) or anything else (counted as
#' "missed"). All other lines are ignored.
#' @param logFile path to the log file to scan.
#' @return list(found = <n correct>, missed = <n incorrect>).
count_found <- function(logFile){
lines <- readLines(logFile)
founds <- 0
missed <- 0
# Iterate the vector directly: unlike `for(i in 1:length(lines))`, this
# also handles an empty log file (the original errored on it).
for(line_text in lines){
tokens <- strsplit(line_text, "[ ]")[[1]]
# The length guard protects against bare "-" lines, which previously
# produced an NA comparison and crashed the loop.
if(length(tokens) >= 2 && tokens[1] == "-"){
if(tokens[2] == "found")
founds <- founds + 1
else
missed <- missed + 1
}
}
(list(found=founds, missed=missed))
}
#' Collects the class labels for a set of files.
#' The class is parsed from each file name via getPersonID(); e.g. the file
#' '02463d660.txt' belongs to class '02463'.
#' Supply either a directory (its listing is used) or a character vector of
#' file names.
#' Returns list(classes = unique classes, fileClasses = class per file).
getClassFromFiles <- function(directory=0, files=0){
# A directory argument takes precedence over the `files` vector.
dir_supplied <- directory[1] != 0
if(dir_supplied){
files <- dir(directory)
}
per_file_classes <- getPersonID(files)
(list(classes=unique(per_file_classes), fileClasses=per_file_classes))
}
|
ba58674d69bd68eb17c06f30cefc6f6bacc35925
|
c36315ca4008746505182fdac3dc61b47de81a70
|
/Blatt2/Rotation.R
|
fcc9e9a5fad9811ecaf4363306f29ca1dc9fe390
|
[] |
no_license
|
alexanderlange53/DataMining_in_Bioinformatics
|
455df0badcaa075b19b33b8474516d7bd15470fa
|
4758446a3d5a625e8b83f7f1b047dc1472d47165
|
refs/heads/master
| 2021-01-20T20:36:10.677689
| 2016-08-09T21:56:36
| 2016-08-09T21:56:36
| 64,967,069
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 232
|
r
|
Rotation.R
|
Rotation <- function(alpha, Data){
  # Rotates 2-D data points by `alpha` degrees and reports the variance of
  # the rotated first coordinate.
  #
  # alpha: rotation angle in degrees.
  # Data:  2 x n numeric matrix; each column is one (x, y) point.
  # Returns a 2 x (n + 1) matrix: first column holds the (recycled) variance
  # of the rotated x-coordinates, remaining columns the rotated points.
  alpha <- ((2*pi)/360)*alpha  # degrees -> radians
  # Row-major layout: row 1 = (cos, sin), row 2 = (-sin, cos).
  # (byrow = TRUE spelled out; T is a reassignable shorthand.)
  RotMat <- matrix(c(cos(alpha), sin(alpha), -sin(alpha), cos(alpha)), ncol = 2, byrow = TRUE)
  X <- RotMat%*%Data
  V <- var(X[1,])
  Out <- cbind(V, X)
  return(Out)
}
|
f584304023a3026e9b4b9a5d2066d09de2d82944
|
db4ab344e60cbbbd9a726dc8e84db32018dcb2e8
|
/tests/testthat/test-ncaa_lineups.R
|
c1fb26f9677bb9621b34ec76c2dbbac62fe4fb5f
|
[
"MIT"
] |
permissive
|
robert-frey/baseballr
|
8491c93a9c1fdf74c74a75c3a6e2ebb65a7862e8
|
060cd8b18c080cbb50fae444460a4c0a4db175e9
|
refs/heads/master
| 2023-03-09T17:21:52.013535
| 2023-02-24T18:06:56
| 2023-02-24T18:06:56
| 248,656,064
| 3
| 0
| null | 2020-03-20T03:04:29
| 2020-03-20T03:04:29
| null |
UTF-8
|
R
| false
| false
| 304
|
r
|
test-ncaa_lineups.R
|
# Expected column names of the data frame returned by ncaa_lineups().
cols <- c("year", "playerName", "position", "batting_order", "school")
# Integration test: scrapes a real lineup page from stats.ncaa.org, so it
# requires network access and is skipped on CRAN.
test_that("NCAA Batting Lineups", {
  skip_on_cran()
  x <- ncaa_lineups(game_info_url="https://stats.ncaa.org/game/index/4587474?org_id=528",year=2018)
  # The scraped result should be a data.frame with exactly these columns.
  expect_equal(colnames(x), cols)
  expect_s3_class(x, "data.frame")
})
|
76a2ea63291cdc8a187ddab5bc08ec3632f6d30c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ShinyItemAnalysis/examples/gDiscrim.Rd.R
|
87ddd1c0bd42411b6713583fcbce0d59947f6b2a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 978
|
r
|
gDiscrim.Rd.R
|
library(ShinyItemAnalysis)
### Name: gDiscrim
### Title: Generalized Item Discrimination
### Aliases: gDiscrim
### ** Examples
## Not run:
##D # loading 100-item medical admission test data sets
##D data(dataMedical, dataMedicalgraded)
##D # binary data set
##D dataBin <- dataMedical[, 1:100]
##D # ordinal data set
##D dataOrd <- dataMedicalgraded[, 1:100]
##D
##D # ULI for first 5 items for binary data set
##D # compare to psychometric::discrim(x)
##D gDiscrim(dataBin)[1:5]
##D # generalized ULI using 5 groups, compare 4th and 5th for binary data set
##D gDiscrim(dataBin, k = 5, l = 4, u = 5)[1:5]
##D
##D # ULI for first 5 items for ordinal data set
##D gDiscrim(dataOrd)[1:5]
##D # generalized ULI using 5 groups, compare 4th and 5th for binary data set
##D gDiscrim(dataOrd, k = 5, l = 4, u = 5)[1:5]
##D # maximum (4) and minimum (0) score are same for all items
##D gDiscrim(dataOrd, k = 5, l = 4, u = 5, maxscore = 4, minscore = 0)[1:5]
## End(Not run)
|
61536a2a1c934dd69769c593c92e91cbb9d5ae06
|
0160b0354204a7bb9b00adac864156b8c1ddf0a5
|
/Analysis/Kristiansson_analysis/heatmap.R
|
9115de7573a1c8dd72ac3e6626b85dfe7a8049d9
|
[] |
no_license
|
avahoffman/gene-expression
|
c51b1e176cb86a6d1e64c9f38f56a45966972de2
|
ab3103c4df9294678128d8469b0ad094eaa255fc
|
refs/heads/master
| 2021-08-22T19:01:35.119698
| 2020-03-16T21:39:17
| 2020-03-16T21:39:17
| 132,801,984
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,373
|
r
|
heatmap.R
|
# Heatmap of gene-expression (TMM) values for the Kristiansson re-analysis.
# Reads SxE.csv, log2-transforms and row-centers the expression matrix,
# orders gene groups by hierarchical clustering, and writes the heatmap to
# a PDF (Figure 5).
# NOTE(review): input and output paths are hard-coded absolute paths on the
# author's machine.
library(reshape)
library(ggplot2)
df=read.csv("/Users/avahoffman/Documents/CSU/Research/RNASEQDATA/MEC_reanalysis/Kristiansson_analysis/SxE.csv")
head(df)
# Keep the group label (col 2), eight sample columns (17:24) and tf flag (27).
df=df[,c(2,17:24,27)]
colnames(df)=c("Group","Ad1","Ad2","Aw1","Aw2","Sd1","Sd2","Sw1","Sw2","tf")
labels=df[,1]
tfs=df[,10]
m1=as.matrix(df[,2:9])
# log2 with a small offset to avoid log2(0).
m1=log2(m1+0.001)
# Center each row (gene group) without scaling; scale() operates on columns,
# hence the double transpose.
m1 = t(scale(t(m1), scale=F))
# Row order from Ward clustering on Euclidean distances.
order1 <- hclust( dist(m1, method = "euclidean"), method = "ward.D" )$order
df1=as.data.frame(m1)
df2=cbind(labels,df1,tfs) #reattach names
# Reorder the factor levels so the heatmap rows follow the cluster order.
df2$labels=factor(df2$labels, levels = (df2$labels)[order1] )
df3=melt(df2)
#pdf(file="/Users/avahoffman/Documents/CSU/Research/RNASEQDATA/MEC_reanalysis/SxE-heatmap.pdf",height=10,width=4)
pdf(file="/Users/avahoffman/Documents/CSU/Research/Shenghua_Data_Manuscripts/RNA-seq/MER-resubmit/FIGURE_5.pdf",height=9,width=3.14961)
ggplot(df3, aes(variable, labels)) +
theme_classic() +
geom_tile(aes(fill = value), colour = "black") +
scale_fill_gradient2(low = "red", mid="black",high = "green",name="Log2 TMM \n(centered) ") +
# NOTE(review): `face = df3$tfs[order1]` recycles a short vector over all
# y-axis labels -- confirm it highlights the intended rows.
theme(axis.text.y = element_text(size=7.7, face = df3$tfs[order1]))+
theme(axis.text.x = element_text(angle=30,hjust=1))+
ylab("Gene group") +
xlab("") +
theme(legend.position = c(0.25, -0.07), legend.direction = "horizontal") +
theme(axis.ticks.length=unit(0,"cm")) +
theme(plot.margin=unit(c(0.2,0.2,1.7,0.2), "cm"))
dev.off()
|
d4cb0bca811f0e201faa8aceb5e008cd548df513
|
a10d14465377df739497293d665c69c575c6438b
|
/Predictions_and_evaluation.R
|
f00c38eba704f078f42f8c62adcf175d397a131b
|
[] |
no_license
|
khaled-mansour/Targeted-Marketing-project
|
b628d3948fb1feb1d2686362d9c67ca7be50d1e4
|
bcdd709afe89542ab03df90633242c6c5c97869e
|
refs/heads/master
| 2021-08-31T20:29:05.323946
| 2017-12-22T19:21:06
| 2017-12-22T19:21:06
| 115,143,577
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,009
|
r
|
Predictions_and_evaluation.R
|
##################################################################################################################################
############### Predictions and evaluation #######################################################################################
##################################################################################################################################
##########################################
##### Final Predictions ##################
##########################################
# Trains an XGBoost classifier on a holdout split of `train`, evaluates it
# by AUC on the holdout, scores the `test` set, and derives a cost-based
# classification threshold for the final submission file.
# NOTE(review): `train` and `test` must already exist in the workspace.
if(!require("caret")) install.packages("caret"); library("caret") # load the package
if(!require("randomForest")) install.packages("randomForest"); library("randomForest") # load the package
if(!require("xgboost")) install.packages("xgboost"); library("xgboost") # load the package
if(!require("pROC")) install.packages("pROC"); library("pROC") # load the package
library(plyr); library(dplyr)
# Stratified 60/40 split (despite the train08/test02 names suggesting 80/20).
idx.train <- createDataPartition(y = train$return_customer, p = 0.6, list = FALSE)
train08 <- train[idx.train, ] # training set
test02 <- train[-idx.train, ] # testing set
# Setup the options for model selection
model.control<- trainControl(
  method = "cv", # 'cv' for cross validation
  number = 20, # number of folds in cross validation
  classProbs = TRUE,
  summaryFunction = twoClassSummary,
  allowParallel = TRUE, # Enable parallelization if available
  returnData = FALSE
)
# A single grid point, so caret performs no actual hyper-parameter search.
xgb.parms <- expand.grid(nrounds = 4000,
                         max_depth = 15,
                         eta = 0.0001,
                         gamma = 10,
                         colsample_bytree = 0.8,
                         min_child_weight = 1,
                         subsample = 0.8)
# Train model
xgb <- train(return_customer~., data = train08,
             method = "xgbTree",
             tuneGrid = xgb.parms,
             metric = "ROC", trControl = model.control)
# Make prediction on test02 set (probability of the second class level).
xgb.pred <- predict(xgb, newdata = test02, type = "prob")[,2]
xgb4000.ROC <- roc(predictor=xgb.pred,
                   response=test02$return_customer,
                   levels=rev(levels(test02$return_customer)))
xgb4000.ROC$auc
# Make prediction on test set
yhat.xgb <- predict(xgb, newdata = test, type = "prob")[,2]
print(yhat.xgb)
FinalPredictions6_final <- data.frame(Customer_ID = test$ID,
                                      EstimatedreturnProbability = yhat.xgb)
head(FinalPredictions6_final)
write.csv(FinalPredictions6_final,file='/Users/Barischnikow/Desktop/chloe/FinalPredictions6_final.csv')
################################################################################################################
############ Predicted Data preparation for Cost Function and AUC #################################
################################################################################################################
FinalPredictions_test02 <- data.frame(return_customer = test02$return_customer,
                                      EstimatedreturnProbability = xgb.pred)
# BUG FIX: the original wrote the undefined object `FinalPredictions_test02_ver3`;
# write the data frame actually created above.
write.csv(FinalPredictions_test02,file='/Users/Barischnikow/Desktop/chloe/FinalPredictions_test02_ver3.csv')
####### Cost Function ##########
costs = matrix(c(0, 0, 10, -3), 2)
colnames(costs) = rownames(costs) = c('returning.customer','non.returning.customer')
FinalPredictions_test02$return_customer=as.factor(FinalPredictions_test02$return_customer)
str(FinalPredictions_test02)
# Cost-minimizing probability threshold.
# BUG FIX: the original omitted parentheses around the numerator, so the
# division bound tighter than the subtraction. With this particular cost
# matrix (costs[2,1] == 0) the value happened to coincide, but the formula
# was wrong for general cost matrices.
th<-(costs[2,1] - costs[2,2])/(costs[2,1] - costs[1,1] + costs[1,2] -costs[2,2])
# Map probabilities above the threshold to "returning", then recode to 1/0.
predicted.outcome1 <- factor(FinalPredictions6_final$EstimatedreturnProbability > th, labels = c("non.returning.customer", "returning.customer"))
predicted.outcome1<-as.character(predicted.outcome1)
predicted.outcome1[predicted.outcome1=='returning.customer']<-1
predicted.outcome1[predicted.outcome1=='non.returning.customer']<-0
# NOTE(review): `class_onlyID_` is not defined in this file -- confirm it is
# created elsewhere before this line runs.
FinalPredictions1 <- data.frame(ID = class_onlyID_, return_customer=predicted.outcome1)
write.csv(FinalPredictions1,file='~/Desktop/51.csv',row.names = FALSE)
|
619e8a590288dcb593c51aae2e233bed0e354b5d
|
84e7c052fae39843d3f67be78049e175ea8c441c
|
/R_cleaned/Figure 1.R
|
29cd5325b361b18eea9ef7cb3e13f6d59755c14b
|
[] |
no_license
|
AngeVar/GLAHD
|
935b7d346dc9fb4cf8a8b552dda1400fe7100fda
|
80ff898cd15b2e670ea0ed7c31db83a69b657faf
|
refs/heads/master
| 2020-05-22T07:57:11.913885
| 2017-07-04T06:01:13
| 2017-07-04T06:01:13
| 36,908,501
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 43,880
|
r
|
Figure 1.R
|
#load some libraries
source("R_cleaned/GLAHD_LoadLibraries.R")
library(raster)
library(rgdal)
library(stringr)
library(scales)
library(oz)
library(maps)
library(mapdata)
library(RNCEP)
library(mapplots)
library(plotrix)
library(plotBy)
library(doBy)
library(dismo)
library(rgeos)
# Download WorldClim bioclim layers (2.5 arc-min) and crop them to an
# Australian extent. Note: WorldClim temperature layers are stored as
# degrees C x 10, hence the /10 divisions later in the script.
biodat <- raster::getData('worldclim', var='bio', res=2.5, path="C:/Repos/GLAHD/Data/Climate/T data")
biodat1<-subset(biodat,1) #mean annual T
#biodat5 <- subset(biodat,5)#mean max T of warmest month
#biodat6<-subset(biodat,6) #mean min T of coldest month
biodat10 <- subset(biodat,10) #mean T warmest quarter
#biodat11 <- subset(biodat,11) #mean T coldest quarter
# Bounding box covering the study region (lon 100-154, lat -40 to -11).
YbrevRange <- extent(100.00, 154.00, -40, -11.0)
biodat.oz1 <- crop(biodat1,YbrevRange)
# biodat.oz5 <- crop(biodat5,YbrevRange)
# biodat.oz6 <- crop(biodat6,YbrevRange)
biodat.oz10 <- crop(biodat10,YbrevRange)
# biodat.oz11 <- crop(biodat11,YbrevRange)
# Load provenance (seed source) coordinates and ALA occurrence records,
# split them by species, and build 50 km buffered range polygons per species.
# NOTE(review): paths are machine-specific absolute paths.
seed_atsc<-read.csv("W:/WORKING_DATA/GHS39/GLAHD/Varhammar_A/newbrascoords.csv")
dist <- read.csv("C:/Repos/GLAHD/Data/ALAdata20151022Clean.csv")
#dist<- subset(dist, Country...parsed == "Australia" )#only Australian records
# Provenance subsets per taxon.
cam<- subset(seed_atsc, Taxa == "camaldulensis");ter<- subset(seed_atsc, Taxa == "tereticornis")
bra<- subset(seed_atsc, Taxa == "brassiana");pel<- subset(seed_atsc, Taxa == "pellita");pla<- subset(seed_atsc, Taxa == "platyphylla")
bot<- subset(seed_atsc, Taxa == "botryoides");lon<- subset(seed_atsc, Taxa == "longifolia");smi<- subset(seed_atsc, Taxa == "smithii")
# Promote `dist` to a SpatialPointsDataFrame in lon/lat.
coordinates(dist) <- c("Longitude...processed","Latitude...processed")
projection(dist) <- CRS('+proj=longlat')
# Occurrence subsets per species.
# NOTE(review): the second condition compares the *string literal*
# "Longitude...processed" to 141.00 (always TRUE after coercion), so no
# longitude filtering actually happens -- the intended condition was
# presumably Longitude...processed > 141.00. Confirm before relying on it.
terdist<- subset(dist, Species...matched == "Eucalyptus tereticornis"& "Longitude...processed" > 141.00)
camdist<- subset(dist, Species...matched == "Eucalyptus camaldulensis"& "Longitude...processed" > 141.00)
bradist<- subset(dist, Species...matched == "Eucalyptus brassiana"& "Longitude...processed" > 141.00)
peldist<- subset(dist, Species...matched == "Eucalyptus pellita"& "Longitude...processed" > 141.00)
pladist<- subset(dist, Species...matched == "Eucalyptus platyphylla"& "Longitude...processed" > 141.00)
botdist<- subset(dist, Species...matched == "Eucalyptus botryoides"& "Longitude...processed" > 141.00)
londist<- subset(dist, Species...matched == "Eucalyptus longifolia"& "Longitude...processed" > 141.00)
smidist<- subset(dist, Species...matched == "Eucalyptus smithii"& "Longitude...processed" > 141.00)
# 50 km circles around every occurrence point (dismo::circles).
# NOTE(review): `t`, `c`, `b`, `l`, `s` shadow base R functions here.
t<- circles(terdist, d=50000, lonlat=TRUE)
c<- circles(camdist, d=50000, lonlat=TRUE)
br<- circles(bradist, d=50000, lonlat=TRUE)
pe<- circles(peldist, d=50000, lonlat=TRUE)
pl<- circles(pladist, d=50000, lonlat=TRUE)
b<- circles(botdist, d=50000, lonlat=TRUE)
l<- circles(londist, d=50000, lonlat=TRUE)
s<- circles(smidist, d=50000, lonlat=TRUE)
# Dissolve the overlapping circles into one range polygon per species.
tpol<- gUnaryUnion(t@polygons)
cpol<- gUnaryUnion(c@polygons)
brpol<- gUnaryUnion(br@polygons)
pepol<- gUnaryUnion(pe@polygons)
plpol<- gUnaryUnion(pl@polygons)
bpol<- gUnaryUnion(b@polygons)
lpol<- gUnaryUnion(l@polygons)
spol<- gUnaryUnion(s@polygons)
gClip <- function(shp, bb){
  # Clips a Spatial* geometry to a rectangular bounding box.
  #
  # shp: the Spatial* object to clip.
  # bb:  either a 2x2 bounds matrix (as returned by bbox()) or any object
  #      accepted by raster::extent() (e.g. an Extent or Raster*).
  # Returns the id-wise intersection of `shp` with the box.
  #
  # BUG FIX: `class(bb) == "matrix"` breaks under R >= 4.0, where matrices
  # have class c("matrix", "array"); inherits() handles old and new R alike.
  if(inherits(bb, "matrix")){
    # t() + as.vector() reorders the bbox matrix into the
    # (xmin, xmax, ymin, ymax) order that extent() expects.
    b_poly <- as(extent(as.vector(t(bb))), "SpatialPolygons")
  } else {
    b_poly <- as(extent(bb), "SpatialPolygons")
  }
  gIntersection(shp, b_poly, byid = TRUE)
}
# Clip each species' range polygon to the study extent, and prepare the
# Australia coastline outline used to mask map margins.
# NOTE(review): later map panels plot `tpol`/`cpol` (unclipped) but the
# clipped `*2` versions for the other species -- confirm this is intended.
tpol2 <- gClip(tpol, YbrevRange)
cpol2 <- gClip(cpol, YbrevRange)
bpol2 <- gClip(bpol, YbrevRange)
spol2 <- gClip(spol, YbrevRange)
lpol2 <- gClip(lpol, YbrevRange)
brpol2 <- gClip(brpol, YbrevRange)
pepol2 <- gClip(pepol, YbrevRange)
plpol2 <- gClip(plpol, YbrevRange)
outline <- map("worldHires", regions="Australia", exact=TRUE, plot=FALSE) # returns a list of x/y coords
# NOTE(review): `outline2` is never used afterwards, and subset() with an
# Extent as the second argument is dubious -- candidate for removal.
outline2<-subset(outline, YbrevRange)
xrange <- range(outline$x, na.rm=TRUE) # get bounding box
yrange <- range(outline$y, na.rm=TRUE)
# Pad the box asymmetrically (extra room to the east for labels).
xbox <- xrange + c(-2, 11)
ybox <- yrange + c(-2, 2)
# Logical index of valid coastline vertices.
# NOTE(review): this shadows base::subset for the rest of the script.
subset <- !is.na(outline$x)
#######################################################
# Figure 1 (maps): a 2 x 5 panel layout of species range maps, panels a-h.
# Each panel draws an invisible raster (to set the coordinate system), the
# species range polygon in red, a white polypath mask outside the coastline,
# and the provenance points in black.
windows(9,5)
par(mfrow=c(2,5),mar=c(1.5,0,0,0),oma=c(6,6,6,4))
# Thin 4th column (width/height 0.01) acts as a spacer between groups.
layout(matrix(c(1:10), nrow=2, ncol=5,byrow=T),
heights=c(1,1,1,0.01,1),
widths=c(1,1,1,0.01,1))
#windows(7.79,11.69);par(mfrow=c(2,4),mar=c(1.5,0,0,0),oma=c(6,6,6,4))
xlims<-c(130,154)
ylims<-c(-40,-11)
############## panel a: E. platyphylla
plot(biodat.oz10/10,xlim=xlims,ylim=ylims,legend=F,xaxt='n',col="white")
plot(plpol2,col=alpha("red",0.8),border=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
mtext("E. platyphylla",3,cex=0.8,font=3)
polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
c(outline$y[subset], NA, rep(ybox, each=2)),
col="white", rule="evenodd", box=F)
box()
points(x=pla$lon,y=pla$lat,col="black", bg="black",cex=2,pch=21)
legend("topright","a", bty='n', cex=1.2)
################### panel b: E. pellita
plot(biodat.oz10/10,xlim=xlims,ylim=ylims,legend=F,xaxt='n',yaxt='n',col="white")
plot(pepol2,col=alpha("red",0.8),border=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
mtext("E. pellita",3,cex=0.8,font=3)
polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
c(outline$y[subset], NA, rep(ybox, each=2)),
col="white", rule="evenodd", xlim=xlims,ylim=ylims,box=F)
box()
points(x=pel$lon,y=pel$lat,col="black", bg="black",cex=2,pch=21)
legend("topright","b", bty='n', cex=1.2)
################### panel c: E. brassiana (single hard-coded provenance point)
plot(biodat.oz10/10,xlim=xlims,ylim=ylims,legend=F,xaxt='n', yaxt='n',col="white", bty='n')
plot(brpol2,col=alpha("red",0.8),border=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
mtext("E. brassiana",3,cex=0.8,font=3)
polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
c(outline$y[subset], NA, rep(ybox, each=2)),
col="white", rule="evenodd", box=F)
box()
points(x=144.1304,y=-14.51,col="black", bg="black",cex=2,pch=21)
legend("topright","c", bty='n', cex=1.2)
##################### spacer panel (thin column in the layout)
plot(NA)
############## panel d: E. camaldulensis (unclipped polygon `cpol`)
plot(biodat.oz10/10, xlim=xlims, ylim=ylims,legend=F, xaxt='n', col="white")
plot(cpol,col=alpha("red",0.8),border=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
mtext(text="E. camaldulensis",3, cex=0.8,font=3)
polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
c(outline$y[subset], NA, rep(ybox, each=2)),
col="white", rule="evenodd", box=F)
box()
points(x=cam$lon,y=cam$lat,col="black", bg="black",cex=2,pch=21)
legend("topright","d", bty='n', cex=1.2)
############### panel e: E. botryoides
plot(biodat.oz10/10,xlim=xlims, ylim=ylims,legend=F, col="white")
plot(bpol2,col=alpha("red",0.8),border=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
mtext("E. botryoides",3,cex=0.8,font=3)
polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
c(outline$y[subset], NA, rep(ybox, each=2)),
col="white", rule="evenodd", box=F)
box()
points(x=bot$lon,y=bot$lat,col="black", bg="black",cex=2,pch=21)
legend("topright","e", bty='n', cex=1.2)
####################### panel f: E. smithii
plot(biodat.oz10/10,xlim=xlims,ylim=ylims,legend=F, yaxt='n',col="white")
plot(spol2,col=alpha("red",0.8),border=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
mtext("E. smithii",3,cex=0.8,font=3)
polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
c(outline$y[subset], NA, rep(ybox, each=2)),
col="white", rule="evenodd", box=F)
box()
points(x=smi$lon,y=smi$lat,col="black", bg="black",cex=2,pch=21)
legend("topright","f", bty='n', cex=1.2)
####################### panel g: E. longifolia
# NOTE(review): the title string says "longiflora" -- likely a typo for
# "longifolia" (the taxon used everywhere else).
plot(biodat.oz10/10,xlim=xlims,ylim=ylims,legend=F, yaxt='n',col="white")
plot(lpol2,col=alpha("red",0.8),border=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
mtext("E. longiflora",3,cex=0.8,font=3)
polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
c(outline$y[subset], NA, rep(ybox, each=2)),
col="white", rule="evenodd", box=F)
box()
points(x=lon$lon,y=lon$lat,col="black", bg="black",cex=2,pch=21)
legend("topright","g", bty='n', cex=1.2)
####################### spacer panel
plot(NA)
################## panel h: E. tereticornis (unclipped polygon `tpol`)
plot(biodat.oz10/10, xlim=xlims, ylim=ylims,legend=F, col="white")
plot(tpol,col=alpha("red",0.8),border=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
mtext(text="E. tereticornis",3,cex=0.8,font=3)
polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
c(outline$y[subset], NA, rep(ybox, each=2)),
col="white", rule="evenodd")
box()
points(x=ter$lon,y=ter$lat,col="black", bg="black",cex=2,pch=21)
legend("topright","h", bty='n', cex=1.2)
# Shared outer axis labels and group headers.
mtext("Latitude", side=2, outer=T, line=3)
mtext("Longitude", side=1, outer=T, line=2, at=0.45)
mtext("Narrow", side=3, outer=T, line=2, at=0.3)
mtext("Wide", side=3, outer=T, line=2, at=0.89)
text(97,y=0,labels="Tropical", xpd=NA, srt=-90, pos=2, cex=1.7)
text(97,y=-32,labels="Temperate", xpd=NA, srt=-90, pos=2, cex=1.7)
##################################
#########################################################################################################################
# Extract bioclim values at occurrence and provenance coordinates (bilinear
# interpolation, mean over a 15 km buffer), summarise per species, and draw
# the per-species temperature-range figure.
xy <- SpatialPoints(cbind(dist$Longitude...processed,dist$Latitude...processed))
dist$bio10 <- extract(biodat.oz10/10,xy,method="bilinear",fun=mean, buffer=15000)
#dist$bio11 <- extract(biodat.oz11/10,xy,method="bilinear",fun=mean, buffer=15000)
# NOTE(review): bio1 here is NOT divided by 10, unlike bio10 above and the
# seed_atsc$bio1 extraction below -- units will differ by a factor of 10.
dist$bio1 <- extract(biodat.oz1,xy,method="bilinear",fun=mean, buffer=15000)
# dist$bio5 <- extract(biodat.oz5/10,xy,method="bilinear",fun=mean, buffer=15000)
# dist$bio6 <- extract(biodat.oz6/10,xy,method="bilinear",fun=mean, buffer=15000)
# # write out a csv
# write.csv(dist,file="dist_bioclim.csv")
# dist<-read.csv("dist_bioclim.csv")
#extracting climate for selected provenances
y <- SpatialPoints(cbind(seed_atsc$lon,seed_atsc$lat))
seed_atsc$bio10 <- extract(biodat.oz10/10,y,method="bilinear", fun=mean,buffer=15000)
# seed_atsc$bio11 <- extract(biodat.oz11/10,y,method="bilinear", fun=mean,buffer=15000)
seed_atsc$bio1 <- extract(biodat.oz1/10,y,method="bilinear",fun=mean, buffer=15000)
# seed_atsc$bio5 <- extract(biodat.oz5/10,y,method="bilinear",fun=mean, buffer=15000)
# seed_atsc$bio6 <- extract(biodat.oz6/10,y,method="bilinear",fun=mean, buffer=15000)
seed_atsc$Species...matched<- as.factor(paste(seed_atsc$Genus,seed_atsc$Taxa, sep=" "))
#removing distribution data where location uncertainty is more than 5 km
dis <- as.data.frame(subset(dist,dist$Coordinate.Uncertainty.in.Metres...parsed<5000))
#summarise climatic conditions of distributions per species
dat<-summaryBy(bio10~Species...matched,data=as.data.frame(dis), FUN=c(mean,min,max),na.rm=T)
# Fix the plotting order of species (tropical narrow -> temperate narrow -> wide).
dat$Sp<- factor(dat$Species...matched, levels =
c("Eucalyptus brassiana","Eucalyptus pellita",
"Eucalyptus platyphylla","Eucalyptus longifolia",
"Eucalyptus smithii","Eucalyptus botryoides",
"Eucalyptus tereticornis","Eucalyptus camaldulensis"
))
seed_atsc$Sp<- factor(seed_atsc$Species...matched, levels =
c("Eucalyptus brassiana","Eucalyptus pellita",
"Eucalyptus platyphylla","Eucalyptus longifolia",
"Eucalyptus smithii","Eucalyptus botryoides",
"Eucalyptus tereticornis","Eucalyptus camaldulensis"
))
seed_atsc$bio10.mean<- seed_atsc$bio10
####################################################
#new plot: per-species bio10 mean with min/max range bars
windows(7.79,11.69)
plot1<-ggplot(data=dat,
aes(x=Sp,y=bio10.mean,ymax=bio10.max,
ymin=bio10.min))+ ylim(15,33)+geom_pointrange(shape="", col='red')
#plot provenances
# plot1.1<- plot1+geom_point(data = seed_atsc,
# aes(x=Sp, y=bio10.mean,ymax=bio10.mean,
# ymin=bio10.mean),
# color = 'black', size=3)
# Flip to horizontal and add the growth-temperature reference lines
# (dashed = 18.9/27.4, dotted = 22.4/30.7).
plot2<-plot1+coord_flip()+geom_hline(aes(yintercept=30.7), lty=3,size=1)+
geom_hline(aes(yintercept=27.4), lty=2,size=1)+
geom_hline(aes(yintercept=22.4), lty=3,size=1)+
geom_hline(aes(yintercept=18.9), lty=2,size=1)+xlab("")+ylab("")
plot2+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_line(colour = "black"),
axis.text=element_text(size=12))+
ylab("Temperature of Warmest Quarter")+xlab("")+
theme(axis.title.x=element_text(margin=margin(20,0,0,0)))+
theme(plot.margin=unit(c(1,1,1,0),"cm"))
# Per-species density panels of bio10 across each species' occurrence
# records, stacked 8 x 1, with the growth-temperature reference lines.
# NOTE(review): `a`..`h` (and earlier `c`, `t`) shadow base R functions.
cam.df<- as.data.frame(cam)
camdist.df<- as.data.frame(camdist)
ter.df<- as.data.frame(ter)
terdist.df<- as.data.frame(terdist)
bra.df<- as.data.frame(bra)
bradist.df<- as.data.frame(bradist)
pel.df<- as.data.frame(pel)
peldist.df<- as.data.frame(peldist)
pla.df<- as.data.frame(pla)
pladist.df<- as.data.frame(pladist)
bot.df<- as.data.frame(bot)
botdist.df<- as.data.frame(botdist)
lon.df<- as.data.frame(lon)
londist.df<- as.data.frame(londist)
smi.df<- as.data.frame(smi)
smidist.df<- as.data.frame(smidist)
#density plot
a<-density(terdist.df$bio10, na.rm=T)
b<-density(camdist.df$bio10, na.rm=T)
c<-density(pladist.df$bio10, na.rm=T)
d<-density(bradist.df$bio10, na.rm=T)
e<-density(peldist.df$bio10, na.rm=T)
f<-density(botdist.df$bio10, na.rm=T)
g<-density(londist.df$bio10, na.rm=T)
h<-density(smidist.df$bio10, na.rm=T)
windows(7.79,11.69)
par(mfrow=c(8,1), mar=c(1,1,1,1), oma=c(6,8,4,4))
# Top panel (tereticornis) has no polygon fill; the arrows mark the two
# growth-temperature treatment ranges.
plot(a, ylim=c(0,0.3), main="", xlab="", xlim=c(15,33), ylab="E. tereticornis", xaxt="n")
abline(v=c(18.9,27.4), lty=2)
abline(v=c(22.4,30.7), lty=3)
arrows(18.85448,0.3,22.29183,0.3,code=2, lwd=2, length=0.1, angle=20, xpd=TRUE)
arrows(27.45121,0.3,30.93521,0.3,code=2, lwd=2, length=0.1, angle=20)
plot(b, ylim=c(0,0.25), main="", xlab="", xlim=c(15,33), ylab="E. camaldulensis", xaxt="n")
polygon(b, col=alpha("black",0.3), border= "black")
abline(v=c(18.9,27.4), lty=2)
abline(v=c(22.4,30.7), lty=3)
# Red fill = tropical narrow species; blue = temperate narrow; black = wide.
plot(c, ylim=c(0,0.5), main="", xlab="", xlim=c(15,33), ylab="E. platyphylla", xaxt="n")
polygon(c, col=alpha("red",0.3), border= "black")
abline(v=c(18.9,27.4), lty=2)
abline(v=c(22.4,30.7), lty=3)
plot(d, ylim=c(0,1.2), main="", xlab="", xlim=c(15,33), ylab="E. brassiana", xaxt="n")
polygon(d, col=alpha("red",0.3), border= "black")
abline(v=c(18.9,27.4), lty=2)
abline(v=c(22.4,30.7), lty=3)
plot(e, ylim=c(0,0.7), main="", xlab="", xlim=c(15,33), ylab="E. pellita", xaxt="n")
polygon(e, col=alpha("red",0.3), border= "black")
abline(v=c(18.9,27.4), lty=2)
abline(v=c(22.4,30.7), lty=3)
plot(f, ylim=c(0,0.4), main="", xlab="", xlim=c(15,33), ylab="E. botryoides", xaxt="n")
polygon(f, col=alpha("blue",0.3), border= "black")
abline(v=c(18.9,27.4), lty=2)
abline(v=c(22.4,30.7), lty=3)
plot(g, ylim=c(0,1.2), main="", xlab="", xlim=c(15,33), ylab="E. longifolia", xaxt="n")
polygon(g, col=alpha("blue",0.3), border= "black")
abline(v=c(18.9,27.4), lty=2)
abline(v=c(22.4,30.7), lty=3)
plot(h, ylim=c(0,0.5), main="", xlab="", xlim=c(15,33), ylab="E. smithii")
polygon(h, col=alpha("blue",0.3), border= "black")
abline(v=c(18.9,27.4), lty=2)
abline(v=c(22.4,30.7), lty=3)
mtext(text="Temperature of Warmest Quarter", side=1, outer=T, line=3)
mtext(text="Temperate Tropical", side=3, outer=T, line=1)
# --- Count-scaled density figure: wide- vs narrow-ranged species groups ---
# Rescale each kernel density so its y axis reads in histogram counts rather
# than probability density: multiply by counts/density taken from the first
# histogram bin. NOTE(review): multiplier[1] assumes the first bin is
# non-empty (counts/density is NaN for empty bins) -- confirm.
widehist <- hist(widedist$bio10)
widemultiplier <- widehist$counts / widehist$density
widedensity <- density(widedist$bio10, na.rm=T)
widedensity$y <- widedensity$y * widemultiplier[1]
narrowNhist <- hist(narrowNdist$bio10)
narrowNmultiplier <- narrowNhist$counts / narrowNhist$density
narrowNdensity <- density(narrowNdist$bio10, na.rm=T)
narrowNdensity$y <- narrowNdensity$y * narrowNmultiplier[1]
narrowShist <- hist(narrowSdist$bio10)
narrowSmultiplier <- narrowShist$counts / narrowShist$density
narrowSdensity <- density(narrowSdist$bio10, na.rm=T)
narrowSdensity$y <- narrowSdensity$y * narrowSmultiplier[1]
# Open a new on-screen device (windows() is Windows-only) and draw an
# invisible (white-bordered) histogram just to establish the coordinates.
windows(width=5, height=5)
par(mar=c(5,0,0,0))
plot(widehist, border="white", ylim=c(0,800), xlim=c(13,33), xlab="",
ylab="", main="",axes=F)
# Minor ticks every 1 degree C, labelled ticks every 5.
axis(1, at=seq(14,32,1), labels=F, line=-0.5)
axis(1, at=seq(15,32,5), line=-0.5)
#axis(2, at=seq(0,1000,200), line=-1.5)
mtext(text=expression(Mean~temperature~of~warmest~quarter~(degree*C)),side=1,outer=T,line=-3,cex=1)
# Overlay the three count-scaled density polygons: wide group shaded grey,
# both narrow groups white.
polygon(widedensity, col=alpha("black",0.4), border= "black")
polygon(narrowNdensity, col=alpha("white",0.5), border= "black")
polygon(narrowSdensity, col=alpha("white",0.5), border= "black")
# Arrows marking the two highlighted temperature intervals.
arrows(18.85448,30,22.29183,30,code=2, lwd=2, length=0.1, angle=20)
arrows(27.45121,30,30.93521,30,code=2, lwd=2, length=0.1, angle=20)
legend(27.5,800,legend=c("Wide","Narrow"), fill=c(alpha("black",0.4),alpha("white",0.5)),border= "black")
#pick random subsample and see if it makes the figure prettier
# NOTE(review): no set.seed() before sample(), so the subsampled figure is
# not reproducible run-to-run. These assignments OVERWRITE the full
# occurrence data frames used above.
narrowSdist<- narrowSdist[sample(1:nrow(narrowSdist),1000,replace=F),]
narrowNdist<- narrowNdist[sample(1:nrow(narrowNdist),1000,replace=F),]
widedist<- widedist[sample(1:nrow(widedist),4000,replace=F),]
# --- Repeat the rescaling and the figure with the subsampled data ---
widehist <- hist(widedist$bio10)
widemultiplier <- widehist$counts / widehist$density
widedensity <- density(widedist$bio10, na.rm=T)
widedensity$y <- widedensity$y * widemultiplier[1]
narrowNhist <- hist(narrowNdist$bio10)
narrowNmultiplier <- narrowNhist$counts / narrowNhist$density
narrowNdensity <- density(narrowNdist$bio10, na.rm=T)
narrowNdensity$y <- narrowNdensity$y * narrowNmultiplier[1]
narrowShist <- hist(narrowSdist$bio10)
narrowSmultiplier <- narrowShist$counts / narrowShist$density
narrowSdensity <- density(narrowSdist$bio10, na.rm=T)
narrowSdensity$y <- narrowSdensity$y * narrowSmultiplier[1]
windows(width=6, height=6)
par(mar=c(5,3,1,0))
plot(widehist, border="white", ylim=c(0,800), xlim=c(13,33), xlab="",
ylab="", main="",axes=F,line=1)
axis(1, at=seq(14,32,1), labels=F, line=-0.5)
axis(1, at=seq(15,32,5), line=-0.5)
axis(2, at=seq(0,800,200),line=-1.5)
mtext(text=expression(Mean~temperature~of~warmest~quarter~(degree*C)),side=1,outer=T,line=-3,cex=1.3)
# Colour-coded version: wide = white outline, tropical narrow = red,
# temperate narrow = dodgerblue (matches the legend below).
polygon(widedensity, col=alpha("white",0.4), border= "black", lwd=2)
polygon(narrowNdensity, col=alpha("red",0.5), border= "black")
polygon(narrowSdensity, col=alpha("dodgerblue",0.5), border= "black")
mtext("Frequency", side=2, line=1.3, cex=1.5)
arrows(18.85448,30,22.29183,30,code=2, lwd=2.5, length=0.15, angle=20)
arrows(27.45121,30,30.93521,30,code=2, lwd=2.5, length=0.15, angle=20)
legend("topright",legend=c("2 Wide","3 Temperate Narrow", "3 Tropical Narrow"),
fill=c(alpha("white",0.4),alpha("dodgerblue",0.5),alpha("red",0.5)),
border= "black", bty="n", cex=1.2)
# ####################################################
# #subset data per species
# cam<- subset(seed_atsc, Taxa == "camaldulensis")
# camdist <- subset(dis, Species...matched == "Eucalyptus camaldulensis")
# ter<- subset(seed_atsc, Taxa == "tereticornis")
# terdist <- subset(dis, Species...matched == "Eucalyptus tereticornis")
#
# bra<- subset(seed_atsc, Taxa == "brassiana")
# bradist <- subset(dis, Species...matched == "Eucalyptus brassiana")
# pel<- subset(seed_atsc, Taxa == "pellita")
# peldist <- subset(dis, Species...matched == "Eucalyptus pellita")
# pla<- subset(seed_atsc, Taxa == "platyphylla")
# pladist <- subset(dis, Species...matched == "Eucalyptus platyphylla")
#
# bot<- subset(seed_atsc, Taxa == "botryoides")
# botdist <- subset(dis, Species...matched == "Eucalyptus botryoides")
# lon<- subset(seed_atsc, Taxa == "longifolia")
# londist <- subset(dis, Species...matched == "Eucalyptus longifolia")
# smi<- subset(seed_atsc, Taxa == "smithii")
# smidist <- subset(dis, Species...matched == "Eucalyptus smithii")
#
# #windows(7.79,11.69);par(mfrow=c(4,2),mar=c(0,0,0,0),oma=c(6,6,6,4))
# windows(7.79,11.69);par(mfrow=c(4,2),mar=c(1.5,0,0,1.5),oma=c(6,6,6,4))
# ylims=c(12,38)
# xlims=c(0,28)
# #plot species on climate space
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus tereticornis"),
# col=alpha("red",0.1), xlim=c(-5,25), ylim=ylims, xaxt="n",xaxs="i",
# pch= 16, cex=1)
# points(bio10~bio11, data=ter, col="black", pch=16)
# abline(h=c(22.4 ,30.7), lty =3)
# abline(h=c(18.9 ,27.4), lty =2)
# mtext(text="E. tereticornis",3,cex=0.8,font=3)
# axis(side = 1, labels=F, tcl=0.5 )
# #magaxis(side=c(1,2,4),labels=c(0,1,0),frame.plot=T,las=1,cex.axis=1.2)
#
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus camaldulensis"),
# pch=16, cex=1, ylim=ylims,xlim=xlims,col=alpha("red",0.1), xaxt="n", yaxt="n",xaxs="i")
# points(bio10~bio11, data=cam, col="black", pch=16)
# abline(h=c(22.4 ,30.7), lty =3)
# abline(h=c(18.9 ,27.4), lty =2)
# axis(side=4)
# mtext(text="E. camaldulensis",3,cex=0.8,font=3)
# #magaxis(side=c(1,2,4),labels=c(0,0,0),frame.plot=T,las=1,cex.axis=1.2)
# axis(side = 1, labels=F, tcl=0.5 )
#
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus botryoides"), ylim=ylims,
# pch=16, cex=1,xlim=xlims,col=alpha("red",0.1), xaxt="n",xaxs="i")
# points(bio10~bio11, data=bot, col="black", pch=16)
# abline(h=22.4 , lty =3)
# abline(h=c(18.9), lty =2)
# mtext(text="E. botryoides",3,cex=0.8,font=3)
# #magaxis(side=c(1,2,4),labels=c(0,1,0),frame.plot=T,las=1,cex.axis=1.2)
# axis(side = 1, labels=F, tcl=0.5 )
#
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus platyphylla"), ylim=ylims,
# xlim=xlims,col=alpha("red",0.1), pch=16, cex=1, xaxt="n", yaxt="n",xaxs="i")
# points(bio10~bio11, data=pla, col="black", pch=16)
# abline(h=30.7, lty =3)
# abline(h=c(27.4), lty =2)
# mtext(text="E. platyphylla",3,cex=0.8,font=3)
# axis(side=4)
# #magaxis(side=c(1,2,4),labels=c(0,0,0),frame.plot=T,las=1,cex.axis=1.2)
# axis(side = 1, labels=F, tcl=0.5 )
#
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus smithii"), ylim=ylims,
# xlim=xlims,col=alpha("red",0.1),pch=16, cex=1, xaxt="n",xaxs="i")
# points(bio10~bio11, data=smi, col="black", pch=16)
# abline(h=22.4 , lty =3)
# abline(h=c(18.9), lty =2)
# mtext(text="E. smithii",3,cex=0.8,font=3)
# #magaxis(side=c(1,2,4),labels=c(0,1,0),frame.plot=T,las=1,cex.axis=1.2)
# axis(side = 1, labels=F, tcl=0.5 )
#
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus pellita"), ylim=ylims,
# xlim=xlims,col=alpha("red",0.1), pch=16, cex=1, xaxt="n", yaxt="n",xaxs="i")
# points(bio10~bio11, data=pel, col="black", pch=16)
# abline(h=30.7, lty =3)
# abline(h=c(27.4), lty =2)
# mtext(text="E. pellita",3,cex=0.8,font=3)
# axis(side=4)
# #magaxis(side=c(1,2,4),labels=c(0,0,0),frame.plot=T,las=1,cex.axis=1.2)
# axis(side = 1, labels=F, tcl=0.5 )
#
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus longifolia"), ylim=ylims,
# xlim=xlims,col=alpha("red",0.1), pch=16, cex=1, xaxt='n',xaxs="i")
# points(bio10~bio11, data=lon, col="black", pch=16)
# abline(h=22.4 , lty =3)
# abline(h=c(18.9), lty =2)
# mtext(text="E. longifolia",3,cex=0.8,font=3)
# #magaxis(side=c(1,2,4),labels=c(1,1,0),frame.plot=T,las=1,cex.axis=1.2)
# axis(side = 1, labels=T, tcl=0.5 )
#
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus brassiana"), ylim=ylims,
# xlim=xlims,col=alpha("red",0.1), pch=16, cex=1, yaxt="n", xaxt='n',xaxs="i")
# points(bio10~bio11, data=bra, col="black", pch=16)
# abline(h=30.7, lty =3)
# abline(h=c(27.4), lty =2)
# mtext(text="E. brassiana",3,cex=0.8,font=3)
# axis(side=4)
# axis(side = 1, labels=T, tcl=0.5 )
# #magaxis(side=c(1,2,4),labels=c(1,0,0),frame.plot=T,las=1,cex.axis=1.2)
#
# mtext(text=expression(Temperature~of~warmest~Quarter~(degree*C)),side=2, outer=T, line=3)
# mtext(text=expression(Temperature~of~coldest~Quarter~(degree*C)),side=1, outer=T, line=2, at=0.45)
#
########################################################################################################
#with black outline on distributions
# #Combine the two
# #left, right bottom top
# split.screen(rbind(c(0.1,0.292,0.1, 0.98), c(0.312, 0.95, 0.1, 0.98)))
# screen(1)
# windows(11.69,11.69);par(mfrow=c(4,5),mar=c(1.5,0,0,0),oma=c(6,6,6,4))
#
#
# xlims<-c(130,154)
# ylims<-c(-40,-11)
# plot(biodat.oz10/10, xlim=xlims, ylim=ylims,legend=F, xaxt='n',
# col="white")
# plot(tpol,col=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
# mtext(text="E. tereticornis",3,cex=0.8,font=3)
# polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
# c(outline$y[subset], NA, rep(ybox, each=2)),
# col="white", rule="evenodd")
# box()
# points(x=ter$lon,y=ter$lat,col="black", bg="black",cex=1.5,pch=21)
# legend("topright","a", bty='n', cex=1.2)
#
# plot(biodat.oz10/10, xlim=xlims, ylim=ylims,legend=F, yaxt='n',xaxt='n',col="white")
# plot(cpol,col=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
# mtext(text="E. camaldulensis",3, cex=0.8,font=3)
# polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
# c(outline$y[subset], NA, rep(ybox, each=2)),
# col="white", rule="evenodd", box=F)
# box()
# points(x=cam$lon,y=cam$lat,col="black", bg="black",cex=1.5,pch=21)
# legend("topright","b", bty='n', cex=1.2)
#
# plot(1, type="n", axes=F, xlab="", ylab="")
#
# ylims=c(12,38)
# xlims=c(0,28)
# #plot species on climate space
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus tereticornis"),
# col=alpha("red",0.1), xlim=c(-5,25), ylim=ylims, xaxt="n",
# pch= 16, cex=1)
# points(bio10~bio11, data=ter, col="black", pch=16)
# abline(h=c(22.4 ,30.7), lty =3)
# abline(h=c(18.9 ,27.4), lty =2)
# axis(side = 1, labels=F, tcl=0.5 )
# mtext(text="E. tereticornis",3,cex=0.8,font=3)
# #magaxis(side=c(1,2,4),labels=c(0,1,0),frame.plot=T,las=1,cex.axis=1.2)
#
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus camaldulensis"),
# pch=16, cex=1, ylim=ylims,xlim=xlims,col=alpha("red",0.1), yaxt='n',xaxt="n")
# points(bio10~bio11, data=cam, col="black", pch=16)
# abline(h=c(22.4 ,30.7), lty =3)
# abline(h=c(18.9 ,27.4), lty =2)
# axis(side=2)
# #legend("topleft", legend="E. camaldulensis", cex=1.3, bty="n", text.font=3)
# #magaxis(side=c(1,2,4),labels=c(0,0,0),frame.plot=T,las=1,cex.axis=1.2)
# axis(side = 1, labels=F, tcl=0.5 )
# mtext(text="E. camaldulensis",3, cex=0.8,font=3)
#
#
# xlims<-c(130,154)
# ylims<-c(-40,-11)
# plot(biodat.oz10/10,xlim=xlims, ylim=ylims,legend=F, xaxt='n',col="white")
# plot(bpol2,col=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
# mtext("E. botryoides",3,cex=0.8,font=3)
# polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
# c(outline$y[subset], NA, rep(ybox, each=2)),
# col="white", rule="evenodd", box=F)
# box()
# points(x=bot$lon,y=bot$lat,col="black", bg="black",cex=1.5,pch=21)
# legend("topright","c", bty='n', cex=1.2)
#
# plot(biodat.oz10/10,xlim=xlims, ylim=ylims,legend=F, xaxt='n', yaxt='n',col="white")
# plot(plpol2,col="red",xlim=xlims, ylim=ylims, add=T)
# mtext("E. platyphylla",3,cex=0.8,font=3)
# polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
# c(outline$y[subset], NA, rep(ybox, each=2)),
# col="white", rule="evenodd", box=F)
# box()
# points(x=pla$lon,y=pla$lat,col="black", bg="black",cex=1.5,pch=21)
# legend("topright","d", bty='n', cex=1.2)
#
# ylims=c(12,38)
# xlims=c(0,28)
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus botryoides"), ylim=ylims,
# pch=16, cex=1,xlim=xlims,col=alpha("red",0.1), xaxt="n")
# points(bio10~bio11, data=bot, col="black", pch=16)
# abline(h=22.4 , lty =3)
# abline(h=c(18.9), lty =2)
# #legend("topleft", legend="E. botryoides", cex=1.3, bty="n", text.font=3)
# #magaxis(side=c(1,2,4),labels=c(0,1,0),frame.plot=T,las=1,cex.axis=1.2)
# axis(side = 1, labels=F, tcl=0.5 )
# mtext("E. botryoides",3,cex=0.8,font=3)
#
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus platyphylla"), ylim=ylims,
# xlim=xlims,col=alpha("red",0.1), pch=16, cex=1, xaxt="n", yaxt="n")
# points(bio10~bio11, data=pla, col="black", pch=16)
# abline(h=30.7, lty =3)
# abline(h=c(27.4), lty =2)
# mtext("E. platyphylla",3,cex=0.8,font=3)
# axis(side=4)
# #magaxis(side=c(1,2,4),labels=c(0,0,0),frame.plot=T,las=1,cex.axis=1.2)
# axis(side = 1, labels=F, tcl=0.5 )
#
# xlims<-c(130,154)
# ylims<-c(-40,-11)
# plot(biodat.oz10/10,xlim=xlims,ylim=ylims,legend=F, xaxt='n',col="white")
# plot(spol2,col=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
# mtext("E. smithii",3,cex=0.8,font=3)
# polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
# c(outline$y[subset], NA, rep(ybox, each=2)),
# col="white", rule="evenodd", box=F)
# box()
# points(x=smi$lon,y=smi$lat,col="black", bg="black",cex=1.5,pch=21)
# legend("topright","e", bty='n', cex=1.2)
#
# plot(biodat.oz10/10,xlim=xlims,ylim=ylims,legend=F, xaxt='n',yaxt='n',col="white")
# plot(pepol2,col=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
# mtext("E. pellita",3,cex=0.8,font=3)
# polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
# c(outline$y[subset], NA, rep(ybox, each=2)),
# col="white", rule="evenodd", box=F)
# box()
# points(x=pel$lon,y=pel$lat,col="black", bg="black",cex=1.5,pch=21)
# legend("topright","f", bty='n', cex=1.2)
#
# ylims=c(12,38)
# xlims=c(0,28)
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus smithii"), ylim=ylims,
# xlim=xlims,col=alpha("red",0.1),pch=16, cex=1, xaxt="n")
# points(bio10~bio11, data=smi, col="black", pch=16)
# abline(h=22.4 , lty =3)
# abline(h=c(18.9), lty =2)
# mtext("E. smithii",3,cex=0.8,font=33)
# #magaxis(side=c(1,2,4),labels=c(0,1,0),frame.plot=T,las=1,cex.axis=1.2)
# axis(side = 1, labels=F, tcl=0.5 )
#
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus pellita"), ylim=ylims,
# xlim=xlims,col=alpha("red",0.1), pch=16, cex=1, xaxt="n", yaxt="n")
# points(bio10~bio11, data=pel, col="black", pch=16)
# abline(h=30.7, lty =3)
# abline(h=c(27.4), lty =2)
# mtext("E. pellita",3,cex=0.8,font=3)
# axis(side=4)
# #magaxis(side=c(1,2,4),labels=c(0,0,0),frame.plot=T,las=1,cex.axis=1.2)
# axis(side = 1, labels=F, tcl=0.5 )
#
# xlims<-c(130,154)
# ylims<-c(-40,-11)
# plot(biodat.oz10/10,xlim=xlims,ylim=ylims,legend=F, col="white")
# plot(lpol2,col=alpha("red",0.8)),xlim=xlims, ylim=ylims, add=T)
# mtext("E. longiflora",3,cex=0.8,font=3)
# polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
# c(outline$y[subset], NA, rep(ybox, each=2)),
# col="white", rule="evenodd", box=F)
# box()
# points(x=lon$lon,y=lon$lat,col="black", bg="black",cex=1.5,pch=21)
# legend("topright","g", bty='n', cex=1.2)
#
# plot(biodat.oz10/10,xlim=xlims,ylim=ylims,legend=F, yaxt='n',col="white", bty='n')
# plot(brpol2,col=alpha("red",0.8),xlim=xlims, ylim=ylims, add=T)
# mtext("E. brassiana",3,cex=0.8,font=3)
# polypath(c(outline$x[subset], NA, c(xbox, rev(xbox))),
# c(outline$y[subset], NA, rep(ybox, each=2)),
# col="white", rule="evenodd", box=F)
# box()
# points(x=144.1304,y=-14.51,col="black", bg="black",cex=1.5,pch=21)
# legend("topright","h", bty='n', cex=1.2)
#
# ylims=c(12,38)
# xlims=c(0,28)
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus longifolia"), ylim=ylims,
# xlim=xlims,col=alpha("red",0.1), pch=16, cex=1, xaxt='n')
# points(bio10~bio11, data=lon, col="black", pch=16)
# abline(h=22.4 , lty =3)
# abline(h=c(18.9), lty =2)
# #legend("topleft", legend="E. longifolia", cex=1.3, bty="n", text.font=3)
# #magaxis(side=c(1,2,4),labels=c(1,1,0),frame.plot=T,las=1,cex.axis=1.2)
# mtext("E. longiflora",3,cex=0.8,font=3)
#
# plot(bio10~bio11, data=subset(dis,dis$Species...matched == "Eucalyptus brassiana"), ylim=ylims,
# xlim=xlims,col=alpha("red",0.1), pch=16, cex=1, yaxt="n", xaxt='n')
# points(bio10~bio11, data=bra, col="black", pch=16)
# abline(h=30.7, lty =3)
# abline(h=c(27.4), lty =2)
# mtext("E. brassiana",3,cex=0.8,font=3)
# axis(side=4)
# axis(side = 1, labels=T, tcl=0.5 )
# --- Build pooled data frames for the range-size groups ---
# Seedlot objects (cam, ter, ...) and occurrence objects (camdist, ...) are
# created earlier in the file; coerce each to a plain data frame so they
# can be stacked with rbind().
cam.df<- as.data.frame(cam)
camdist.df<- as.data.frame(camdist)
ter.df<- as.data.frame(ter)
terdist.df<- as.data.frame(terdist)
bra.df<- as.data.frame(bra)
bradist.df<- as.data.frame(bradist)
pel.df<- as.data.frame(pel)
peldist.df<- as.data.frame(peldist)
pla.df<- as.data.frame(pla)
pladist.df<- as.data.frame(pladist)
bot.df<- as.data.frame(bot)
botdist.df<- as.data.frame(botdist)
lon.df<- as.data.frame(lon)
londist.df<- as.data.frame(londist)
smi.df<- as.data.frame(smi)
smidist.df<- as.data.frame(smidist)
# Wide-ranged group: E. camaldulensis + E. tereticornis.
wide<-rbind(cam.df,ter.df)
widedist<-rbind(camdist.df,terdist.df)
# Narrow-ranged groups (three species each); per the legend drawn later,
# narrowS = temperate narrow and narrowN = tropical narrow.
narrowS<-rbind(bot.df,lon.df,smi.df)
narrowN<-rbind(bra.df, pel.df, pla.df)
narrowSdist<-rbind(botdist.df,londist.df,smidist.df)
narrowNdist<-rbind(bradist.df, peldist.df, pladist.df)
#density plot
# Kernel densities of bio10 (mean temperature of warmest quarter, per the
# axis labels used below): a-h are per-species, i-k are per-group.
# NOTE(review): single-letter names c, f, g, h, i shadow base R functions
# (base::c etc.) for the rest of the script -- fragile, but left as-is.
a<-density(terdist.df$bio10, na.rm=T)
b<-density(camdist.df$bio10, na.rm=T)
c<-density(pladist.df$bio10, na.rm=T)
d<-density(bradist.df$bio10, na.rm=T)
e<-density(peldist.df$bio10, na.rm=T)
f<-density(botdist.df$bio10, na.rm=T)
g<-density(londist.df$bio10, na.rm=T)
h<-density(smidist.df$bio10, na.rm=T)
i<-density(widedist$bio10, na.rm=T)
j<-density(narrowNdist$bio10, na.rm=T)
k<-density(narrowSdist$bio10, na.rm=T)
# --- Quick look: overlay the three group densities on one plot ---
plot(i, ylim=c(0,0.6))
polygon(i, col=alpha("black",0.3), border= "black")
polygon(j, col=alpha("white",0.3), border= "black")
polygon(k, col=alpha("white",0.3), border= "black")
# Rescale each density to histogram counts (counts/density from the first
# bin). NOTE(review): assumes the first histogram bin is non-empty.
widehist <- hist(widedist$bio10)
widemultiplier <- widehist$counts / widehist$density
widedensity <- density(widedist$bio10, na.rm=T)
widedensity$y <- widedensity$y * widemultiplier[1]
narrowNhist <- hist(narrowNdist$bio10)
narrowNmultiplier <- narrowNhist$counts / narrowNhist$density
narrowNdensity <- density(narrowNdist$bio10, na.rm=T)
narrowNdensity$y <- narrowNdensity$y * narrowNmultiplier[1]
narrowShist <- hist(narrowSdist$bio10)
narrowSmultiplier <- narrowShist$counts / narrowShist$density
narrowSdensity <- density(narrowSdist$bio10, na.rm=T)
narrowSdensity$y <- narrowSdensity$y * narrowSmultiplier[1]
# New device; white-bordered histogram only sets up the coordinate system.
windows(width=5, height=5)
par(mar=c(5,0,0,0))
plot(widehist, border="white", ylim=c(0,800), xlim=c(13,33), xlab="",
ylab="", main="",axes=F)
axis(1, at=seq(14,32,1), labels=F, line=-0.5)
axis(1, at=seq(15,32,5), line=-0.5)
#axis(2, at=seq(0,1000,200), line=-1.5)
mtext(text=expression(Mean~temperature~of~warmest~quarter~(degree*C)),side=1,outer=T,line=-3,cex=1)
# Wide group shaded grey; both narrow groups white.
polygon(widedensity, col=alpha("black",0.4), border= "black")
polygon(narrowNdensity, col=alpha("white",0.5), border= "black")
polygon(narrowSdensity, col=alpha("white",0.5), border= "black")
# Arrows marking the two highlighted temperature intervals.
arrows(18.85448,30,22.29183,30,code=2, lwd=2, length=0.1, angle=20)
arrows(27.45121,30,30.93521,30,code=2, lwd=2, length=0.1, angle=20)
legend(27.5,800,legend=c("Wide","Narrow"), fill=c(alpha("black",0.4),alpha("white",0.5)),border= "black")
#pick random subsample and see if it makes the figure prettier
# NOTE(review): no set.seed() before sample(), so this figure is not
# reproducible; the assignments overwrite the full occurrence data frames.
narrowSdist<- narrowSdist[sample(1:nrow(narrowSdist),1000,replace=F),]
narrowNdist<- narrowNdist[sample(1:nrow(narrowNdist),1000,replace=F),]
widedist<- widedist[sample(1:nrow(widedist),4000,replace=F),]
# Re-derive the count-scaled densities from the subsampled data.
widehist <- hist(widedist$bio10)
widemultiplier <- widehist$counts / widehist$density
widedensity <- density(widedist$bio10, na.rm=T)
widedensity$y <- widedensity$y * widemultiplier[1]
narrowNhist <- hist(narrowNdist$bio10)
narrowNmultiplier <- narrowNhist$counts / narrowNhist$density
narrowNdensity <- density(narrowNdist$bio10, na.rm=T)
narrowNdensity$y <- narrowNdensity$y * narrowNmultiplier[1]
narrowShist <- hist(narrowSdist$bio10)
narrowSmultiplier <- narrowShist$counts / narrowShist$density
narrowSdensity <- density(narrowSdist$bio10, na.rm=T)
narrowSdensity$y <- narrowSdensity$y * narrowSmultiplier[1]
# Colour-coded figure on a fresh device (legend drawn below).
windows(width=6, height=6)
par(mar=c(5,3,1,0))
plot(widehist, border="white", ylim=c(0,800), xlim=c(13,33), xlab="",
ylab="", main="",axes=F,line=1)
axis(1, at=seq(14,32,1), labels=F, line=-0.5)
axis(1, at=seq(15,32,5), line=-0.5)
axis(2, at=seq(0,800,200),line=-1.5)
mtext(text=expression(Mean~temperature~of~warmest~quarter~(degree*C)),side=1,outer=T,line=-3,cex=1.3)
polygon(widedensity, col=alpha("white",0.4), border= "black", lwd=2)
polygon(narrowNdensity, col=alpha("red",0.5), border= "black")
polygon(narrowSdensity, col=alpha("dodgerblue",0.5), border= "black")
mtext("Frequency", side=2, line=1.3, cex=1.5)
arrows(18.85448,30,22.29183,30,code=2, lwd=2.5, length=0.15, angle=20)
arrows(27.45121,30,30.93521,30,code=2, lwd=2.5, length=0.15, angle=20)
legend("topright",legend=c("2 Wide","3 Temperate Narrow", "3 Tropical Narrow"),
fill=c(alpha("white",0.4),alpha("dodgerblue",0.5),alpha("red",0.5)),
border= "black", bty="n", cex=1.2)
# --- Empty axes-only plot of the narrowN density (no polygons drawn on
# it before the data below are recomputed; looks like an abandoned start
# of a figure -- NOTE(review): confirm whether this device is still needed).
windows(width=5, height=5)
par(mar=c(5,3,1,0))
plot(narrowNdensity, border="white", ylim=c(0,800), xlim=c(13,33), xlab="",
ylab="", main="",axes=F,line=1)
axis(1, at=seq(14,32,1), labels=F, line=-0.5)
axis(1, at=seq(15,32,5), line=-0.5)
axis(2, at=seq(0,800,200),line=-1.5)
mtext(text=expression(Mean~temperature~of~warmest~quarter~(degree*C)),side=1,outer=T,line=-3,cex=1)
# --- Same count-scaled density figure, but for bio1 (mean annual
# temperature, per the axis label below) instead of bio10 ---
widehist <- hist(widedist$bio1)
widemultiplier <- widehist$counts / widehist$density
widedensity <- density(widedist$bio1, na.rm=T)
widedensity$y <- widedensity$y * widemultiplier[1]
narrowNhist <- hist(narrowNdist$bio1)
narrowNmultiplier <- narrowNhist$counts / narrowNhist$density
narrowNdensity <- density(narrowNdist$bio1, na.rm=T)
narrowNdensity$y <- narrowNdensity$y * narrowNmultiplier[1]
narrowShist <- hist(narrowSdist$bio1)
narrowSmultiplier <- narrowShist$counts / narrowShist$density
narrowSdensity <- density(narrowSdist$bio1, na.rm=T)
narrowSdensity$y <- narrowSdensity$y * narrowSmultiplier[1]
windows(width=5, height=5)
par(mar=c(5,3,1,0))
plot(widehist, border="white", ylim=c(0,800), xlim=c(5,30), xlab="",
ylab="", main="",axes=F,line=1)
axis(1, at=seq(5,30,1), labels=F, line=-0.5)
axis(1, at=seq(5,30,5), line=-0.5)
axis(2, at=seq(0,800,200),line=-1.5)
mtext(text=expression(Mean~annual~temperature~(degree*C)),side=1,outer=T,line=-3,cex=1)
polygon(widedensity, col=alpha("white",0.4), border= "black")
polygon(narrowNdensity, col=alpha("red",0.5), border= "black")
polygon(narrowSdensity, col=alpha("dodgerblue",0.5), border= "black")
# NOTE(review): "occurrance" is a typo in this axis label ("occurrence");
# also the legend fills (black/white) do not match the polygon colours
# drawn above (white/red/dodgerblue) -- confirm intended.
mtext("Frequency of occurrance", side=2, line=1.5)
arrows(18.85448,30,22.29183,30,code=2, lwd=2.5, length=0.15, angle=20)
arrows(27.45121,30,30.93521,30,code=2, lwd=2.5, length=0.15, angle=20)
legend(24,700,legend=c("Wide","Narrow"), fill=c(alpha("black",0.4),alpha("white",0.5)),border= "black", bty="n", cex=1.5)
# --- E. botryoides only: bio1 density now and shifted +3.5 C ("future") ---
bothist <- hist(botdist$bio1)
botmultiplier <- bothist$counts / bothist$density
botdensity <- density(botdist$bio1, na.rm=T)
botdensity$y <- botdensity$y * botmultiplier[1]
# Future scenario: uniform +3.5 C warming applied to every occurrence.
botdist$bio1future<- botdist$bio1 +3.5
bothist2 <- hist(botdist$bio1future)
botmultiplier2 <- bothist2$counts / bothist2$density
botdensity2 <- density(botdist$bio1future, na.rm=T)
botdensity2$y <- botdensity2$y * botmultiplier2[1]
windows(width=5, height=5)
par(mar=c(5,3,2,1))
plot(bothist, border="white", ylim=c(0,1000), xlim=c(10,25), xlab="",
ylab="", main="",axes=F,line=1)
#polygon(botdensity2, col=alpha("red",0.5), border= "black")
axis(1, at=seq(10,25,1), labels=F, line=-0.5)
axis(1, at=seq(10,25,5), line=-0.5)
axis(2, at=seq(0,1000,500),line=-1.5)
#arrows(15.5,250,19,250,code=2, lwd=2.5, length=0.15, angle=20)
mtext(text=expression(Mean~Annual~Temperature~(degree*C)),side=1,outer=T,line=-3,cex=1.2)
#mtext(text=expression(italic("E. botryoides")), side=3, line=-3,adj=0.2,cex=1)
polygon(botdensity, col=alpha("white",0.4), border= "black")
mtext("Frequency of occurrence", side=2, line=1.5, cex=1.2)
# --- Map of E. botryoides occurrences over a climate raster ---
# NOTE(review): biodat.oz1 is a raster created earlier in the file
# (divided by 10, so presumably stored as temperature*10) -- confirm.
windows(3,5)
par(mfrow=c(1,1), oma=c(1,1,1,0))
plot(biodat.oz1/10,main="",cex.main=1.5 ,font.main=3,xlim=c(141,155), ylim=c(-44,-11),legend=F, col= "grey")
points(x=botdist$Longitude...processed,y=botdist$Latitude...processed,col=alpha("red",0.3), bg=alpha("red",0.3),cex=1,pch=21)
mtext("Latitude", side=2, outer=F, line=2.5)
mtext("Longitude", side=1, outer=F, line=2.5)
# #_______________________________
# #Different way to plot data on a map
# library(maps)
# library(mapdata)
#
# etdist<- subset(dist, Species...matched == "Eucalyptus tereticornis" & Longitude...processed > 141.00 & Latitude...processed < -11)
# ecdist<- subset(dist, Species...matched == "Eucalyptus camaldulensis" & Longitude...processed > 141.00& Latitude...processed < -11)
# botdist<- subset(dist, Species...matched == "Eucalyptus botryoides" & Longitude...processed > 144.00& Latitude...processed < -11)
# smdist<- subset(dist, Species...matched == "Eucalyptus smithii" & Longitude...processed > 141.00& Latitude...processed < -11)
#
# #one remarkable outlier record of botryoides - remove
# botdist<- subset(botdist,Latitude...processed < -25)
#
# coordinates(etdist) <- c("Longitude...processed","Latitude...processed")
# coordinates(ecdist) <- c("Longitude...processed","Latitude...processed")
# coordinates(botdist) <- c("Longitude...processed","Latitude...processed")
# coordinates(smdist) <- c("Longitude...processed","Latitude...processed")
#
# projection(etdist) <- CRS('+proj=longlat')
# projection(ecdist) <- CRS('+proj=longlat')
# projection(botdist) <- CRS('+proj=longlat')
# projection(smdist) <- CRS('+proj=longlat')
#
# et <- circles(etdist, d=50000, lonlat=TRUE)
# ec <- circles(ecdist, d=50000, lonlat=TRUE)
# bt <- circles(botdist, d=50000, lonlat=TRUE)
# sm<- circles(smdist, d=50000, lonlat=TRUE)
#
# etpol <- gUnaryUnion(et@polygons)
# ecpol <- gUnaryUnion(ec@polygons)
# botpol <- gUnaryUnion(bt@polygons)
# smpol <- gUnaryUnion(sm@polygons)
#
# windows(10,10);par(mfrow=c(1,2), mar=c(2,0,2,0),oma=c(2,5,2,2))
# map("worldHires","Australia",xlim=c(141.00, 154.00), col="gray90", fill=T)
# plot(ecpol,add=T,col=alpha("#E0B55A", 0.6), border=F)
# plot(etpol,add=T,col=alpha("#595BDE",0.6), border=F)
#
# points(x=subset(cam, Code=="ACAM")$lon,y=subset(cam, Code=="ACAM")$lat,
# col="black", bg=alpha("#E0B55A",0.6),cex=2,pch=21,lwd=2)
# points(x=subset(ter, Code=="BTER")$lon,y=subset(ter, Code=="BTER")$lat,
# col="black", bg=alpha("#595BDE",0.6),cex=2,pch=21,lwd=2)
# box()
# map("worldHires","Australia",xlim=c(141.00, 154.00), col="gray90", fill=T)
#
# plot(botpol,add=T,col=alpha("#C74444",0.6), border=F)
# plot(smpol,add=T,col=alpha("#8541B5",0.6), border=F)
# points(x=smi$lon,y=smi$lat,col="black", bg=alpha("#C74444",0.6),cex=2,pch=21,lwd=2)
# points(x=bot$lon,y=bot$lat,col="black", bg=alpha("#8541B5",0.6),cex=2,pch=21,lwd=2)
# box()
# mtext("Latitude", side=2, outer=T, line=2.5)
# mtext("Longitude", side=1, outer=T, line=1)
# legend(0,2, c("E. camaldulensis","E.tereticornis"),
# col=c(alpha("#E0B55A",0.6),alpha("#595BDE",0.6)))
#
#
#
#
# require(spatialEco)
# require(sp)
# data(meuse)
# coordinates(meuse) = ~x+y
# sr1=Polygons(list(Polygon(cbind(c(180114, 180553, 181127, 181477, 181294, 181007, 180409,
# 180162, 180114), c(332349, 332057, 332342, 333250, 333558, 333676,
# 332618, 332413, 332349)))),'1')
# sr2=Polygons(list(Polygon(cbind(c(180042, 180545, 180553, 180314, 179955, 179142, 179437,
# 179524, 179979, 180042), c(332373, 332026, 331426, 330889, 330683,
# 331133, 331623, 332152, 332357, 332373)))),'2')
# sr3=Polygons(list(Polygon(cbind(c(179110, 179907, 180433, 180712, 180752, 180329, 179875,
# 179668, 179572, 179269, 178879, 178600, 178544, 179046, 179110),
# c(331086, 330620, 330494, 330265, 330075, 330233, 330336, 330004,
# 329783, 329665, 329720, 329933, 330478, 331062, 331086)))),'3')
# sr4=Polygons(list(Polygon(cbind(c(180304, 180403,179632,179420,180304),
# c(332791, 333204, 333635, 333058, 332791)))),'4')
# sr=SpatialPolygons(list(sr1,sr2,sr3,sr4))
# srdf=SpatialPolygonsDataFrame(sr, data.frame(row.names=c('1','2','3','4'), PIDS=1:4, y=runif(4)))
# head(srdf@data) # polygons
# head(meuse@data) # points
# plot(srdf)
# points(meuse, pch=20)
# pts.poly <- point.in.poly(meuse, srdf)
# head(pts.poly@data)
# srdf@data$poly.ids <- 1:nrow(srdf)
# # Number of points in each polygon
# tapply(pts.poly@data$lead, pts.poly@data$PIDS, FUN=length)
# # Mean lead in each polygon
# tapply(pts.poly@data$lead, pts.poly@data$PIDS, FUN=mean)
#
# m1<-over(srdf,meuse)
|
c757d2b764b17a54115c9b2d2a1929d971e06015
|
8ea8dd82beb390c5ae59d32acaf854067e2f310a
|
/test/workerHotspotsFuncs.R
|
5e806d3feb81721c31573954cca520bf58f9ba7f
|
[
"MIT"
] |
permissive
|
hongooi73/AzureDSVM
|
91d9f69e8ad30f8d589f49f734422a5d8496e319
|
3553b5581dd640513a37101bb71a8170498f1809
|
refs/heads/master
| 2021-07-06T12:24:35.772109
| 2017-10-02T17:00:31
| 2017-10-02T17:00:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
workerHotspotsFuncs.R
|
# do preparation.
# -------------------------------------------------------------------------
# Split data into training and testing sets.
# -------------------------------------------------------------------------
# Randomly partition a data set into training and testing portions.
#
# Args:
#   data:  an input data source accepted by RevoScaleR's rxSplit().
#   ratio: fraction of rows assigned to the "train" split; the remaining
#          (1 - ratio) fraction goes to "test".
#
# Returns: the result of rxSplit() -- one split per level of the generated
# "splitVar" factor (train and test), written under a tempfile() base path.
#
# NOTE(review): rxSplit() evaluates `transforms` in its own compute
# context, so the objects it references must be supplied under the exact
# names `data_part` and `data_factor` via `transformObjects`.
dataSplit <- function(data, ratio) {
  split_probs <- c(train = ratio, test = 1 - ratio)
  rxSplit(
    data,
    outFilesBase = tempfile(),
    splitByFactor = "splitVar",
    # Each row draws its partition label with probabilities `data_part`.
    transforms = list(
      splitVar = sample(data_factor,
                        size = .rxNumRows,
                        replace = TRUE,
                        prob = data_part)
    ),
    transformObjects = list(
      data_part = split_probs,
      data_factor = factor(names(split_probs), levels = names(split_probs))
    )
  )
}
|
37b7843b7c4e7ab10bfb84f5b0c9f977316664b8
|
1c2152f3c88d4e2059729ceaaf72b88f930c4306
|
/615 midterm project.R
|
b12b4d35499d4b2ccd6dc4304f5ea1bc2478e41d
|
[] |
no_license
|
JingningYang/MA-615
|
6c73f15d96df3c5a02081a8fe064636e92109cfc
|
0053a889fbe55ac4fae99725ad7d533f3678a1fd
|
refs/heads/master
| 2020-07-28T12:14:16.279159
| 2020-06-13T19:06:56
| 2020-06-13T19:06:56
| 209,407,190
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,595
|
r
|
615 midterm project.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(psych)
library(shiny)
library(gplots)
library(ggplot2)
library(RColorBrewer)
library(ggfortify)
library(readxl)
library(cluster)
library(DT)
library(fpc)
# Define UI for application that draws a histogram
# UI for the World Values Survey exploration app: a sidebar of PCA controls
# (scree-plot checkboxes, factor-count slider, extra-plot toggles) plus a
# tabbed main panel (raw data, correlation matrix, scree plots, rotated-PCA
# summary and diagram, extra plots, conclusions).
# Fixes numerous typos/grammar errors in the user-facing text
# ("mianly" -> "mainly", "loweest" -> "lowest", "obsersation" ->
# "observation", "noice" -> "noise", "drarws" -> "draws", etc.).
ui <- fluidPage(
    headerPanel("World value dataset exploration"),
    sidebarLayout(
        sidebarPanel(
            h5("Since my goal is to reduce the number of variables (selected survey questions) in my data by
               extracting the important ones from the data, I will use PCA to do factor extraction for this dataset."),
            # Two alternative diagnostics for choosing the number of factors.
            checkboxInput(inputId = "parallel",label="Parallel Scree plot"),
            checkboxInput(inputId = "regular", label = "Regular Scree plot"),
            sliderInput("factor", "Number of Factors we can choose based on the Scree plot", value = 0, min = 0, max = 11),
            helpText("After we decide the number of factors used in PCA, we can check the output of PCA
                     and compare them with each other for the best choice that we can explain well.
                     Now, try to move the slider and select the buttons below to see how the plots change"),
            hr(),
            h4("For fun, we can plot PCA in many different ways to understand our data:"),
            # Optional extra visualisations (PCA biplot and k-means clusters).
            checkboxInput(inputId = "interesting", label = "Interesting plot"),
            checkboxInput(inputId = "int", label = "Interesting Cluster Plot"),
            helpText("Please scroll down the page and maybe wait a second for the plots to appear~")
        ),
        mainPanel(
            tabsetPanel(
                tabPanel("Raw data",DT::dataTableOutput("Raw")),
                tabPanel("Matrix table",
                         tags$h4("Matrix table that shows the correlation between selected survey questions after
                                 we delete non-informative questions, select survey questions and numeric answers of our selected questions."),
                         tableOutput("table")),
                tabPanel("Scree Plot", plotOutput("screeplot")),
                tabPanel("Summary",
                         tags$h4("To make the output of factor extraction easier to explain, we can use
                                 orthogonal rotation to decrease noise in the factors as much as possible."),
                         tags$h5("Here is the output of the orthogonally rotated PCA; we can try to understand
                                 the correlation between survey questions and components numerically"),
                         verbatimTextOutput("summary")),
                tabPanel("PCA result", plotOutput("result"), tags$h4("This is a graph that draws the relationship
                                                                     between selected questions and components well
                                                                     graphically and is easier to understand than
                                                                     interpreting it from numbers alone."),
                         tags$h4("From the result of PCA, we have a general idea of how we can explain the data
                                 using PCA, and understand the underlying relationships between those variables")),
                tabPanel("Interesting plots", tags$h3("Click the plot buttons on the lower left side"), plotOutput("intertest"), plotOutput("intt")),
                tabPanel("Conclusion", tags$h3("According to the results and the questionnaires, we can find the questions that load
                                               highly on factor 1 are V4 (Important in life: Family) with the highest loading of 0.71, and the lowest loading of 0.4 is V10 (Feeling of happiness). Factor 2
                                               is mainly explained by V64 (Most important: first choice) and V60 (Aim of country: first choice) with loadings of 0.76 and 0.75. Factor 3 is mainly explained
                                               by V11 (State of health) with 0.86, and the lowest loading of 0.64 is V10 (Feeling of happiness)."),
                         tags$h3("Based on the observations, we can summarize factor 1 as the things happy people consider important in life, label factor 2 as people's expectations of their country, and factor 3 as correlates of healthy people."))
            )
        )
    )
)
# Define server logic required to draw a histogram
# Server logic: loads the WVS spreadsheet once at startup, prepares the
# selected numeric question columns, and wires the PCA tables and plots
# to the UI controls.
#
# Changes vs. original: removed the dead reactive `pc2` (defined but never
# referenced anywhere in the app) and the redundant recomputation of `x`
# and `newdata` inside the screeplot handler -- those inner statements were
# byte-identical to the values already captured in the server closure, and
# re-applying as.numeric(as.factor(.)) to columns that are already factor
# codes 1..k is a no-op, so behaviour is unchanged.
server <- function(input, output, session) {
    # Read data, drop constant (single-valued, non-informative) columns,
    # and coerce the selected survey-question columns to numeric codes.
    data <- read_excel("F00007693-WV6_Data_United_States_2011_Excel_v20180912.xlsx")
    newdata <- data[vapply(data, function(x) length(unique(x)) > 1, logical(1L))]
    # Column indices of the survey questions analysed throughout the app.
    x <- c(1:8,57,59,61)
    newdata[x] <- apply(newdata[x],2,function(x) as.numeric(as.factor(x)))
    # Varimax-rotated PCA, recomputed whenever the factor-count slider moves.
    pc3 <- reactive({
        principal(newdata[x], nfactors = input$factor, rotate = "varimax")
    })
    # Raw data table.
    output$Raw <- DT::renderDataTable({
        data
    })
    # Correlation matrix of the selected questions.
    output$table <- renderTable({
        cor(newdata[x])
    })
    # Scree plot(s): parallel analysis and/or the regular eigenvalue plot.
    # If both boxes are ticked, the regular plot is drawn last and is the
    # one displayed (same as the original behaviour).
    output$screeplot <- renderPlot({
        if(input$parallel) {
            fa.parallel(newdata[x], fm='minres', fa='fa', main = "Scree Plot")
        }
        if(input$regular){
            pc <- principal(newdata[x], nfactors = 11, rotate="none")
            plot(pc$values, type="b", main = "Scree Plot")
        }
    })
    # Numeric summary of the rotated PCA (loadings below 0.3 suppressed,
    # variables sorted by loading).
    output$summary <- renderPrint({
        print.psych(pc3(), cut=0.3, sort = TRUE)
    })
    # Factor diagram of the rotated PCA.
    output$result <- renderPlot({
        fa.diagram(pc3(),simple=TRUE)
    })
    # Optional extra visualisations.
    output$intertest <- renderPlot({
        if(input$interesting){
            autoplot(prcomp(newdata[x]), scale=0)
        }
    })
    # NOTE(review): kmeans() is run without set.seed(), so the cluster plot
    # can differ between renders.
    output$intt <- renderPlot({
        if(input$int){
            clus <- kmeans(newdata[x], centers = 3)
            plotcluster(newdata[x], clus$cluster)
            clusplot(newdata[x], clus$cluster, color = TRUE, shade = TRUE, labels = 3, lines = 0)
        }
    })
}
# Run the application
shinyApp(ui = ui, server = server)
|
cba5d3ade8c3b5b910f4e80cef374db2d9e62cf0
|
41e24c9c473277d0f671ddb597958dad68063786
|
/plot2.R
|
ff61598b11241b00b4bf266ebaf340894f9b3a12
|
[] |
no_license
|
divyadixit/ExData_Plotting1
|
c7be8d8f0ea6bd7e0d8302337cd1d44554ee2796
|
87a778a182f315fe7455eb608222f052a5a4a53f
|
refs/heads/master
| 2021-01-18T13:31:33.300159
| 2015-11-08T20:05:13
| 2015-11-08T20:05:13
| 45,796,370
| 0
| 0
| null | 2015-11-08T19:58:14
| 2015-11-08T19:58:14
| null |
UTF-8
|
R
| false
| false
| 1,566
|
r
|
plot2.R
|
## plot2.R — line chart of Global Active Power for 1-2 Feb 2007, saved to PNG.
library(lubridate)

# Load the full household power consumption dataset.
# All columns are read as character because the file marks missing values with "?".
housing_powercomp <- read.table(file = "./household_pc/household_power_consumption.txt", header = TRUE, sep = ";",
colClasses = "character", na.strings = "?")

# Keep only the observations for 1 Feb 2007 and 2 Feb 2007.
# FIX: the original subset used hard-coded row numbers (66637:69516), which
# silently breaks if the source file ever changes; filtering on the Date
# column selects the same two days robustly.
powercompfeb_data <- housing_powercomp[housing_powercomp$Date %in% c("1/2/2007", "2/2/2007"), ]
rm("housing_powercomp") # removing the bigger dataset in order to conserve memory

## Convert every measurement column from character to numeric in one pass
## (replaces seven near-identical as.double() assignments).
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
"Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
powercompfeb_data[num_cols] <- lapply(powercompfeb_data[num_cols], as.double)

# Combine date and time columns into a single POSIXct value, then drop Time.
powercompfeb_data$Date <- dmy_hms(paste(powercompfeb_data$Date, powercompfeb_data$Time, sep = " "))
powercompfeb_data <- powercompfeb_data[,-2] ## eliminating the time column

## Creation of Plot 2 in the line chart form:
png(file = "plot2.png")
plot(x = powercompfeb_data$Date, y = powercompfeb_data$Global_active_power, type = "l",
xlab = NA, ylab = "Global Active Power (kilowatts)")
dev.off()
|
f994ef21895aee94e928e3c4d9f904931ea95ec2
|
63fe30f175c8294963a3f2fb6c2e6a0a6a18f61c
|
/man/get_meetup_events.Rd
|
52e22d484f7da50546f474c781d4c3855b9317e9
|
[] |
no_license
|
cran/meetupapi
|
d4d9a94a68d0f7067fb775c3f78f38ec57ad2a4d
|
c4febe7c384a9879d7f19dbf925418b6b914660d
|
refs/heads/master
| 2021-04-28T05:39:09.889787
| 2018-02-20T09:04:28
| 2018-02-20T09:04:28
| 122,181,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,036
|
rd
|
get_meetup_events.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/meetups.R
\name{get_meetup_events}
\alias{get_meetup_events}
\title{Get Meetup Events}
\usage{
get_meetup_events(urlname, key, fields = c("status", "id", "name"), ...)
}
\arguments{
\item{urlname}{string, URL name for the meetup. e.g 'R-Users-Sydney'}
\item{key}{an API key from https://www.meetup.com/meetup_api/}
\item{fields}{a character vector of the fields to return}
\item{...}{a named list where each element is a character vector for additional
parameters e.g. `list("omit" = c("member.photo", "member.event_context")`}
}
\value{
data.frame of meetup events for a meetup.
}
\description{
This function retrieves all meetup events for a meetup.
This is forced to be ordered in descending order and show both upcoming and
past events, therefore 'status' and 'desc' should not be passed as named
arguments to the `...` (dots) argument.
}
\examples{
\dontrun{
get_meetup_events("R-Users-Sydney", "your_api_key")
}
}
|
e358bfa438271f5cdd57a2e5bf73fd4e76fec392
|
c64081a2c69a1b320d6d56e48c973a62935ab895
|
/man/maxjobs.mclapply.Rd
|
f9a2eddf55a32ec3994af690cf06abddd57fd79d
|
[] |
no_license
|
PetrTsurinov/PeakSegPipeline
|
a1c04cdb88dac81d789305becc1eec25634495ff
|
e6c24caef1245e7d6769cee96841b659e5bd8dab
|
refs/heads/master
| 2021-09-11T15:37:06.410226
| 2018-04-09T11:13:34
| 2018-04-09T11:13:34
| 114,019,554
| 0
| 0
| null | 2017-12-12T17:30:26
| 2017-12-12T17:30:26
| null |
UTF-8
|
R
| false
| false
| 537
|
rd
|
maxjobs.mclapply.Rd
|
\name{maxjobs.mclapply}
\alias{maxjobs.mclapply}
\title{maxjobs mclapply}
\description{Run mclapply inside of a for loop, ensuring that it never receives
a first argument with a length more than maxjobs. This avoids some
memory problems (swapping, or getting jobs killed on the cluster)
when using mclapply(1:N, FUN) where N is large.}
\usage{maxjobs.mclapply(X, FUN, maxjobs = getOption("mc.cores",
1L))}
\arguments{
\item{X}{
}
\item{FUN}{
}
\item{maxjobs}{
}
}
\author{Toby Dylan Hocking}
|
eeaafcbfce6d785d6e8b15567c703aa6538d1a65
|
77de99ed2d5c88a17e134ce094fbc9d20e6edb82
|
/analysis/2016-02-12/normalizedDrugAnalysis.R
|
a564bea3cde7a2608fa7783550a017bb68685a78
|
[] |
no_license
|
sgosline/pnfCellLines
|
e1c3e2b951ebe6cd941aab8902fefcfce9aa0d93
|
e6574317a77ee5b8e44fe9659c2dc2027ac65937
|
refs/heads/master
| 2020-04-12T08:04:10.935912
| 2019-05-30T22:58:54
| 2019-05-30T22:58:54
| 59,235,401
| 0
| 2
| null | 2018-07-12T21:52:57
| 2016-05-19T19:23:46
|
HTML
|
UTF-8
|
R
| false
| false
| 1,632
|
r
|
normalizedDrugAnalysis.R
|
## Normalized drug-screen analysis: compares normalization behaviour of
## original vs rescored AUC matrices from the NCATS and CTRP single-agent
## screens, sweeping the alpha parameter by drug (rows) and by cell (columns).
## NOTE(review): all functions below (getValueForAllCells, acast,
## testNormalizationParameters, ...) come from the sourced project scripts —
## this file is not runnable standalone.
##double checking these scripts to ensure they map back to the BROAD data
source("../../bin/singleDrugAnalysis.R")
source("../../bin/ctrpSingleAgentScreens.R")
source("../../bin/ncatsSingleAgentScreens.R")
#plotMostVariableAUCs()
##first get original auc vals
orig.ncats<-getValueForAllCells("FAUC")
orig.ctrp<-getCtrpScreensAsMatrix()
#rescored.ncats<-getRecalculatedAUCMatrix()
# Rescored AUCs reshaped to Drug x Cell matrices; duplicates averaged.
rescored.ncats<-acast(getRecalculatedAUCTab(),Drug~Cell,value.var="AUC",fun.aggregate = mean)
rescored.ctrp<-acast(ctrpDoseResponseCurve(FALSE),Drug~Cell,value.var="AUC",fun.aggregate = mean)
##now test normalization on rows and columns
# byCol=FALSE normalizes across drugs (rows); byCol=TRUE across cells (columns).
testNormalizationParameters(orig.ncats,byCol=FALSE,alphas=c(1,5,10,100),prefix='ncatsDrug')
testNormalizationParameters(orig.ncats,byCol=TRUE,alphas=c(1,5,10,100),prefix='ncatsCell')
testNormalizationParameters(rescored.ncats,byCol=FALSE,alphas=c(1,5,10,100),prefix='rescoredNcatsDrug')
testNormalizationParameters(rescored.ncats,byCol=TRUE,alphas=c(1,5,10,100),prefix='rescoredNcatsCell')
testNormalizationParameters(orig.ctrp,byCol=FALSE,alphas=c(1,5,10,100),prefix='ctrpDrug')
testNormalizationParameters(orig.ctrp,byCol=TRUE,alphas=c(1,5,10,100),prefix='ctrpCell')
testNormalizationParameters(rescored.ctrp,byCol=FALSE,alphas=c(1,5,10,100),prefix='rescoredCtrpDrug')
testNormalizationParameters(rescored.ctrp,byCol=TRUE,alphas=c(1,5,10,100),prefix='rescoredCtrpCell')
##now add in more cell parameters
# Finer alpha sweep for the by-cell normalizations only.
testNormalizationParameters(orig.ncats,byCol=TRUE,alphas=c(20,40,60,80),prefix='ncatsCell')
testNormalizationParameters(rescored.ncats,byCol=TRUE,alphas=c(20,40,60,80),prefix='rescoredNcatsCell')
|
6031501c8d8355c01ffad30eee11f804536d4477
|
661d4d3f14e14b699c697efc8db05a220ed40eb9
|
/mosaicApps/mosaicManipShiny/mLM/ui.R
|
2c7ddd6ea21157856b91180519ff89e85207a76a
|
[] |
no_license
|
dtkaplan/MOSAIC-Summer-2015
|
60518bb08edb3c7165ddb5e74104ccdfdb1c0225
|
2f97827b9e09fccc7cc5679888fe3000d71fe1cc
|
refs/heads/master
| 2021-01-23T13:31:24.897643
| 2015-11-17T22:39:39
| 2015-11-17T22:39:39
| 35,576,261
| 0
| 1
| null | 2015-06-02T21:24:26
| 2015-05-13T21:58:29
|
R
|
UTF-8
|
R
| false
| false
| 857
|
r
|
ui.R
|
# UI definition for a Shiny app that fits a user-specified linear model to a
# chosen dataset (Galton or KidsFeet) and plots the result.
# NOTE(review): the user-facing strings contain typos ("selet", "varibale");
# they are left untouched here because changing them changes app output.
shinyUI(fluidPage(
titlePanel("Visualizing a Linear Model"),
p("An interactive app that allows you to fit linear model by entering an expression.
Note that while the expression can take several x variables simultaneously,
the graph only models one x varibale at a time"),
sidebarLayout(position = "right",
sidebarPanel(
# Dataset picker; server side is expected to load the matching data frame.
selectInput("data", "Please selet a dataset", choices = list("Galton", "KidsFeet")),
# NOTE(review): choices are placeholders (x, y); presumably updated
# server-side to the chosen dataset's columns — confirm in server.R.
selectInput("var_choices", "These are the variables available to model", choices = list(x = "x", y = "y")),
textInput("expr", "Please enter the expression for model"),
actionButton("plot", label = "Make a plot")
),
mainPanel(
plotOutput("graph")
)
)
))
|
9fd5af9e8a8448e52f30228ec63b6db02191e6b8
|
1ba83608457f6f31b61e40441001849c38c4fb1d
|
/PCA/PCA.compare.R
|
6d8538f0290ba8cab64052a89f9387396f2836ae
|
[] |
no_license
|
hungying/AS-GWAS
|
22d0a96bc114d4f3f469af21f20d0a23babcfaa0
|
3648535112ffe0a450b4e7c61920c2c8e2f3abd7
|
refs/heads/master
| 2020-03-22T03:48:58.150653
| 2018-07-03T01:56:01
| 2018-07-03T01:56:01
| 139,454,065
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,330
|
r
|
PCA.compare.R
|
## Compare PCA results between SNP genotypes and AS (alternative splicing):
## two side-by-side panels, with individuals coloured by population structure.
par(mfrow=c(1,2))
AS_PCA <- read.table("/mnt/01/hungying/cassy.folder/AS_GWAS/AS_GWAS_PCA.txt", head=TRUE)
#AS_PCA <- read.table("/mnt/01/hungying/cassy.folder/AS_GWAS/AS_all/GAPIT.PCA.csv", head=TRUE)
SNP_PCA <- read.table("/mnt/01/hungying/cassy.folder/SAM_GAPIT_CMLM1/GAPIT.PCA.csv",header =T, sep=",") #Using SNP as PCA source
line_info <- read.csv("/mnt/01/hungying/AS_GWAS/line_info.csv",header =T, sep="," )
# Attach population-structure labels to each line in both PCA tables.
AS_table <- merge(AS_PCA, line_info, by.x="X.Trait.", by.y="Inbred.line", all.x=T, all.y=F)
SNP_table <- merge(SNP_PCA[,c(1,2,3)], line_info, by.x="taxa", by.y="Inbred.line", all.x=T, all.y=F)
# ---- Left panel: PCA computed from SNPs ----
plot(SNP_table$PC1[SNP_table$Pop.structure=="unclassified"], SNP_table$PC2[SNP_table$Pop.structure=="unclassified"], col="gray", pch=20, cex=0.8,
xlim=c(-250,600), ylim=c(-500, 500),xlab="PC1 (5.9%)",ylab="PC2 (3.3%)", main="PCA_SNP")
points(SNP_table$PC1[SNP_table$Pop.structure=="stiff stalk"], SNP_table$PC2[SNP_table$Pop.structure=="stiff stalk"], col="red", pch=20, cex=0.8)
points(SNP_table$PC1[SNP_table$Pop.structure=="non-stiff stalk"], SNP_table$PC2[SNP_table$Pop.structure=="non-stiff stalk"], col="blue", pch=20, cex=0.8)
points(SNP_table$PC1[SNP_table$Pop.structure=="tropical"], SNP_table$PC2[SNP_table$Pop.structure=="tropical"], col="purple", pch=20, cex=0.8)
points(SNP_table$PC1[SNP_table$Pop.structure=="sweet corn"], SNP_table$PC2[SNP_table$Pop.structure=="sweet corn"], col="yellow", pch=20, cex=0.8)
points(SNP_table$PC1[SNP_table$Pop.structure=="popcorn"], SNP_table$PC2[SNP_table$Pop.structure=="popcorn"], col="green", pch=20, cex=0.8)
# BUG FIX: landrace was drawn in yellow here, clashing with sweet corn and
# disagreeing with the AS panel below; both panels now use brown for landrace.
points(SNP_table$PC1[SNP_table$Pop.structure=="landrace"], SNP_table$PC2[SNP_table$Pop.structure=="landrace"], col="brown", pch=20, cex=0.8)
# NOTE(review): the legend omits "sweet corn" and "landrace" in both panels.
legend(par("usr")[2]*0.95, par("usr")[3]*0.95, xjust=1, yjust=0, legend=c("unclassified", "stiff stalk", "non-stiff stalk ", "tropical", "popcorn"),
col=c("gray","red","blue", "purple", "green"), bg="white", pch=20, cex=0.6)
# ---- Right panel: PCA computed from AS ----
plot(AS_table$PC1[AS_table$Pop.structure=="unclassified"], AS_table$PC2[AS_table$Pop.structure=="unclassified"], col="gray", pch=20, cex=0.8,
xlim=c(-150,110), ylim=c(-100, 100), xlab="PC1 (5.4%)",ylab="PC2 (2.4%)", main="PCA_AS")
points(AS_table$PC1[AS_table$Pop.structure=="stiff stalk"], AS_table$PC2[AS_table$Pop.structure=="stiff stalk"], col="red", pch=20, cex=0.8)
points(AS_table$PC1[AS_table$Pop.structure=="non-stiff stalk"], AS_table$PC2[AS_table$Pop.structure=="non-stiff stalk"], col="blue", pch=20, cex=0.8)
points(AS_table$PC1[AS_table$Pop.structure=="tropical"], AS_table$PC2[AS_table$Pop.structure=="tropical"], col="purple", pch=20, cex=0.8)
points(AS_table$PC1[AS_table$Pop.structure=="sweet corn"], AS_table$PC2[AS_table$Pop.structure=="sweet corn"], col="yellow", pch=20, cex=0.8)
points(AS_table$PC1[AS_table$Pop.structure=="popcorn"], AS_table$PC2[AS_table$Pop.structure=="popcorn"], col="green", pch=20, cex=0.8)
points(AS_table$PC1[AS_table$Pop.structure=="landrace"], AS_table$PC2[AS_table$Pop.structure=="landrace"], col="brown", pch=20, cex=0.8)
legend(par("usr")[2]*0.95, par("usr")[3]*0.95, xjust=1, yjust=0, legend=c("unclassified", "stiff stalk", "non-stiff stalk ", "tropical", "popcorn"),
col=c("gray","red","blue", "purple", "green"), bg="white", pch=20, cex=0.6)
|
6ab6603bb74cc79d7cf86ac0cbd0e517af6b7799
|
4d3fcaa53d54cca891f036339d01eb1582ec4072
|
/workflow/scripts/build_bedgraphs_from_programs_blood.R
|
11116204b1ad7c594bbde351f663109f0fc69a8e
|
[] |
no_license
|
kkdey/CREGENE_GWAS_benchmark
|
04f71ef5a50224aac2d0d63760c25558bb31e904
|
ea07e2576c7c58c36e0a9fe2eecb6fa010b5477e
|
refs/heads/main
| 2023-07-04T12:59:40.952311
| 2021-08-09T10:45:59
| 2021-08-09T10:45:59
| 393,850,478
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,998
|
r
|
build_bedgraphs_from_programs_blood.R
|
## Generate bedgraph files from a gene score file and CRE-gene links files in blood
# Redirect all output, messages, warnings and errors to the snakemake log file.
log <- file(snakemake@log[[1]], open = "wt")
sink(log)
sink(log, type = "message")
library(data.table)
library(R.utils)
# Source the helper functions (ABC_blood_calc, ChromHMM_blood_calc,
# EpiMap_blood_calc) that live next to this script.
merge_functions_file <- file.path(snakemake@scriptdir, "ALL_CRE_gene_linkfiles.R")
suppressPackageStartupMessages(source(merge_functions_file))
message("Reading in the gene program directory:", snakemake@input$programdir)
# One gene-score file per program; the annotation name is the file name
# with the ".txt" suffix stripped.
ll = list.files(snakemake@input$programdir, pattern = ".txt")
annot_names = as.character(sapply(ll, function(x) return(strsplit(x, ".txt")[[1]][1])))
# seq_along() is safe when the directory is empty (1:length(ll) would be c(1, 0)).
for(numl in seq_along(ll)){
score_file = paste0(snakemake@input$programdir, "/", ll[numl])
gene_scores = read.delim(score_file, header=F)
# One output subdirectory per program.
if(!dir.exists(paste0(snakemake@output$outdir, "/", annot_names[numl]))){
dir.create(paste0(snakemake@output$outdir, "/", annot_names[numl]))
}
# Column 1 = gene identifier, column 2 = score.
scores = gene_scores[,2]
names(scores) = gene_scores[,1]
out1 = ABC_blood_calc(scores,
pred_file = snakemake@params$pred_file_ABC,
key_file = snakemake@params$key_file_ABC,
output_cell = paste0(snakemake@output$outdir, "/", annot_names[numl]),
output_bed = paste0("ABC_blood.bed"))
# BUG FIX: the ChromHMM and EpiMap calls referenced the undefined variable
# `annot_name`; they must use annot_names[numl] like the ABC call above,
# otherwise the script errors (or writes to a stale directory) at runtime.
out2 = ChromHMM_blood_calc(scores,
pred_file = snakemake@params$pred_file_ChromHMM,
key_file = snakemake@params$key_file_ChromHMM,
output_cell = paste0(snakemake@output$outdir, "/", annot_names[numl]),
output_bed = paste0("ChromHMM_blood.bed"))
out3 = EpiMap_blood_calc(scores,
pred_file = snakemake@params$pred_file_EpiMap,
key_file = snakemake@params$key_file_EpiMap,
output_cell = paste0(snakemake@output$outdir, "/", annot_names[numl]),
output_bed = paste0("EpiMap_blood.bed"))
}
|
d29b95da5e6e13cb4792491a8da562d1237ee45e
|
57feb81dfc38a8a59abad2d7af6378d48e4afa9f
|
/smdanalysis/man/readCOM.Rd
|
a51cf1333f9f1fa831e31c4279d735bb06a769a0
|
[] |
no_license
|
Danny221212/sMD-PF-M1AAP
|
a4bf27e3e6bd6a9602420cc9fb2584bec9d707fc
|
a1519c01c7a31a6a97938a7a8e2873b0a11d58d5
|
refs/heads/master
| 2020-03-26T06:26:56.730160
| 2018-08-20T21:56:55
| 2018-08-20T21:56:55
| 144,605,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 831
|
rd
|
readCOM.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readCOM.R
\name{readCOM}
\alias{readCOM}
\title{Loads multiple or a single Center Of Mass (COM) or Centroid .xyz file(s) and
Converts to 2D x and y coordinates based on a viewing matrix}
\usage{
readCOM(directory, pmat, seperator = " ", extension = ".xyz")
}
\arguments{
\item{directory}{Path containing the input .xyz file(s)}
\item{pmat}{A 4 x 4 Viewing Matrix used to transform the 3D coordinates to 2D}
\item{seperator}{How the columns are separated. Example: " " for space, "," for csv files, "\t" for tab-delimited files.}
\item{extension}{File extension of the input file(s); defaults to ".xyz".}
}
\value{
list with transformed coordinates and min/max + length of files loaded info
}
\description{
NB*** Ensure the extension of the files is .xyz, contains no header and is column ordered X, Y, Z.
}
|
cf917f0cb0a06c4684b3125d3e322b771a0487fb
|
ca500a32f0dd1cf749dbab138511f88f020c5b5c
|
/run_analysis.R
|
4fe5a7b171c4f1dc5bc5aade80071f3f821fae54
|
[] |
no_license
|
KenHo95/Getting-and-Cleaning-Data-Course-Project
|
606ec934357b84e0c85c9097cfeca728c425bbf1
|
bee8fa1625a20e1ae67fd9e61754f10a82d5bdac
|
refs/heads/master
| 2022-12-20T20:36:28.155153
| 2020-09-16T16:58:52
| 2020-09-16T16:58:52
| 293,816,507
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,617
|
r
|
run_analysis.R
|
# run_analysis.R — build a tidy summary of the UCI HAR dataset:
# merges train/test sets, keeps mean()/std() measurements, maps activity
# codes to names, and writes per-activity/per-subject averages to tidy_data.txt.
# import relevant library
library(dplyr)
repo_dir <- getwd() # working directory; the "UCI HAR Dataset" folder must sit here
# Read in train and test datasets, feature names, subject tables, and activity labels
x_train <- read.table(paste0(repo_dir, '/UCI HAR Dataset/train/X_train.txt'))
y_train <- read.table(paste0(repo_dir, '/UCI HAR Dataset/train/y_train.txt'))
x_test <- read.table(paste0(repo_dir, '/UCI HAR Dataset/test/X_test.txt'))
y_test <- read.table(paste0(repo_dir, '/UCI HAR Dataset/test/y_test.txt'))
subject_train <- read.table(paste0(repo_dir, '/UCI HAR Dataset/train/subject_train.txt'))
subject_test <- read.table(paste0(repo_dir, '/UCI HAR Dataset/test/subject_test.txt'))
features <- read.table(paste0(repo_dir, '/UCI HAR Dataset/features.txt'))
activity_label <- read.table(paste0(repo_dir, '/UCI HAR Dataset/activity_labels.txt'))
# Merge train and test datasets into one dataset.
# cbind subject identifier and activity code for grouping purposes in the final step.
train <- cbind(x_train, subject_train, y_train)
test <- cbind(x_test, subject_test, y_test)
full_df <- rbind(train, test) # merged dataset
# Appropriately label the measurement columns with the names from features.txt.
names(full_df) <- features$V2
# Extract only the mean() and std() measurements, keeping the trailing
# subject and activity columns appended above.
extract_df <- cbind(full_df[grep("mean\\(\\)|std\\(\\)", names(full_df), ignore.case=F)], full_df[ncol(full_df)-1], full_df[ncol(full_df)])
names(extract_df)[ncol(extract_df) - 1] <- 'Subject_ID'
names(extract_df)[ncol(extract_df)] <- 'ActivityName' # descriptive names for the last two unnamed columns
# Collapse the duplicated "BodyBody" token in some feature names.
# FIX: removed a stray trailing comma in the original sub(...) call, which
# passed a confusing empty argument (silently treated as a missing ignore.case).
names(extract_df) <- sub('BodyBody', 'Body', names(extract_df))
# Map activity codes to descriptive names via the activity_labels table
# (join on the numeric code in column V1).
extract_df <- merge(extract_df, activity_label, by.x = 'ActivityName', by.y = "V1", all.x = TRUE, all.y = FALSE)
extract_df <- extract_df[-c(1)] # remove the old numeric label column
names(extract_df)[ncol(extract_df)] <- 'ActivityName' # descriptive name for the joined label column
# Tidy dataset: the average of each variable for each activity and each subject.
avg_by_act_df <- extract_df %>%
group_by(ActivityName, Subject_ID) %>%
summarise_all(mean) %>%
ungroup()
write.table(avg_by_act_df, file = "tidy_data.txt", row.name=FALSE)
|
24911e32b26078812e088871d1622dbf198dee78
|
67d069e4d9d9b852e11cdc333bb0ff0ed5e6a477
|
/tests/testthat/test.R
|
674b277d5afecd926eb8eb97384062b3ae509dcc
|
[] |
no_license
|
schiffner/EMnet
|
3bf8aba9b0a9c318b8dbb7f7638f6c28be5aefd2
|
ce34e19a36637e3eb0cebe2162417e4c67904ac7
|
refs/heads/master
| 2020-12-25T14:14:02.980829
| 2016-12-08T11:06:43
| 2016-12-08T11:06:43
| 64,326,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,010
|
r
|
test.R
|
# testthat suite for the EMglmnet and EMlnet mixture-of-experts fitters.
# Every test fits a model on (a subset of) iris and checks the structure of
# the predict() output: class levels, posterior dimensions and row sums,
# gating dimensions and row sums, and per-expert prediction matrices.
# J = number of experts, K = number of classes.
context("EMglmnet")
test_that("J = 2, K = 3", {
X = as.matrix(iris[1:4])
y = iris$Species
lev3 = levels(y)
res32 = EMglmnet(y, X, lambda = 1e-03)
pred32 = predict(res32, X)
expect_equal(levels(pred32$class), lev3)
expect_equal(length(pred32$class), nrow(X))
expect_equal(ncol(pred32$posterior), 3)
expect_equal(colnames(pred32$posterior), lev3)
expect_equal(nrow(pred32$posterior), nrow(X))
# Posterior class probabilities must sum to one per observation.
expect_equal(rowSums(pred32$posterior), rep(1, nrow(X)))
# With J = 2 the gating output has a single free column.
expect_equal(ncol(pred32$gating), 1)
expect_equivalent(nrow(pred32$gating), nrow(X))
for (j in 1:2) {
expect_equal(ncol(pred32$experts[[j]]), 3)
expect_equal(nrow(pred32$experts[[j]]), nrow(X))
expect_equal(colnames(pred32$experts[[j]]), lev3)
}
})
test_that("J = 3, K = 3", {
X = as.matrix(iris[1:4])
y = iris$Species
lev3 = levels(y)
res33 = EMglmnet(y, X, lambda = 1e-03, J = 3)
pred33 = predict(res33, X)
expect_equal(levels(pred33$class), lev3)
expect_equal(length(pred33$class), nrow(X))
expect_equal(ncol(pred33$posterior), 3)
expect_equal(colnames(pred33$posterior), lev3)
expect_equal(nrow(pred33$posterior), nrow(X))
expect_equal(rowSums(pred33$posterior), rep(1, nrow(X)))
expect_equal(ncol(pred33$gating), 3)
expect_equal(nrow(pred33$gating), nrow(X))
# Gating weights across the three experts must also sum to one.
expect_equivalent(rowSums(pred33$gating), rep(1, nrow(X)))
for (j in 1:3) {
expect_equal(ncol(pred33$experts[[j]]), 3)
expect_equal(nrow(pred33$experts[[j]]), nrow(X))
expect_equal(colnames(pred33$experts[[j]]), lev3)
}
})
# Binary-classification cases use only the first two iris species (rows 1:100).
test_that("J = 2, K = 2", {
X = as.matrix(iris[1:100,1:4])
y = iris$Species
lev2 = levels(y)[1:2]
y = factor(y[1:100], levels = lev2)
res22 = EMglmnet(y, X, lambda = 1e-03)
pred22 = predict(res22, X)
expect_equal(levels(pred22$class), lev2)
expect_equal(length(pred22$class), nrow(X))
expect_equal(ncol(pred22$posterior), 2)
expect_equal(colnames(pred22$posterior), lev2)
expect_equal(nrow(pred22$posterior), nrow(X))
expect_equivalent(rowSums(pred22$posterior), rep(1, nrow(X)))
expect_equal(ncol(pred22$gating), 1)
expect_equal(nrow(pred22$gating), nrow(X))
expect_equal(ncol(pred22$experts), 2) # only one class
expect_equal(nrow(pred22$experts), nrow(X))
})
test_that("J = 3, K = 2", {
X = as.matrix(iris[1:100,1:4])
y = iris$Species
lev2 = levels(y)[1:2]
y = factor(y[1:100], levels = lev2)
res23 = EMglmnet(y, X, lambda = 1e-03, J = 3)
pred23 = predict(res23, X)
expect_equal(levels(pred23$class), lev2)
expect_equal(length(pred23$class), nrow(X))
expect_equal(ncol(pred23$posterior), 2)
expect_equal(colnames(pred23$posterior), lev2)
expect_equal(nrow(pred23$posterior), nrow(X))
expect_equivalent(rowSums(pred23$posterior), rep(1, nrow(X)))
expect_equal(ncol(pred23$gating), 3)
expect_equal(nrow(pred23$gating), nrow(X))
expect_equivalent(rowSums(pred23$gating), rep(1, nrow(X)))
expect_equal(ncol(pred23$experts), 3) # only one class
expect_equal(nrow(pred23$experts), nrow(X))
})
## =================================================================================================================================
# Same structural checks for EMlnet; the only differences are the fitting
# function and two commented-out colnames expectations on the expert matrices.
context("EMlnet")
test_that("J = 2, K = 3", {
X = as.matrix(iris[1:4])
y = iris$Species
lev3 = levels(y)
res32 = EMlnet(y, X, lambda = 1e-03)
pred32 = predict(res32, X)
expect_equal(levels(pred32$class), lev3)
expect_equal(length(pred32$class), nrow(X))
expect_equal(ncol(pred32$posterior), 3)
expect_equal(colnames(pred32$posterior), lev3)
expect_equal(nrow(pred32$posterior), nrow(X))
expect_equal(rowSums(pred32$posterior), rep(1, nrow(X)))
expect_equal(ncol(pred32$gating), 1)
expect_equivalent(nrow(pred32$gating), nrow(X))
for (j in 1:2) {
expect_equal(ncol(pred32$experts[[j]]), 3)
expect_equal(nrow(pred32$experts[[j]]), nrow(X))
# expect_equal(colnames(pred32$experts[[j]]), lev3)
}
})
test_that("J = 3, K = 3", {
X = as.matrix(iris[1:4])
y = iris$Species
lev3 = levels(y)
res33 = EMlnet(y, X, lambda = 1e-03, J = 3)
pred33 = predict(res33, X)
expect_equal(levels(pred33$class), lev3)
expect_equal(length(pred33$class), nrow(X))
expect_equal(ncol(pred33$posterior), 3)
expect_equal(colnames(pred33$posterior), lev3)
expect_equal(nrow(pred33$posterior), nrow(X))
expect_equal(rowSums(pred33$posterior), rep(1, nrow(X)))
expect_equal(ncol(pred33$gating), 3)
expect_equal(nrow(pred33$gating), nrow(X))
expect_equivalent(rowSums(pred33$gating), rep(1, nrow(X)))
for (j in 1:3) {
expect_equal(ncol(pred33$experts[[j]]), 3)
expect_equal(nrow(pred33$experts[[j]]), nrow(X))
# expect_equal(colnames(pred33$experts[[j]]), lev3)
}
})
test_that("J = 2, K = 2", {
X = as.matrix(iris[1:100,1:4])
y = iris$Species
lev2 = levels(y)[1:2]
y = factor(y[1:100], levels = lev2)
res22 = EMlnet(y, X, lambda = 1e-03)
pred22 = predict(res22, X)
expect_equal(levels(pred22$class), lev2)
expect_equal(length(pred22$class), nrow(X))
expect_equal(ncol(pred22$posterior), 2)
expect_equal(colnames(pred22$posterior), lev2)
expect_equal(nrow(pred22$posterior), nrow(X))
expect_equivalent(rowSums(pred22$posterior), rep(1, nrow(X)))
expect_equal(ncol(pred22$gating), 1)
expect_equal(nrow(pred22$gating), nrow(X))
expect_equal(ncol(pred22$experts), 2) # only one class
expect_equal(nrow(pred22$experts), nrow(X))
})
test_that("J = 3, K = 2", {
X = as.matrix(iris[1:100,1:4])
y = iris$Species
lev2 = levels(y)[1:2]
y = factor(y[1:100], levels = lev2)
res23 = EMlnet(y, X, lambda = 1e-03, J = 3)
pred23 = predict(res23, X)
expect_equal(levels(pred23$class), lev2)
expect_equal(length(pred23$class), nrow(X))
expect_equal(ncol(pred23$posterior), 2)
expect_equal(colnames(pred23$posterior), lev2)
expect_equal(nrow(pred23$posterior), nrow(X))
expect_equivalent(rowSums(pred23$posterior), rep(1, nrow(X)))
expect_equal(ncol(pred23$gating), 3)
expect_equal(nrow(pred23$gating), nrow(X))
expect_equivalent(rowSums(pred23$gating), rep(1, nrow(X)))
expect_equal(ncol(pred23$experts), 3) # only one class
expect_equal(nrow(pred23$experts), nrow(X))
})
|
d86dfcccd80d37eaf428a3baf5c201c6b50cc1c3
|
a15bafa2b845fff694d5428c27796ae02fc04b17
|
/man/Colors1.Rd
|
e2237ffaa8af1e565f80dd928c181366f1342b41
|
[] |
no_license
|
cran/IntClust
|
741db502112c3407aefa5eb5d08b92e2d2a75f7a
|
235fe4a67855c96b48247aa2f02895604caf507e
|
refs/heads/master
| 2018-10-30T23:12:34.173124
| 2018-07-30T11:10:15
| 2018-07-30T11:10:15
| 55,162,571
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 322
|
rd
|
Colors1.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\name{Colors1}
\alias{Colors1}
\title{Colour examples}
\format{An object of class \code{"character"}.}
\description{
A vector of HEX codes for the colours used in the examples
}
\examples{
data(Colors1)
}
\keyword{datasets}
|
9cb3d5b160cf15b2972e3411e5506248c660811d
|
075147b97c67bcfbe90b5386f0a9332648578941
|
/plot2.R
|
23b6287f3202998e3967d60eb80350ec7dd846e8
|
[] |
no_license
|
Zhong-Wang/ExData_Plotting1
|
c896acee1fe96f59be72fc5a6b93bd2b78506120
|
de9886d981f20e050b30d13ddfffa79b888208e2
|
refs/heads/master
| 2020-05-31T00:08:04.079335
| 2014-05-10T10:30:57
| 2014-05-10T10:30:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,050
|
r
|
plot2.R
|
## Get dataset file
## CAUTION: if you have already download dataset file,
## please ensure the name and the file path is correctly set to 'destfile' variable to avoid re-download
destfile <- "exdata-data-household_power_consumption.zip"
if (!file.exists(destfile)) {
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile, method="curl")
}
## Create data frame with the data of 1/2/2007 and 2/2/2007 only
## (rows are selected by grepping the raw text before parsing, so only the
## needed lines are ever read into a data frame).
file <- unz(destfile, unzip(destfile, list=TRUE)[1, "Name"])
# NOTE(review): "[1|2]" is a character class containing '1', '|' and '2' —
# it matches the two intended dates, but '|' is not alternation here.
data <- grep("^[1|2]/2/2007", readLines(file), value=TRUE)
data <- read.csv2(text=data, header=FALSE, colClasses="character", na.strings="?")
# Re-open the connection just for the header row to recover column names.
names(data) <- unname(unlist(read.csv2(file, header=FALSE, nrow=1)))
## Append DataTime column to data frame
data$DateTime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
## Plot and save to png file
png("plot2.png")
plot(data$DateTime, data$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
##dev.copy(png, file="plot2.png")
dev.off()
|
a1bfc7a500e89887664fe03f199cfb8fc4f12e4d
|
d5eef5ca98115b14d13345c3e104fe4d9448b721
|
/R/webSDM-package.R
|
b3735ece6a5d56411735db378ef70779a58ece3d
|
[] |
no_license
|
giopogg/webSDM
|
b6e3e40fc0ee82f87b2cee3a4c2167555fc3e19a
|
9b011d1dcb58b9e2874f841af4874db752c73fff
|
refs/heads/main
| 2023-04-19T02:10:10.368235
| 2023-03-15T07:23:58
| 2023-03-15T07:23:58
| 359,931,820
| 5
| 1
| null | 2023-03-14T09:38:33
| 2021-04-20T19:39:24
|
R
|
UTF-8
|
R
| false
| false
| 177
|
r
|
webSDM-package.R
|
#' webSDM.
#'
#' Package to fit a trophic Species Distribution Model, analyse it and predict. See Poggiato et al. In prep.
#'
#' @docType package
#'
#'
#' @name webSDM
#'
NULL
|
c699140e7f79d4732f3edac6385be38f7dc0ef0a
|
e5c43a31a082bbfec5ebbc20b34d373896721579
|
/R/functions/vplot.R
|
5b362c2d879ff9442f00eebb46de45a51694e9bd
|
[] |
no_license
|
geryan/rfst
|
3dde3a499651f3a1ccc736f8c6597c5972f0e17c
|
0aac1f0c3b17096af0c5b0b06e1ad80ac6d709ca
|
refs/heads/master
| 2023-05-02T12:32:51.743467
| 2021-04-27T01:26:47
| 2021-04-27T01:26:47
| 164,573,310
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 186
|
r
|
vplot.R
|
#' Plot an object with a 100-colour viridis palette.
#'
#' @param x An object with a plot() method that accepts a `col` palette
#'   (e.g. a raster).
#' @param ... Further arguments passed on to plot().
vplot <- function(x, ...){
  # Use a namespaced call instead of library(): calling library() inside a
  # function attaches viridis to the search path as a side effect every call.
  plot(x, col = viridis::viridis(100), ...)
}
#' Plot an object with a 100-colour viridis palette and a fixed [0, 1] scale.
#'
#' @param x An object with a plot() method that accepts `col` and `zlim`.
#' @param ... Further arguments passed on to plot().
zplot <- function(x, ...){
  # Namespaced call avoids the side effect of attaching viridis on every call.
  plot(x, col = viridis::viridis(100), zlim = c(0, 1), ...)
}
|
542b82e093b591d8d0af0b1bc50c9dd2686662c5
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query51_falsequ_1344n/query51_falsequ_1344n.R
|
1671077e6813bee4018e0c345d7227ff1e817d5f
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 67
|
r
|
query51_falsequ_1344n.R
|
a7ea6efed7078242494a844a485f8a81 query51_falsequ_1344n.qdimacs 21 9
|
4f512937cb51cad76b0d180bb878dd99a04e4ade
|
219c5192a2af3c82cf1e498b7abca3b782dbefe9
|
/cachematrix.R
|
9a84f13e0252b0d6cb4e8783ea210fa14f683858
|
[] |
no_license
|
superbunika/ProgrammingAssignment2
|
6309c34727240ef87f62b50a7d231d2e90b99454
|
fefb32ad21437389f6faaa0726d0744624336412
|
refs/heads/master
| 2020-12-02T15:06:22.501161
| 2014-12-21T22:46:19
| 2014-12-21T22:46:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,112
|
r
|
cachematrix.R
|
## A "cache matrix" is a list of closures wrapping a matrix together with a
## memo slot for its inverse. makeCacheMatrix builds that object: get/set
## access the matrix, getinverse/setinverse access the cached inverse.
## Storing a new matrix via set() clears the cache, so a stale inverse can
## never be returned for the new matrix.
makeCacheMatrix <- function(x = matrix()) {
  cached_inv <- NULL
  store <- function(y) {
    x <<- y
    cached_inv <<- NULL  # invalidate: old inverse no longer applies
  }
  list(
    set = store,
    get = function() x,
    setinverse = function(inv) cached_inv <<- inv,
    getinverse = function() cached_inv
  )
}
## Return the inverse of the matrix held in a makeCacheMatrix-style object,
## computing it at most once: a previously cached inverse is reused (with a
## message), otherwise the inverse is solved, stored back into the cache,
## and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse, memoize it, and fall through.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("Getting cached inverse matrix")
  }
  cached
}
|
65e09323bfa42426f0095ee83c81c21fc38c136d
|
ffd14f4e8b9d71ef13968755efca4760bfc518b4
|
/Code/8-LocalSource.R
|
8dd98caa186d9dec368ceb6a6f37a1c4c60f6890
|
[
"MIT"
] |
permissive
|
tchakravarty/RExamples
|
d398d50b5362f1955ba73d26dd806e6fb218b9df
|
65c17fade0a784018f2f3afcb4a9a8c603bd65a0
|
refs/heads/master
| 2021-01-21T21:48:45.641418
| 2016-05-20T17:55:47
| 2016-05-20T17:55:47
| 28,849,466
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 271
|
r
|
8-LocalSource.R
|
# Demonstrate sourcing a script into a local environment so its
# assignments do not clobber variables in the caller's workspace.

# FIX: the original spelled the path twice and inconsistently
# ("Code/8-LocalSource-Input.R" when writing, "Code//8-LocalSource-Input.R"
# when sourcing). A single constant keeps writer and reader in sync.
input_path <- file.path("Code", "8-LocalSource-Input.R")

# write out the file to be sourced
fLS <- file(description = input_path, open = "w+")
write(x = "a = 1; print(a)", file = fLS)
close(fLS)

# source the file into its own environment; `a` below keeps the value 3
# because source() runs with local = TRUE inside sourceEnv
a <- 3
sourceEnv <- new.env()
with(sourceEnv, source(input_path, local = TRUE))
a
|
db20775dec852f5193ca9522503993574bdcf51b
|
2da570da5859c8a830e76d794fa17d042cd41ebc
|
/05 Introduction_to_Data_in_R/03 Sampling_Exp_Design/01 Simple_Random_Sample_R.R
|
19664d83955d1359452a0d7add6bb96b33900436
|
[] |
no_license
|
ArmandoReyesRepo/RCode
|
85d5c8f36107936bfcbbdbf16dc9bb2ed1a0feee
|
41c96dd0d4bc7762fad3cbeb46c3df4ee1444575
|
refs/heads/main
| 2023-05-12T16:29:25.970647
| 2021-06-04T09:01:34
| 2021-06-04T09:01:34
| 373,320,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 597
|
r
|
01 Simple_Random_Sample_R.R
|
# DataCamp exercise: draw a simple random sample of US states with dplyr.
# NOTE(review): setwd() to an absolute personal path makes this script
# non-portable; acceptable for a scratch exercise, avoid in shared code.
setwd("C:/Users/arman/OneDrive/Desktop/2020/DataCamp/05 Introduction_to_Data_in_R")
getwd()
dir()
# Load packages
library(dplyr)
# load() returns the NAME of the restored object; get() fetches its value,
# so this works regardless of what the .RData file calls the data frame
us_regions<- get(load("us_regions.RData"))
# The dplyr package and us_regions data frame have been loaded.
#
# Use simple random sampling to select eight states from us_regions.
# Save this sample in a data frame called states_srs.
#
# Count the number of states from each region in your sample.
# Simple random sample: states_srs
states_srs <- us_regions %>%
  sample_n(size=8)
# Count states by region
states_srs %>%
  count(region)
|
acd35f4fac395970dc580b07a76e1eaa4a8a8b98
|
3dff7540de9b4037edbc14e4f3bffc4a58bfb1fd
|
/R/todo.R
|
d7f46ec3b5f8dbfbfe9dbda24c0c515ae1ed9e41
|
[] |
no_license
|
antongrau/MONECA
|
1e679da3d1bbc6040e270f474240531c177f69e8
|
a83f176fefa9e98d8d22be7deacedde710797c01
|
refs/heads/master
| 2020-12-25T17:34:57.614262
| 2019-09-16T15:03:05
| 2019-09-16T15:03:05
| 30,929,570
| 3
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,812
|
r
|
todo.R
|
# ## Todo
#
# # Få den fucking plot funktion til at virke igen - vi skal til sna pakken
# library(igraph)
# library(MONECA)
# library(soc.elite)
#
# # devtools::install_github("gaborcsardi/pkgconfig")
# # devtools::install_github("igraph/rigraph")
#
# # library(devtools)
# #
# # devtools::install_github("gaborcsardi/pkgconfig")
# # devtools::install_github("igraph/rigraph")
#
#
# data(occupations)
#
# gg.moneca(mob.seg, show.text = F)
#
# wm <- weight.matrix(seg$mat.list[[1]])
#
# seg <- anton(mob.mat)
#
# segmenter <- seg
# edges <- segment.edges(seg, segment.reduction = 0)
#
#
# attraction=c(30, 20, 15, 10, 5)
# attraction=c(320, 40, 10, 4, 2)
# attraction
# lay <- layout.matrix(seg, attraction = attraction, weight.adjustment = 1, start.temp = 20, niter = 10000, tie.adjustment = 0.4)
#
# gg.moneca (seg, layout = lay, show.text = F, edges = log(edges+1), edge.color = "black", edge.size = 0.2, border.padding = 0.8)
#
# # Først en edge matrice:
#
# edges <- segment.edges(seg)
#
# graph <- graph.adjacency(edges, mode="directed", weighted=TRUE, diag=NULL)
#
# ## ---
# attraction=c(250, 100, 15, 5, 3)
# area.size=200000
# niveau=seq(segmenter$segment.list)
# mode="directed"
# weight.adjustment = 2
#
# seg <- segmenter
# seg$segment.list <- segmenter$segment.list[niveau]
# seg$mat.list <- segmenter$mat.list[niveau]
#
# # mx <- segmenter$mat.list[[1]]
# # l <- nrow(mx)
# # mx.1_exp <- as.array(mx[,l]) %*% t(as.array(mx[l,]) / mx[l,l])
# # mx.1_net <- mx/mx.1_exp
# # mx.1 <- mx.1_net[-l,-l]
# # mx.attract <- mx.1
# #
#
# wm <- weight.matrix(segmenter$mat.list[[1]], cut.off=0, diagonal=TRUE, symmetric=FALSE)
# mx.attract <- wm
# gra.lay <- graph.adjacency(mx.attract, mode="directed", weighted=TRUE, diag=NULL)
#
#
# assign.attraction <- function(mx.attract, segment, attract){
# for (i in 1:length(segment)) mx.attract[segment[[i]],segment[[i]]] <- attract
# return(mx.attract)
# }
#
# for (i in length(seg$segment.list):2){
# segment <- seg$segment.list[[i]]
# mx.attract <- assign.attraction(mx.attract, segment, attraction[i-1])
# }
#
# diag(mx.attract) <- 0
# gra.lay <- graph.adjacency(mx.attract, mode=mode, weighted=TRUE, diag=NULL)
#
#
# a <- rowSums(wm)
# b <- colSums(wm)
# start <- cbind(max(a) - a, max(b)- b)
# start <- norm_coords(start, xmin = -100, xmax = 100, ymin = -100, ymax = 100)
#
# layout <- layout_with_fr(gra.lay,coords = start, weights=E(gra.lay)$weight*weight.adjustment, niter = 1000)
# layout[, 1:2] <- norm_coords(layout[, 1:2], xmin = 1, xmax = 10^10, ymin = 1, ymax = 10^10)
#
#
#
# # # # TODO
# # # # Mode er usystematisk implementeret i anton, jonas og find.segment : husk at weight matrix også er inde over
# # #
# # # #
# # # Ego kort
# # # Vi har ikke styr på rækker og kolonner her, så vi ved ikke hvad der sender og modtager
# #
# # library(MONECA)
# # data(occupations)
# # mxa.b <- mob.mat
# # segmenter <- anton(mxa.b, segment.levels = 3)
# # ego.plot(segmenter, mob.mat, id = 5, edge.color = "green")
# #
# # # add.table.to.plot <- function()
# #
# # # Annotate en tabel på
# # # sum.stat <- c("Beskæftigede" = as.numeric(stor.beskæftigede[id]),
# # # "Andel af alle beskæftigede %" = round(as.numeric(stor.beskæftigede[id]/sum(stor.beskæftigede)), 3),
# # # "Intern mobilitet %" = round(as.numeric(intern.mobilitet[id]), 2),
# # # "Organisationsgrad 2011 %" = round(organiserede.andel[id, ncol(organiserede.andel)], 2))
# #
# # # + annotate("text", x = Inf, y = -Inf, color = "black", vjust = -0.5, hjust = 1 ,label = sum.stat)
|
41d30c27e1463a4432b7cb9bac256bff0403e42f
|
78e31bac83a432d95f07466a1b1cfac78638d671
|
/R/options.R
|
229e2e0ee552ae9a987c4be73b39862e3d6457f7
|
[] |
no_license
|
shakythesherpa/Rnightlights
|
298885d7eb7df9301c7c5e3ed39de4c01d1124c6
|
c36a9a6ef047e2cd1447b4e20a54030f65dd42c3
|
refs/heads/master
| 2020-06-06T13:52:04.061970
| 2019-03-27T12:27:18
| 2019-03-27T12:27:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,697
|
r
|
options.R
|
######################## RNIGHTLIGHTSOPTIONS ###################################
# Package-wide option store built with settings::options_manager().
# Each `name = value` pair is a default; the `.allowed` list attaches
# settings::inlist() validators so only the enumerated values may be set
# for those options. Exposed to users via pkgOptions()/pkgReset() below.
RNIGHTLIGHTSOPTIONS <- settings::options_manager(
  # Specify the regex to uniquely identify the tile file
  # to extract from the downloaded tile tar.gz
  # more info at: https://ngdc.noaa.gov/eog/viirs/download_dnb_composites.html
  configName_VIIRS.D = "vcmcfg",
  configName_VIIRS.M = "vcmcfg",
  configName_VIIRS.Y = "vcm-orm-ntl",
  # cropMaskMethod: method used to crop and mask tiles to country polygons.
  # options: "gdal" or "rast"; gdal is usually faster but requires gdal
  # to be installed on the system
  cropMaskMethod = "rast",
  # whether downloaded tiles are deleted after processing
  deleteTiles = FALSE,
  # Set directory paths
  dirNlData = "data",
  dirNlRoot = ".Rnightlights",
  dirNlTiles = "tiles",
  dirPolygon = "polygons",
  dirRasterOutput = "outputrasters",
  dirRasterWeb = "outputrasters_web",
  dirZonals = "zonals",
  dirNlTemp = "temp",
  # downloadMethod used. options: auto, aria, curl, libcurl, wget
  downloadMethod = "auto",
  # methods to extract data. Options: raster, gdal
  extractMethod = "rast",
  # the GADM polygons version to use
  gadmVersion = "3.6",
  # gdalCacheMax speeds up gdal_rasterize calculation of stats in function
  # ZonalPipe with more cache (advice: max 1/3 of your total RAM)
  # see: http://www.guru-gis.net/efficient-zonal-statistics-using-r-and-gdal/
  gdalCacheMax = 1024,
  # stats to calculate in processNlData. Can be added to if the function exists
  # i.e. if not a standard function can be created in workspace
  nlStats = c("sum", "mean"),
  # urls for raster tile listings. In theory, can be used to override the
  # url if it is changed while the package is being updated
  ntLtsIndexUrlOLS = "https://www.ngdc.noaa.gov/eog/data/web_data/v4composites/",
  ntLtsIndexUrlVIIRS.D = "https://ngdc.noaa.gov/eog/viirs/download_ut_mos_tile_iframe.html",
  ntLtsIndexUrlVIIRS.M = "https://www.ngdc.noaa.gov/eog/viirs/download_dnb_composites_iframe.html",
  ntLtsIndexUrlVIIRS.Y = "https://www.ngdc.noaa.gov/eog/viirs/download_dnb_composites_iframe.html",
  # number of processor cores used when extractMethod = "raster"
  numCores = 2,
  # countries to not process. useful if many countries being processed
  # and want to exclude a few
  omitCountries = "missing",
  # Change the temp dir to use e.g. if the system temp dir does not have
  # enough space. Not used yet
  tmpDir = raster::tmpDir(),
  # Validators: attempts to set these options to values outside the
  # listed sets are rejected by the options manager
  .allowed = list(
    configName_VIIRS.D = settings::inlist("vcmcfg", "vcmsl"),
    configName_VIIRS.M = settings::inlist("vcmcfg", "vcmsl"),
    configName_VIIRS.Y = settings::inlist("vcm-orm", "vcm-orm-ntl", "vcm-ntl"),
    cropMaskMethod = settings::inlist("gdal","rast"),
    extractMethod = settings::inlist("gdal", "rast"),
    downloadMethod = settings::inlist("aria", "auto", "curl", "libcurl", "wget"),
    gadmVersion = settings::inlist("2.8", "3.6"),
    omitCountries = settings::inlist("error", "missing", "long", "all", "none")
  )
)
######################## pkgOptions ###################################
#' Set or get options for the Rnightlights package
#'
#' @param ... Option names to retrieve option values or \code{[key]=[value]}
#' pairs to set options.
#'
#' @section Supported options:
#' The following options are supported
#' \describe{
#' \item{\code{configName.VIIRS.D}}{(\code{character}) The regex to uniquely
#' identify the tile file to use out of the downloaded tile .tgz. The
#' version 1 monthly series is run globally using two different
#' configurations.
#'
#' The first excludes any data impacted by stray light. The second
#' includes these data if the radiance vales have undergone the stray-
#' light correction procedure (Reference). These two configurations
#' are denoted in the filenames as "vcm" and "vcmsl" respectively.
#' The "vcmsl" version, that includes the stray-light corrected data,
#' will have more data coverage toward the poles, but will be of reduced
#' quality.
#'
#' It is up to the users to determine which set is best for their
#' applications. The annual versions are only made with the "vcm"
#' version, excluding any data impacted by stray light.}
#'
#' \item{\code{configName.VIIRS.M}}{(\code{character}) The regex to uniquely
#' identify the tile file to use out of the downloaded monthly .tgz
#' tile. Has the same options as configName.VIIRS.D}
#'
#' \item{\code{configName.VIIRS.Y}}{(\code{character}) The regex to uniquely
#' identify the tile file to use out of the downloaded tile .tgz. The
#' annual products can have other values for the config shortname (Field 5).
#' They are:
#' \itemize{
#' \item vcm-orm \emph{(VIIRS Cloud Mask - Outlier Removed)}: This product
#' contains cloud-free average radiance values that have undergone
#' an outlier removal process to filter out fires and other ephemeral
#' lights.
#' \item vcm-orm-ntl \emph{(VIIRS Cloud Mask - Outlier Removed - Nighttime Lights)}:
#' This product contains the "vcm-orm" average, with background
#' (non-lights) set to zero.
#' \item vcm-ntl \emph{(VIIRS Cloud Mask - Nighttime Lights)}: This product
#' contains the "vcm" average, with background
#' (non-lights) set to zero.}}
#' \item{\code{cropMaskMethod}}{(\code{character}) The method to use to
#' clip the nightlight raster tiles to the country boundaries }
#' \item{\code{deleteTiles}}{(\code{character}) whether to delete tiles
#' after processing may be useful where diskspace is a concern }
#' \item{\code{dirNlData}}{(\code{character}) The directory to store
#' the extracted data files in }
#' \item{\code{dirNlRoot}}{\code{character}) The root directory
#' storing the package data}
#' \item{\code{dirNlTiles}}{(\code{character}) The directory in which
#' to store the downloaded VIIRS raster tiles }
#' \item{\code{dirPolygon}}{(\code{character}) The directory to store
#' the downloaded country administration level polygons }
#' \item{\code{dirRasterOutput}}{(\code{character}) The directory in
#' which to store the clipped country rasters }
#' \item{\code{dirRasterWeb}}{(\code{character}) The directory in which
#' to store the rasters resampled for web display }
#' \item{\code{dirZonals}}{(\code{character}) The directory in which to
#' store the zonal statistics country polygon }
#' \item{\code{downloadMethod}}{(\code{character}) The download method
#' to use }
#' \item{\code{extractMethod}}{(\code{character}) The method to use to
#' extract data from the rasters }
#' \item{\code{gdalCacheMax}}{(\code{character}) The maximum memory gdal
#' should use in gdal_rasterize }
#' \item{\code{ntLtsIndexUrlOLS}}{(\code{character}) The url with the OLS
#' tile index }
#' \item{\code{ntLtsIndexUrlVIIRS}}{(\code{character}) The url with the
#' VIIRS tile index }
#' \item{\code{numCores}}{(\code{character}) The number of processor cores
#' to use when extractMethod = "raster" }
#' \item{\code{omitCountries}}{(\code{character}) The countries to exclude
#' in processing }
#' \item{\code{stats}}{(\code{character}) The statistics to calculate for
#' country regions. The default are sum and mean. Any other aggregate
#' statistics can be included. Also any aggregate function accessible
#' in the current environment can be added. }
#' \item{\code{tmpDir}}{(\code{character}) Change the temporary directory
#' for processing rasters. Not in use }
#' }
#'
#' @return if an option name is supplied as a parameter this returns the
#' value, else a list of all options is returned.
#'
#' @examples
#' #retrieve the current cropMaskMethod
#' pkgOptions("cropMaskMethod")
#'
#' #set the cropMaskMethod
#' pkgOptions(cropMaskMethod="gdal")
#'
#' #retrieve all options
#' pkgOptions()
#'
#' @export
pkgOptions <- function(...)
{
  # Reject attempts to set reserved settings-package names (e.g. `.allowed`)
  settings::stop_if_reserved(...)
  # Delegate to the options manager: no args -> all options; option names
  # -> their values; name=value pairs -> set those options (validated)
  RNIGHTLIGHTSOPTIONS(...)
}
######################## pkgReset ###################################
#' Reset global options for the Rnightlights package
#'
#' Reset global options for the Rnightlights package
#'
#' @examples
#' #get cropMaskMethod
#' pkgOptions("cropMaskMethod") #returns default "rast"
#'
#' #set cropMaskMethod to "gdal"
#' pkgOptions(cropMaskMethod="gdal") #sets to "gdal"
#'
#' #check cropMaskMethod has changed
#' pkgOptions("cropMaskMethod") #returns "gdal"
#'
#' #reset pkgOptions
#' pkgReset()
#'
#' #check cropMaskMethod has been reset
#' pkgOptions("cropMaskMethod") #returns default "rast"
#'
#' @export
pkgReset <- function()
{
  # Restore every option managed by RNIGHTLIGHTSOPTIONS to its default value
  settings::reset(RNIGHTLIGHTSOPTIONS)
}
|
ae0a9d84b9f554f154159b145c7fa1436d4d2ce7
|
719dde39be52f5a55efd36d04de076f90d26f681
|
/Exercise/R_Examples/Ex_12_7.R
|
596defda8af0ebc238623197b661d6af68af73ae
|
[] |
no_license
|
KuChanTung/R
|
e56f655551fe1041a25c57afde423816d1173c46
|
f0932766c3ef6ffa9771ab0e63a7ae8f9d163194
|
refs/heads/master
| 2021-01-19T12:14:48.750498
| 2018-07-08T07:40:31
| 2018-07-08T07:40:31
| 82,291,567
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 460
|
r
|
Ex_12_7.R
|
# Example 12-7: Convergence in Probability
library(ConvergenceConcepts)
## Generate the first n terms of the classic "sliding interval" sequence
## used to illustrate convergence in probability without almost-sure
## convergence: a single uniform draw Z is taken, and term i (written as
## i = 2^k + m with 0 <= m < 2^k) is the indicator that Z lies in the
## dyadic interval [m * 2^-k, (m + 1) * 2^-k).
##
## @param n Number of terms to generate (non-negative integer).
## @return Integer vector of length n containing 0/1 indicators.
samplegen <- function(n) {
  Z <- runif(1)
  # FIX: seq_len(n) instead of 1:n, which would yield c(1, 0) when n == 0
  i <- seq_len(n)
  k <- floor(log2(i))   # dyadic level of each index
  m <- i - 2^k          # offset within level k
  lower <- m * 2^(-k)
  upper <- (m + 1) * 2^(-k)
  as.integer(lower <= Z & Z < upper)
}
# Convergence criteria from the ConvergenceConcepts package:
#   mode = "p"  -> criterion for convergence in probability
#   mode = "as" -> criterion for almost-sure convergence
# NOTE(review): `data` and `epsilon0` are not defined anywhere in this
# script — presumably `data` holds sample paths built from samplegen()
# and epsilon0 is the tolerance; confirm against the textbook example
# this was copied from before running.
critp = criterion(data, epsilon = epsilon0, mode = "p")$crit
critas = criterion(data, epsilon = epsilon0, mode = "as")$crit
# Overlay the two criteria for the first 1000 sample sizes
plot(critp[1:1000],type="l")
lines(1:1000,critas[1:1000],col=2)
|
2d066e9bb137e9a448ceaa01eb183947678df552
|
285541e8ae77482ac7eeb5b51ce06edeb96ef246
|
/man/deviation_test.Rd
|
bca236cddcee805a7255233761c6653c0b04c25a
|
[] |
no_license
|
myllym/GET
|
2033c4f590da7cce114b588e7e39b243b543dcdf
|
72988291d9c56b468c5dddfb5bc2c23f519b6dca
|
refs/heads/master
| 2023-08-24T23:23:14.364346
| 2023-08-15T21:33:51
| 2023-08-15T21:33:51
| 68,914,145
| 12
| 5
| null | 2022-11-16T07:55:16
| 2016-09-22T11:20:34
|
R
|
UTF-8
|
R
| false
| true
| 5,195
|
rd
|
deviation_test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deviation_test.r
\name{deviation_test}
\alias{deviation_test}
\title{Deviation test}
\usage{
deviation_test(
curve_set,
r_min = NULL,
r_max = NULL,
use_theo = TRUE,
scaling = "qdir",
measure = "max",
savedevs = FALSE
)
}
\arguments{
\item{curve_set}{A residual curve_set object. Can be obtained by using
residual().}
\item{r_min}{The minimum radius to include.}
\item{r_max}{The maximum radius to include.}
\item{use_theo}{Whether to use the theoretical summary function or the
mean of the functions in the curve_set.}
\item{scaling}{The name of the scaling to use. Options include 'none',
'q', 'qdir' and 'st'. 'qdir' is default.}
\item{measure}{The deviation measure to use. Default is 'max'. Must be
one of the following: 'max', 'int' or 'int2'.}
\item{savedevs}{Logical. Should the global rank values k_i, i=1,...,nsim+1 be returned? Default: FALSE.}
}
\value{
If 'savedevs=FALSE' (default), the p-value is returned.
If 'savedevs=TRUE', then a list containing the p-value and calculated deviation measures
\eqn{u_i}{u_i}, \eqn{i=1,...,nsim+1}{i=1,...,nsim+1} (where \eqn{u_1}{u_1} corresponds to the data pattern) is returned.
}
\description{
Crop the curve set to the interval of distances [r_min, r_max],
calculate residuals, scale the residuals and perform a deviation test
with a chosen deviation measure.
The deviation tests are well known in spatial statistics; in \pkg{GET} they are
provided for comparative purposes. Some (maximum type) of the deviation test
have their corresponding envelope tests available, see Myllymäki et al., 2017
(and 'unscaled', 'st' and 'qdir' in \code{\link{global_envelope_test}}).
}
\details{
The deviation test is based on a test function \eqn{T(r)}{T(r)} and it works as follows:
1) The test function estimated for the data, \eqn{T_1(r)}{T_1(r)}, and for nsim simulations
from the null model, \eqn{T_2(r), ...., T_{nsim+1}(r)}{T_2(r), ...., T_{nsim+1}(r)}, must be saved in 'curve_set'
and given to the deviation_test function.
2) The deviation_test function then
\itemize{
\item Crops the functions to the chosen range of distances \eqn{[r_{\min}, r_{\max}]}{[r_min, r_max]}.
\item If the curve_set does not consist of residuals (see \code{\link{residual}}),
then the residuals \eqn{d_i(r) = T_i(r) - T_0(r)}{d_i(r) = T_i(r) - T_0(r)} are calculated, where \eqn{T_0(r)}{T_0(r)} is the
expectation of \eqn{T(r)}{T(r)} under the null hypothesis.
If use_theo = TRUE, the theoretical value given in the curve_set$theo is used
as \eqn{T_0(r)}{T_0(r)}, if it is given. Otherwise, \eqn{T_0(r)}{T_0(r)} is estimated by the mean of \eqn{T_j(r)}{T_j(r)},
\eqn{j=2,...,nsim+1}{j=2,...,nsim+1}.
\item Scales the residuals. Options are
\itemize{
\item 'none' No scaling. Nothing done.
\item 'q' Quantile scaling.
\item 'qdir' Directional quantile scaling.
\item 'st' Studentised scaling.
}
See for details Myllymäki et al. (2013).
\item Calculates the global deviation measure \eqn{u_i}{u_i}, \eqn{i=1,...,nsim+1}{i=1,...,nsim+1}, see options
for 'measure'.
\itemize{
\item 'max' is the maximum deviation measure
\deqn{u_i = \max_{r \in [r_{\min}, r_{\max}]} | w(r)(T_i(r) - T_0(r))|}{%
u_i = max_(r in [r_min, r_max]) | w(r)(T_i(r) - T_0(r)) |}
\item 'int2' is the integral deviation measure
\deqn{u_i = \int_{r_{\min}}^{r_{\max}} ( w(r)(T_i(r) - T_0(r)) )^2 dr}{%
u_i = int_([r_min, r_max]) ( w(r)(T_i(r) - T_0(r)) )^2 dr}
\item 'int' is the 'absolute' integral deviation measure
\deqn{u_i = \int_{r_{\min}}^{r_{\max}} |w(r)(T_i(r) - T_0(r))| dr}{%
u_i = int_([r_min, r_max]) | w(r)(T_i(r) - T_0(r)) | dr}
}
\item Calculates the p-value.
}
Currently, there is no special way to take care of the same values of \eqn{T_i(r)}{T_i(r)}
occurring possibly at small distances. Thus, it is preferable to exclude from
the test the very small distances r for which ties occur.
}
\examples{
## Testing complete spatial randomness (CSR)
#-------------------------------------------
if(require("spatstat.explore", quietly=TRUE)) {
pp <- unmark(spruces)
\donttest{nsim <- 999}
\dontshow{nsim <- 19}
# Generate nsim simulations under CSR, calculate L-function for the data and simulations
env <- envelope(pp, fun="Lest", nsim=nsim, savefuns=TRUE, correction="translate")
# The deviation test using the integral deviation measure
res <- deviation_test(env, measure='int')
res
# or
res <- deviation_test(env, r_min=0, r_max=7, measure='int2')
}
}
\references{
Myllymäki, M., Grabarnik, P., Seijo, H. and Stoyan. D. (2015). Deviation test construction and power comparison for marked spatial point patterns. Spatial Statistics 11: 19-34. doi: 10.1016/j.spasta.2014.11.004
Myllymäki, M., Mrkvička, T., Grabarnik, P., Seijo, H. and Hahn, U. (2017). Global envelope tests for spatial point patterns. Journal of the Royal Statistical Society: Series B (Statistical Methodology), 79: 381–404. doi: 10.1111/rssb.12172
}
|
a56ba4d0a0b211fd644fdaa4da319e697930a4a9
|
6cd7468396db7ae36ca24ce9fb2fde989ad33d5e
|
/5.5.plotMutationFrequencies.R
|
9895da1f1bf992f78e7259142a447700cbb9a0be
|
[] |
no_license
|
cyclo-hexane/analysisScripts
|
6c4043d9eded4d21d09a0a6099c6f0ea18766113
|
289715d2891a3002b4b32aa5dabbb5702558960b
|
refs/heads/master
| 2021-01-11T17:39:24.562432
| 2017-01-23T15:28:47
| 2017-01-23T15:28:47
| 79,815,423
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,277
|
r
|
5.5.plotMutationFrequencies.R
|
# 5.4.plotSampleDiversity
################### notes: #####################
# processing order:
#
#
# 5.0.countBamReads [run on Apocrita, for each set] (gets tables of read numbers for a given bed file )
# |
# v
# 5.1.subsetBamFiles (calculates proportion to be subsampled for each set and makes .sh scripts to subset bams)
# |
# v
# coverageBed run on samples for specified RIO as part of above script
# |
# v
# 5.2.sampleROIcoverage (assessed coverage then samples consistent regions 100 times for 10,000 regions)
# |
# v
# runPlatypus on sampled regions for subsetted bams
# |
# v
# 5.3.processSampledVCFs (to get somatic variants and count interval sampled by each iteration)
# |
# v
# 5.4.plotSampleDiversity
#
#################### libraries #################
#################### main program #################
# Compute per-sample somatic mutation frequencies (mutations per unit of
# sampled interval) across 100 subsampling iterations for each sample set,
# then write per-set and combined boxplots as PDFs.
# NOTE(review): all paths are hard-coded and the script relies on column
# positions in the master sample sheet (col 6 = project root dir,
# col 7 = index of the normal sample, col 8 = number of samples) — confirm
# against masterSampleList.allSamples.filt.csv before re-running.
sampleList <- read.csv(file="~/PhD/CRCproject/masterSampleList.allSamples.filt.csv", header=FALSE, stringsAsFactors=FALSE)
holdingDir <- "3.samplingAnalysis/platypusCalls/"
regionsDir <- "3.samplingAnalysis/sampledRegions/"
vcfName <- ".somatic.txt"
outDir <- "3.samplingAnalysis/mutationFrequencies/"
# sample-set names; sets at positions 17-20 and 24 are excluded
setNames <- unique(sampleList[[1]])
setNames <- setNames[-c(17:20, 24)]
# grouping vector for the final column ordering (one entry per remaining
# set). NOTE(review): groups 1+2 cover 16 sets but the adenomaCarcinoma
# plot below takes columns 1:15 and MSIlynch takes 16:19 — verify the
# intended split, one group-1/2 set lands in the second plot.
orderList <- c(2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,3,3,3)
# sampled interval sizes (from script 5.3.processSampledVCFs);
# one row per iteration, column 1 is the total size for that iteration
intFile <- paste(sampleList[1,6], regionsDir,"SeqCap.EZ.totalSizeOfIntervals.txt", sep="")
intervalSizes <- read.table(file=intFile, sep="\t", header=TRUE)
# one column per sample set; up to 20 per-sample mean frequencies per set
mutFreqMainTab <- data.frame(matrix(NA, ncol=length(setNames), nrow = 20))
names(mutFreqMainTab) <- setNames
# filter each sampled vcf
for(j in 1:length(setNames)){
  subSample <- subset(sampleList, sampleList[1]==setNames[j])
  sampleNames <- subSample[[2]]
  normalName <- sampleNames[subSample[1,7]+1]   # col 7 is a 0-based index, hence +1
  noSamples <- subSample[1,8]
  sampleNamesNoNor <- sampleNames[sampleNames != normalName]
  # diversity table (uses sampled interval sizes to get diversity per Mb):
  # rows = 100 iterations, columns = non-normal samples
  mutRateTab <- data.frame(matrix(NA, ncol=length(sampleNamesNoNor), nrow=100))
  names(mutRateTab) <- sampleNamesNoNor
  for(currIter in 1:100){
    print(paste("#### assessing sample ", setNames[j], " iteration ", currIter, " ####",sep=""))
    noSamples <- subSample[1,8]
    confFileName <- paste(subSample[1,6], holdingDir, subSample[1,1],"/", subSample[1,1], ".", currIter, vcfName, sep="")
    # iterations whose somatic call file is absent are skipped (row stays NA)
    if(!file.exists(confFileName)){
      print(paste("iteration ", currIter," was missed"))
      next
    }
    confFile <- read.table(file=confFileName, sep="\t", header=FALSE, stringsAsFactors = FALSE)
    # columns: chrom, pos, ref, alt, then one .NR (total reads) and one
    # .NV (variant reads) column per sample
    names(confFile) <- c("chrom", "pos", "ref", "alt", paste(sampleNames, ".NR", sep=""), paste(sampleNames, ".NV", sep=""))
    # get mutation frequency for each sample: count of variants with at
    # least one supporting read, divided by this iteration's interval size
    for(currSam in 1:length(sampleNamesNoNor)){
      tempTab <- table(confFile[[paste(sampleNamesNoNor[currSam], ".NV", sep="")]] > 0)
      if("TRUE" %in% names(tempTab)){
        mutRateTab[currIter, sampleNamesNoNor[currSam]] <- as.numeric(tempTab["TRUE"]) / intervalSizes[currIter, 1]
      }
    }
  }
  # get mean values (per-column medians, used only to order the columns)
  meanValues <- c()
  for(getMean in 1:ncol(mutRateTab)){
    meanValues[getMean] <- median(mutRateTab[[getMean]], na.rm = TRUE)
  }
  # order mutations
  mutRateTab <- mutRateTab[order(meanValues)]
  # store mean mutation frequency per sample for the combined plots below
  mutFreqTab <- c()
  for(getMean in 1:ncol(mutRateTab)){
    mutFreqTab[getMean] <- mean(mutRateTab[[getMean]], na.rm = TRUE)
  }
  mutFreqMainTab[c(1:length(mutFreqTab)), j] <- mutFreqTab
  # plot graph: per-set boxplot of per-iteration frequencies
  pdf(file = paste(sampleList[1,6], outDir, setNames[j], ".mutFreq.pdf", sep=""), width = (noSamples/2), height = 5)
  boxplot(mutRateTab, las=2)
  dev.off()
}
# plot barchart of mean mutation frequencies: sets ordered by group
# (orderList), then by mean frequency within each group
meanList <- c()
for(k in 1:ncol(mutFreqMainTab)){
  meanList[k] <- mean(mutFreqMainTab[[k]], na.rm = TRUE)
}
mutFreqMainTab <- mutFreqMainTab[order(orderList, meanList)]
pdf(file = paste(sampleList[1,6], outDir, "adenomaCarcinoma.mutFreq.pdf", sep=""), width = 5, height = 6)
boxplot(mutFreqMainTab[1:15], las=2)
dev.off()
pdf(file = paste(sampleList[1,6], outDir, "MSIlynch.mutFreq.pdf", sep=""), width = 5, height = 8)
boxplot(mutFreqMainTab[16:19], las=2)
dev.off()
|
026498e053a0b9785d5732f7ef5fce41202519b0
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Pan/k_d4_p/k_d4_p-12/k_d4_p-12.R
|
b659d767cce454f452ef9d27930367ab703aee7b
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 59
|
r
|
k_d4_p-12.R
|
301cdb06fa0de3737e7c45639608a0b3 k_d4_p-12.qdimacs 755 2234
|
5c520f20c9e3c448edd8781163ad0e00e123b8a3
|
7487a5ed8d633a155a19dac2f48b7928651c082c
|
/R/ftrace1.R
|
ac16d5b6f25bd71b5c367e467fb68eac569777cd
|
[] |
no_license
|
tibo31/spatial_flows
|
de5a5b8ca0baf21528390ca86494c0fd1139e7fe
|
965f2452b39db0f641fca8423d58263d45a9f5e2
|
refs/heads/master
| 2021-12-06T20:56:47.566312
| 2021-09-06T07:14:18
| 2021-09-06T07:14:18
| 175,052,297
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 691
|
r
|
ftrace1.R
|
## Traces of the first `miter` powers of a spatial weight matrix:
## returns c(tr(W), tr(W^2), ..., tr(W^miter)).
##
## method = "exact"  : dense matrix powers, exact traces.
## method = "approx" : Hutchinson-style Monte Carlo estimate using `riter`
##                     Rademacher (+/-1) probe vectors; the first two
##                     entries are then replaced by their exact values.
ftrace1 <- function(w, method = "exact", miter = 10, riter = 50) {
  # initialization
  stopifnot(method %in% c("exact", "approx"))
  n <- nrow(w)
  if (method == "exact") {
    out <- numeric(miter)
    out[1] <- sum(diag(w))
    power <- w
    for (p in 2:miter) {
      power <- w %*% power        # power now holds W^p
      out[p] <- sum(diag(power))
    }
  } else {
    estimates <- matrix(0, miter, riter)
    for (r in 1:riter) {
      probe <- 2 * round(runif(n)) - 1   # random +/-1 probe vector
      acc <- probe
      for (p in 1:miter) {
        acc <- w %*% acc
        estimates[p, r] <- sum(acc * probe)  # u' W^p u estimates tr(W^p)
      }
    }
    out <- apply(estimates, 1, mean)
    out[1] <- sum(diag(w))      # cheap exact values replace the estimates
    out[2] <- sum(w * t(w))     # tr(W^2) = sum of elementwise W * t(W)
  }
  return(out)
}
|
f5df82600d510138085cc9555c9e5e2732a03168
|
d482484e19ba097a7f52f55e03a9118d554e6437
|
/man/maps.Rd
|
095f9796efb5171d01272f99b3862b75d8447a5f
|
[] |
no_license
|
marcionicolau/ggswissmaps
|
fa43b31a4d6ea0124ed3fd9e8db0749ffb0280fc
|
daa5190e1d6cfb445e8b9e369d804ce24a50d709
|
refs/heads/master
| 2021-01-16T22:25:58.115304
| 2014-11-06T00:00:00
| 2014-11-06T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 771
|
rd
|
maps.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{maps}
\alias{maps}
\title{A list with 8 maps of Switzerland's territory at various levels.}
\format{A list with 16 ggplot2 maps with swiss territory}
\source{
\url{http://www.bfs.admin.ch/bfs/portal/fr/index/dienstleistungen/geostat/datenbeschreibung.html}
}
\description{
Every element of the list is a ggplot2 object which is
already ready to plot.
\itemize{
\item distretti. Swiss districts
\item comuni. Swiss communes (2014)
\item boh. Don't know
\item boh2. Don't know 2
\item cantoni. Swiss cantons
\item ch. Swiss national boundaries
\item gr. Swiss big regions (NUTS3)
}
}
\examples{
data(maps)
maps[[1]]
names(maps)
maps[["cantoni"]]
str(maps[["cantoni"]])
str(maps[["cantoni"]]$data)
}
|
c5409d13e55d5431719115abffdf6d4753cd3fe4
|
4249ddaf7daa8381627889ba2c03208e53783566
|
/r-package/tests/testthat/test-read_comparable_areas.R
|
2ffac45e5e702d08c8fcd953dedb9db89a664acf
|
[] |
no_license
|
ipeaGIT/geobr
|
04d16b346398485c39788dca202b15ac65099c2a
|
9d7e89efc88871378711086671a11b47940dad47
|
refs/heads/master
| 2023-09-01T19:13:11.811800
| 2023-08-31T01:49:56
| 2023-08-31T01:49:56
| 177,215,782
| 711
| 133
| null | 2023-09-14T12:14:05
| 2019-03-22T22:15:22
|
R
|
UTF-8
|
R
| false
| false
| 783
|
r
|
test-read_comparable_areas.R
|
# Tests for read_comparable_areas(); downloading the data is slow, hence
# the skips below.
context("read_comparable_areas")
# skip tests because they take too much time
skip_if(Sys.getenv("TEST_ONE") != "")
testthat::skip_on_cran()
# Reading the data -----------------------
test_that("read_comparable_areas", {
  # read data
  amc <- read_comparable_areas(start_year=1970, end_year=2010)
  # check sf object
  testthat::expect_true(is(amc, "sf"))
  # check the number of comparable areas returned for 1970-2010
  testthat::expect_equal( nrow(amc), 3800)
})
# ERRORS and messages -----------------------
test_that("read_comparable_areas", {
  # Wrong year arguments must raise an error
  testthat::expect_error( read_comparable_areas(start_year=1, end_year=2010) )
  testthat::expect_error( read_comparable_areas(start_year=1970, end_year=2) )
  testthat::expect_error( read_comparable_areas(start_year=1970, end_year=1900) )
})
|
243cee61af8f8c07f18d4ae19b01e0f461fbacca
|
cac62fef42d5c17354656fb435ccada0fcaabff2
|
/plot3.R
|
53d9a18f5050b2bb189bb469dc4d76a098f3eb6d
|
[] |
no_license
|
Karamcse/ExData_Plotting1
|
ddff28d378724e08bca0389c577845121dc71b59
|
d128fc2027b39499fec1fb6e8184b14dc763c848
|
refs/heads/master
| 2020-12-11T05:31:43.186266
| 2014-06-08T14:13:01
| 2014-06-08T14:13:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 519
|
r
|
plot3.R
|
# Plot 3: time series of the three energy sub-metering channels.
# Assumes ./readdata.R defines `data` with DateTime and Sub_metering_1..3.
source("./readdata.R")
## Create Time Series On Energy Sub Metering
# FIX: the original called par() BEFORE png(). Graphical parameters apply
# only to the currently open device, so those settings never reached the
# PNG (and could open an unwanted default device as a side effect). The
# device is now opened first; the background goes through png()'s own
# `bg` argument.
# par(pin = c(6.4, 6.4)) is intentionally dropped: it was a no-op before
# (set with no PNG open) and a 6.4in plot region plus margins does not fit
# the default 480x480px device, so setting it here would error.
png(filename="./figure/Plot3.png", bg = "transparent")
par(mar = c(2, 2, 1, 1))
plot(data$Sub_metering_1~data$DateTime,type="l",ylab="Energy sub metering",xlab="")
points(data$Sub_metering_2~data$DateTime,type="l",col="red")
points(data$Sub_metering_3~data$DateTime,type="l",col="blue")
legend("topright",lty=1,col=c("black","red","blue"),legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
|
835e915195abe9ab20cdc8de607d024a31c7e6ce
|
6203a082bca039d28155b25a080b281a981a077f
|
/man/us_pres_polls_history.Rd
|
6e429f1cf157542c78faaf6034417dec870116ca
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
elliottmorris/politicaldata
|
cd50c1b1b6c9f64d1a4d6d15d563e223bb19ef10
|
044048ea5d4941da95d3f3a225337275547241f7
|
refs/heads/master
| 2020-04-18T21:02:52.559364
| 2019-08-10T13:38:32
| 2019-08-10T13:38:32
| 167,754,489
| 144
| 14
|
NOASSERTION
| 2019-02-22T22:46:53
| 2019-01-27T00:36:56
|
R
|
UTF-8
|
R
| false
| true
| 445
|
rd
|
us_pres_polls_history.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{us_pres_polls_history}
\alias{us_pres_polls_history}
\title{Every presidential general election poll from 1980 through 2016}
\format{An object of class \code{data.frame} with 3586 rows and 10 columns.}
\usage{
data(us_pres_polls_history)
}
\description{
Every presidential general election poll from 1980 through 2016
}
\keyword{datasets}
|
9520d04dab69bfbaef4a7acc69a374e1e55fc98d
|
471fcd08e3b189fe9dc0630e11c5abc6e92f16eb
|
/man/ggplot_boxplot.Rd
|
07171c276e43e7c4ec2dc74c5f1cccc4f9c97123
|
[
"MIT"
] |
permissive
|
freestatman/shinyngs
|
6d776667dc9b9637f10fa654021de3aba2107364
|
ffb6900bfe5ed96ae3b42d877d71c01ff507f4d1
|
refs/heads/master
| 2021-06-01T01:19:53.949901
| 2016-07-04T16:58:09
| 2016-07-04T16:58:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 806
|
rd
|
ggplot_boxplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boxplot.R
\name{ggplot_boxplot}
\alias{ggplot_boxplot}
\title{Make a boxplot with coloring by experimental variable}
\usage{
ggplot_boxplot(plotmatrix, experiment, colorby = NULL,
expressiontype = "expression", whisker_distance = 1.5)
}
\arguments{
\item{plotmatrix}{Expression/ other data matrix}
\item{experiment}{Annotation for the columns of plotmatrix}
\item{colorby}{Column name in \code{experiment} specifying how boxes should be colored}
\item{expressiontype}{Expression type for use in y axis label}
}
\value{
output A \code{ggplot} output
}
\description{
A simple function using \code{ggplot2} to make a sample boxplot
}
\examples{
ggplot_boxplot(selectMatrix(), selectColData(), colorBy())
}
\keyword{keywords}
|
bbb7e987b30e783e75b8d5f8a7af006051d891f1
|
db856c7b812645e310b953de50b9025e4b91be14
|
/plot4.R
|
0c426aa48f6da389d7f06349f2db14ead2ef38ec
|
[] |
no_license
|
oarnold1/ExData_Plotting1
|
5a4e1362240017ef3b088ffd3812ad6a95cab9aa
|
1dda63478d69293f35ea13e0bad18172942da216
|
refs/heads/master
| 2021-09-02T01:01:21.379024
| 2017-12-29T14:23:30
| 2017-12-29T14:23:30
| 115,650,199
| 0
| 0
| null | 2017-12-28T18:40:19
| 2017-12-28T18:40:19
| null |
UTF-8
|
R
| false
| false
| 1,794
|
r
|
plot4.R
|
# Create plot4.png for the Week 1 assignment: a 2x2 panel of
# (1) global active power, (2) voltage, (3) energy sub-metering and
# (4) global reactive power, each plotted against time for the two days
# 2007-02-01 and 2007-02-02.
# Side effects: reads 'household_power_consumption.txt' from the working
# directory and writes './plot4.png'.
plot4 <- function(){
  #This function creates plot4 of the Week1 assignment
  #load the data (whole data, memory capacity is enough)
  household_data <- read.csv('household_power_consumption.txt', sep = ';', stringsAsFactors = FALSE, dec = '.')
  #convert the date format
  household_data$Date <- as.Date(household_data$Date, format = '%d/%m/%Y')
  #Select only the two dates 2007-02-01 and 2007-02-02
  Lvec <- household_data$Date == '2007-02-01' | household_data$Date == '2007-02-02'
  household_data <- household_data[Lvec, ]
  # combine Date and Time into timestamps for the shared x-axis
  date_and_time <- strptime(paste(household_data$Date, household_data$Time, sep = " "), "%Y-%m-%d %H:%M:%S")
  #create the plot: 2x2 panel layout on the PNG device
  png(filename = './plot4.png', width = 480, height = 480)
  par(mfrow = c(2, 2))
  #1 global active power over time
  plot(date_and_time, as.numeric(household_data$Global_active_power), xlab = "", type = 'l', ylab = 'Global Active Power')
  #2 (it's actually bad to plot Voltage without units, but the default plot shows this in this way)
  plot(date_and_time, as.numeric(household_data$Voltage), xlab = "datetime", type = 'l', ylab = 'Voltage')
  #3 three sub-metering channels overlaid in black/red/blue
  plot(date_and_time, as.numeric(household_data$Sub_metering_1), xlab = "", type = 'l', ylab = 'Energy sub metering', col = 'black')
  lines(date_and_time, as.numeric(household_data$Sub_metering_2), type = 'l', col = 'red')
  lines(date_and_time, as.numeric(household_data$Sub_metering_3), type = 'l', col = 'blue')
  legend('topright', legend=c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), lty = 1, bty = 'n', col=c('black', 'red', 'blue'))
  #4 global reactive power over time
  plot(date_and_time, as.numeric(household_data$Global_reactive_power), xlab = "", type = 'l', ylab = 'Global_reactive_power')
  dev.off()
}
|
b0796cb704d90673e6667e879b986c33285511c6
|
7d0dd16c6c2aec1fc2ae4c354bd59062d4c9ee35
|
/feature_selection_HDorado.R
|
725901465c70ea3b4914fb2efae71925660daca4
|
[] |
no_license
|
haachicanoy/r_scripts
|
f1be2780d95d934b1a7821ca4c1fdaa0d4f22dfa
|
c59b6f0765b031419b2b0ba0a5a43550c0252326
|
refs/heads/master
| 2022-07-27T14:19:53.277622
| 2022-07-07T19:22:51
| 2022-07-07T19:22:51
| 63,341,887
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,311
|
r
|
feature_selection_HDorado.R
|
# Feature selection via filters, wrappers and embedded algorithms
# Hugo Andres Dorado B.
# 2018-08-30

# Load libraries
library(here)
library(FSelector)
library(lsr)
library(caret)

# Load feature-selection helper functions
# NOTE(review): absolute Windows path — consider a project-relative path.
source("C:/Users/hadorado/Desktop/Feature_Selection/feature_selection_FUNS.R")

# Load dataset (first CSV column holds the row names)
chiapas_maiz <- read.csv(here::here("Chiapas_Maiz.csv"), row.names = 1)

# Evaluate a candidate predictor subset with a cross-validated random forest.
# `data_set`: data frame whose LAST column is the response.
# `subset`:   the candidate predictor columns (names or indices) to evaluate.
# Returns a one-row data frame with the CV RMSE / R-squared of the best fit.
performance_test <- function(data_set, subset) {
  inputs <- data_set[, -ncol(data_set)]
  output <- data_set[, ncol(data_set)]
  tr_data <- train(
    x = inputs[subset], y = output, method = "rf",
    trControl = trainControl(method = "cv", number = 5)
  )
  tr_sults <- tr_data$results[c("RMSE", "Rsquared")]
  # BUG FIX: the original kept the row with the *largest* RMSE (the worst
  # tuning setting). The best setting minimises RMSE — which is also how the
  # filter results are ranked later in this script (which.min on RMSE).
  tr_sults[which.min(tr_sults$RMSE), ]
}
#----------------------------------FILTERS-------------------------------------#
# Run the entropy-based filters over the dataset.
# NOTE(review): entropyIndexes()/evaluateFilters() come from the sourced
# feature_selection_FUNS.R file, which is not visible here — their exact
# return structure (a ranking per filter, it seems) should be confirmed there.
entIdx_chiapas_maiz <- entropyIndexes(chiapas_maiz)
filters_chiapas_maiz <- evaluateFilters(entIdx_chiapas_maiz,chiapas_maiz)
# The three filters evaluated: presumably Gain Info, Gain Ratio, Symmetrical
# Uncertainty — TODO confirm against feature_selection_FUNS.R.
filts <- c("GI","GR","SU")
# For each filter, pick the subset size with the lowest RMSE and record the
# corresponding feature list as a comma-separated string.
bestSelectionsFilters <-
do.call(rbind,
lapply(1:length(filters_chiapas_maiz),
function(x){
fc <- filters_chiapas_maiz[[x]]
newfc <- do.call(rbind,fc)
perform <- data.frame(nvars = 1:nrow(newfc),
newfc )
df <- data.frame(FS=filts[x],perform[ which.min(perform$RMSE),])
data.frame(df,features= paste(entIdx_chiapas_maiz[[x]][,df$FS],collapse=","))
}
)
)
#----------------------------------WRAPPER-------------------------------------#
datasetWrapper <- chiapas_maiz
# Objective function for the FSelector wrapper searches. FSelector always
# maximises the evaluator, so RMSE is negated to turn it into a minimisation.
evaluator <- function(subset)
{
pt <- performance_test(datasetWrapper,subset)
-pt$RMSE # negate: the search maximises, we want minimal RMSE
}
# Forward selection (seed reset before each search for reproducibility)
set.seed(123)
forwardSearch_chiapas_maiz <- forward.search(names(datasetWrapper)[-ncol(datasetWrapper)],evaluator)
# Backward elimination
set.seed(123)
backward.search_chiapas_maiz <- backward.search(names(datasetWrapper)[-ncol(datasetWrapper)],evaluator)
# Hill climbing
set.seed(123)
hill.climbing.search_chiapas_maiz <- hill.climbing.search(names(datasetWrapper)[-ncol(datasetWrapper)],evaluator)
#---------------------------------EMBEDDED-------------------------------------#
# Preprocess: one-hot encode the categorical predictors.
# BUG FIX: the original referenced an undefined object `dataset`; the data
# frame loaded by this script is `chiapas_maiz`.
dummies <- dummyVars(rendimiento_final ~ ., data = chiapas_maiz) # Dummy vars
dataset_bin <- predict(dummies, newdata = chiapas_maiz)
inputs <- dataset_bin[, -ncol(dataset_bin)]
output <- dataset_bin[, ncol(dataset_bin)]
inputs <- inputs[, -nearZeroVar(inputs)] # Remove near-zero-variance variables

# Train the LASSO (least absolute shrinkage and selection operator) model
set.seed(123)
tr_data_lasso <-
  train(x = inputs, y = output, method = "lasso",
        trControl = trainControl(method = "cv", number = 5), preProcess = "range")

# Keep the RMSE / R-squared of the best tuning setting.
# BUG FIX: the original used which.max on RMSE, i.e. the *worst* setting;
# lower RMSE is better, matching how the filter section ranks results.
lasso_chiapas <-
  data.frame(tr_data_lasso$results[which.min(tr_data_lasso$results$RMSE), ][2:3],
             numvars = ncol(inputs), selector = "lasso")

# Compare the performances, and choose the best according to your
# objectives: goodness of fit, fewer variables, ...
# Proceed with the REGRESSION METHOD with the new subset of variables selected
|
3b48456b1707b8a8ad246ce7373956c510904cdf
|
ff2b418f76f82ecdd399951f9750fe3386f834f6
|
/week_11/course_11/nnHidden.R
|
3823daf39d970fa6325bd97616471f602d6801ed
|
[] |
no_license
|
b03602023/1062CSX_project
|
545b636331e2038a2df578754f90fe27fecd204e
|
e42ed5911830b5c1641638998ad9d624f6db0f7c
|
refs/heads/master
| 2021-01-25T10:49:47.428765
| 2018-06-25T02:00:53
| 2018-06-25T02:00:53
| 123,371,347
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 972
|
r
|
nnHidden.R
|
# Load package
library(neuralnet)
# Prepare the data: one-hot encode the three iris species into 0/1 indicator
# columns, one per class, to serve as the network's three output targets.
data <- iris
data$setosa <- ifelse(data$Species == "setosa", 1, 0)
data$versicolor <- ifelse(data$Species == "versicolor", 1, 0)
data$virginica <- ifelse(data$Species == "virginica", 1, 0)
# Conditional Element Selection
# ifelse(test, yes, no)
# ifelse returns a value with the same shape as test which is filled with elements selected from either yes or no
# depending on whether the element of test is TRUE or FALSE.
# Train the model: three output neurons (one per species), four inputs,
# and two hidden layers of 2 and 4 neurons respectively.
f1 <- as.formula('setosa + versicolor + virginica ~ Sepal.Length + Sepal.Width + Petal.Length + Petal.Width')
bpn <- neuralnet(formula = f1, data = data, hidden = c(2,4),learningrate = 0.01,linear.output = F)
print(bpn)
# Plot the back-propagation network (weights and structure)
plot(bpn)
#Keeping a linear output in the final layer of a neural network is normally
#used in regression settings only; in classification settings,
#such as yours, the correct choice
#is to apply the activation function to the output neuron(s) as well.
|
afd9c1fac4528bdd646280948aef8917e13da9f1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Rfast2/examples/Merge.Rd.R
|
f5e57e40fd3fc3a94aa209af09f0152f75174385
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 270
|
r
|
Merge.Rd.R
|
# Auto-extracted runnable example for Rfast2::Merge (from the package's Rd file).
library(Rfast2)
### Name: Merge 2 sorted vectors in 1 sorted vector
### Title: Merge 2 sorted vectors in 1 sorted vector
### Aliases: Merge
### Keywords: Merge 2 sorted vectors in 1 sorted vector
### ** Examples

# Merge two already-sorted integer vectors into one sorted vector.
x <- 1:10
y <- 1:20
Merge(x,y)
# Clean up the example objects.
x <- y <- NULL
|
f8294d758dac6cc9a4d07928cc1c9d51190006cf
|
6ae2d6b27576cc8c75a7e02256db410e5007a8b2
|
/tests/testthat/test_1-0-0-major-drop_colr.R
|
0da03e1f1aef574de7ea8bf6ff4b63e3883d4cc9
|
[] |
no_license
|
HughParsonage/hutils
|
3c2cec1b1a01179219deb47df0dc9e6a3a127435
|
11e6f828876bbc87a42d43a0ee8084ee6d9a6765
|
refs/heads/master
| 2023-02-20T04:27:01.996815
| 2023-02-10T05:18:04
| 2023-02-10T05:18:04
| 88,268,552
| 11
| 2
| null | 2022-04-13T08:31:57
| 2017-04-14T13:09:05
|
R
|
UTF-8
|
R
| false
| false
| 205
|
r
|
test_1-0-0-major-drop_colr.R
|
context("Major drop_colr")

test_that("Outcomes", {
  library(data.table)
  # drop_colr removes every column whose name matches the regex:
  # "STE_2016" has three consecutive capitals, "abcde_ghi" does not.
  input <- data.table(abcde_ghi = 1, STE_2016 = 2)
  result <- drop_colr(input, "[A-Z]{3}")
  expect_equal(names(result), "abcde_ghi")
})
|
f58bdc89b2ab363914bbd8a97f5304c51369a873
|
1c139b9a35bae97bd11e900911c0226db693941c
|
/R/safe-callback.R
|
fd4ed53cc00aec45d453316896f3272ef4166225
|
[] |
no_license
|
danconsults/httr
|
70065a06968261d85ee101e597747b129862482d
|
0f2fb5f0fdadb4270b4a76b9d532b76848b4813b
|
refs/heads/master
| 2021-01-18T02:06:04.116566
| 2014-09-19T17:22:14
| 2014-09-19T17:30:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 681
|
r
|
safe-callback.R
|
#' Generate a safe R callback.
#'
#' Whenever an R callback function is passed to RCurl, it needs to be wrapped
#' in this handler, which converts errors and interrupts to the return value
#' (\code{1L}) that causes RCurl to terminate a request instead of crashing R.
#'
#' @param f A function.
#' @return A function taking the same arguments as \code{f}; it returns
#'   \code{f}'s value on success, or \code{1L} (after emitting a message) if
#'   \code{f} errors or the user interrupts.
#' @export
#' @examples
#' f1 <- function(x) {
#'   if (x < 0) stop("Negative value")
#'   sqrt(x)
#' }
#' f2 <- safe_callback(f1)
#' f2(-10)
safe_callback <- function(f) {
  # Evaluate `f` now so a later rebinding in the caller can't change behaviour.
  force(f)
  function(...) {
    tryCatch(f(...),
      error = function(e) {
        message("Error:", e$message)
        1L
      },
      interrupt = function(...) {
        message("Interrupted by user")
        1L
      }
    )
  }
}
|
790d17ff5aa29e524b62c7723ffa04c6be157441
|
d366b7cb61a8f897d4eabf9cdc319d287f3e2f48
|
/man/send_to_mysql.Rd
|
3018bccfe445f6773568b3ff17d9a311e406d3c8
|
[
"MIT"
] |
permissive
|
rparrish/GAMUT
|
ee35c83065e93235b4998bf932f2a5c5ec29ee9c
|
16e0309e8d080443e962234aebd4e8931a4dc23d
|
refs/heads/master
| 2021-11-19T15:29:17.860960
| 2021-09-11T02:12:48
| 2021-09-11T02:12:48
| 28,320,390
| 0
| 1
|
MIT
| 2021-09-10T22:54:21
| 2014-12-22T01:39:59
|
R
|
UTF-8
|
R
| false
| true
| 347
|
rd
|
send_to_mysql.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/send_to_mysql.R
\name{send_to_mysql}
\alias{send_to_mysql}
\title{send_redcap_to_mysql.R}
\usage{
send_to_mysql()
}
\value{
a list - table has the raw data and plot has the dotplot
}
\description{
updates the GAMUT dashboard database tables
}
\author{
Rollie Parrish
}
|
2d630d315309d677786be3a32389cab0e841ab4d
|
be91ea534bf267ddf39b47eb4ff1c1a4d6034144
|
/ui.R
|
a56b9449751389c40719934aec556075beed0712
|
[] |
no_license
|
pclivingstone/GETmap
|
df695c3bdc7fa3b2c7dbea9a478b956ad3bdcb83
|
553afa726f0db270793aed3b8996dfe1e952413c
|
refs/heads/master
| 2020-05-05T13:49:52.872326
| 2019-10-25T03:00:42
| 2019-10-25T03:00:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 679
|
r
|
ui.R
|
# ui.R — Shiny UI: a title plus three tabs (map, table, motion chart).
ui <- fluidPage(
  titlePanel("Installed Solar Panel Capacity by Postcode"),
  # Disabled sidebar with month/postcode inputs, kept for future use:
  # sidebarPanel(
  #   dateInput('month', 'Select month to display:',
  #             value = '2019-02-01', format = 'yyyy-mm'),
  #   textInput('postcode', 'Select postcode to centre:', value = '1234')
  # ),
  mainPanel(
    tabsetPanel(
      tabPanel(
        'Interactive Map',
        leafletOutput("mymap", height = 700)  # fixed height; '100%' was tried and disabled
      ),
      tabPanel('Table', dataTableOutput('table')),
      tabPanel('Motion Chart', htmlOutput('chart'))
    )
  )
)
|
3f790d12f44ab9f0aa7deaa551a1cac2574ec90b
|
cd245e28506103210e2557b0ce4d80b228899f5b
|
/Final_Code.R
|
13da69279fcfe51e6b9883f7666c3158c8ff1987
|
[] |
no_license
|
arnavd17/Spotify-Song-Genre-Predictor
|
62274cd7339254bcd092d801515387e902740a7e
|
1e0f2b7427ec87f996e467c045cf99d675fbd5f6
|
refs/heads/master
| 2021-05-24T14:43:44.916245
| 2020-04-15T01:33:30
| 2020-04-15T01:33:30
| 253,610,449
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,359
|
r
|
Final_Code.R
|
# Spotify genre classifier — data loading, cleaning and genre labelling.
# NOTE(review): rm(list = ls()) in a shared script is an anti-pattern (it
# wipes the user's whole workspace); kept byte-for-byte here.
rm(list = ls())
library(readr)
library(kknn)
library(gbm)
library(mosaic)
set.seed(79643)
# Song features plus a free-text Genre column; genre_dataset.txt is read but
# `genres` is not used again in the visible code.
df_spotify <- read_csv("songDb.csv")
genres <- read_table("genre_dataset.txt", col_names = c('Genre'))
# Data cleaning: drop rows with NAs, drop identifier/metadata columns, and
# keep only tracks longer than 2 minutes (120000 ms).
df_clean <- na.omit(df_spotify)
drop <- c('Uri','Name','Ref_Track', 'time_signature', 'Type', 'URL_features')
df_clean <- df_clean[, !(names(df_clean)%in%drop)]
df_clean <- df_clean[(df_clean$Duration_ms > 120000),]
dim(df_clean)
# Label engineering: for each of seven target genres, add a 0/1 indicator
# column set to 1 when the free-text Genre field contains that genre name.
df_genre <- df_clean
genres_aux = c('rock', 'pop', 'country','classical','hiphop','jazz','blues')
# NOTE(review): m_genre is allocated but never used afterwards — dead code.
m_genre = matrix(data = NA, nrow = nrow(df_genre), ncol = 7)
colnames(m_genre) = genres_aux
for (x in genres_aux) { # flag each row whose Genre text mentions genre x
df_genre[grepl(x,df_genre$Genre),x] <- 1
df_genre[!grepl(x,df_genre$Genre),x] <- 0
}
# Keep rows matching exactly ONE genre, then collapse the seven indicator
# columns into a single factor column `genre`.
# NOTE(review): the hard-coded 15:21 assumes the indicators landed in those
# column positions — fragile if songDb.csv gains/loses columns; confirm.
df_genre2 <- df_genre[rowSums(df_genre[15:21])==1,]
for (x in genres_aux){ # assign the single matching genre as the label
df_genre2[df_genre2[x]==1, 'genre'] = x
}
df_genre2 <- df_genre2[,!(names(df_genre2) %in% genres_aux)]
df_genre2$genre = factor(df_genre2$genre)
df_genre2$Tempo = as.numeric(df_genre2$Tempo)
dim(df_genre2)
summary(df_genre2)
# Class-balance inspection: bar chart of row counts per genre.
unique.levels <- sort(unique(df_genre2$genre))
count <- table(df_genre2$genre)
count.df <- data.frame(unique.levels, count)
# Plot the per-genre counts:
plot <- ggplot(count.df, aes(unique.levels, Freq, fill=unique.levels))
plot + geom_bar(stat="identity") +
labs(title="Genre Count",
subtitle="count for every genre in the dataset",
y="Count", x="Genre") +
theme(legend.position="none")
# Build a balanced dataset: take the first 1171 rows of each genre.
# NOTE(review): 1171 is presumably the size of the smallest class — confirm
# against the counts above; taking the FIRST rows (not a random sample) may
# also introduce ordering bias.
r=df_genre2[df_genre2$genre=='rock',][1:1171,]
p=df_genre2[df_genre2$genre=='pop',][1:1171,]
h=df_genre2[df_genre2$genre=='hiphop',][1:1171,]
b=df_genre2[df_genre2$genre=='blues',][1:1171,]
co=df_genre2[df_genre2$genre=='country',][1:1171,]
cl=df_genre2[df_genre2$genre=='classical',][1:1171,]
j=df_genre2[df_genre2$genre=='jazz',][1:1171,]
songs=data.frame(rbind(r,p,h,b,co,cl,j))
# RUN THIS TO CHANGE TO BALANCED DATASET
df_genre2 <- songs
### Train - Test division (70/30 random split)
train = .7
train_sample = sample(1:nrow(df_genre2), nrow(df_genre2)*train)
train_df = df_genre2[train_sample,]
test_df = df_genre2[-train_sample,]
### Scale for KNN and other methods
# NOTE(review): the test set is scaled with its OWN mean/sd rather than the
# training set's — a mild form of leakage/inconsistency; columns 12, 14, 15
# are excluded from scaling (presumably ID/Genre/genre — confirm positions).
scale_train_df <- train_df
scale_train_df[,-c(12,14,15)]<- scale(train_df[,-c(12,14,15)])
scale_test_df <- test_df
scale_test_df[,-c(12,14,15)] <- scale(test_df[,-c(12,14,15)])
### KNN ###
# Model 1: all variables, K chosen by 10-fold cross-validation (kmax = 100).
model_knn <- train.kknn(genre~(.-ID-Mode-Genre), data=scale_train_df, kmax = 100, kcv = 10)
model_knn$best.parameters
# NOTE(review): CV ran on the SCALED data but the final fit/predict below
# uses the unscaled train_df/test_df — likely unintentional; confirm.
final_model = kknn(genre~(.-ID-Mode-Genre),train_df,test_df,k= model_knn$best.parameters$k, kernel = "rectangular")
# Accuracy = trace of the confusion matrix over the number of predictions.
sum(diag(table(final_model$fitted.values, test_df$genre)))/length(final_model$fitted.values)
# Model 2: only the most important variables found earlier, CV for K again.
model_knn2 <- train.kknn(genre~(Danceability + Energy + Loudness + Speechness+Acousticness+Instrumentalness+Valence+Liveness+Duration_ms), data=scale_train_df, kmax = 100, kcv = 10)
model_knn2$best.parameters
# NOTE(review): this reuses model_knn's best K, not model_knn2's — confirm
# whether `model_knn2$best.parameters$k` was intended here.
final_model2 = kknn(genre~(Danceability + Energy + Loudness + Speechness+Acousticness+Instrumentalness+Valence+Liveness+Duration_ms),train_df,test_df,k= model_knn$best.parameters$k, kernel = "rectangular")
sum(diag(table(final_model2$fitted.values, test_df$genre)))/length(final_model2$fitted.values)
### TREE ###
# Classification tree — NOTE(review): this section reads its own train.csv /
# test.csv files rather than reusing train_df/test_df built above.
library(tree)
set.seed(79643)
# Select feature columns (1:11, 13) plus the label column (15).
data = read.csv("train.csv")
df=data[,c(1:11,13,15)]
# NOTE(review): attach() is an anti-pattern (masks the global environment);
# the 0.02 training fraction (2% of rows) also looks unusually small — confirm.
attach(df)
set.seed(79643)
train = sample(1:nrow(df), nrow(df)*0.02)
tree.music=tree(genre~.,data=df, subset = train)
summary(tree.music)
plot(tree.music)
text(tree.music,pretty=0)
# NOTE(review): MSE is initialised but never used afterwards.
MSE=NULL
# Estimate the test error on the held-out test dataset.
test=read.csv("test.csv")
test = test[,c(1:11,13,15)]
tree.pred = predict(tree.music,test, type = 'class')
result = data.frame(test$genre,tree.pred)
# Mark correct predictions with 1 (mismatches stay NA; subset drops them).
result[result$tree.pred == result$test.genre,"Equal"] <- 1
accuracy_tree = nrow(subset(result, result$Equal == 1)) / nrow(result)
accuracy_tree
# Prune the tree to 8 terminal nodes.
prun.tree=prune.tree(tree.music,best=8)
# Plot the pruned tree.
plot(prun.tree,type="uniform")
text(prun.tree,pretty=0)
# Estimate the test error of the pruned tree on the test dataset.
pruntree.pred = predict(prun.tree,test, type = 'class')
result = data.frame(test$genre,pruntree.pred)
result[result$pruntree.pred == result$test.genre,"Equal"] <- 1
accuracy_pruntree = nrow(subset(result, result$Equal == 1)) / nrow(result)
accuracy_pruntree
### BAGGING ###
# Bagging = random forest with mtry equal to ALL 12 predictors per split.
library(randomForest)
set.seed (79643)
Accuracy_bagging = NULL
ntree <-c(50,100,200,500,2000)
# Try ntree = 50, 100, 200, 500, 2000 with all the variables.
for (i in ntree){
bag.music =randomForest(genre ~ Danceability + Energy + Key + Loudness + Mode + Speechness + Acousticness + Instrumentalness + Liveness + Valence + Tempo + Duration_ms, data=train_df ,
 mtry=12, ntree = i)
# Predict on the test set and compute the accuracy rate.
yhat.bag = predict(bag.music,newdata = test_df)
aux = mean( yhat.bag == test_df$genre)
# Accumulate accuracy rates (one per ntree value).
Accuracy_bagging = c(Accuracy_bagging,aux)
}
# Plot number of trees versus accuracy rates.
plot(ntree, Accuracy_bagging,type="b",xlab="ntree",col="blue",ylab="Accuracy",lwd=2,cex.lab=1.2, main = "ntree vs. Accuracy")
# Report the highest accuracy rate achieved.
Accuracy_bagging[which.max(Accuracy_bagging)]
set.seed (79643)
# Re-train with the chosen number of trees (2000) and all 12 splits.
model_bagging<-randomForest(genre~.-ID-Genre,data=train_df,ntree=2000,mtry=12,importance=TRUE)
# Inspect variable importance.
importance(model_bagging)
varImpPlot(model_bagging)
### BOOSTING ###
# Cross-validation was left out of the final code due to runtime (a nested
# loop over n.trees and shrinkage was used during development).
ntrees=1000
boostfit = gbm(genre~.-ID-Genre-Key-Mode,data=train_df,distribution='multinomial', # multinomial is slower but gives per-class probabilities
 interaction.depth=5,n.trees=ntrees,shrinkage=.01)
pred = predict(boostfit,newdata=test_df,n.trees=ntrees, type = 'response')
df_p = data.frame(pred)
# NOTE(review): these names assume gbm orders classes alphabetically — confirm.
colnames(df_p) = c('blues', 'classical','country','hiphop','jazz','pop','rock')
df_p['genre'] = colnames(df_p)[apply(df_p,1,which.max)] # predicted genre = class with the top probability
sum(diag(table(test_df$genre, df_p$genre)))/length(test_df$genre)
### RANDOM FOREST ###
# Tune a random forest over the number of trees (ntree) and the number of
# candidate variables per split (mtry), keeping the combination with the
# highest test-set accuracy in opt_tree / opt_m.
ntr <- c(50, 200, 500, 2000, 5000)
max_acc <- 0
for (n in ntr) {
  # Accuracy for mtry = 3..8 at this ntree (preallocated, one slot per mtry).
  a <- numeric(6)
  for (i in 3:8) {
    model_rf <- randomForest(genre ~ . - ID - Genre, data = train_df, ntree = n, mtry = i, importance = TRUE)
    predValid <- predict(model_rf, test_df, type = "class")
    a[i - 2] <- mean(predValid == test_df$genre)
    if (a[i - 2] > max_acc) {
      max_acc <- a[i - 2]
      opt_tree <- n
      opt_m <- i
    }
  }
  print(paste0('Number of trees: ', n))
  print(a)
}

# Re-train with the optimal number of trees and splits.
model_rf <- randomForest(genre ~ . - ID - Genre, data = train_df, ntree = opt_tree, mtry = opt_m, importance = TRUE)
# Inspect and plot variable importance.
importance(model_rf)
varImpPlot(model_rf)

# Predict the genre of a completely new song.
# BUG FIX: column names must match the training predictors exactly —
# predict.randomForest errors on case-mismatched names ("energy" vs "Energy",
# etc.) — and the original called the undefined object `model_rf_4` instead
# of the model trained above, `model_rf`.
s <- data.frame(Danceability = 0.326,
                Energy = 0.0993,
                Key = 7,
                Loudness = -22.496,
                Mode = 1,
                Speechness = 0.072,
                Acousticness = 0.988,
                Instrumentalness = 0.916,
                Liveness = 0.108,
                Valence = 0.417,
                Tempo = 137.274,
                Duration_ms = 150973)
predValid <- predict(model_rf, newdata = s)
predValid
# model<- c('KNN','Classification Tree','Bagging', 'Random Forest', 'Boosting')
# skew <- c(0.5388, 0.4495, 0.6015, 0.61, 0.5652)
# balanced <-c(0.5293, 0.4682, 0.6199, 0.6310, 0.5988)
#
# df_model <- data_frame(model,skew,balanced)
# df_model
# ggplot(df_model, aes(x=model)) +
# geom_bar(aes(...))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.