blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b5f1fe6858eb23b533680212312620241fdcb75d
|
a03da6a1edc7b1a1cf4b0829f5ece771f584df95
|
/man/theme.rpres.Rd
|
127c87360df31fa590bad3c09b3ad2fd2d4ac836
|
[] |
no_license
|
homerhanumat/tigerstats
|
4fbcc3609f46f6046a033d17165f7838dbd77e1a
|
17067f7e5ec6b6cf712b628a4dbf5131c691ae22
|
refs/heads/master
| 2021-07-06T06:24:07.716196
| 2020-09-22T15:24:01
| 2020-09-22T15:24:01
| 15,921,287
| 14
| 7
| null | null | null | null |
UTF-8
|
R
| false
| true
| 728
|
rd
|
theme.rpres.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/theme.R
\name{theme.rpres}
\alias{theme.rpres}
\title{Lattice Theme for R Presentations}
\usage{
theme.rpres()
}
\value{
Returns a list to be supplied as the \code{theme} to the \code{lattice} function
\code{\link{trellis.par.set}()}.
}
\description{
Modifies the current theme for use with lattice graphics in R Presentation documents. Increases size of title,
axis labels and axis numbers, thickens some lines, etc.
}
\note{
Deprecated in favor of \code{themerpres()}. May not appear in future versions.
}
\examples{
trellis.par.set(theme=theme.rpres())
}
\seealso{
\code{\link{trellis.par.set}}, \code{\link{show.settings}}
}
\keyword{graphics}
|
693e9186ab8c216f8a7f7d50ee28330630716fd5
|
1b67115132aee53bad61fa6fb2198685090e6754
|
/server.R
|
d9a18c8ffd48014727ea3ec979b918f1a2c84fed
|
[] |
no_license
|
michelbouchou/shinyApp-PML
|
4cc3e6026473cdd16cb0572be4a324ce0554d641
|
460d844a30513dbdbfd7e27a1ab4d0b68af34dd4
|
refs/heads/master
| 2021-01-01T06:10:10.430583
| 2017-07-16T11:18:26
| 2017-07-16T11:18:26
| 97,376,955
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 564
|
r
|
server.R
|
library(shiny)
library(scales)
# Server logic: draw a scatter plot of random points whose count, colour
# and opacity are driven by the UI inputs (numeric, opacity, showColor).
shinyServer(function(input, output) {
  output$plot <- renderPlot({
    # fixed seed so the same point cloud is redrawn on every input change
    set.seed(18)
    nbPoints <- input$numeric   # number of points requested by the user
    opacity <- input$opacity    # alpha level for the point colour
    dataX <- runif(nbPoints, min = -100, max = 100)
    dataY <- runif(nbPoints, min = -100, max = 100)
    # scalar condition: plain if/else instead of vectorized ifelse()
    dataType <- if (input$showColor) "blue" else "black"
    df <- data.frame(X = dataX, Y = dataY)
    plot(df$X, df$Y, col = alpha(dataType, opacity), pch = 16)
  })
})
|
a89fa72d18313cf0b31e4262e1725c2f28c84436
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/stepPenal/examples/stepaic.Rd.R
|
9b3eb1eaa38abb464ae3bf6e727a255c88d5b928
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 377
|
r
|
stepaic.Rd.R
|
# Example code for stepPenal::stepaic, auto-extracted by genthat.  The
# whole example body sits inside "## Not run" guard comments, so only the
# library() call below actually executes when this file is sourced.
library(stepPenal)
### Name: stepaic
### Title: Stepwise forward variable selection based on the AIC criterion
### Aliases: stepaic
### ** Examples
## Not run:
##D set.seed(14)
##D beta <- c(3, 2, -1.6, -4)
##D noise <- 5
##D simData <- SimData(N=100, beta=beta, noise=noise, corr=FALSE)
##D
##D stepaicfit <- stepaic(Data=simData)
##D stepaicfit
## End(Not run)
|
50a22f32bae437457a76d178043b10f1f93c1809
|
1630e1bf37845810280c6ddb89cb84315d8063e4
|
/epis_analysis/epis_analysis.R
|
bb706b0efa27484c83975743236b783e74a56487
|
[] |
no_license
|
ivanovaos/PetriNetExhaustiveSimulator
|
1b6cd067f4e4fc327ac4aac77abc08e465bc4f0b
|
d7c2108432aa8a38d9283cc3e8007584e40b2ca3
|
refs/heads/master
| 2020-04-07T23:24:36.736304
| 2018-12-01T16:01:16
| 2018-12-01T16:01:16
| 158,811,398
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,300
|
r
|
epis_analysis.R
|
# Post-processing of exhaustive Petri-net simulation output: classifies
# each simulated model's per-gene epistasis pattern and collects sgi
# scores.  (Original author: Saman Amini)
# Collect command-line arguments
args <- commandArgs(TRUE)
# Default setting when no argument is passed: fall through to the help text
if (length(args) < 1) {
args <- c("--help")
}
# Help section: print usage and quit without saving the workspace.
# NOTE(review): the usage text names "modeling_downstream.R" although this
# file is epis_analysis.R -- presumably copied from a sibling script;
# left unchanged because it is runtime output text.
if ("--help" %in% args) {
cat("
modeling_downstream.R
script to parse through results of simulation and get petrinets that show inversion.
Arguments:
--arg1=full path of folder you want to parse - /hpc/dbg_gen/samini/gi_kp_gstf/modeling/simulation_results/models_row_1
\n")
q(save="no")
}
# First (only) argument: directory containing the simulation result files
path <- args[1]
print(path)
# Parse one simulation output record into a 4x2 table of token counts.
#
# petri_net: character vector from strsplit(..., ";"): element 1 is the
#   model id, element 2 the transitions, element 3 the results string
#   whose ":"-separated fields hold space-separated G1/G2 token counts
#   in the order wildtype, R2, R1, R1_R2.
#
# Returns a data.frame (character values) with rows
#   wildtype / R1 / R2 / R1_R2 and columns G1 / G2.
parse_sim_lines <- function(petri_net) {
  # only the results field is needed; the model id and transitions in
  # petri_net[1:2] are handled by the caller
  fields <- unlist(strsplit(petri_net[3], split = ":"))
  split_tokens <- function(s) unlist(strsplit(s, split = " "))
  petri_net_results <- as.data.frame(matrix(nrow = 4, ncol = 2))
  colnames(petri_net_results) <- c("G1", "G2")
  rownames(petri_net_results) <- c("wildtype", "R1", "R2", "R1_R2")
  # get number of tokens from the simulation output; note the R1/R2
  # fields arrive swapped (field 3 is R1, field 2 is R2)
  petri_net_results["wildtype", ] <- split_tokens(fields[1])
  petri_net_results["R1", ] <- split_tokens(fields[3])
  petri_net_results["R2", ] <- split_tokens(fields[2])
  petri_net_results["R1_R2", ] <- split_tokens(fields[4])
  petri_net_results
}
# Convert raw token counts to log2 fold changes and an interaction score.
#
# petri_net_r: data.frame from parse_sim_lines() with rows
#   wildtype/R1/R2/R1_R2 and character columns G1/G2.
#
# Returns a data.frame of log2 fold changes versus wildtype (rows R1,
# R2, R1_R2) plus an "sgi" row per gene holding
#   FC(double deletion) - (FC(R1) + FC(R2)).
FC_and_GI_calc <- function(petri_net_r) {
  # counts arrive as character; coerce before arithmetic
  petri_net_r$G1 <- as.numeric(petri_net_r$G1)
  petri_net_r$G2 <- as.numeric(petri_net_r$G2)
  # +1 pseudocount keeps log2 finite for zero counts
  petri_net_r <- petri_net_r + 1
  # log2 fold change of every row against the wildtype row, vectorised
  # (replaces the original 1:nrow() loop)
  wildtype <- unlist(petri_net_r["wildtype", ])
  petri_net_results_FC <- as.data.frame(
    log2(sweep(as.matrix(petri_net_r), 2, wildtype, "/"))
  )
  # drop the wildtype reference row (always log2(1) = 0)
  petri_net_results_FC <- petri_net_results_FC[-1, ]
  # sgi: deviation of the double deletion from the additive expectation
  petri_net_results_FC["sgi", "G1"] <- petri_net_results_FC["R1_R2", "G1"] -
    (petri_net_results_FC["R1", "G1"] + petri_net_results_FC["R2", "G1"])
  petri_net_results_FC["sgi", "G2"] <- petri_net_results_FC["R1_R2", "G2"] -
    (petri_net_results_FC["R1", "G2"] + petri_net_results_FC["R2", "G2"])
  petri_net_results_FC
}
# Annotate the fold-change table with qualitative direction labels.
#
# For each gene (G1/G2) builds a dot-separated label such as
# "down.up.NO" describing whether expression moved down / up / not at
# all in R1, R2 and the R1_R2 double deletion (double-deletion label in
# upper case), using |log2 FC| > log2(1.7) as the change threshold.
# Then appends "-negGI" or "-posGI" depending on the sign of the "sgi"
# row, and stores the result in a new "direction" row.
#
# NOTE(review): writing a character value into the "direction" row
# coerces the numeric columns to character, so the sgi sign checks
# below compare character values (0 is coerced to "0", comparison is
# lexicographic).  This happens to detect the sign correctly for R's
# default numeric formatting, but it is fragile -- TODO confirm.
direction_of_changes <- function(petri_net_results_FC) {
deletions <- c("R1", "R2", "R1_R2")
genes <- c("G1", "G2")
for (gene in genes) {
direction_all <- c()
for (del in deletions) {
# significant decrease
if (petri_net_results_FC[del, gene] < -log2(1.7)) {
if (del == "R1_R2") {
direction <- "DOWN"
} else {
direction <- "down"
}
}
# significant increase
else if (petri_net_results_FC[del, gene] > log2(1.7)) {
if (del == "R1_R2") {
direction <- "UP"
} else {
direction <- "up"
}
}
# no significant change
else {
if (del == "R1_R2") {
direction <- "NO"
} else {
direction <- "no"
}
}
# accumulate, then re-collapse on every iteration; the net effect is a
# "."-joined string of the three per-deletion labels
direction_all <- cbind(direction_all, direction)
direction_all <- paste(direction_all, collapse = ".")
}
petri_net_results_FC["direction", gene] <- direction_all
}
# tag each gene's label with the sign of its interaction score
if (petri_net_results_FC["sgi", "G1"] < 0) petri_net_results_FC["direction", "G1"] <- paste(petri_net_results_FC["direction", "G1"], "negGI", sep = "-")
if (petri_net_results_FC["sgi", "G1"] > 0) petri_net_results_FC["direction", "G1"] <- paste(petri_net_results_FC["direction", "G1"], "posGI", sep = "-")
if (petri_net_results_FC["sgi", "G2"] < 0) petri_net_results_FC["direction", "G2"] <- paste(petri_net_results_FC["direction", "G2"], "negGI", sep = "-")
if (petri_net_results_FC["sgi", "G2"] > 0) petri_net_results_FC["direction", "G2"] <- paste(petri_net_results_FC["direction", "G2"], "posGI", sep = "-")
return(petri_net_results_FC)
}
# Reclassify epistasis calls that contradict their interaction sign.
#
# For each gene, if the assigned "epis" class is one of the filtered
# patterns and the call is inconsistent with the sign of the genetic
# interaction -- a positive GI whose double deletion is below both
# single deletions, or a negative GI whose double deletion is above
# both -- the class is replaced by "misc_sim".
#
# petri_net_results_FC: data.frame with rows R1, R2, R1_R2, epis,
#   direction and columns G1, G2 (numeric rows may be stored as
#   character after the direction annotation).
#
# Returns the data.frame with the "epis" row possibly updated.
filter_misc_patterns <- function(petri_net_results_FC) {
  # epistasis classes subject to the consistency filter
  patterns <- c("buffering",
                "quantitative buffering",
                "suppression",
                "quantitative suppression",
                "masking",
                "inversion")
  for (gene in c("G1", "G2")) {
    # a single membership test replaces the original loop that compared
    # against each pattern in turn (the class matches at most one)
    if (!(petri_net_results_FC["epis", gene] %in% patterns)) next
    double <- as.numeric(petri_net_results_FC["R1_R2", gene])
    single_R1 <- as.numeric(petri_net_results_FC["R1", gene])
    single_R2 <- as.numeric(petri_net_results_FC["R2", gene])
    direction <- petri_net_results_FC["direction", gene]
    if (grepl("posGI", direction) && (double < single_R1) && (double < single_R2)) {
      petri_net_results_FC["epis", gene] <- "misc_sim"
    }
    if (grepl("negGI", direction) && (double > single_R1) && (double > single_R2)) {
      petri_net_results_FC["epis", gene] <- "misc_sim"
    }
  }
  return(petri_net_results_FC)
}
##########################################
# main script
##########################################
# Reference table mapping direction patterns to epistasis class names.
# NOTE(review): hard-coded absolute path; breaks on any other machine.
epis_patterns <- read.delim("/hpc/dbg_gen/samini/repos/exhaustivepetrinetsim/epis_analysis/Transcription-GI-types_v2.txt") # read epistatic patterns
# read and parse files
sim_files <- list.files(path, pattern = "[0-9].txt", full.name = T)
#inv_all <- as.data.frame(matrix(NA, nrow = 0, ncol = 3))
genes <- c("G1", "G2")
# One pass per simulation file: classify every Petri net in it, then
# write per-model epistasis classes and the collected sgi scores.
for (sim_file in sim_files) {
f <- sprintf("working now on: %s", sim_file)
print(f)
abs_sim_results <- read.delim(sim_file, header = F)
# NOTE(review): grown by rbind() inside the loop below -- O(n^2); a
# list of rows + one bind would be faster for large files.
m_epis_all <- as.data.frame(matrix(NA, nrow = 0, ncol = 3))
sgi_scores <- vector(mode = "numeric", length = 0)
for (petri in 1:nrow(abs_sim_results)) {
#print(petri)
# each row is one model: "model;transitions;results"
petri_net <- unlist(strsplit(as.character(abs_sim_results[petri,]), split = ";"))
model_num <- petri_net[1]
petri_net_results <- parse_sim_lines(petri_net) # use function to get tokens from sim results
#print(petri_net_results)
# calculate FC for G1 and G2 in all deletion mutants.
petri_net_results_FC <- FC_and_GI_calc(petri_net_results)
#print(petri_net_results_FC)
# collect all sgi scores to make a density plot
sgi_scores <- c(sgi_scores, as.numeric(petri_net_results_FC["sgi",]))
petri_net_results_FC <- direction_of_changes(petri_net_results_FC)
# assign an epistasis class only when the interaction is significant.
# NOTE(review): the direction string is used as an unescaped regex in
# grepl(); its "." and "-" are metacharacters ("." matches any char),
# which could in principle over-match pattern names -- TODO confirm.
for (gene in genes) {
if (abs(as.numeric(petri_net_results_FC["sgi", gene])) > log2(1.7)) {
petri_net_results_FC["epis", gene] <- as.character(epis_patterns$description[grepl(petri_net_results_FC["direction", gene], epis_patterns$name)])
} else {
petri_net_results_FC["epis", gene] <- 0
}
}
#put the results of different models together
petri_net_results_FC <- filter_misc_patterns(petri_net_results_FC)
#print(petri_net_results_FC)
m_epis <- c(model_num, as.character(petri_net_results_FC["epis",]))
m_epis <- data.frame(t(m_epis))
colnames(m_epis) <- c("model", "G1", "G2")
m_epis_all <- rbind(m_epis_all, m_epis)
}
m_epis_all$G1 <- as.character(m_epis_all$G1)
m_epis_all$G2 <- as.character(m_epis_all$G2)
# write output into txt files.
out_file <- paste(strsplit(sim_file, split = "\\.")[[1]][1], "_epis.txt", sep = "")
write.table(m_epis_all, out_file, row.names = F, quote = F, sep = "\t")
#print(m_epis_all)
out_file_sgi_scores <- paste(strsplit(sim_file, split = "\\.")[[1]][1], "_epis_sgi_scores.txt", sep = "")
sgi_scores <- round(sgi_scores, digits = 3)
write.table(sgi_scores, out_file_sgi_scores, row.names = F, quote = F, sep = "\t", col.names = F)
#print(out_file_sgi_scores)
}
|
b9dde102b23efb97e5794b5ad6aa7cb40ef37b77
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkViewportGetShadowType.Rd
|
0c5b4030f5bd2cddaa2cd085b54c4b56e6b2612d
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 439
|
rd
|
gtkViewportGetShadowType.Rd
|
\alias{gtkViewportGetShadowType}
\name{gtkViewportGetShadowType}
\title{gtkViewportGetShadowType}
\description{Gets the shadow type of the \code{\link{GtkViewport}}. See
\code{\link{gtkViewportSetShadowType}}.}
\usage{gtkViewportGetShadowType(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkViewport}}}}
\value{[\code{\link{GtkShadowType}}] the shadow type}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
eaf8ce6e646baf7162bbfac069b1aca542fa9bb3
|
d6744296dbd0427a05119f56c378cdd39d71aad2
|
/man/DMRViterbi.Rd
|
c5ff998af82631a72475fa4b355f02c3e2783f4a
|
[] |
no_license
|
cran/DMRMark
|
a7e50e0351f40bb026a47fc650bd9ee86a15a15f
|
fbf5ffa2c4c628320964ddd4c1ff628c7dbdf9ec
|
refs/heads/master
| 2020-08-05T07:18:31.763883
| 2017-04-21T16:58:43
| 2017-04-21T16:58:43
| 67,360,999
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,787
|
rd
|
DMRViterbi.Rd
|
\name{DMRViterbi}
\alias{DMRViterbi}
\title{
Viterbi algorithm to estimate posterior probabilities of DMRs.
}
\description{
This function takes M-values and estimated parameters
from 'DMRMark', then uses Viterbi algorithm for
estimating states' posterior probabilities for each locus.
}
\usage{
DMRViterbi(mv, pars, L = rep(1, nrow(mv)), starting = NULL,
pd = NULL, region = TRUE,
orderBy = c("max", "mean", "median", "min"), VitP = NULL)
}
\arguments{
\item{mv}{The input M-values matrix, NA is not allowed.
}
\item{pars}{The list of model parameters. Getting by calling 'DMRMark'.
}
\item{L}{A vector to specify the distance between each probes in bp.
$L < 0$ represents
change of chromosome. Default is $L = 1$ for all probes.
}
\item{starting}{A vector to specify the position
to initial new chains. We suggest new chains
should be initiated at least at starting of new chromosome.
When it is null, new chains initiate at beginning and where $L > 100000
or $L < 0$.
}
\item{pd}{A design matrix, which can be generated by 'stats::model.matrix'.
If the M-values are totally paired or single paired, just leave it to be NULL.
}
\item{region}{If set to TRUE, this function returns the regions formed by
Viterbi posterior states. Otherwise, it returns posterior probabilities
and states for individual loci. Default is TRUE.
}
\item{orderBy}{Only enabled when 'region = TRUE'.
Order the regions by which statistics? Choice include 'max', 'mean', 'median' and 'min', which orders the regions by the maximum, geometric mean,
median or minimum of the posterior probabilities in each region respectively.
Default is 'max'.
}
\item{VitP}{Only enabled when 'region = FALSE'.
The minimum posterior probabilities required to be the DMC states.
A locus with DMC's posterior probability lower than 'VitP' will in the
non-DMC states with highest probabilities. When set to NULL, simply return
the MAP states. Default is NULL.
}
}
\value{
If 'region = FALSE', the return value is a list contains:
\item{states}{The MAP methylation status satisfies the 'VitP'.
}
\item{deltas}{The matrix with each row corresponds to the posterior
probabilities of each locus in which states.
}
If 'region = TRUE', the return value is a dataframe with the following fields:
\item{begin}{Beginning of each region. In probe index.
}
\item{ending}{Ending of each region. In probe index.
}
\item{MAP_state}{The MAP state of each region.
}
\item{minVP}{The minimum Viterbi posterior probability
of the MAP state in each region
}
\item{meanVP}{The geometric mean of Viterbi posterior probability
of the MAP state in each region
}
\item{maxVP}{The maximum Viterbi posterior probability
of the MAP state in each region
}
\item{midVP}{The median Viterbi posterior probability
of the MAP state in each region
}
}
\author{
Linghao SHEN <sl013@ie.cuhk.edu.hk>
}
\seealso{
See \code{\link{DMRMark}} about model parameter estimation
}
\examples{
# DMRViterbi
# DMR detection performed on chr18 of a small BLCA dataset from TCGA
data(BLCA)
# Use a small subset
nprobe <- 500
# M-values
mv <- BLCA$mv[1:nprobe,]
# Distance between probes, L<0 indicates crossing chromosomes
L = BLCA$distance[1:nprobe]
# Initialize new chain when probe distance too long
# or across different chromosomes
newChains <- which((L > 100000) | L < 0)
# The starting positions of new chains
starting <- c(1, newChains[-length(newChains)]+1)
# Run DMRMark with default options
pars <- DMRMark(mv, L, starting)
# Get the posterior of being certain states
results <- DMRViterbi(mv, pars, L, starting)
head(results)
}
|
ca83fad1a0803ecffc9e4470043f9946cb9ace50
|
3d0c35ec6ae3761a182045135de01d1e44a40a38
|
/Autoforecasting0227.R
|
8946a9800e2d8a3288275f329a6840ad97612eb8
|
[] |
no_license
|
wenrurumon/stnn
|
75f09265b63e77796a55fa1d59cc178203b89b1a
|
f86c57f05a51bf1990df4055c56ac2a98910eed1
|
refs/heads/master
| 2020-12-28T04:25:25.615430
| 2020-07-19T07:19:32
| 2020-07-19T07:19:32
| 238,181,205
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,169
|
r
|
Autoforecasting0227.R
|
############################
# Module
############################
# NOTE(review): rm(list=ls()) in a script is an anti-pattern (wipes the
# caller's workspace); kept because downstream code may rely on a clean
# environment.
rm(list=ls())
library(plyr)
library(openxlsx)
library(data.table)
library(dplyr)
library(keras)
# NOTE(review): MASS is attached after dplyr, so MASS::select masks
# dplyr::select; only filter/mutate from dplyr appear to be used below.
library(MASS)
# Build one training example from the (time x series) count matrix `x`.
#
# Takes the p-row history window starting at row i as predictors and,
# when `gety` is TRUE, row p+i as the target.  Counts are clamped at 0,
# then each series is normalised by the mean of its history window.
#
# x:    numeric matrix / data.frame, rows = time points, cols = series.
# i:    1-based index of the first history row.
# p:    window length (number of history rows).
# gety: also extract and normalise the target row?  Default TRUE.
#
# Returns list(x = series-by-p normalised history, y = normalised
# target column (or NULL when gety is FALSE), f = per-series
# normalisation factors, i = i).
get_model_file <- function(x, i, p, gety = TRUE) {
  y <- NULL
  if (gety) {
    # target row, transposed to one column per example series
    y <- t(x[p + i, , drop = FALSE])
    y[y < 0] <- 0
  }
  # history window rows i .. i+p-1 (the original wrote this as 1:p+i-1)
  x <- t(x[i:(p + i - 1), , drop = FALSE])
  x[x < 0] <- 0
  f <- rowMeans(x)
  if (gety) {
    y <- y / f
    # a zero-mean window gives NaN (0/0) or Inf (pos/0); treat both as 0
    y[is.na(y)] <- 0
    y[y == Inf] <- 0
  }
  x <- x / f
  x[is.na(x)] <- 0
  list(x = x, y = y, f = f, i = i)
}
# Assemble the full training design from matrix `x`.
#
# Builds one example per admissible window start via get_model_file(),
# replicates example i ceiling(i / w) times so recent windows get more
# weight, then stacks predictors and targets row-wise.
#
# x:    numeric matrix, rows = time points, columns = series.
# p:    history window length.
# gety: logical, extract targets?  (Also used arithmetically: TRUE
#       shortens the index range by 1 so the target row exists.)
# w:    recency weighting period.
# sel:  number of trailing rows to hold out.
#
# Returns list(Y = stacked targets, or NULL when gety is FALSE,
#              X = stacked predictors).
get_model_xy <- function(x, p, gety, w, sel) {
  n_examples <- nrow(x) - p - gety - sel
  out <- lapply(seq_len(n_examples), get_model_file, x = x, p = p, gety = gety)
  # replicate example i ceiling(i / w) times to up-weight recent windows
  weights <- ceiling(vapply(out, function(ex) ex$i, numeric(1)) / w)
  out <- rep(out, weights)
  X <- do.call(rbind, lapply(out, function(ex) ex$x))
  Y <- do.call(rbind, lapply(out, function(ex) ex$y))
  list(Y = Y, X = X)
}
# MSAE: build and train a small stacked-autoencoder-style keras model
# mapping X to Y.
#
# X, Y:        numeric matrices (rows = examples).
# dims:        widths of the two hidden layers, c(encoder, latent).
# activations: activation names for the two hidden layers.
# batch:       batch size intended for fit().
# epochs:      number of training epochs.
# verbose:     keras verbosity flag.
#
# Returns list(model, encoder, decoder, history).
#
# NOTE(review): fit()'s documented argument is `batch_size`; the
# `batch =` below may be silently swallowed by `...`, in which case
# training runs with the default batch size -- TODO confirm against the
# keras R API.
#MSAE
MSAE <- function(X,Y,dims,activations,batch,epochs,verbose){
e.input <- layer_input(shape=ncol(X))
e.layer <- layer_dense(e.input,dims[1],activation=activations[1])
l.layer <- layer_dense(e.layer,dims[2],activation=activations[2])
# the output layer is created unattached so the same weights can be
# shared between the full model and the stand-alone decoder
d.output <- layer_dense(units=ncol(Y),activation=NULL)
model <- keras_model(e.input,d.output(l.layer))
encoder <- keras_model(e.input,l.layer)
d.input <- layer_input(shape=dims[2])
decoder <- keras_model(d.input,d.output(d.input))
model %>% compile(
loss = "mean_squared_error",
optimizer = "adam",
metrics = c('mae')
)
# system.time() wraps fit() only to report training duration
system.time(history <- model %>% fit(
x = X,
y = Y,
batch = batch,
epochs = epochs,
verbose = verbose
))
list(model=model,encoder=encoder,decoder=decoder,history=history)
}
############################
# Data Processing
############################
# NOTE(review): hard-coded absolute working directory; breaks on any
# other machine.
setwd('/Users/wenrurumon/Documents/posdoc/wuhan')
raw1 <- read.csv("china_confirmed.csv")[,-1]
raw2 <- read.csv("global_confirmed.csv")[,-1:-2]
raw.date <- raw1[,1]
# pad the global series with 10 zero rows -- presumably to align its
# start date with the China series (TODO confirm against the CSVs)
raw <- list(
raw1,
rbind(matrix(0,nrow=10,ncol=ncol(raw2)),as.matrix(raw2))
)
# keep the first 35 columns of the combined table
raw <- do.call(cbind,raw)[,1:35]
Y.actual <- raw[,-1]
# daily new cases: first differences of the cumulative counts
Y.model <- apply(Y.actual,2,diff)
############################
# Validation
############################
# Rolling hold-out validation: for each hold-out length sel = 1..10,
# train 5 MSAE models excluding the last `sel` days, score the trailing
# windows, and rebuild predicted national cumulative totals for the
# held-out days (later hold-out days chain on the previous prediction).
mfile.vali <- lapply(1:10,function(sel){
print(sel)
mfile <- get_model_xy(Y.model,p=8,gety=T,w=12,sel=sel)
# ensemble of 5 independently trained models
models.vali <- lapply(1:5,function(i){
print(paste(i,Sys.time()))
MSAE(X=mfile$X,Y=mfile$Y,
dims=c(32,4),activations=c('relu','relu'),
batch=128,epochs=1000,verbose=0)
})
# prediction inputs: every 8-day window ending in the last 10 days
mfile.vali <- lapply((nrow(Y.model)-9):0,function(i){
get_model_file(x=Y.model,i=nrow(Y.model)-8-i,p=8,gety=FALSE)
})
# ensemble mean of predicted national new-case totals per window
mfile.vali <- rowMeans(sapply(models.vali,function(m){
sapply(mfile.vali,function(x){
sum((m$model %>% predict(x$x)) * x$f)
})
}))
mfile.vali <- cbind(
actual = rowSums(Y.model)[-1:-8],
predict = ifelse(mfile.vali<0,0,mfile.vali)
)
# convert daily increments back to cumulative totals; in-sample days
# add the known cumulative baseline ...
for(i in 1:(nrow(mfile.vali)-sel)){
mfile.vali[i,] <- mfile.vali[i,] + rowSums(Y.actual)[i+8]
}
# ... while held-out days chain the prediction on itself
for(i in (-(sel-1):0)+nrow(mfile.vali)){
mfile.vali[i,1] <- mfile.vali[i,1] + rowSums(Y.actual)[i+8]
mfile.vali[i,2] <- mfile.vali[i,2] + mfile.vali[i-1,2]
}
mfile.vali <- cbind(mfile.vali,error=mfile.vali[,2]/mfile.vali[,1]-1)
# return only the held-out predictions, NA-padded to full length
c(rep(NA,nrow(mfile.vali)-sel),mfile.vali[-(1:(nrow(mfile.vali)-sel)),2])
})
mfile.vali <- cbind(actual=rowSums(Y.actual)[-1:-9],do.call(cbind,mfile.vali))
mfile.vali <- data.table(date=as.POSIXct('2020-01-19') + (1:nrow(mfile.vali))*3600*24,
mfile.vali)
# NOTE(review): "validateion.csv" is a typo in the output filename but
# is a runtime string; left unchanged in case downstream code reads it.
write.csv(mfile.vali,'summary_temp/validateion.csv')
############################
# Prediction
############################
setwd('summary_temp')
# The commented block below is the original training + save code for the
# 5-model ensemble; the script now reloads the saved models instead.
# set.seed(4)
# mfile <- get_model_xy(Y.model,p=8,gety=T,w=12,sel=sel)
# models <- lapply(1:5,function(i){
# print(paste(i,Sys.time()))
# MSAE(X=mfile$X,Y=mfile$Y,
# dims=c(32,4),activations=c('relu','relu'),
# batch=128,epochs=4000,verbose=0)
# })
# for(i in 1:length(models)){
# save_model_hdf5(models[[i]]$model,paste0('mmodel',i,'.model')
# , overwrite = TRUE,include_optimizer = TRUE)
# save_model_hdf5(models[[i]]$encoder,paste0('mmodel',i,'.encoder')
# , overwrite = TRUE,include_optimizer = TRUE)
# }
# reload the persisted 5-model ensemble from disk
models <- lapply(1:5,function(i){list()})
models[[1]]$model <- keras::load_model_hdf5('mmodel1.model')
models[[1]]$encoder <- keras::load_model_hdf5('mmodel1.encoder')
models[[2]]$model <- keras::load_model_hdf5('mmodel2.model')
models[[2]]$encoder <- keras::load_model_hdf5('mmodel2.encoder')
models[[3]]$model <- keras::load_model_hdf5('mmodel3.model')
models[[3]]$encoder <- keras::load_model_hdf5('mmodel3.encoder')
models[[4]]$model <- keras::load_model_hdf5('mmodel4.model')
models[[4]]$encoder <- keras::load_model_hdf5('mmodel4.encoder')
models[[5]]$model <- keras::load_model_hdf5('mmodel5.model')
models[[5]]$encoder <- keras::load_model_hdf5('mmodel5.encoder')
setwd('..')
# autoregressive roll-out: predict the next day from the last 8 days,
# append it, and repeat for 300 days
x <- rbind(Y.actual[1,],Y.model)
rlt <- matrix(NA,0,ncol(x))
while(nrow(rlt)<300){
temp <- get_model_file(x=x,i=nrow(x)-7,p=8,gety=F)
out <- rowMeans(sapply(models,function(m){(m$model %>% predict(temp$x)) * temp$f}))
out <- ifelse(out<0,0,out)
x <- rbind(x,out)
rlt <- rbind(rlt,out)
}
# NOTE(review): manual data overrides for specific regions; the magic
# numbers (202, day 45, day 50) are undocumented -- presumably cleaning
# known artefacts in the source data.  TODO confirm.
x$shandong[x$shandong==202] <- 0
x$fujian[-1:-45] <- 0
x$ningxia[-1:-50] <- 0
# truncate the series one day after the last region stops growing
x <- x[1:(apply(x,2,function(x){
max(which(diff(ceiling(cumsum(x)))>0))
}) %>% max+1),]
# sanity check printed to the console: predicted/observed totals ratio
apply(x,2,sum)/colSums(Y.model)
x.date <- as.POSIXct('2020-01-10') + (1:nrow(x))*3600*24
############################
# Analyzing
############################
#Prediction
# in-sample fitted national daily totals: 58 seed cases + first 8 known
# days + one-step-ahead ensemble predictions for the rest
x.fit <- c(
58,rowSums(Y.model)[1:8],
sapply(lapply(1:(nrow(Y.model)-8),get_model_file,p=8,gety=F,x=Y.model),function(x){
x <- mean(sapply(models,function(m){
sum((m$model %>% predict(x$x)) * x$f)
}))
ifelse(x<0,0,x)})
)
x.actual <- rowSums(x)
x.actual <- cumsum(x.actual)
# convert fitted daily increments to cumulative using the previous
# day's actual cumulative as baseline
for(i in 2:length(x.fit)){x.fit[i] <- x.fit[i] + x.actual[i-1]}
# national table: fitted vs actual in-sample, pure prediction afterwards
rlt.nat <- rbind(
data.frame(date=x.date[1:length(x.fit)],actual=x.actual[1:length(x.fit)],fit=x.fit),
data.frame(date=x.date[-1:-length(x.fit)],actual=NA,fit=x.actual[-1:-length(x.fit)]))
write.csv(rlt.nat,'summary_temp/prediction_national.csv')
write.csv(data.frame(date=x.date,ceiling(apply(x,2,cumsum))),
'summary_temp/prediction_by_city.csv')
#Featuring
# per-region summary statistics of the predicted epidemic curve
idx <- apply(apply(ceiling(apply(x,2,cumsum)),2,diff),2,function(x){
c(now = cumsum(x)[nrow(Y.actual)],
max = max(x),
sum = sum(x),
start = which(x!=0)[1],
peak = which(x==max(x))[1],
end = max(which(x!=0)),
end1 = which((cumsum(x)/sum(x))>0.25)[1],
end2 = which((cumsum(x)/sum(x))>0.50)[1],
end3 = which((cumsum(x)/sum(x))>0.75)[1],
end4 = which((cumsum(x)/sum(x))>0.99)[1])
}) %>% t
colnames(idx) <- c('now','max','sum','start','peak','end','end1','end2','end3','end4')
# latent features: first singular value of each region's encoder output
# from each of the 5 ensemble members (columns d1..d5)
mfile <- get_model_xy(apply(ceiling(apply(x,2,cumsum)),2,diff),p=8,gety=F,w=1000,sel=0)
mfile.city <- rownames(mfile$X)
mfile <- lapply(models,function(m){m$encoder %>% predict(mfile$X)})
idx <- cbind(idx,sapply(mfile,function(x){
sapply(unique(mfile.city),function(city){
svd(x[mfile.city==city,])$d[1]
})
}))
colnames(idx)[-1:-10] <- paste0('d',1:5)
#Clustering
idx <- data.frame(idx)
# size clusters: k-means on total cases, relabelled largest-first
set.seed(1); idx_sizecluster <- kmeans(scale(idx$sum),9,iter.max=1000)
idx_sizecluster <- match(paste(idx_sizecluster$cluster),names(sort(-tapply(idx$sum,idx_sizecluster$cluster,mean))))
# seed labels for LDA-guided feature clustering (hand-picked regions)
idx$init <- NA
idx$init[rownames(idx)%in%'hubei'] <- 1
idx$init[rownames(idx)%in%strsplit('shanghai,beijing,hong_kong_sar',',')[[1]]] <- 2
idx$init[rownames(idx)%in%strsplit('jiangxi,guangdong,zhejiang,henan,',',')[[1]]] <- 3
idx$init[rownames(idx)%in%strsplit('inner_mongolia,xizang',',')[[1]]] <- 4
idx$init[rownames(idx)%in%strsplit('heilongjiang,jilin,liaoning',',')[[1]]] <- 5
idx$init[rownames(idx)%in%strsplit('anhui,hebei,fujian,shandong',',')[[1]]] <- 6
# LDA projection of curve stats + latent features, then k-means on it
set.seed(5);idx.kmeans <- kmeans(predict(lda(init~sum+peak+end+d1+d2+d3+d4+d5,data=filter(idx,!is.na(init))),idx)$x,9,iter.max = 1000)
# console inspection of cluster membership (result not stored)
lapply(unique(idx.kmeans$cluster),function(x){
names(idx.kmeans$cluster[idx.kmeans$cluster==x])
})
idx$init <- match(paste(idx.kmeans$cluster),names(sort(tapply(-idx$sum,idx.kmeans$cluster,mean))))
idx$size <- idx_sizecluster
# rename the 16th column (the relabelled "init") to "feature"
colnames(idx)[16] <- 'feature'
idx <- data.frame(city=rownames(idx),idx)
write.csv(idx,'summary_temp/city_idx.csv')
############################
# Charting
############################
#Province Clustering
library(maptools)
library(ggplot2)
library(grid)
# province name lookup and China map shapefile (GBK-encoded names)
citymap <- read.xlsx('data0209.xlsx',sheet='citymap')
str.crs <- "+proj=longlat +ellps=clrk66"
china_map <- readShapePoly("chinamap/bou2_4p.shp",proj4string=CRS(str.crs))
china_province = setDT(china_map@data)
setnames(china_province, "NAME", "province")
china_province[, province:=iconv(province, from = "GBK", to = "UTF-8")]
china_province[, id:= .I-1]
china_province[, table(province)]
china_province[, province:= as.factor(province)]
dt_china = setDT(fortify(china_map))
dt_china[, id:= as.numeric(id)]
setkey(china_province, id); setkey(dt_china, id)
dt_china <- china_province[dt_china]
# map Chinese province names to the English column names used above.
# NOTE(review): relies on sort order aligning the two name lists and on
# hard-coded row exclusions c(33, 35) -- fragile if the map changes.
province_CH <- china_province[, levels(province)]
province_CH <- cbind(sort(province_CH),sort(citymap[-c(33,35),]$zh_name))[,2]
province_EN <- citymap$city[match(province_CH,citymap$zh_name)]
# label anchor per province = median of its polygon coordinates
value <- data.frame(citych = province_CH, city = province_EN,
long = tapply(dt_china$long,dt_china$province,median),
lat = tapply(dt_china$lat,dt_china$province,median))
value <- as.data.frame(rbind(as.matrix(value),c(citych='澳门',city='macao_sar',long=113.5439,lat=22.1987)))
value <- merge(value,idx,by=c('city')) %>% mutate(city=toupper(city))
value$long <- as.numeric(paste(value$long))
value$lat <- as.numeric(paste(value$lat))
# NOTE(review): positional renames assume a fixed row order after merge
value$city[13] <- 'HONGKONG'
value$city[21] <- 'MACAO'
value$city[29] <- 'TAIWAN'
# hand-tuned label placement per province: a/s/d/w = left/below/right/above
value$pos <- strsplit('s,a,a,d,w,s,a,a,d,a,
a,a,d,d,s,a,d,d,a,d,
w,w,a,s,d,d,w,w,d,d,d,
d,a,d',',|,\n')[[1]]
# map coloured by feature cluster
ggplot(data=value, aes(x=long, y=lat, color=paste(feature))) +
geom_point(size=3) +
geom_text(data=filter(value,pos=='a'),aes(x=long-nchar(city)/2.8,y=lat,label=city))+
geom_text(data=filter(value,pos=='s'),aes(x=long,y=lat-1,label=city))+
geom_text(data=filter(value,pos=='d'),aes(x=long+nchar(city)/3,y=lat,label=city))+
geom_text(data=filter(value,pos=='w'),aes(x=long,y=lat+1,label=city))+
labs(x='Latitude',y='Longitude',title = "Feature Clustering") +
theme(plot.title = element_text(hjust = 0.5))
# console listing of feature-cluster membership
sapply(1:9,function(i){
paste0('Cluster',i,': ',paste(filter(value,feature==i)$city,collapse=', '))
})
# map coloured by size cluster
ggplot(data=value, aes(x=long, y=lat, color=paste(size))) +
geom_point(size=3) +
geom_text(data=filter(value,pos=='a'),aes(x=long-nchar(city)/2.8,y=lat,label=city))+
geom_text(data=filter(value,pos=='s'),aes(x=long,y=lat-1,label=city))+
geom_text(data=filter(value,pos=='d'),aes(x=long+nchar(city)/3,y=lat,label=city))+
geom_text(data=filter(value,pos=='w'),aes(x=long,y=lat+1,label=city))+
labs(x='Latitude',y='Longitude',title = "Size Clustering") +
theme(plot.title = element_text(hjust = 0.5))
#National chart
# in-sample portion only: cumulative actual vs fitted
rlt <- data.frame(rlt.nat[1:nrow(Y.actual),])
ggplot() + geom_line(aes(x=rlt[,1],y=rlt[,2],colour='Actual')) +
geom_line(aes(x=rlt[,1],y=rlt[,3],colour='Fitted')) +
labs(x='Day',y='Accumulated Confirmed Cases',title = "Fit Chart") +
theme(plot.title = element_text(hjust = 0.5))
# daily new cases: actual vs fitted (first differences)
ggplot() + geom_line(aes(x=rlt[-nrow(rlt),1],y=diff(rlt[,2]),colour='Actual')) +
geom_line(aes(x=rlt[-nrow(rlt),1],y=diff(rlt[,3]),color='Fitted')) +
labs(x='Day',y='New Confirmed Cases',title = "Fit Chart") +
theme(plot.title = element_text(hjust = 0.5))
# full horizon: actual where known, prediction beyond
ggplot() + geom_line(aes(x=rlt.nat[,1],y=(rlt.nat[,2]),colour='Actual')) +
geom_line(aes(x=rlt.nat[,1],y=(rlt.nat[,3]),colour='Prediction')) +
labs(x='Day',y='Accumulated Confirmed Cases',title = "Prediction Chart") +
theme(plot.title = element_text(hjust = 0.5))
# --- Per-cluster city charts: 3x3 grid setup --------------------------------
grid.newpage()
pushViewport(viewport(layout = grid.layout(3,3)))
# Viewport helper: place a plot at grid cell (row, col).
vplayout <- function(x,y){viewport(layout.pos.row = x, layout.pos.col = y)}
# Cumulative case counts per region; first column is the date vector.
out <- data.frame(x.date, apply(x, 2, cumsum))
out.clust <- idx$size
# Upper-case the region names; three names are then set explicitly.
colnames(out) <- toupper(colnames(out))
colnames(out)[8] <- 'TAIWAN'
colnames(out)[14] <- 'HONGKONG'
colnames(out)[25] <- 'MACAO'
# --- Panels: one plot per size cluster, placed into the 3x3 grid ------------
# Each panel selects the date column plus that cluster's member columns.
# The bare ncol()/colnames() calls are kept: at top level they auto-print,
# which the original used as a quick sanity check.
# NOTE(review): the first series' legend label is hard-coded per panel; it
# appears to duplicate colnames(panel_df)[2] — verify they stay in sync.

# Cluster 1 -> grid (1,1).
i <- 1
panel_df <- out[,c(1,which(out.clust==i)+1)]; ncol(panel_df); colnames(panel_df)[2]
panel <- ggplot() +
  geom_line(aes(x=panel_df[,1],y=panel_df[,2],colour='HUBEI'))+
  labs(x='',y='',title = "") +
  theme(plot.title = element_text(hjust = 0.5))
print(panel, vp = vplayout(1,1))

# Cluster 2 -> grid (1,2).
i <- 2
panel_df <- out[,c(1,which(out.clust==i)+1)]; ncol(panel_df); colnames(panel_df)[2]
panel <- ggplot() +
  geom_line(aes(x=panel_df[,1],y=panel_df[,2],colour='GUANGDONG')) +
  geom_line(aes(x=panel_df[,1],y=panel_df[,3],colour=colnames(panel_df)[3]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,4],colour=colnames(panel_df)[4]))+
  labs(x='',y='',title = "") +
  theme(plot.title = element_text(hjust = 0.5))
print(panel, vp = vplayout(1,2))

# Cluster 3 -> grid (1,3).
i <- 3
panel_df <- out[,c(1,which(out.clust==i)+1)]; ncol(panel_df); colnames(panel_df)[2]
panel <- ggplot() +
  geom_line(aes(x=panel_df[,1],y=panel_df[,2],colour='ANHUI')) +
  geom_line(aes(x=panel_df[,1],y=panel_df[,3],colour=colnames(panel_df)[3]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,4],colour=colnames(panel_df)[4]))+
  labs(x='',y='',title = "") +
  theme(plot.title = element_text(hjust = 0.5))
print(panel, vp = vplayout(1,3))

# Cluster 4 -> grid (2,1).
i <- 4
panel_df <- out[,c(1,which(out.clust==i)+1)]; ncol(panel_df); colnames(panel_df)[2]
panel <- ggplot() +
  geom_line(aes(x=panel_df[,1],y=panel_df[,2],colour='SHANDONG'))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,3],colour=colnames(panel_df)[3]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,4],colour=colnames(panel_df)[4]))+
  labs(x='',y='',title = "") +
  theme(plot.title = element_text(hjust = 0.5))
print(panel, vp = vplayout(2,1))

# Cluster 5 -> grid (2,2).
i <- 5
panel_df <- out[,c(1,which(out.clust==i)+1)]; ncol(panel_df); colnames(panel_df)[2]
panel <- ggplot() +
  geom_line(aes(x=panel_df[,1],y=panel_df[,2],colour='SICHUAN'))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,3],colour=colnames(panel_df)[3]))+
  labs(x='',y='',title = "") +
  theme(plot.title = element_text(hjust = 0.5))
print(panel, vp = vplayout(2,2))

# Cluster 6 -> grid (2,3).
i <- 6
panel_df <- out[,c(1,which(out.clust==i)+1)]; ncol(panel_df); colnames(panel_df)[2]
panel <- ggplot() +
  geom_line(aes(x=panel_df[,1],y=panel_df[,2],colour='SHANGHAI'))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,3],colour=colnames(panel_df)[3]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,4],colour=colnames(panel_df)[4]))+
  labs(x='',y='',title = "") +
  theme(plot.title = element_text(hjust = 0.5))
print(panel, vp = vplayout(2,3))

# Cluster 7 -> grid (3,1).
i <- 7
panel_df <- out[,c(1,which(out.clust==i)+1)]; ncol(panel_df); colnames(panel_df)[2]
panel <- ggplot() +
  geom_line(aes(x=panel_df[,1],y=panel_df[,2],colour='GUANGXI'))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,3],colour=colnames(panel_df)[3]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,4],colour=colnames(panel_df)[4]))+
  labs(x='',y='',title = "") +
  theme(plot.title = element_text(hjust = 0.5))
print(panel, vp = vplayout(3,1))

# Cluster 8 -> grid (3,2).
i <- 8
panel_df <- out[,c(1,which(out.clust==i)+1)]; ncol(panel_df); colnames(panel_df)[2]
panel <- ggplot() +
  geom_line(aes(x=panel_df[,1],y=panel_df[,2],colour='YUNNAN'))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,3],colour=colnames(panel_df)[3]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,4],colour=colnames(panel_df)[4]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,5],colour=colnames(panel_df)[5]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,6],colour=colnames(panel_df)[6]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,7],colour=colnames(panel_df)[7]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,8],colour=colnames(panel_df)[8]))+
  labs(x='',y='',title = "") +
  theme(plot.title = element_text(hjust = 0.5))
print(panel, vp = vplayout(3,2))

# Cluster 9 -> grid (3,3).
i <- 9
panel_df <- out[,c(1,which(out.clust==i)+1)]; ncol(panel_df); colnames(panel_df)[2]
panel <- ggplot() +
  geom_line(aes(x=panel_df[,1],y=panel_df[,2],colour='TAIWAN'))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,3],colour=colnames(panel_df)[3]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,4],colour=colnames(panel_df)[4]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,5],colour=colnames(panel_df)[5]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,6],colour=colnames(panel_df)[6]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,7],colour=colnames(panel_df)[7]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,8],colour=colnames(panel_df)[8]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,9],colour=colnames(panel_df)[9]))+
  geom_line(aes(x=panel_df[,1],y=panel_df[,10],colour=colnames(panel_df)[10]))+
  labs(x='',y='',title = "") +
  theme(plot.title = element_text(hjust = 0.5))
print(panel, vp = vplayout(3,3))
|
cfd71441616f5e2a75d58b3b63c778dd6812d13b
|
4d216630e99eda5974b2655baf8928ca7da754bd
|
/man/apply_filter_specfile.Rd
|
1a3ba5b8db3d66f3ea68dc362e18dbde4977f1da
|
[] |
no_license
|
ashiklom/edr-da
|
467861ec61cd8953eb272e2844414a522db7268f
|
b092600954b73fa064300c6e7b21d0413d115b94
|
refs/heads/master
| 2021-07-12T18:59:20.190169
| 2021-04-12T14:00:17
| 2021-04-12T14:00:17
| 71,824,349
| 2
| 5
| null | 2018-02-01T13:29:03
| 2016-10-24T19:26:27
|
R
|
UTF-8
|
R
| false
| true
| 429
|
rd
|
apply_filter_specfile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/specfile.R
\name{apply_filter_specfile}
\alias{apply_filter_specfile}
\title{Apply single filter to spectra HDF5 data file}
\usage{
apply_filter_specfile(aviris_h5, quosure)
}
\arguments{
\item{aviris_h5}{HDF5 file object}
\item{quosure}{An `rlang` `quosure` object (e.g. a `formula`)}
}
\description{
Apply single filter to spectra HDF5 data file
}
|
7eaaa767e00d9c1b3f5e7a6bded18edbda24f7ca
|
b7cd62ff3a55a1d1c35ab39c86045ec4950fc0b3
|
/plot6.R
|
dc27499e2dd751919ce226a7d2739ec9c70c580a
|
[] |
no_license
|
bbamini/Exploratory_Data
|
7f4f1fe7e84aab21ef5577856132c907d7884a1f
|
c96b473f9deb0bc42c62ac4573546730da9bb72b
|
refs/heads/master
| 2020-04-22T10:56:29.651454
| 2016-09-12T05:51:23
| 2016-09-12T05:51:23
| 67,979,516
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,081
|
r
|
plot6.R
|
## Plot 6: motor-vehicle PM2.5 emissions over time, Baltimore City vs
## Los Angeles County, saved to plot6.png.
NEI <- readRDS("summarySCC_PM25.rds")
#SCC <- readRDS("Source_Classification_Code.rds")
library(reshape2)
# Motor-vehicle (ON-ROAD) records for Baltimore (fips 24510) and
# Los Angeles County (fips 06037).
vehicles <- subset(NEI, (fips == "24510" | fips == "06037") & type == "ON-ROAD")
# Total emissions per year within each county.
totals <- tapply(vehicles$Emissions, list(vehicles$year, vehicles$fips),
                 FUN = sum)
# Reshape the year-by-county matrix to long format for plotting.
plot_df <- melt(totals)
names(plot_df) <- c("Year", "City", "Emissions")
plot_df$City <- as.factor(plot_df$City)
# Factor levels are the fips codes in sorted order (06037 first).
levels(plot_df$City) <- c("Los Angeles County", "Baltimore City")
library(ggplot2)
png("plot6.png", width = 480, height = 480, units = "px")
# Faceted points + lines, free y scales so each county's trend is visible.
ggplot(plot_df, aes(Year, Emissions)) +
  facet_grid(City ~ ., scales = "free_y") +
  geom_point(aes(Year, Emissions, color = City)) +
  geom_line(aes(color = City)) +
  scale_x_continuous(name = "Year", breaks = c(1999, 2002, 2005, 2008))
dev.off()
|
a43c45a11f343cc9781819c448778a4b7869302e
|
f2316e98c2e423836e7f291aeb666827fc35df4e
|
/Plot2/plot2.R
|
cac0abfd8f261c4ba256e21f44e872f0a24eb128
|
[] |
no_license
|
yshen92/ExData_Plotting1
|
311a1e23879d9277a2d5a14165389b2a7a5d6f82
|
ba6d19115d3d1e8de62793d10c55970ecda86526
|
refs/heads/master
| 2020-07-07T08:02:20.689521
| 2019-08-21T07:47:20
| 2019-08-21T07:47:20
| 203,298,202
| 0
| 0
| null | 2019-08-20T04:14:17
| 2019-08-20T04:14:16
| null |
UTF-8
|
R
| false
| false
| 570
|
r
|
plot2.R
|
# Load the raw household power data; "?" marks missing values.
pwr <- read.table('household_power_consumption.txt', header=TRUE, sep=';', na.strings = "?")
# Combine Date and Time into a single POSIXlt timestamp column.
pwr$date_time <- strptime(paste(pwr$Date, pwr$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
pwr$Date <- strptime(pwr$Date, format="%d/%m/%Y")
# Keep only the two target days, 2007-02-01 and 2007-02-02.
feb_days <- subset(pwr, Date >= "2007-02-01" & Date <= "2007-02-02" )
# Line plot of global active power over time.
plot(feb_days$date_time, feb_days$Global_active_power, type='l', xlab='', ylab='Global Active Power (kilowatts)')
# Copy the screen device to a 480x480 PNG and close it.
dev.copy(png, file='plot2.png', width=480, height=480)
dev.off()
|
6f334c0b0d4a1fd40d492b04cedfcc993c6d3c1f
|
0aaecd6991a7f16759a1f8d2b3be6093f8a183af
|
/inst/snippet/coag-mct.R
|
773d109a2fe87a3dd98b4c8aeccacabf80bc17f3
|
[] |
no_license
|
cran/fastR
|
3f0e3959dad4e5d361c341eb6bea670eab6bfdcc
|
572a5dc31e5aa85af4126662f95268329179c87b
|
refs/heads/master
| 2021-01-21T04:55:14.927487
| 2017-07-27T19:52:06
| 2017-07-27T19:52:06
| 17,695,981
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 196
|
r
|
coag-mct.R
|
# Nested-model comparison for a diet effect in the coagulation data:
# encode diets B, C and D as indicator columns (diet A is the reference),
# then compare the dummy-variable model against the intercept-only model.
coag <- coagulation
coag[c("x1", "x2", "x3")] <- lapply(c('B', 'C', 'D'), function(d) coag$diet == d)
full.model <- lm(coag~x1+x2+x3,coag)
null.model <- lm(coag~1,coag)
# F test of the three dummy coefficients jointly.
anova(null.model, full.model)
|
25cdbdae77a5c815a55733ef07309dc271959fd9
|
17f1b5b761a43ec178602a43f24ac72c2d5d01a9
|
/lobstr/inst/testfiles/v_size/libFuzzer_v_size/v_size_valgrind_files/1609881833-test.R
|
0e27a9e3ade0584b0b31899d4cfd9b32b11fbcdb
|
[] |
no_license
|
akhikolla/newtestfiles-2
|
3e1882e7eea3091f45003c3abb3e55bc9c2f8f56
|
e539420696b7fdc05ce9bad66b5c7564c5b4dab2
|
refs/heads/master
| 2023-03-30T14:44:30.614977
| 2021-04-11T23:21:23
| 2021-04-11T23:21:23
| 356,957,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 75
|
r
|
1609881833-test.R
|
# Fuzzer-generated argument list (edge-case inputs: NULL size, a subnormal
# double for n, and a zero element size) — presumably fed to the v_size
# harness named in the file path; verify against the test driver.
testlist <- list(size = NULL, n = 5.45361239830194e-311, element_size = 0L)
|
202baccc00992dcbcefd7a05d99ebdfd2c7f7add
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RevEcoR/examples/show-methods.Rd.R
|
157794411f0698dfcdf687ad665036fce168bd60
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 206
|
r
|
show-methods.Rd.R
|
# Example code extracted from the RevEcoR help page for the
# show,seedset-method generic; the body is wrapped in "Not run" markers,
# so it is not executed during package checks.
library(RevEcoR)
### Name: show,seedset-method
### Title: The show generic function
### Aliases: show show,seedset-method show-methods
### ** Examples
## Not run: 
##D show(seed.set)
## End(Not run)
|
ca08d11e7274a7c7f8cfa21a53abcf1ce1865749
|
4ef1abc89cd63293ad7da8c799492aff5ae5a666
|
/inst/tinytest/testSummary.R
|
1c881fb31d5f8d28e8bade9387ab469d2d117f00
|
[
"MIT"
] |
permissive
|
bengalengel/OmicNavigator
|
07ae25f23b8162e3fdee9b7b6cad7f84b4190e32
|
2edaf7204afe9d37467be474ef39ed40ca2d393f
|
refs/heads/main
| 2023-04-17T22:53:51.470685
| 2021-04-26T14:51:43
| 2021-04-26T14:51:43
| 348,800,839
| 0
| 0
|
NOASSERTION
| 2021-03-17T17:46:59
| 2021-03-17T17:46:58
| null |
UTF-8
|
R
| false
| false
| 768
|
r
|
testSummary.R
|
# Tests for summary.onStudy(): the printed summary should mention the study
# name and report counts for the attached elements.
# Setup ------------------------------------------------------------------------
source("tinytestSettings.R")
using(ttdo)
library(OmicNavigator)

# Fixtures: a bare study, a populated test study, and the test study with
# plots attached.
study_empty <- createStudy(name = "empty", description = "An empty study")
study_test <- OmicNavigator:::testStudy(name = "test", description = "A test study")
study_with_plots <- addPlots(study_test, OmicNavigator:::testPlots())

# Test summary.onStudy() -------------------------------------------------------
# The empty study's summary prints its name.
expect_stdout(summary(study_empty), "empty")
# The populated study reports two reports.
expect_stdout(summary(study_test), "\\|-reports \\(2\\)")
# Attaching plots surfaces a plots section with two entries.
expect_stdout(summary(study_with_plots), "\\|-plots \\(2\\)")
# Filtering elements still includes the annotations section.
expect_stdout(summary(study_with_plots, elements = c("annotations", "enrichments")), "annotations")
|
62f7e7587ccd5a72409a6dfb78a1082d9df2da16
|
fa433bb45c39743dffea926544dc402d7801b7ee
|
/code 1 cox analysis.R
|
0c922eaf292f1d3f6792a267bbae917e019f9d99
|
[] |
no_license
|
heleliangww/Alternative-splicing-for-ACC
|
68d867a9cadc208542dca4eaafd413095ae4f016
|
0230ef81a0fd46db2aac039b65936639353d98d6
|
refs/heads/main
| 2023-01-23T16:00:43.544616
| 2020-12-07T08:25:35
| 2020-12-07T08:25:35
| 319,247,234
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,118
|
r
|
code 1 cox analysis.R
|
# Univariate Cox proportional-hazards screen of alternative-splicing (AS)
# events against survival in the ACC cohort. Produces ACC_cox_single_all:
# one row per AS event with gene symbol, p value, hazard ratio and 95% CI.
rm(list = ls())  # NOTE(review): clears the workspace; kept from the original script
library(survival)

# Clinical table: column 1 = sample ID (moved to row names); columns 1 and 2
# of the remainder are used below as survival time and event status.
clinical<-read.table("./clinical_finial.txt",header=TRUE,sep="\t")
rownames(clinical)<-clinical[,1]
clinical<-clinical[,-1]

# AS table: column 2 = event ID (moved to row names); the first 9 remaining
# columns are annotation (column 1 = gene symbol), the rest per-sample values.
AS<-read.table("./AS.txt",header=TRUE,sep="\t")
rownames(AS)<-AS[,2]
AS<-AS[,-2]
AS<-as.matrix(AS)

# Restrict to samples present in both tables.
intersection<-intersect(rownames(clinical),colnames(AS))
AS_a<-AS[,intersection]   # per-sample AS values
AS_b<-AS[,1:9]            # annotation columns
AS_clinical<-cbind(AS_a,AS_b)
cli<-clinical[intersection,]

# Survival object: time in column 1, event status in column 2.
y<-Surv(as.numeric(cli[,1]),as.numeric(cli[,2]))

# One univariate Cox fit per AS event. Rows are built up front with lapply
# instead of growing six parallel vectors inside the loop (O(n^2) copying).
rows <- lapply(rownames(AS_a), function(x) {
  coxresult <- summary(coxph(y ~ as.numeric(AS_a[x, ])))
  c(
    x,                          # marker id
    AS_b[x, 1],                 # gene symbol
    coxresult$coefficients[5],  # Wald-test p value
    coxresult$conf.int[1],      # hazard ratio
    coxresult$conf.int[3],      # lower 95% CI bound
    coxresult$conf.int[4]       # upper 95% CI bound
  )
})
ACC_cox_single_all <- do.call(rbind, rows)
name<-c("marker id","gene symbol","P value","HR","Low 95%CI","High 95%CI")
# BUG FIX: the original built `name` but never applied it; label the columns.
colnames(ACC_cox_single_all) <- name
|
969edab9a62d03ac162b5b148621b94ec6805610
|
a3f7826863b6b81bc99ccf9c414f8bcf09a335e7
|
/man/myKable.Rd
|
74c12c098b065b9c559ce1ff4f92874df5b1e139
|
[] |
no_license
|
cran/rmdHelpers
|
24c9516a15a8d6de20bb92df4df1ceba27786ce1
|
b091a8e1ec70f651305074b03ccb38dd0008c599
|
refs/heads/master
| 2021-01-18T18:09:53.043265
| 2016-07-11T23:09:59
| 2016-07-11T23:09:59
| 55,989,977
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,420
|
rd
|
myKable.Rd
|
\name{myKable}
\alias{myKable}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Wrapper for kable
}
\description{
A small wrapper for the knitr kable function
to allow automated bolding of row and/or column names.
Additional functionality may be added.
}
\usage{
myKable(x, row.names = NA, boldRowNames = TRUE, boldColNames = TRUE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
Table or matrix to be passed to \code{kable}
}
\item{row.names}{
Logical: should row names be included?
Defaults to \code{NA}, which includes row names only if they are not
just the row numbers in sequence.
}
\item{boldRowNames}{
Logical: should row names be bolded?
}
\item{boldColNames}{
Logical: should column names be bolded?
}
\item{\dots}{
Additional arguments to be passed to \code{kable}
}
}
\details{
Currently bolds in markdown format,
so needs to be passed through interpreter after running.
}
\value{
A formatted table from \code{kable}
}
\author{
Mark Peterson
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\link[knitr]{kable}
}
\examples{
tempTable <- matrix(LETTERS[6:20], nrow = 5)
colnames(tempTable) <- LETTERS[24:26]
row.names(tempTable) <- LETTERS[1:5]
myKable(tempTable)
myKable(tempTable, boldColNames = FALSE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ kable }
|
d9c1631452b840414939939b28557fcd91d5c503
|
de84560d6597a1980ff2dc2e18bd1e7ad2b67af1
|
/plot3.R
|
705e6f83b953357675afbd187347bb9ddbfeaa28
|
[] |
no_license
|
jwleemd/ExData_Plotting1
|
c7227b0708b41f74a6f9c31eb45f6366cfcd8a02
|
41ab46d17ff00dfa0b678110693345197d5eb67b
|
refs/heads/master
| 2021-01-12T21:05:19.847388
| 2015-11-08T13:18:47
| 2015-11-08T13:18:47
| 45,780,666
| 0
| 0
| null | 2015-11-08T13:02:54
| 2015-11-08T13:02:54
| null |
UTF-8
|
R
| false
| false
| 1,170
|
r
|
plot3.R
|
# Fetch and extract the household power consumption dataset.
data_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(data_url, destfile="household_power_consumption.zip")
unzip("household_power_consumption.zip")
power <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors = FALSE)
# Build proper Date and timestamp columns from the character fields.
power$Time <- paste(power$Date, power$Time)
power$Date <- as.Date(strptime(power$Date, format="%d/%m/%Y"))
power$Time <- strptime(power$Time, format="%d/%m/%Y %H:%M:%S")
# Restrict to 2007-02-01 and 2007-02-02.
power <- power[power$Date >= as.Date("2007-02-01") & power$Date <= as.Date("2007-02-02"),]
# Plot the three sub-metering series together to a PNG device.
png("plot3.png", width = 480, height = 480)
plot(power$Time, as.numeric(power$Sub_metering_1), type="l", col="black",
     xlab="",
     ylab="Energy sub metering")
lines(power$Time, as.numeric(power$Sub_metering_2), col="red")
lines(power$Time, as.numeric(power$Sub_metering_3), col="blue")
legend("topright",
       legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col=c("black","red","blue"), lty=1, lwd=1, cex=0.75)
dev.off()
|
5a94fd394447af691ab3a8919af2926635bc678e
|
c93cf355ec2868c1f5bd04a4df7e4cd572987527
|
/district_pay_income.parse.R
|
c5cf5bff8c03c7ebafba83877819ea7179626ab2
|
[] |
no_license
|
helen-sinica/esbk
|
614d0fb042b054880792f6c43f02129771cfdf3d
|
a4283b5c7339ade076e34637253a5b074746e241
|
refs/heads/master
| 2020-03-31T01:45:06.234034
| 2015-05-27T07:39:52
| 2015-05-27T07:39:52
| 35,873,878
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,149
|
r
|
district_pay_income.parse.R
|
# Scrape the 2012 income-tax tables (one HTML page per letter code) and
# consolidate village-level pay statistics into district_pay_raw.csv.
library(XML)
library(stringr)

# Pick the working directory by operating system.
is.windows <- function() .Platform$OS.type=="windows"
ifelse(is.windows(), setwd("D:/E-sun"), setwd('/home/helen/esbk'))

# 22 page codes: capital letters excluding L, R, S and Y.
keywd <- LETTERS[-c(12, 18, 19, 25)]

count <- 1
for (letter in keywd) {
  page <- sprintf("Income_tax_2012/101_165-%s.html", letter)
  parsed <- htmlParse(page)
  # Flatten every table row to its text content.
  row_text <- xpathSApply(parsed, '//tr', xmlValue)
  # City/county name sits at a fixed position inside the fifth row.
  cty <- gsub(" ", "", substr(row_text[5], 8, 12))
  # Keep village rows only (they contain 里 or 村) and drop the header row.
  row_text <- row_text[grep("里|村", row_text)]
  row_text <- row_text[-grep("鄉鎮村里", row_text)]
  # Split each row on "\n " into 12 fields, strip leftover newlines, and
  # drop the blank first and last columns.
  cells <- matrix(unlist(strsplit(row_text, "\n ")), ncol=12, byrow=TRUE)
  cells <- gsub("\n", "", cells)
  cells <- cells[,-c(1,12)]
  # Prepend the city/county name.
  cells <- cbind(cty, cells)
  # Carry the district name downward through blank cells.
  dist <- cells[,2]
  for (i in seq_along(dist)) {
    if (dist[i] == "") {
      dist[i] <- dist[(i-1)]
    }
  }
  cells[,2] <- dist
  dimnames(cells)[[2]] <- c("cty", "dist", "vil", "fam.num", "t.pay", "vil.pay",
                            "med.pay", "Q1.pay", "Q3.pay", "sd.pay", "var.pay")
  # Reorder so the village-pay column follows the family count.
  cells <- cells[,c(1:3, 6, 4:5, 7:11)]
  # Coerce the numeric columns. NOTE(review): `cells` is a character matrix,
  # so the converted values are coerced back to character on assignment —
  # confirm this round-trip is intended (it mainly normalises the text).
  for (cl in 4:11) {
    cells[,cl] <- as.numeric(cells[,cl])
  }
  # First page writes the CSV header; later pages append without one.
  # P.S. *.pay columns are in units of 10k NTD.
  if (count == 1) {
    write.csv(cells, "district_pay_raw.csv", row.names=FALSE)
  } else {
    write.table(cells, "district_pay_raw.csv", row.names=FALSE, sep=",", col.names=FALSE, append=TRUE)
  }
  count <- count + 1
  # TODO: should the variant characters 臺 and 台 be unified?
}
# x <- fread("district_pay_raw.csv", data.table=F)
# x <- x[,1:4]
# x[,4] <- x[,4]/10
# write.csv(x, "district_pay.csv", row.names=F)
|
1a5c8b87861629469f92d1959eea465e4bfe3d4c
|
e8cbfa032b4e85396d31690a8434d28c549184bc
|
/code/coronaphase.R
|
0654d9b4173906666bd18391edb2a0369f7eee32
|
[] |
no_license
|
laasousa/Speed_and_strength
|
3ecd75e11c55731fb306b9b500da9d3b1b35ca06
|
cc4df20cc36ad9c003035106da584477c10decca
|
refs/heads/main
| 2023-03-19T21:27:46.522967
| 2021-03-10T04:36:15
| 2021-03-10T04:36:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,745
|
r
|
coronaphase.R
|
library(ggplot2); theme_set(theme_bw())
library(dplyr)
library(gridExtra)

## Generation-interval model for pre- vs post-symptomatic transmission.
## Next step: generalize. Calculate distributions by: convolving,
## cumulating and then differencing.

# Epidemic growth rate corresponding to a 3-day doubling time.
r <- log(2)/3

# Generation-interval density. Assumes three equal lag stages (exposed ->
# pre-symptomatic -> symptomatic); p is the relative contagiousness of
# pre-symptomatics. Each term is a difference of gamma CDFs divided by the
# stage duration.
genfun <- function(x, p=0.3, duration=2.5) {
  p * (- pgamma(x, 4, rate=2/duration) + pgamma(x, 2, rate=2/duration))/duration +
    (1-p) * (- pgamma(x, 6, rate=2/duration) + pgamma(x, 4, rate=2/duration))/duration
}

# Example densities over a grid of p values.
p_grid <- c(0.2, 0.4, 0.6, 0.8)
t_grid <- seq(0, 20, by=0.1)
gen_curves <- lapply(p_grid, function(p) {
  data.frame(
    time=t_grid,
    genden=genfun(t_grid, p),
    p=paste0("p=", p)
  )
}) %>%
  bind_rows

# Panel A: generation-interval density for each p.
panel_a <- ggplot(gen_curves) +
  geom_line(aes(time, genden, lty=p)) +
  scale_x_continuous("Generation time (days)") +
  scale_y_continuous("Density (per days)", expand=c(0, 0)) +
  ggtitle("A") +
  theme(
    legend.position=c(0.9, 0.9),
    legend.title=element_blank(),
    panel.grid = element_blank(),
    panel.border = element_blank(),
    axis.line = element_line()
  )

# Reproduction number implied by 1/R = integral of g(x) exp(-r x) dx
# (the Euler-Lotka relation), evaluated across p.
r_vs_p <- lapply(seq(0.01, 0.99, by=0.01), function(p) {
  R <- 1/integrate(function(x) {
    genfun(x, p) * exp(-r*x)
  }, 0, Inf)[[1]]
  data.frame(
    p=p, R=R
  )
}) %>%
  bind_rows

# Panel B: R as a function of the pre-symptomatic proportion.
panel_b <- ggplot(r_vs_p) +
  geom_line(aes(p, R)) +
  scale_x_continuous("Proportion of pre-symptomatic transmission, p") +
  scale_y_continuous("Reproduction number") +
  ggtitle("B") +
  theme(
    legend.title=element_blank(),
    panel.grid = element_blank(),
    panel.border = element_blank(),
    axis.line = element_line()
  )

# Arrange the two panels side by side and save to PDF.
combined <- grid.arrange(panel_a, panel_b, nrow=1)
ggsave("coronaphase.pdf", combined, width=8, height=4)
|
c1c2d0965675d7f4ec351665bf45cbcf079aa0d2
|
0877d83cdf78f6e3bb122c7d2c031791684506d3
|
/man/score_hbi_mp_nav_water.Rd
|
a47a8796521db73f610f78d2a741add0af2413bf
|
[] |
no_license
|
BWAM/BAP
|
fec1dbe4475f3869f8007894e9ad9a5581cb1277
|
9dd041516b2f4c8a2269516c57d7ade41746d7e9
|
refs/heads/master
| 2023-04-30T00:25:15.586434
| 2023-04-26T16:17:49
| 2023-04-26T16:17:49
| 180,187,817
| 0
| 1
| null | 2023-04-17T16:54:43
| 2019-04-08T16:18:52
|
R
|
UTF-8
|
R
| false
| true
| 551
|
rd
|
score_hbi_mp_nav_water.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/score_mp_nav_water_metrics.R
\name{score_hbi_mp_nav_water}
\alias{score_hbi_mp_nav_water}
\title{Score HBI (Multiple-Plate Navigable Waters)}
\usage{
score_hbi_mp_nav_water(metrics.df)
}
\arguments{
\item{metrics.df}{= a data frame of calculated metrics values for each
sampling event.}
}
\value{
Scores the raw HBI calculations with the ranges assigned to
multiple-plate samples collected in navigable waters.
}
\description{
Score HBI (Multiple-Plate Navigable Waters)
}
|
f7976c267213b812946b3ca2dd671b968ab79d9d
|
0acaeb10c17ca4327b2aed3c10707793e1e707b2
|
/inst/tests/test-session.R
|
ee5d5831fc10a94b0e44d69e94e3434aff3c7544
|
[] |
no_license
|
rtirrell/databasr
|
18cd85cf614a5ff981d73921a48fdb1201e10710
|
b772e84b5711ddcd65dc8a23d8bd99247977a76e
|
refs/heads/master
| 2020-06-04T04:04:06.177626
| 2011-03-20T22:27:56
| 2011-03-20T22:27:56
| 1,368,624
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 203
|
r
|
test-session.R
|
# Tests for the Session object: requesting a connection registers the user
# in session$users, and releasing it removes them again.
# `session` is assumed to be provided by the surrounding test setup.
context("Testing Session")
expect_is(session, "Session")
connection <- session$request("test")
expect_true("test" %in% session$users)
session$release(connection)
expect_false("test" %in% session$users)
|
8f80735c0cbd2a0714410af3237d0ad71ee61555
|
97a3ea44ae0103ea399778df78e42977b15c0230
|
/man/get_all_events.Rd
|
fd459b05e63910f821ae3d999503f376c79f4e3e
|
[] |
no_license
|
resplab/epicManual
|
cc069ce65b7b870d923d5ae1c896d95ce5aa7035
|
8e7290c7d7f31245e40aa9c9df0c62a6d6a77db2
|
refs/heads/master
| 2022-02-22T03:35:14.101436
| 2019-08-28T00:06:32
| 2019-08-28T00:06:32
| 198,313,270
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,339
|
rd
|
get_all_events.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/core.R
\name{get_all_events}
\alias{get_all_events}
\title{Get Events Matrix}
\usage{
get_all_events()
}
\value{
Returns a data frame consisting of all events from the model simulation.
The data frame returned has parameters as follows:
\itemize{
\item id: A unique character string identifying a patient
\item local_time: time in years since start of the simulation
\item alive: whether the patient is alive; alive = 1, dead = 0
\item sex: whether the patient is male or female; male = 1, female = 0
\item height: height of the patient in metres
\item weight: weight of the patient in kilograms
\item age_at_creation: age of the patient at the start of the simulation (years)
\item smoking_status: whether or not the patient smokes; smoker = 1, non-smoker = 0
\item pack_years: 1 pack year = patient smokes 1 pack (20 cigarettes)/day for a year
\item fev1: forced expiratory volume in 1 second in L (0--5)
\item fev1_slope:
\item fev1_slope_t:
\item ln_exac_rate_intercept:
\item logit_exac_severity_intercept:
\item cumul_exac0:
\item cumul_exac1:
\item cumul_exac2:
\item cumul_exac3:
\item weight_baseline:
\item followup_time:
\item age_baseline:
\item fev1_baseline: baseline FEV1 score at start of the simulation (L)
\item fev1_tail:
\item gold: GOLD status, (0-5)
\item local_time_at_COPD:
\item cumul_cost: cumulative cost in 2015 $CAD of direct maintenance costs and exacerbation costs of COPD
\item cumul_qaly: cumulative Quality Adjusted Life Years (QALYs) lost; 1 QALY = 1 year in perfect health
\item annual_cost: annual cost in 2015 $CAD of direct maintenance costs and exacerbation costs of COPD
\item annual_qaly: annual Quality Adjusted Life Years (QALYs) lost; 1 QALY = 1 year in perfect health
\item tte:
\item event: event type
\itemize{
\item 0 = person is created in simulation
\item 1 = fixed
\item 2 = person has a birthday
\item 3 = person starts or quits smoking
\item 4 = person is diagnosed with COPD
\item 5 = person starts to have an exacerbation
\item 6 = person ends exacerbation
\item 7 = person dies from exacerbation
\item 8 = person visits doctor
\item 9 = person changes medication
\item 10 = person has a myocardial infarction
\item 11 = person has a stroke
\item 12 = person has heart failure
\item 13 = person dies from non-exacerbation causes
\item 14 = end
}
\item symptom_score:
\item last_doctor_visit_time:
\item last_doctor_visit_type:
\item medication_status:
\item n_mi: number of myocardial infarctions
\item n_stroke: number of strokes
\item p_COPD:
\item cough: patient symptoms during doctor visit; 1 = has cough, 0 = no cough
\item phlegm: patient symptoms during doctor visit; 1 = has phlegm, 0 = no phlegm
\item dyspnea: patient symptoms during doctor visit; 1 = has dyspnea, 0 = no dyspnea
\item wheeze: patient symptoms during doctor visit; 1 = has wheeze, 0 = no wheeze
\item re_cough:
\item re_phlegm:
\item re_dyspnea:
\item re_wheeze:
\item gpvisits: number of GP visits in a year
\item diagnosis:
\item case_detection:
}
}
\description{
For a dynamic microsimulation, an event is defined to be a change in a simulated individual.
Examples of an event are birth, death due to COPD exacerbation, death due to other causes (background mortality),
change in smoking status (quitting smoking), etc.
}
|
cb0ed2266ddc5daeeff57536f6509b664f6334f8
|
91b563b0b521d5fdf446bf0441c9c04098eeb854
|
/bin/install
|
7c0751a2265bf64f887cc9394da53335e6130c8e
|
[] |
no_license
|
pedmiston/forks
|
4373945112763a741b38e0d504725f8ef4ebc23b
|
71a19433911c3a21f692a62706d1bef693681522
|
refs/heads/master
| 2021-01-01T06:42:43.950652
| 2017-07-22T23:12:47
| 2017-07-22T23:12:47
| 97,492,037
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
install
|
#!/usr/bin/env Rscript
# Regenerate the roxygen documentation, then install the package from the
# current source tree.
devtools::document()
devtools::install()
|
|
bb002cb73cec7fb96c930664d7bc9caf521fee09
|
c27f0c3e89a68b8aa5591ba832c58e76ec507ad9
|
/better/chiq7/chiq7.R
|
5dbc4b78c6fccafa27fc0ec3ca651305c2641a01
|
[] |
no_license
|
darkryder/DMG_project
|
aeb164800352cb1fa969b1d8788039157aa04aac
|
5e23f3ea25bf93dbbb8ed917d7531a3b555ac5ff
|
refs/heads/master
| 2020-04-10T23:41:43.979490
| 2015-09-13T10:31:57
| 2015-09-13T10:31:57
| 41,957,773
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 734
|
r
|
chiq7.R
|
# Pre-cleaned training table; columns are referenced by numeric index below.
v<-read.csv('cleaned_train.csv',header=T)
# Map a raw 1-based attribute index onto the index reported in the output:
# indices at or below 218 shift down by one, indices at or above 240 shift
# up by one, and indices in the gap (219..239) are unchanged.
printable_index <- function(i) {
  if (i <= 218) {
    return(i - 1)  # before the gap: shift down
  }
  if (i >= 240) {
    return(i + 1)  # after the gap: shift up
  }
  # BUG FIX: the original computed the shifted value in `answer` but then
  # executed `return (i)`, discarding the offset entirely.
  i
}
# For each attribute index listed in attr.csv, run a chi-square test and
# append "statistic,attr_index,target_index" to chiq7.csv.
# TODO (from the original): restrict to categorical attributes via a lookup
# list instead of testing every listed index.
# NOTE(review): printable_index() is applied twice — once when reading the
# index and again when writing the row — and the test concatenates the
# attribute and target columns into one vector (a goodness-of-fit test, not
# a test of independence); confirm both are intended.
attr_ids <- read.csv('attr.csv')[[1]]
pos <- 1
while (pos <= length(attr_ids)) {
  i <- printable_index(attr_ids[[pos]])
  j <- 1934  # should be target 1934
  combined <- c(na.omit(v[[i]]), na.omit(v[[j]]))
  chi <- chisq.test(combined)[[1]]  # the chi-square statistic
  print(i)
  print(chi[[1]])
  row <- paste(c(chi, printable_index(i), printable_index(j)), collapse=",")
  write(row, 'chiq7.csv', append=TRUE)
  pos <- pos + 1
}
|
9282213e8c37085e58ec823058e46555d18f60d5
|
cd072a1ded3c066073f02be680e171d1f295c3e7
|
/AD_GWAS/pchic_analysis/gtf_processing.R
|
2953ece89db7d7016693abf3571b867681db215b
|
[] |
no_license
|
Nicolas-Eng/ShenLab
|
ead12265246c422cde17f1c03d8f1affe071776a
|
784460eaec1a4bc61bc2024ca28ae1e4a214321c
|
refs/heads/main
| 2023-04-21T05:26:39.099980
| 2021-05-05T05:47:05
| 2021-05-05T05:47:05
| 364,122,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,769
|
r
|
gtf_processing.R
|
#
# gtf_processing.R
# Author: Michael Song
# Last modified: 2019-01-15
# This script processes a GTF file for use with the main analysis script.
#
# Set up environment and load settings ------------------------------------
# Clear workspace before running script.
# NOTE(review): rm(list=ls()) wipes the interactive workspace when sourced;
# acceptable for a standalone pipeline step, but beware when sourcing.
rm(list=ls())
# Turn off scientific notation for writing output.
options(scipen=999)
# Convert warnings into errors so the pipeline fails loudly on any warning.
options(warn=2)
# Process CLI arguments (or override if running interactively).
cli.args = commandArgs(trailingOnly=TRUE)
print(cli.args[1]) # Set the home directory for the pipeline.
if ((exists("cli.args") == FALSE) | (length(cli.args) == 0)) {
cli.args <- c()
cli.args[1] <- "/Volumes/NE_ShenLab/20200219_MGENMOT"
}
# Set home directory.
home.dir <- cli.args[1]
# Main --------------------------------------------------------------------
# Define GTF file to be processed (GENCODE release and its label).
gtf.file <- paste0(home.dir, "/resources/gencode/gencode.v19.annotation.gtf")
gtf.version <- "V19"
# Read in GTF file downloaded from GENCODE and keep only transcript rows
# (column 3 is the GTF feature type).
gtf.raw <- read.table(paste0(gtf.file), stringsAsFactors=F, sep="\t", header=F)
gencode.transcripts <- gtf.raw[gtf.raw[, 3] == "transcript", ]
# Parse transcripts in GTF file to get metadata for each entry.
# key.value.pairs lists every attribute key that may appear in GTF column 9;
# one output column is reserved for each.
num.transcripts <- length(gencode.transcripts[, 1])
key.value.pairs <- c("gene_id", "transcript_id", "gene_type", "gene_status", "gene_name", "transcript_type", "transcript_status", "transcript_name", "exon_number", "exon_id", "level",
                     "tag", "ccdsid", "havana_gene", "havana_transcript", "protein_id", "ont", "transcript_support_level",
                     "remap_status", "remap_original_id", "remap_original_location", "remap_num_mappings", "remap_target_status", "remap_substituted_missing_target")
# Fixed GTF columns 1-8 plus one empty string column per attribute key (24).
processed.transcripts <- cbind(gencode.transcripts[, 1:8], matrix("", num.transcripts, 24))
colnames(processed.transcripts) <- c("chrom", "source", "feature_type", "start", "end", "score", "strand", "phase", key.value.pairs)
for (key.value.pair in key.value.pairs)
  processed.transcripts[, key.value.pair] <- as.character(processed.transcripts[, key.value.pair])
# Process key value pairs for each transcript one by one.
print(paste0(num.transcripts, " entries to process in the GTF file..."))
for (i in 1:num.transcripts) {
  # Track progress.
  if (i %% 10000 == 0)
    print(paste0(i, " transcripts processed"))
  # Explode 9th column with key-value pairs: strip a trailing ";", split on
  # ";" into "key value" fields, then split each field on " " into key/value.
  # NOTE(review): splitting on " " means quoted values keep their quote
  # characters and values containing spaces would be truncated -- confirm
  # this is acceptable for this GENCODE release.
  if (substr(gencode.transcripts[i, 9], nchar(gencode.transcripts[i, 9]), nchar(gencode.transcripts[i, 9])) == ";")
    gencode.transcripts[i, 9] <- substr(gencode.transcripts[i, 9], 1, nchar(gencode.transcripts[i, 9]) - 1)
  fields <- trimws(unlist(strsplit(gencode.transcripts[i, 9], split=";")))
  expanded.keys <- unlist(strsplit(fields, split=" "))[(1:length(fields))*2 - 1]
  expanded.values <- unlist(strsplit(fields, split=" "))[(1:length(fields))*2]
  # Fill in the table with the expanded fields.
  for (j in 1:length(expanded.keys)) {
    # Append if there is already an existing entry (repeated keys such as
    # "tag" become a ";"-joined list).
    if (processed.transcripts[i, expanded.keys[j]] == "") {
      processed.transcripts[i, expanded.keys[j]] <- expanded.values[j]
    } else {
      processed.transcripts[i, expanded.keys[j]] <- paste0(processed.transcripts[i, expanded.keys[j]], ";", expanded.values[j])
    }
    # Identify unlisted key-value pairs (debugging aid, left disabled).
    # if ((expanded.keys[j] %in% colnames(processed.transcripts) == FALSE) && (!is.na(expanded.keys[j])))
    #   print(expanded.keys[j])
  }
}
# Save results (and immediately reload, so the script can also be resumed
# from this point by commenting out everything above).
save(processed.transcripts, file=paste0(home.dir, "/resources/gencode/processed.transcripts.", gtf.version, ".Rdata"))
load(file=paste0(home.dir, "/resources/gencode/processed.transcripts.", gtf.version, ".Rdata"))
# Stitch together TSS BED records based on the strand: the transcription
# start site is `start` on the + strand and `end` on the - strand.
tss.bed.plus <- processed.transcripts[processed.transcripts$strand == "+", c("chrom", "start", "start", "strand", "gene_id", "gene_type", "gene_name")]
tss.bed.minus <- processed.transcripts[processed.transcripts$strand == "-", c("chrom", "end", "end", "strand", "gene_id", "gene_type", "gene_name")]
tss.bed <- unique(rbind(tss.bed.plus, setNames(tss.bed.minus, names(tss.bed.plus))))
colnames(tss.bed) <- c("chrom", "start", "end", "strand", "gene_id", "gene_type", "gene_name")
# Filter BED entries to the canonical chromosomes (autosomes + X/Y).
allowed.chrs <- paste0("chr", c(as.character(c(1:22)), "X", "Y"))
tss.bed <- tss.bed[tss.bed[, 1] %in% allowed.chrs, ]
# Remove version number for the Ensembl gene ID (keep text before the ".").
tss.bed$gene_id <- unlist(strsplit(tss.bed$gene_id, split="\\."))[(1:length(tss.bed$gene_id))*2 - 1]
# Write BED file.
# NOTE(review): coordinates are taken from the GTF, which is 1-based;
# standard BED is 0-based half-open -- confirm downstream tools expect this.
write.table(tss.bed, file=paste0(gtf.file, ".bed"), sep="\t", row.names=F, col.names=F, quote=F)
# End ---------------------------------------------------------------------
|
3ba5df31f3d4c00c8d8aaaa298564612add0d4cf
|
3a2b15c469cf4778a100340bcc2cf2642edd37b0
|
/man/current.other.Rd
|
86d272f63b16be850e4ae0e801d10353b680ea34
|
[] |
no_license
|
Qingys/MILC_backup
|
9657aaf2267ffad568c8f8fa2772d3381f31a874
|
cabc1bcbabf99cd250abf7b44f662138ed5a4f7d
|
refs/heads/master
| 2023-03-17T01:53:43.623830
| 2014-02-18T00:00:00
| 2014-02-18T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,191
|
rd
|
current.other.Rd
|
\name{current.other}
\alias{current.other}
\docType{data}
\title{current.other dataset
}
\description{
An R object of class "list" containing Cumulative Incidence Function estimates for other-cause (not lung cancer) mortality, for current smokers by age (5-year age group), gender ("male" or "female") and smoking intensity group ("1-10", "11-20", or "21+" cigarettes) based on the average number of cigarettes smoked per day.
}
\usage{current.other}
\format{
The \emph{`current.other'} list comprises 66 components in total. Each component itself is an R object of class "list" with three components, i.e.:
[[1]] a vector with time points (in years)
[[2]] a vector with CIF estimates
[[3]] a vector with variances of the CIF estimates
}
\source{
CIF estimates are derived using data obtained from the National Health Interview Survey (NHIS) database covering the years 1986-2004.
}
\seealso{ \code{\link{ci.lung}, \link{former.other}, \link{never.other}}
}
\examples{
data(current.other)
current.other[1]
current.other[[1]][1] # time (years)
current.other[[1]][2] # CIF estimates
current.other[[1]][3] # variances of CIF estimates
}
\keyword{Datasets}
|
b89bdfb6005718438813d92e4c719f4f7bc8daa4
|
b667618e65e08cec98c0c75a00bb7b6d079bf34d
|
/code.R
|
09954e623fd6888c888845d3be28f644fb26e4aa
|
[] |
no_license
|
mingzhuye/Flarition_Health_Test
|
843b9cc5032b449bceb3758366d49d1e59e8bdd4
|
fa663582147107982fc12b52b9f9729cb50d66bc
|
refs/heads/master
| 2021-07-15T06:15:08.121920
| 2017-10-19T19:50:48
| 2017-10-19T19:50:48
| 107,591,451
| 0
| 0
| null | null | null | null |
WINDOWS-1258
|
R
| false
| false
| 3,149
|
r
|
code.R
|
# Load the five sheets of the Flatiron workbook (requires the xlsx package;
# sheet order: orders, administrations, demographics, patients, practices).
orders = read.xlsx("flatiron_qs_orders_admins_july_16.xlsx", 1)
administration = read.xlsx("flatiron_qs_orders_admins_july_16.xlsx", 2)
demographics = read.xlsx("flatiron_qs_orders_admins_july_16.xlsx", 3)
patients = read.xlsx("flatiron_qs_orders_admins_july_16.xlsx", 4)
practices = read.xlsx("flatiron_qs_orders_admins_july_16.xlsx", 5)
# NOTE(review): dim() is called with no argument and will error at runtime;
# presumably a data frame (e.g. dim(orders)) was intended -- confirm.
dim()
# Inspect duplicate patient rows in the demographics sheet.
demographics[duplicated(demographics$patient_id),]
## Question 1: What is the average time elapsed between a patient's initial
## diagnosis date and a patient's first treatment? Does this time vary by gender?
orders2 = orders
orders2 = orders2[, -5]
orders2$order_date = as.Date(orders2$order_date, "%d-%b-%Y")
# Worked example for one patient: diagnosis date vs. earliest administration.
patients[patients$patient_id=="h9993d", ]$diagnosis_date ->date1
min(administration[administration$patient_id=="h9993d", ]$administered_date) ->date2
date2 - date1 # Time difference of 7501 days
# Join each patient to their administration dates, then sort by patient id
# and date so the first treatment per patient comes first.
subset2 = merge(patients, administration[, c(1, 4)], by.x = "patient_id", by.y = "patient_id")
subset2[with(subset2, order(subset2[,1], subset2[, 7])), ] ->test
# NOTE(review): `for(i in nrow(patients))` iterates exactly once (over the
# single value nrow(patients)), and the body is empty -- unfinished code.
for(i in nrow(patients)){
}
## Question 2: How many patients are on nivolumab from 2012-2016?
# NOTE(review): the date bounds are compared as strings; this only works if
# administered_date is ISO-formatted (YYYY-MM-DD) text -- confirm.
subset1 = subset(administration, administration$administered_date>="2012-01-01" & administration$administered_date<="2016-12-31" &administration$drug_name=="nivolumab" )
length(unique(subset1$patient_id))
## Question 3: Using the following risk stratification rules, please summarize
## the number of high, medium, and low risk patients.
# NOTE(review): demographics2 is never created in this script (only
# `demographics` is loaded) -- presumably defined elsewhere; verify before
# running. Column 5 receives the risk label; columns 2/3/4 are assumed to be
# gender/age/race -- TODO confirm against the sheet layout.
for(i in 1:nrow(demographics2)){
if((demographics2[i, 2]=="female"&demographics2[i, 4]=="NON_WHITE")|(demographics2[i, 2]=="male"&demographics2[i, 4]=="NON_WHITE" & demographics2[i, 3]>=70)|(demographics2[i, 2]=="unknown"&demographics2[i, 4]=="NON_WHITE" & demographics2[i, 3]>=70)){
demographics2[i, 5] = "High_Risk"
}
else if((demographics2[i, 2]=="female"&demographics2[i, 4]=="WHITE" & demographics2[i, 3]>=75)|(demographics2[i, 2]=="male"&demographics2[i, 4]=="NON_WHITE" & demographics2[i, 3]<70)){
demographics2[i, 5] = "Medium_Risk"
}
else if((demographics2[i, 2]=="female"&demographics2[i, 4]=="WHITE" & demographics2[i, 3]<75)|(demographics2[i, 2]=="male"&demographics2[i, 4]=="WHITE")|(demographics2[i, 2]=="unknown"&demographics2[i, 4]=="WHITE" & demographics2[i, 3]<75) ){
demographics2[i, 5] = "Low_Risk"
}
else if((demographics2[i, 2]=="unknown"&demographics2[i, 4]=="WHITE" & demographics2[i, 3]>=75)){
demographics2[i, 5] = "Medium_to_Low_Risk"
}
else if((demographics2[i, 2]=="unknown"&demographics2[i, 4]=="NON_WHITE" & demographics2[i, 3]<70)){
demographics2[i, 5] = "High_to_Medium_Risk"
}
}
## Put assumptions here
## Question 4: Please create a visualization that could be used to help a
## medical researcher understand how drug prevalence has changed over time.
## Due to the small sample size, a ggplot of drug prevalence based on year
## has been created.
table(administration$drug_name)
# NOTE(review): `test` here must contain year/Freq/drug_name columns; the
# table-to-data-frame step that builds it is not shown in this script.
p <- ggplot(test, aes(x=year, y=Freq, group=drug_name, label=test[,3])) +geom_line(aes(color=drug_name))+geom_point(aes(color=drug_name)) + geom_text(size=3.5) + labs(title = "Drug prevalence changed over years", x="Year", y ="Total Orders")
p
|
70eddc2e22096bb58eb658979f09d3cf37f44eaf
|
6cf9a94de51479dc65dad3608a4b315ba289a36f
|
/test_not_run/hipo15_subgroups_deseq2.R
|
20030b18a6491f0139b6be9899b4bba1de81923d
|
[] |
no_license
|
NagaComBio/cola
|
818c3afdab7e140d549ab9ebf6995a882c967cf5
|
304b3cf771e97ced7f4b20388815b882202cdd84
|
refs/heads/master
| 2021-04-27T08:31:19.184145
| 2018-02-26T10:00:07
| 2018-02-26T10:00:07
| 122,491,685
| 0
| 0
| null | 2018-02-22T14:45:23
| 2018-02-22T14:45:23
| null |
UTF-8
|
R
| false
| false
| 6,657
|
r
|
hipo15_subgroups_deseq2.R
|
library(methods)
library(GetoptLong)
datatype = "cell01"
GetoptLong(
"datatype=s", "cell01"
)
# library(cola)
source("/home/guz/project/development/cola/load.R")
# register_top_value_fun(AAC = function(mat) AAC(t(mat), cor_method = "spearman", mc.cores = 4))
source("/home/guz/project/analysis/hipo15/script_for_repo/hipo15_lib.R")
setwd("/icgc/dkfzlsdf/analysis/hipo/hipo_015/hipo15_rnaseq_cell_analysis")
default_param = list(
"normalization.method" = "rpkm",
"varianceStabilize" = 1,
"normalizeTOGeneLength" = 0
)
load("/icgc/dkfzlsdf/analysis/B080/guz/gencode/gencode_v19_lincRNA_transcript_merged.RData")
# Annotate a result table with gene symbols and gene types.
#
# Looks up each row name of `mat` in the global `gene_annotation$gtf` list
# (loaded from the GENCODE RData file above) and appends two character
# columns, `name` and `type`, to the table.
#
# @param mat a matrix/data.frame whose row names are GENCODE gene IDs
# @return `mat` as a data.frame with `name` and `type` columns appended
add_symbol_and_type = function(mat) {
	gtf_entries = gene_annotation$gtf[rownames(mat)]
	gene_names = sapply(gtf_entries, function(entry) entry$name)
	gene_types = sapply(gtf_entries, function(entry) entry$type)
	data.frame(mat, name = gene_names, type = gene_types, stringsAsFactors = FALSE)
}
library(DESeq2)
library(ComplexHeatmap)
library(circlize)
# go/pathway analysis
# Run a DESeq2 two-group differential-expression analysis and write results.
#
# Side effects (all written to the current working directory):
#   - "<main>_deseq2_diffgene_all_genes.csv": full annotated results table
#   - "<main>_fc_vs_basemean_padj_<cutoff>.pdf": MA-style scatter plot
#   - "<main>_deseq2_diffgene_padj_<cutoff>_diff_gene_heatmap.pdf": two
#     heatmaps (all significant genes; protein-coding significant genes)
#
# @param count   raw count matrix (genes x samples) for DESeq2
# @param expr    normalized expression matrix used for the heatmaps
# @param df      colData data.frame; must contain a `subgroup` column ("1"/"2")
# @param formula design formula for DESeq2, e.g. ~ subgroup
# @param main    file-name prefix / plot title
# @param cutoff  adjusted-p-value threshold for calling a gene significant
deseq_analysis2 = function(count, expr, df, formula, main, cutoff = 0.01) {
	qqcat("@{main}\n")
	# Standard DESeq2 pipeline: build dataset, fit, extract results.
	dds = DESeqDataSetFromMatrix(countData = count, colData = df, design = formula)
	dd2 = DESeq(dds)
	res = results(dd2)
	res.table = as.data.frame(res)
	# Append gene symbol/type columns (uses the global GENCODE annotation).
	res.table = add_symbol_and_type(res.table)
	write.csv(res.table, file = qq("@{main}_deseq2_diffgene_all_genes.csv"))
	# Drop genes with no adjusted p-value (DESeq2 independent filtering).
	res.table = res.table[!is.na(res.table$padj), ]
	## fold change vs base mean, colored by significance and direction
	# NOTE(style): `max` shadows base::max within this scope.
	max = max(abs(res.table$log2FoldChange))
	pdf(qq("@{main}_fc_vs_basemean_padj_@{cutoff}.pdf"), height = 6, width = 8)
	plot(log2(res.table$baseMean+1), res.table$log2FoldChange, pch = 16, cex = 0.5, ylim = c(-max, max),
		col = ifelse(res.table$padj < cutoff & res.table$log2FoldChange > 1, "#FF000080", ifelse(res.table$padj < cutoff & res.table$log2FoldChange < -1, "#00FF0080", "#00000080")),
		xlab = "log2(base_mean + 1)", ylab = "log2(fold change)", main = qq("@{main}, padj < @{cutoff}"))
	dev.off()
	pdf(qq("@{main}_deseq2_diffgene_padj_@{cutoff}_diff_gene_heatmap.pdf"), height = 12, width = 8)
	# Heatmap 1: all significant genes, row-scaled (z-scores) when >2 samples.
	mat = expr[rownames(res.table[res.table$padj < cutoff, , drop = FALSE]), , drop = FALSE]
	dn = dimnames(mat)
	if(ncol(mat) > 2) {
		mat = t(apply(mat, 1, scale))
		dimnames(mat) = dn
	}
	# Cluster columns, then reorder the dendrogram by subgroup.
	# NOTE(review): as.numeric() ignores its second argument, so the
	# function(w) ... weight expression has no effect here -- confirm the
	# intended reordering weights.
	column_dend = as.dendrogram(hclust(dist(t(mat))))
	column_dend = stats:::reorder.dendrogram(column_dend, as.numeric(factor(df$subgroup), function(w) mean(ifelse(w == 2, 0.1, w))))
	# NOTE(review): ComplexHeatmap's argument is `cluster_columns` (plural);
	# `cluster_column` may be silently ignored -- verify against the
	# installed ComplexHeatmap version.
	ht = Heatmap(mat, name = "scaled_deseq2", top_annotation = HeatmapAnnotation(df = df, col = list(subgroup = c("1" = "red", "2" = "blue"))),
		col = colorRamp2(c(-2, 0, 2), c("blue", "white", "red")), show_row_names = FALSE,
		cluster_column = column_dend, column_dend_reorder = FALSE,
		column_title = qq("heatmap for differentially expressed genes (deseq2)\ncutoff: padj < @{cutoff}, all @{sum(res.table$padj < cutoff)} genes"))
	draw(ht)
	# Heatmap 2: same as above, restricted to protein-coding genes.
	mat = expr[rownames(res.table[res.table$padj < cutoff & res.table$type == "protein_coding", ]), ]
	dn = dimnames(mat)
	if(ncol(mat) > 2) {
		mat = t(apply(mat, 1, scale))
		dimnames(mat) = dn
	}
	column_dend = as.dendrogram(hclust(dist(t(mat))))
	column_dend = stats:::reorder.dendrogram(column_dend, as.numeric(factor(df$subgroup), function(w) mean(ifelse(w == 2, 0.1, w))))
	ht = Heatmap(mat, name = "scaled_deseq2", top_annotation = HeatmapAnnotation(df = df, col = list(subgroup = c("1" = "red", "2" = "blue"))),
		col = colorRamp2(c(-2, 0, 2), c("blue", "white", "red")), show_row_names = FALSE,
		cluster_column = column_dend, column_dend_reorder = FALSE,
		column_title = qq("heatmap for differentially expressed genes (protein_coding) (deseq2)\ncutoff: padj < @{cutoff}, all @{sum(res.table$padj < cutoff & res.table$type == 'protein_coding')} genes"))
	draw(ht)
	dev.off()
}
# Dispatch on the --datatype CLI argument: load the matching consensus-
# clustering result, the corresponding count matrix, and a DESeq2-normalized
# expression matrix for that cohort.
# NOTE(review): the three cell* branches differ only in the .rds file name;
# they could be collapsed, but are kept verbatim here.
if(grepl("cell", datatype)) {
	source("/home/guz/project/analysis/hipo15/script_for_repo/head.R")
	deseq_analysis = deseq_analysis2
	if(datatype == "cell01") {
		res_list = readRDS("hipo15_c1_subgroups.rds")
		res = get_single_run(res_list, "AAC", "skmeans")
		count = expression$count[, colnames(res@.env$data)]
		expr = expression$deseq2[, colnames(count)]
	} else if(datatype == "cell02") {
		res_list = readRDS("hipo15_c2_subgroups.rds")
		res = get_single_run(res_list, "AAC", "skmeans")
		count = expression$count[, colnames(res@.env$data)]
		expr = expression$deseq2[, colnames(count)]
	} else if(datatype == "cell03") {
		res_list = readRDS("hipo15_c3_subgroups.rds")
		res = get_single_run(res_list, "AAC", "skmeans")
		count = expression$count[, colnames(res@.env$data)]
		expr = expression$deseq2[, colnames(count)]
	}
} else if(datatype == "primary_tumor") {
	########################### primary tumor #################################
	deseq_analysis = deseq_analysis2
	load("/icgc/dkfzlsdf/analysis/hipo/hipo_015/data_types/RNAseq/expression_data/hipo15_rnaseq_primary_tumor_gencode19_lincRNA_expression.RData")
	load("/home/guz/project/analysis/hipo15/signature_genes/moffitt_sigANDannotation.RData")
	source("/home/guz/project/development/ngspipeline2/lib_expression.R")
	res_list = readRDS("hipo15_primary_tumor_subgroups.rds")
	res = get_single_run(res_list, "AAC", "skmeans")
	count = expression$count[, colnames(res@.env$data)]
	# Normalize counts on the fly (no precomputed deseq2 matrix here).
	expr = normalize.count(count, method = "deseq2", gene_annotation, param = default_param)
} else if(datatype == "xenograft") {
	######################## xenograft #########################
	deseq_analysis = deseq_analysis2
	load("/icgc/dkfzlsdf/analysis/hipo/hipo_015/data_types/RNAseq/expression_data/rnaseq_xenograft_human_v19_lincRNA_mouse_M2_expression.RData")
	load("/icgc/dkfzlsdf/analysis/B080/guz/gencode/gencode_v19_lincRNA_transcript_merged.RData")
	source("/home/guz/project/development/ngspipeline2/lib_expression.R")
	gene_type = sapply(gene_annotation$gtf, function(x) x$type)
	count = expression$count
	# Xenograft data mixes human and mouse genes; drop mouse (ENSM*) rows.
	l = !grepl("^ENSM", rownames(count))
	count = count[l, ]
	res_list = readRDS("hipo15_xenograft_subgroups.rds")
	res = get_single_run(res_list, "AAC", "skmeans")
	count = count[, colnames(res@.env$data)]
	expr = normalize.count(count, method = "deseq2", gene_annotation, param = default_param)
}
# Extract the two-group class assignment and run the DE analysis.
cl = get_class(res, k = 2)
anno_df = data.frame(subgroup = as.character(cl$class))
deseq_analysis(count, expr, anno_df, ~ subgroup, qq("@{datatype}_2groups"), cutoff = 0.05)
# Cluster-submission helper (disabled): loops over all datatypes and submits
# one qsub job per cohort running this script.
# for(datatype in c("cell01", "cell02", "cell03", "primary_tumor", "xenograft")) {
# 	cmd = qq("Rscript-3.3.1 /home/guz/project/development/cola/test_not_run/hipo15_subgroups_deseq2.R --datatype @{datatype}")
# 	cmd = qq("perl /home/guz/project/development/ngspipeline2/qsub_single_line.pl '-l walltime=10:00:00,mem=10G -N hipo15_subgroups_@{datatype}_deseq2' '@{cmd}'")
# 	system(cmd)
# }
|
287f43e7674c47230f1782e11e35da94c1784db1
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Basler/terminator/stmt21_252_267/stmt21_252_267.R
|
04b47354d606c7ce533fa9d9bd21db2fb330b345
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 720
|
r
|
stmt21_252_267.R
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 13204
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 13203
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 13203
c
c Input Parameter (command line, file):
c input filename QBFLIB/Basler/terminator/stmt21_252_267.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 4080
c no.of clauses 13204
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 13203
c
c QBFLIB/Basler/terminator/stmt21_252_267.qdimacs 4080 13204 E1 [1] 0 210 3869 13203 RED
|
e301f5335bc3ae660aa660ba0feb715666afd410
|
da8dae69e597072bc616936d1d72a96f65e4efa0
|
/code/oldversions/v4_20190329/tools/nictools/R/cheb.R
|
9adb2638b6552a27ec03da2b110ad878977b776d
|
[] |
no_license
|
UCL/provis
|
71e82c383cd9414840e57c2a2867826d6b4ee3e6
|
86a287c7bc705d4aeffb9bbcf96747e97e6d688b
|
refs/heads/master
| 2020-08-01T04:08:20.198284
| 2019-11-08T12:09:43
| 2019-11-08T12:09:43
| 210,310,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 478
|
r
|
cheb.R
|
#' Compute matrix of Chebyshev polynomials (first kind)
#'
#' Evaluates T_0 .. T_{n-1} at each point of `x` via the three-term
#' recurrence T_j(x) = 2 x T_{j-1}(x) - T_{j-2}(x). The first column (T_0)
#' is halved, the usual convention for Chebyshev-series weights.
#'
#' @param x vector of values betwen -1 and 1
#' @param n n-1 = highest order polynomial (must be >= 1)
#' @return cheb (nx x n) matrix of results
#' @examples
#' x <- matrix(2*c(0:10)/11 - 1, 11, 1)
#' c1 <- cheb(x, 5)
cheb<-function(x,n) {
  stopifnot(n >= 1)
  nx<-nrow(as.matrix(x))
  cheb <-matrix(rep(1,n*nx),nx,n)
  # Column 2 is T_1(x) = x; only valid when at least two columns exist.
  if (n >= 2) {
    cheb[,2] <- as.matrix(x)
  }
  # BUG FIX: the original unconditional `for (j in 3:n)` iterated j = 3, 2
  # when n == 2 (since 3:2 counts down) and failed with a subscript error;
  # n == 1 failed on the cheb[,2] assignment. Guard so small n is valid.
  if (n >= 3) {
    for (j in 3:n) {
      cheb[,j] <- 2.0*as.matrix(x) * cheb[,j-1]-cheb[,j-2]
    }
  }
  # Half-weight on the T_0 column (Chebyshev-series convention).
  cheb[,1]<- 0.5*cheb[,1]
  return(cheb)
}
|
46c661eb3ef6508f9338c472c410cfbc021d93bc
|
5ee3003b66131a65253f18b82608e9fc526ae0c5
|
/Figure_2/monocle_GBS_unexp.R
|
6f8b3bc1d60a5b3f3029c17f57d7d0347a46ef63
|
[] |
no_license
|
yanailab/Pathcourse
|
1b8e5d546a71a683a703052e38b1969d5b057217
|
fd8e53e9e54e0c23b3116a2de4d838bc51ed5f4f
|
refs/heads/main
| 2023-04-14T19:51:19.357475
| 2022-08-19T19:30:39
| 2022-08-19T19:30:39
| 390,096,509
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,473
|
r
|
monocle_GBS_unexp.R
|
#### Setup: working directory and packages ####
setwd("C:/Users/Gal/Dropbox (NYU Langone Health)/Projects/Pap36_Pathcourse/analysis/analysis_by_figure/new_2020_5_16/Figure_2")
# Suppress warnings globally for this interactive session.
options(warn=-1)
#source("http://bioconductor.org/biocLite.R")
#biocLite("monocle")
library(monocle)
#### Loading data ####
## Inputs: raw expression matrix (genes x cells), gene names as .txt,
## and a list of ordering ("info") genes as .txt.
expr = as.matrix(read.delim("C:/Users/Gal/Dropbox (NYU Langone Health)/Projects/Pap36_Pathcourse/analysis/analysis_by_figure/new_2020_5_16/Figure_2/GBS_unexp_macs_mat.txt", header=FALSE))
gene_names <- read.table("C:/Users/Gal/Dropbox (NYU Langone Health)/Projects/Pap36_Pathcourse/analysis/analysis_by_figure/new_2020_5_16/Figure_2/gene_names", quote="\"", comment.char="", stringsAsFactors = FALSE, header = FALSE)[,1]
rownames(expr) = gene_names
#### Build the monocle CellDataSet manually ####
# phenoData: one row per cell; featureData: one row per gene.
pd = new("AnnotatedDataFrame", data = data.frame('Cell' = colnames(expr), row.names = colnames(expr)))
fd = new("AnnotatedDataFrame", data = data.frame('gene_short_name' = rownames(expr), row.names = rownames(expr)))
cds = newCellDataSet(expr, phenoData = pd, featureData = fd, expressionFamily=negbinomial.size())
# Standard monocle preprocessing for negative-binomial counts.
cds <- estimateSizeFactors(cds)
cds <- estimateDispersions(cds)
#cds <- load_lung()
cds
View(pData(cds))
# Genes used to order cells along the trajectory.
info_gene_names <- read.table("C:/Users/Gal/Dropbox (NYU Langone Health)/Projects/Pap36_Pathcourse/analysis/analysis_by_figure/new_2020_5_16/Figure_2/info_genes", quote="\"", comment.char="", stringsAsFactors = FALSE, header = FALSE)[,1]
#temp_genes = sample(ordering_genes, 1000)
cds <- setOrderingFilter(cds, info_gene_names)
#plot_ordering_genes(cds)
# Reduce to 2D with DDRTree and order cells in pseudotime.
cds <- reduceDimension(cds, max_components = 2, method = 'DDRTree')
cds <- orderCells(cds)
#cds <- orderCells(cds, root_state = "5")
#plot_cell_trajectory(cds, color_by = "orig.ident")
plot_cell_trajectory(cds, color_by = "State")
#### Inspect the trajectory ####
#cds <- reduceDimension(cds, max_components = 2, method = 'DDRTree')
#cds <- orderCells(cds)
#plot_cell_trajectory(cds, color_by = "my_sample")
plot_cell_trajectory(cds, color_by = "State")
plot_cell_trajectory(cds, color_by = "Pseudotime")
#### Export pseudotime, state and 2D coordinates for downstream figures ####
pseudotime_vec = cds$Pseudotime
write(pseudotime_vec,file = "pseudotime_vec_R_GBS_unexp",sep = "\n")
state_vec = cds$State
write(state_vec,file = "state_vec_R_GBS_unexp",sep = "\n")
all_coord = reducedDimS(cds)
write(all_coord[1,],file = "coord_x_GBS_unexp",sep = "\n")
write(all_coord[2,],file = "coord_y_GBS_unexp",sep = "\n")
|
b6c705134dbd4c7a740f7164d44776312037fc48
|
8eed7a301375eb2920f0ba032fb6428bce9e33a7
|
/SS/Base_model1/ParameterCheck.R
|
e9c024cc471c0412ea370b737255f55f134e373f
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
melissamonk-NOAA/GopherBlackYellow2019
|
363b9c5b6b2b32703cd31225678db288d53a10dc
|
bfa62d2ae6e158ed17b66ee01b03b22ad79999d1
|
refs/heads/master
| 2022-02-22T18:49:22.490814
| 2019-10-02T19:21:53
| 2019-10-02T19:21:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 615
|
r
|
ParameterCheck.R
|
# Inspect estimated parameters of a Stock Synthesis run with r4ss:
# load the model output, keep only active (estimated) parameters, drop
# recruitment-deviation parameters, and rank the rest by |CV| to spot
# poorly-estimated parameters.
# NOTE(review): ls(-1) passes -1 as the `name` argument of ls(); the usual
# idiom is rm(list=ls()) -- confirm this call behaves as intended.
rm(list=ls(-1))
library('r4ss')
# update_r4ss_files()
# devtools::install_github('r4ss/r4ss')
WD <- c('C:/XiHe1/GPHR2019/SSModel/SS/')
setwd(WD)
# NOTE(style): prefer TRUE/FALSE over T/F, which are reassignable.
with.covar = T # T: without -nohess; F: with -nohess
a1 <- SS_output(dir=WD,covar=with.covar)
# Keep only parameters that were actively estimated (Active_Cnt not NA).
a2 = a1$parameters
a3 = a2[!is.na(a2$Active_Cnt),]
a4 = a3[,1:12]
rownames(a4) = c()
# Drop all recruitment-deviation parameter families.
a5 = a4[!grepl("Early_RecrDev",a4$Label),]
a6 = a5[!grepl("Main_RecrDev",a5$Label),]
a7 = a6[!grepl("Late_RecrDev",a6$Label),]
a8 = a7[!grepl("ForeRecr",a7$Label),]
# Coefficient of variation of each parameter estimate.
a8$cv1 = a8$Parm_StDev / a8$Value
# sort data by abs(cv1), largest (least well estimated) first
a9 = a8[order(abs(a8$cv1), na.last=T, decreasing=T),]
|
daaab689340eb235e0a8ac214ddd493ffe44ff7e
|
b4c83dfd4419ad6ff8a41cfca4992b0e580907f0
|
/DADM_Final_Project.R
|
4e5f4d4fbfe151b3662ecfade92366254ae03fa4
|
[] |
no_license
|
jainkavisha/DADM
|
e7d33852a0ccaa4c522288e640b3ecfdf2fbb6e9
|
c56709f6db7255c97c42dda4a08fd3180fea8c02
|
refs/heads/main
| 2023-01-18T21:50:53.117360
| 2020-11-29T19:36:58
| 2020-11-29T19:36:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,229
|
r
|
DADM_Final_Project.R
|
# Getting and setting the appropriate working directory
getwd()
setwd("/Users/mihikagupta/Desktop/SEM_2/DADM/DADM_FinalProject")
# Loading relevant libraries
suppressPackageStartupMessages(library(tidyverse))
suppressPackageStartupMessages(library(skimr))
suppressPackageStartupMessages(library(GGally))
suppressPackageStartupMessages(library(plotly))
suppressPackageStartupMessages(library(viridis))
suppressPackageStartupMessages(library(caret))
suppressPackageStartupMessages(library(randomForest))
suppressPackageStartupMessages(library(e1071))
suppressPackageStartupMessages(library(rpart))
suppressPackageStartupMessages(library(xgboost))
suppressPackageStartupMessages(library(h2o))
suppressPackageStartupMessages(library(ggcorrplot))
suppressPackageStartupMessages(library(rpart.plot))
suppressPackageStartupMessages(library(corrgram))
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(ggthemes))
suppressPackageStartupMessages(library(psych))
suppressPackageStartupMessages(library(scales))
suppressPackageStartupMessages(library(treemap))
suppressPackageStartupMessages(library(treemapify))
suppressPackageStartupMessages(library(repr))
suppressPackageStartupMessages(library(cowplot))
suppressPackageStartupMessages(library(magrittr))
suppressPackageStartupMessages(library(ggpubr))
suppressPackageStartupMessages(library(RColorBrewer))
suppressPackageStartupMessages(library(plotrix))
suppressPackageStartupMessages(library(ggrepel))
suppressPackageStartupMessages(library(forcats))
suppressPackageStartupMessages(library(reshape2))
suppressPackageStartupMessages(library(caTools))
suppressPackageStartupMessages(library(tree))
suppressPackageStartupMessages(library(rattle))
library("car")
library(MASS)
library(cluster)
library(MLmetrics)
library(vegan)
library(party)
library(lmtest)
library(stats)
library(corrplot)
library(pROC)
library(plyr)
# Loading the dataset (IBM HR attrition data).
data<-read.csv("attrition.csv")
# Viewing initial data
head(data)
# Checking column names and attributes
names(data)
# Checking the dependent variable; predicting it is the main objective.
data$Attrition
# The dependent variable is binary with levels "Yes" and "No".
################################## Getting to know our data firstly #######################################
# Shape and size
nrow(data)
ncol(data)
# Checking the data structure we have
str(data)
summary(data)
# Checking for null attributes
# NOTE(review): is.null(data) only tests whether the object itself is NULL
# (always FALSE for a loaded data frame); to check for missing values use
# anyNA(data) or colSums(is.na(data)).
is.null(data)
# we have no nulls and therefore can easily move ahead
# Using an insightful summary with skim and table
data %>% glimpse()
# looking at type of data
class(data)
# looking at datatypes of each attribute
t(sapply(data, class))
# Dropping constant/identifier columns that carry no predictive signal.
data = data[, !(names(data)%in% c('Over18','EmployeeNumber', 'EmployeeCount', 'StandardHours'))]
# Checking new data again
head(data)
View(data)
nrow(data)
ncol(data)
################################################### EDA ################################################################################
# Correlation between the numeric attributes only.
num_cols <- unlist(lapply(data, is.numeric)) # Identify numeric columns
num_cols
data_num <- data[ , num_cols] # Subset numeric columns of data
data_num
M <- cor(data_num)
M
corrplot(M, method="circle")
# here we observe that there are some strong positive correlation between:-
# 1. Age and Total Working years
# 2. Job Level and Job Satisfaction
# 3. Job Level and Total Working years
# 4. Job Level and Monthly Income
# 5. Total Working years and Monthly Income
# 6. Performance Rating and Percent Salary Hike
# Changing the dependent variable to factor
data$Attrition<-as.factor(data$Attrition)
class(data$Attrition)
# checking current level names
levels(data$Attrition)
# Recode factor levels: "No" -> "0", "Yes" -> "1" (plyr::revalue).
data$Attrition<-revalue(data$Attrition, c("No"="0", "Yes"="1"))
View(data)
# Several categorical variables are stored as integers/strings;
# convert them to factors so models treat them correctly.
factors <- c("BusinessTravel", "Department", "DistanceFromHome",
             "Education", "EducationField", "EnvironmentSatisfaction", "Gender",
             "JobInvolvement", "JobLevel", "JobRole", "JobSatisfaction", "MaritalStatus",
             "OverTime", "PerformanceRating", "RelationshipSatisfaction", "StockOptionLevel", "WorkLifeBalance")
data[factors] = lapply(data[factors], factor)
t(sapply(data, class)) # successfully changed data types
################################################### Visualization #######################################################
# Imbalance in dataset
options(repr.plot.width=8, repr.plot.height=4)
ggplot(data, aes(Attrition,fill = Attrition))+geom_histogram(stat="count")+
labs(title="Employee Attrition (Amount)", x="Employee Attrition",y="Amount")
################################################### Visualization #######################################################
# Checking data normality for monthly income vs attrition
qplot(TotalWorkingYears, MonthlyIncome, data = data)+ geom_smooth(method = "lm", se=F)+ facet_grid(. ~ Attrition)
################################################### Visualization #######################################################
# Is overtime a contributor for higher attrition levels
# Overtime vs Attrition
ggplot(data, aes(OverTime,fill = Attrition))+ geom_histogram(stat="count")
# here we observe that when employee was working overtime, the attrition levels were also higher
################################################### Visualization #######################################################
# Marital Status vs Attrition
ggplot(data, aes(MaritalStatus,fill = Attrition))+geom_histogram(stat="count")
# single people have more tendency to be subject to attrition
################################################### Visualization #######################################################
###JobRole vs Attrition
ggplot(data, aes(JobRole,fill = Attrition))+geom_histogram(stat="count")
# here we see some job roles were more likely to face attrition than others
################################################### Visualization #######################################################
# Gender vs Attrition
ggplot(data, aes(Gender,fill = Attrition))+geom_histogram(stat="count")
# Gender does not seem to be a significant contributor towards attrition
################################################### Visualization #######################################################
# Education Field vs Attrition
ggplot(data, aes(EducationField,fill = Attrition))+geom_histogram(stat="count")
################################################### Visualization #######################################################
# Department vs Attrition
ggplot(data, aes(Department,fill = Attrition))+geom_histogram(stat="count")
# we dont see any significant trend here
################################################### Visualization #######################################################
# Education vs Attrition
ggplot(data, aes(Education,fill = Attrition))+geom_histogram(stat="count")
################################################### Visualization #######################################################
# Business Travel vs Attrition
ggplot(data, aes(BusinessTravel,fill = Attrition))+geom_histogram(stat="count")
# when travel freq is less, this case is less prone to attrition
################################################### Visualization #######################################################
# LOOKING AT GENDER, AGE DISTRIBUTION
# Average age by gender
average_ageGender <- data %>% group_by(Gender) %>% summarize(avg = mean(Age))
average_ageGender
# Age distribution by gender.  (The original group_by(Gender) before ggplot()
# had no effect on the plot -- faceting does the per-gender split -- so it
# was removed.)
gender.dist <- data %>% filter(Gender == 'Male' | Gender == "Female") %>%
filter(!is.na(Age)) %>%
ggplot(aes(x=Age)) + geom_density(aes(fill=Gender), alpha=0.8, show.legend=FALSE) + facet_wrap(~Gender) + theme_minimal() +
geom_vline(aes(xintercept=mean(Age)),
color="red", linetype="dashed", size=1) + labs(title="Age Distribution")
# Overall age distribution.
# BUG FIX: the explicit `data = data` argument inside ggplot() silently
# discarded the piped (NA-filtered) data, so the plot was drawn from the
# unfiltered frame.  Pipe straight into ggplot() instead.
overall.dist <- data %>% filter(!is.na(Age)) %>%
ggplot(mapping = aes(x=Age)) + geom_density(color="darkblue", fill="lightblue") +
geom_vline(aes(xintercept=mean(Age)),
color="red", linetype="dashed", size=1) + theme_minimal() + labs(x="Overall Age")
plot_grid(gender.dist, overall.dist, nrow=2)
################################################### Visualization #######################################################
# Monthly Income by gender, we observe almost equal payscales irrespective of the gender, females being slightly higher
# Boxplot compares the income median and spread between the two gender groups.
ggplot(data, aes(x=Gender, y=MonthlyIncome, color=Gender, fill=Gender)) + geom_boxplot()+labs(title="Are there any Gender Disparities in Income?")
################################################### Visualization #######################################################
# Understanding generational behaviour: do young people work in more companies
# than the older generation?  This might prove that millennials tend to be more
# picky with regards to jobs than the older generation.
# First create a categorical Generation variable from Age.
# BUG FIX: the original conditions were `Age < 37` followed by `Age >= 38`,
# so anyone aged exactly 37 fell through every branch and was mislabelled
# "Silent".  Each nested ifelse() branch already implies the previous test
# failed, so simple upper bounds cover all ages without gaps.
data$Generation <- ifelse(data$Age < 38, "Millenials",
                          ifelse(data$Age < 54, "Generation X",
                                 ifelse(data$Age < 73, "Boomers", "Silent")))
# Number of companies previously worked, by generation, split by Attrition.
generation.dist <- data %>%
  ggplot() + geom_boxplot(aes(x = reorder(Generation, NumCompaniesWorked, FUN = median),
                              y = NumCompaniesWorked, fill = Generation)) +
  facet_wrap(~Attrition) +
  labs(title="Knowing Past Generations", x="Generation", y="Number of Companies Previously Worked")
generation.dist
# here we observe by the general trend that irrespective of any generation, when people worked in more number of companies,
# attrition was more likely to happen.
################################################### Visualization #######################################################
# Overtime,Age, and Marital Status VS Attrition
# Jittered scatter: one point per employee, colour marks Attrition,
# panels split by marital status.
ggplot(data, aes(OverTime, Age)) +
facet_grid(.~MaritalStatus) +
geom_jitter(aes(color = Attrition),alpha = 0.4) +
ggtitle("x=Overtime, y= Age, z = MaritalStatus , t = Attrition")
################################################# LOGISTIC REGRESSION ######################################################
# Since the dependent variable (Attrition) is binary, fit a logistic regression.
# Drop the derived Generation column BY NAME: the original `data[,-32]`
# positional drop silently removes the wrong column if the layout ever changes.
data$Generation <- NULL
# Splitting data, set the seed to make your partition reproducible
set.seed(123)
# 75% of the sample size will be train and 25% is test
smp_size <- floor(0.75 * nrow(data))
train_size <- sample(seq_len(nrow(data)), size = smp_size)
train <- data[train_size, ]
test <- data[-train_size, ]
# Fit the full logistic regression model.
model_log <- glm(Attrition ~ ., family = binomial(link = 'logit'), data = train)
# checking summary
summary(model_log)
# Predict on the test set.  BUG FIX: type = "response" returns probabilities;
# the default link-scale predictions made the 0.5 cutoff meaningless
# (0.5 on the log-odds scale corresponds to p ~ 0.62).
pred_prob <- predict(model_log, test, type = "response")
pred <- ifelse(pred_prob > 0.5, 1, 0)
pred
# Confusion matrix (rows = prediction, columns = truth).
table(pred, test$Attrition)
# caret::confusionMatrix() takes the prediction first and expects factors with
# identical levels; glm codes the *second* factor level as "success", so map
# the 0/1 predictions back onto the Attrition levels.
# (Assumes Attrition is a two-level factor -- verify upstream preprocessing.)
pred_class <- factor(levels(test$Attrition)[pred + 1], levels = levels(test$Attrition))
confusionMatrix(pred_class, test$Attrition)
# Reducing or pruning the model for better results, lets check the p values
summary(model_log)
# After checking the p-values for all variables, we observe that few are more significant than others, we perform logistic regression with only these variables
# ['Age','BusinessTravel', 'DistanceFromHome', 'EnvironmentSatisfaction', 'JobInvolvement','JobLevel', 'JobSatisfaction', 'MaritalStatus', 'NumCompaniesWorked', 'OverTime', 'RelationshipSatisfaction','StockOptionLevel' ,'TotalWorkingYears', 'TrainingTimesLastYear', 'WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion']
selection<-c('Attrition','Age','BusinessTravel', 'DistanceFromHome', 'EnvironmentSatisfaction', 'JobInvolvement','JobLevel', 'JobSatisfaction', 'MaritalStatus', 'NumCompaniesWorked', 'OverTime', 'RelationshipSatisfaction','StockOptionLevel' ,'TotalWorkingYears', 'TrainingTimesLastYear', 'WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion')
data1 <- data[, selection]
# BUG FIX: the original drew a brand-new random split here WITHOUT a seed,
# so (a) the script was not reproducible and (b) the reduced model was
# evaluated on different rows than the full model, making the comparison
# below invalid.  Reuse the original train/test row indices instead.
train1 <- data1[train_size, ]
test1 <- data1[-train_size, ]
# Fit the reduced model on the same training rows.
model_select <- glm(Attrition ~ ., family = binomial(link = 'logit'), data = train1)
# checking summary
summary(model_select)
# Probabilities (not link-scale values) so the 0.5 cutoff is meaningful.
pred1 <- predict(model_select, test1, type = "response")
pred1 <- ifelse(pred1 > 0.5, 1, 0)
pred1
# Checking the confusion matrix
table(pred1, test1$Attrition)
## Checking the new confusion matrix shows higher accuracy levels than original model
# The reduced model is nested in the full model and both are now fitted on the
# same rows, so a likelihood-ratio test is a valid comparison.
anova(model_select, model_log, test = "Chisq")
# the results of anova also show that the select model performed better
########### Now lets apply Stepwise (AIC-based) regression on this model ###########
step(model_select)
|
7ae8ff25c9689499e1cbd9fc3c124e9edbbf9441
|
8b209188d063cd3c173e0fb1262b11ef16ec6cfe
|
/man/add_window_l.Rd
|
08bc3f84c514cd539bd1ef7adcf32d36b309012d
|
[] |
no_license
|
cran/tnet
|
26e7a2f9e737b9cde34e29a9918315d183a2ca31
|
15bd16db1192d2ff0712aa916785fd50d6927fd0
|
refs/heads/master
| 2021-01-15T12:26:21.300405
| 2020-02-24T17:00:02
| 2020-02-24T17:00:02
| 17,700,542
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,273
|
rd
|
add_window_l.Rd
|
\name{add_window_l}
\alias{add_window_l}
\title{ Add smoothing window to a longitudinal network }
\description{
This function adds negative ties (i.e., a smoothing window) to a longitudinal network.
}
\usage{add_window_l(net,window=21, remove.nodes=TRUE)}
\arguments{
\item{net}{ Longitudinal network }
\item{window}{ Number of days before ties 'expire'.}
\item{remove.nodes}{ Whether or not nodes should be removed from the network if they have no more ties. This function adds a self-loop with a negative weight at the time of a node's last tie plus the length of the window.}
}
\value{Returns the longitudinal network with negative arcs.}
\references{ tore@opsahl.co.uk }
\author{ Tore Opsahl; http://toreopsahl.com }
\note{ version 1.0.0}
\examples{
t <- c('2007-09-12 13:45:00',
'2007-09-12 13:46:31',
'2007-09-12 13:47:54',
'2007-09-12 13:48:21',
'2007-09-12 13:49:27',
'2007-09-12 13:58:14',
'2007-09-12 13:52:17',
'2007-09-12 13:56:59');
i <- c(1,1,1,1,1,1,1,1);
j <- c(2,2,2,2,2,2,3,3);
w <- c(1,1,1,1,1,1,1,1);
sample <- data.frame(t, i, j, w);
## Run the programme
add_window_l(sample, window=21)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ networks }
|
a232b63aed52ecd8d7fa4b248ee8cb60aa9231e6
|
f19e1d54a1405ca75e2b4a059278246a47423da6
|
/R/1_worldclim_crop.R
|
37d4258a0d1c937ba214354d813942da8b602487
|
[] |
no_license
|
Projeto-BHRD-INMA/clima
|
934257a337145dbb721b47b818339571e9f61a4c
|
1688fc6930051bf93d87019827f3af51b2cc2b0f
|
refs/heads/master
| 2022-11-14T06:15:09.832652
| 2020-06-19T22:41:37
| 2020-06-19T22:41:37
| 269,772,150
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,190
|
r
|
1_worldclim_crop.R
|
############################################
# Exploring BHRD climate data
# Bruno M. Carvalho - brunomc.eco@gmail.com
# Cropping GCM rasters to the BHRD basin
############################################
library(raster)
library(rgdal)
library(stringr)
# worldclim data
# mask for the BHRD region (BHRD polygon extent + 5 km buffer)
bhrd <- readOGR("./data/vector/extensao+5km_BHRD.shp")
bhrd <- buffer(bhrd, width=0.5) # a little extra buffer to capture the weather stations
# NOTE(review): buffer width is in map units -- presumably degrees here; confirm the layer's CRS.
# load the rasters for each variable and crop them to the region
pr <- list.files("./big/worldclim/wc2.1_10m_prec/", full.names = TRUE, pattern = ".tif$", recursive = TRUE) %>%
stack() %>%
crop(bhrd)
tasmax <- list.files("./big/worldclim/wc2.1_10m_tmax/", full.names = TRUE, pattern = ".tif$", recursive = TRUE) %>%
stack() %>%
crop(bhrd)
tasmin <- list.files("./big/worldclim/wc2.1_10m_tmin/", full.names = TRUE, pattern = ".tif$", recursive = TRUE) %>%
stack() %>%
crop(bhrd)
# save as .RData objects (take less disk space than raster files)
save(pr, file = "./outputs/1_worldclim_pr.RData")
save(tasmax, file = "./outputs/1_worldclim_tasmax.RData")
save(tasmin, file = "./outputs/1_worldclim_tasmin.RData")
|
8f546e4a915e9106689e96e4adeb11d4ed72270f
|
9a430b05c1e8cd124be0d0323b796d5527bc605c
|
/wsim.distributions/R/find_cdf.R
|
a5b7de01136ac2d68af4111a4459be2a5e45e767
|
[
"Apache-2.0"
] |
permissive
|
isciences/wsim
|
20bd8c83c588624f5ebd8f61ee5d9d8b5c1261e6
|
a690138d84872dcd853d2248aebe5c05987487c2
|
refs/heads/master
| 2023-08-22T15:56:46.936967
| 2023-06-07T16:35:16
| 2023-06-07T16:35:16
| 135,628,518
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 931
|
r
|
find_cdf.R
|
# Copyright (c) 2018 ISciences, LLC.
# All rights reserved.
#
# WSIM is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. You may
# obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Lookup a CDF by distribution name
#'
#' Maps a distribution's short name to its cumulative distribution
#' function.  Currently only the generalized extreme value ("gev")
#' distribution is supported.
#'
#' @param distribution name of a statistical distribution (e.g. "gev")
#' @return the cumulative distribution function for the named
#'   distribution; signals an error (via \code{stopifnot}) when the
#'   distribution is not recognized
#' @export
find_cdf <- function(distribution) {
dist <- switch(distribution,
gev=lmom::cdfgev,
NULL)
stopifnot(!is.null(dist))
return(dist)
}
|
fb8abb0fd050500c318ceef2722879eaeed17208
|
def66edebf317dd925261ee343655b06681e7490
|
/man/gg_hide_X_axis.Rd
|
48e83c77e16d321d513fc84220c18f3dccf3d2bf
|
[
"MIT"
] |
permissive
|
terminological/ggrrr
|
5e0a925fc858c7b7e020372b8dd6be29688bef1c
|
82ebcec9c1f71a8b49d733302cd6c6e72d6d5a57
|
refs/heads/main
| 2023-04-06T23:56:58.090401
| 2023-03-28T17:21:48
| 2023-03-28T17:21:48
| 489,738,724
| 0
| 0
|
MIT
| 2022-06-16T14:18:41
| 2022-05-07T17:24:53
|
R
|
UTF-8
|
R
| false
| true
| 259
|
rd
|
gg_hide_X_axis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot-utils.R
\name{gg_hide_X_axis}
\alias{gg_hide_X_axis}
\title{Hide the x axis of a plot}
\usage{
gg_hide_X_axis()
}
\value{
a theme
}
\description{
Hide the x axis of a plot
}
|
97491cc074e7fcf8b6c8f3b41aa2535e4b17a2c7
|
d25ffe6711f9b621f5cf3d9e0daed2ff5dba5d8a
|
/R/rp_sample_parallel.R
|
bf3322be406f6109f9f02d70ce6f01c750acf1d5
|
[] |
no_license
|
bplloyd/CoreHF
|
0313e782b3f199026e3e8db7273ce9e8bef3f78a
|
2aae9c6817db0c3168d104d8654d2b180e3e6ce6
|
refs/heads/master
| 2021-01-21T11:24:05.395756
| 2017-07-20T14:01:11
| 2017-07-20T14:01:11
| 91,341,924
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,664
|
r
|
rp_sample_parallel.R
|
# Generate `permutations` random portfolio weight vectors in parallel.
#
# Row 1 of the result is the portfolio's seed weights, row 2 the equal-weight
# portfolio, and the remaining rows are random portfolios produced by
# PortfolioAnalytics::randomize_portfolio() on a PSOCK cluster.  Duplicate
# rows are dropped before returning, so the result may have fewer than
# `permutations` rows.
#
# Args:
#   portfolio:        a PortfolioAnalytics portfolio object (must carry $assets)
#   permutations:     total number of candidate rows (default 100000)
#   max_permutations: passed through to randomize_portfolio (default 200)
rp_sample_parallel <- function(portfolio, permutations = NULL, max_permutations = NULL) {
  if (is.null(max_permutations)) max_permutations <- 200
  if (is.null(permutations)) permutations <- 100000
  # Rows 1 and 2 are reserved for the seed and equal-weight portfolios.
  stopifnot(permutations >= 3)
  ncore <- parallel::detectCores()
  cl <- parallel::makeCluster(ncore)
  # BUG FIX: guarantee the cluster is torn down even if an error occurs below;
  # previously an error mid-function leaked the worker processes.
  on.exit(parallel::stopCluster(cl), add = TRUE)
  # Worker-side wrapper around randomize_portfolio().
  callRP <- function(portfolio, max_permutations) {
    PortfolioAnalytics::randomize_portfolio(portfolio, max_permutations)
  }
  # Export everything the workers need from an explicit environment
  # (the original relied on clusterExport falling back to the enclosing
  # frame to find callRP).
  env <- new.env()
  assign("portfolio", portfolio, envir = env)
  assign("permutations", permutations, envir = env)
  assign("max_permutations", max_permutations, envir = env)
  assign("callRP", callRP, envir = env)
  parallel::clusterEvalQ(cl = cl, {
    library(PortfolioAnalytics)
    NULL
  })
  parallel::clusterExport(cl = cl,
                          varlist = c("portfolio", "max_permutations", "permutations", "callRP"),
                          envir = env)
  seed <- portfolio$assets
  result <- matrix(nrow = permutations, ncol = length(seed))
  result[1, ] <- seed
  result[2, ] <- rep(1 / length(seed), length(seed))
  result[3:permutations, ] <- t(parallel::parSapply(
    cl = cl,
    X = 3:permutations,
    FUN = function(j) {
      callRP(portfolio = portfolio, max_permutations = max_permutations)
    }
  ))
  colnames(result) <- names(seed)
  unique(result)
}
|
935ae46080c8e4450987201adcc223d0b2ac6d1a
|
39b6b094b248c1f423539052ea81bb40ec37ff00
|
/plotCalibrationFits.R
|
3971f9bc3f7cbc8294bba9557ba94628bea1a709
|
[
"MIT"
] |
permissive
|
k-wheeler/coralReef
|
6eb187fe15d1362f8aaa8c197f01fd6f0890605e
|
374348838ba2710607717527b87b900402ad04de
|
refs/heads/master
| 2020-04-26T05:04:54.203518
| 2019-05-06T21:58:46
| 2019-05-06T21:58:46
| 173,322,229
| 0
| 0
|
MIT
| 2019-03-01T15:11:41
| 2019-03-01T15:11:40
| null |
UTF-8
|
R
| false
| false
| 680
|
r
|
plotCalibrationFits.R
|
##' Plots the calibration fits
##'
##' For each of the five Florida Keys regions, plots the observed percent
##' bleached over time together with the 95% credible envelope and median
##' of the latent state x[r,j] taken from the JAGS output.
##'
##' @param out.mat JAGS output in matrix form; must contain columns named
##'   "x[r,j]" for region r and time step j
##' @param dat list with element y, a matrix of observed percent bleached
##'   with one row per region -- TODO confirm shape against the caller
##' @param years vector of years matching the columns of dat$y
##' @export
plotCalibrationFits <- function(out.mat,dat,years){
regions <- c("Lower Keys","Middle Keys","Upper Keys","Biscayne Bay","Dry Tortugas")
for(r in 1:5){
# Accumulate the 2.5%/50%/97.5% posterior quantiles of x[r,j] per time step.
xCIs <- matrix(nrow=3,ncol=0)
# NOTE(review): assumes the x[...] columns of out.mat split evenly across
# the 5 regions -- verify against the JAGS model output.
for(j in 1:(ncol(out.mat)/5)){
colName <- paste("x[",r,",",j,"]",sep="")
x <- out.mat[,colName]
xCIs <- cbind(xCIs,quantile(x,c(0.025,0.5,0.975)))
}
plot(years,dat$y[r,],pch=20,ylab="Percent Bleached",xlab="Time (year)",main=regions[r])
# ciEnvelope() is an external helper (defined elsewhere) that shades the band.
ciEnvelope(years,xCIs[1,],xCIs[3,],col="lightblue")
lines(years,xCIs[2,])
# Re-draw the observations on top of the shaded envelope.
points(years,dat$y[r,],pch=20)
}
}
|
dcc47c21be8aa1261b0e2605e5f94dfb416f284b
|
47aa1badb4a1b01b634ab6fa0e2c18050b4706d2
|
/scripts/IRkernel.R
|
55bbad8f26506327205e18f5364fdb313b75d6d9
|
[] |
no_license
|
bryanpaget/DataScienceWorkstation
|
e016ac7e13b900c4e2fc39e2f2d5cc07a5de50bb
|
64130e0417dfd7cb7935c401ea3ecf1da3fe5d37
|
refs/heads/master
| 2023-01-02T04:56:18.912286
| 2020-10-16T03:25:34
| 2020-10-16T03:25:34
| 299,076,651
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 109
|
r
|
IRkernel.R
|
# Register the IRkernel Jupyter kernel for the current R installation.
# Only install the package when it is missing, so re-running the script
# does not re-download it every time.
if (!requireNamespace("IRkernel", quietly = TRUE)) {
  install.packages('IRkernel')
}
IRkernel::installspec() # to register the kernel in the current R installation
|
76d77f24fa025350327e6ab37269e50e018b630e
|
4863435653da8649080259a7bb151c436782be6d
|
/style.R
|
f00b80a812f8df94ef98e7329b27732c58f95631
|
[] |
no_license
|
Carnuntum/agree
|
842e5e8898deb3fae7775e03edcff7cb95aa0f5e
|
2ca81550cf8512075515c62d08b96da746d2f1ce
|
refs/heads/master
| 2023-05-30T04:00:05.889149
| 2021-06-23T18:15:08
| 2021-06-23T18:15:08
| 288,434,397
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,159
|
r
|
style.R
|
# Shared shiny::tags$style() CSS snippets for the app's light theme.
# Each constant is a <style> tag that can be injected into the UI.

# Navbar: 1s colour transition, dim grey on hover.
navbarCol <- tags$style(HTML(
'.skin-blue .main-header .navbar {transition:1s}
.skin-blue .main-header .navbar:hover {background-color: dimgrey}'
))
# Main content background.
# NOTE(review): 'dimwhite' is not a standard CSS colour name, so browsers
# will ignore this rule -- probably meant 'white' or 'whitesmoke'; confirm.
bodyCol <- tags$style(HTML(
'.content-wrapper {background-color: dimwhite;}'))
# Styling for tippy.js drop-down tooltips (translucent theme).
dropMenuStyle <- tags$style(HTML(
'.tippy-tooltip.translucent-theme {background-color: #3c8dbc !important;}
.tippy-tooltip.translucent-theme[data-placement^="left-start"] > .tippy-arrow {border-left-color: #3c8dbc;}
.tippy-tooltip.translucent-theme[data-placement^="bottom"] > .tippy-arrow {border-bottom-color: #3c8dbc;}
.tippy-tooltip .table {background-color: #3c8dbc !important; color: white;}
.tippy-tooltip tr:hover {background-color: #51BFFF !important;}
'
))
# shinydashboard box: white background and borders.
boxCol <- tags$style(HTML(
'.box.box{
border-style: solid;
border-bottom-color:white;
border-left-color:white;
border-right-color:white;
border-top-color:white;
background:white
}'
))
# DT/DataTables widget: light colour scheme (table body, search/length
# controls, and pagination buttons).
tabCol <- tags$style(HTML(
".datatables.html-widget.html-widget-output.shiny-bound-output {
background-color: white;
color: black;
}
table.dataTable.display tbody tr.odd {
background-color: white;
}
table.dataTable.display tbody tr.even {
background-color: white;
}
table.dataTable.display tbody tr:hover {
background-color: lightgrey;
}
.dataTables_wrapper .dataTables_length, .dataTables_wrapper .dataTables_filter, .dataTables_wrapper .dataTables_info, .dataTables_wrapper .dataTables_paginate, .dataTables_wrapper .select {
color: black;
border: none;
}
.dataTables_wrapper select, .dataTables_wrapper input {
background-color: lightgrey !important;
border: none;
color: black !important;
}
.dataTables_wrapper .dataTables_paginate .paginate_button.disabled, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:hover, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:active {
color: black !important;
border: none;
}
.dataTables_wrapper .dataTables_paginate .paginate_button {
color: black !important;
border: none;
}
.dataTables_wrapper .dataTables_paginate .paginate_button:hover {
color: white;
background: dimgrey;
border: none;
}
.dataTables_wrapper .dataTables_paginate .paginate_button.current, .dataTables_wrapper .dataTables_paginate .paginate_button.current:hover {
color: black;
background: lightgrey;
border: none;
}
table.dataTable thead th, table.dataTable thead td {
border: none;
}
table.dataTable.no-footer {
border-bottom-color: #404040;
}"
))
# Global plot colours used by the histogram renderers (light defaults);
# assigned with <<- so theme-switching code elsewhere can overwrite them.
histPlotCol <<- 'white'
histPlotText <<- 'black'
# Render the light colour theme: writes a list of <style> tags into the
# `dark` UI output, restoring the light palette for navbar, body,
# tooltips, boxes and DataTables.  `input` is unused but kept for a
# signature symmetric with mainColDark().
mainColLight <- function(input, output) {
output$dark <- renderUI({
list(
navbarCol <- tags$style(HTML(
'.skin-blue .main-header .navbar {transition:1s}
.skin-blue .main-header .navbar:hover {background-color: dimgrey}'
)),
# NOTE(review): 'dimwhite' is not a valid CSS colour; browsers ignore it.
bodyCol <- tags$style(HTML(
'.content-wrapper {background-color: dimwhite;}')),
dropMenuStyle <- tags$style(HTML(
'.tippy-tooltip.translucent-theme {background-color: #3c8dbc !important;}
.tippy-tooltip.translucent-theme[data-placement^="left-start"] > .tippy-arrow {border-left-color: #3c8dbc;}
.tippy-tooltip.translucent-theme[data-placement^="bottom"] > .tippy-arrow {border-bottom-color: #3c8dbc;}
.tippy-tooltip .table {background-color: #3c8dbc !important; color: white;}
.tippy-tooltip tr:hover {background-color: #51BFFF !important;}
'
)),
boxCol <- tags$style(HTML(
'.box.box{
border-style: solid;
border-bottom-color:white;
border-left-color:white;
border-right-color:white;
border-top-color:white;
background:white
}'
)),
# DataTables light scheme (slightly different control backgrounds than
# the module-level tabCol constant).
tabCol <- tags$style(HTML(
".datatables.html-widget.html-widget-output.shiny-bound-output {
background-color: white;
color: black;
}
table.dataTable.display tbody tr.odd {
background-color: white;
}
table.dataTable.display tbody tr.even {
background-color: white;
}
table.dataTable.display tbody tr:hover {
background-color: lightgrey;
}
.dataTables_wrapper .dataTables_length, .dataTables_wrapper .dataTables_filter, .dataTables_wrapper .dataTables_info, .dataTables_wrapper .dataTables_paginate, .dataTables_wrapper .select {
color: black;
border: none;
}
.dataTables_wrapper select, .dataTables_wrapper input {
background-color: #3B3B3B24 !important;
border: none;
color: black !important;
}
.dataTables_wrapper select:hover, {
background-color: #3B3B3B24 !important;
border: none;
color: black !important;
}
.dataTables_wrapper .dataTables_paginate .paginate_button.disabled, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:hover, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:active {
color: black !important;
border: none;
}
.dataTables_wrapper .dataTables_paginate .paginate_button {
color: black !important;
border: none;
}
.dataTables_wrapper .dataTables_paginate .paginate_button:hover {
color: white;
background: dimgrey;
border: none;
}
.dataTables_wrapper .dataTables_paginate .paginate_button.current, .dataTables_wrapper .dataTables_paginate .paginate_button.current:hover {
color: black;
background: lightgrey;
border: none;
}
table.dataTable thead th, table.dataTable thead td {
border: none;
}
table.dataTable.no-footer {
border-bottom-color: #404040;
}"
))
)
})
}
# Render the dark colour theme: writes a list of <style> tags into the
# `dark` UI output, switching body, boxes, text, and DataTables to a dark
# palette.  `input` is unused but kept for a signature symmetric with
# mainColLight().
mainColDark <- function(input, output) {
output$dark <- renderUI({
list(
dropMenuStyle <- tags$style(HTML(
'.tippy-tooltip.translucent-theme {background-color: #3c8dbc !important;}
.tippy-tooltip.translucent-theme[data-placement^="left-start"] > .tippy-arrow {border-left-color: #3c8dbc;}
.tippy-tooltip.translucent-theme[data-placement^="bottom"] > .tippy-arrow {border-bottom-color: #3c8dbc;}
.tippy-tooltip .table {background-color: #3c8dbc !important; color: white;}
.tippy-tooltip tr:hover {background-color: #51BFFF !important;}
'
)),
bodyCol <- tags$style(
'.content-wrapper {background-color: #2E2E2E;}'),
boxCol <- tags$style(
'
.box.box{
border-style: solid;
border-bottom-color:#404040;
border-left-color:#404040;
border-right-color:#404040;
border-top-color:white;
background:#404040;
color: white;
}
.shiny-input-container{
color:white;}'),
# Force all headings, paragraphs and tables to white text.
tags$style('p {color:white;}
h1 {color:white;}
h2 {color:white;}
h3 {color:white;}
h4 {color:white;}
h5 {color:white;}
table {color:white;}
tr:hover {background-color:dimgrey;}'),
# DataTables dark scheme.
tabCol <- tags$style(
HTML(
".datatables.html-widget.html-widget-output.shiny-bound-output {
background-color: #404040;
color: white;
}
table.dataTable.display tbody tr.odd {
background-color: #404040;
}
table.dataTable.display tbody tr.even {
background-color: #404040;
}
table.dataTable.display tbody tr:hover {
background-color: dimgrey;
}
.dataTables_wrapper .dataTables_length, .dataTables_wrapper .dataTables_filter, .dataTables_wrapper .dataTables_info, .dataTables_wrapper .dataTables_paginate, .dataTables_wrapper .select {
color: white;
border: none;
}
.dataTables_wrapper select, .dataTables_wrapper input {
background-color: dimgrey !important;
border: none;
color: white !important;
}
.dataTables_wrapper .dataTables_paginate .paginate_button.disabled, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:hover, .dataTables_wrapper .dataTables_paginate .paginate_button.disabled:active {
color: white !important;
border: none;
}
.dataTables_wrapper .dataTables_paginate .paginate_button {
color: white !important;
border: none;
}
.dataTables_wrapper .dataTables_paginate .paginate_button:hover {
color: white;
background: dimgrey;
border: none;
}
.dataTables_wrapper .dataTables_paginate .paginate_button.current, .dataTables_wrapper .dataTables_paginate .paginate_button.current:hover {
color: black;
background: white;
border: none;
}
table.dataTable thead th, table.dataTable thead td {
border: none;
}
table.dataTable.no-footer {
border-bottom-color: #404040;
}"
)
))
})
}
# btn_hover <- function(input, output) {
# output$bttnCol <- renderUI({
# if (input$changCol %% 2 != 0) {
# bttnCol <- tags$style(
# '.btn {transition-duration:0.4s}
# .btn:hover {
# background-color:darkcyan;
# color:white}'
# )
# }
# })
# }
# JavaScript snippet: registers a custom Shiny message handler that writes
# upload-progress text into the #ordinalInput_progress element client-side.
js_upload_msg_ordinalInput <- "
Shiny.addCustomMessageHandler('upload_msg', function(msg) {
var target = $('#ordinalInput_progress').children()[0];
target.innerHTML = msg;
});
"
# Inline CSS fragments reused across the UI.
measure_title_style <- 'text-align: center; padding: 0;'
centerText <- 'text-align: center;'
|
d7b7b8dfa73990222cb0bbfa24906a54a2e084f2
|
fb4f18c9816cff08debc4d7c4d0ca035920952ea
|
/Ex_4/functions/obj_kernel.R
|
c740acc54b8557ad63aa352cbe14b988ba784774
|
[] |
no_license
|
marcohv/exercises_ts_dtu
|
cdd880768df7bbb7d12a92feb5738249777fe6e7
|
cdf5818a51447b4adce742024b68e387901ea39a
|
refs/heads/master
| 2020-03-27T19:39:13.452140
| 2018-09-18T13:43:11
| 2018-09-18T13:43:11
| 147,003,352
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,738
|
r
|
obj_kernel.R
|
## Wrap the leave-one-out in a function which returns the score
##
## Objective function for tuning kernel bandwidths of a locally weighted
## (Epanechnikov-kernel) regression used for k-step-ahead forecasting.
## For each evaluation index i, it fits a weighted lm() using only data up
## to i-k (no future leakage) and predicts point i; the RMSE of these
## out-of-sample predictions is the score.
##
## Arguments:
##   h                  - named vector of bandwidths, one per kernel variable
##   frml               - model formula as a string; first variable is the response
##   data               - data.frame with all variables referenced by frml and h
##   k                  - forecast horizon in steps (default `k = k` picks up a
##                        global k -- NOTE(review): fragile, confirm intent)
##   ieval              - row indices to evaluate (default: all rows)
##   n_min              - minimum number of usable observations for a fit
##   local_second_order - if TRUE, append squared input terms to data and frml
##   return_yhat        - if TRUE return the prediction vector instead of the score
##
## Depends on external helpers defined elsewhere: pst(), epanechnikov(), rmse().
obj_kernel <- function(h, frml, data, k = k, ieval = 1:nrow(data), n_min=10, local_second_order = TRUE, return_yhat = FALSE){
## Should second order terms be added to the data and frml
if(local_second_order){
## Input variables (all formula variables except the response)
nms <- all.vars(as.formula(frml))[-1]
## Squared copies of every input column
tmp <- sapply(nms, function(nm){
data[ ,nm]^2
})
colnames(tmp) <- pst(nms,"2")
data <- cbind(data,tmp)
frml <- paste(frml,"+",pst(nms,"2",collapse=" + "))
}
## Keep the output in yhat, only for ieval points
yhat <- sapply(ieval, function(i){
## check if there are data to fit
if((i-k) < n_min){ return(NA) }
if(any(is.na(data[i,names(h)]))){ return(NA) }
## Only use values available k steps behind (otherwise future values would be used)
ipast <- 1:(i-k)
## Calculate the weights for the variables in h
tmp <- sapply(names(h), function(nm){
epanechnikov(data[i,nm], data[ipast,nm], h=h[nm])
})
## Product kernel: multiply per-variable weights row-wise
w <- apply(tmp,1,prod)
if(sum(!is.na(w)) < n_min){ return(NA) }
## Only everything before the i'th observation will be included in the fit
fit <- lm(as.formula(frml), data[ipast, ], weights = w)
## Now predict for the i point (for the k'th horizon)
predict(fit, newdata = data[i, ])
})
##
if(return_yhat){
return(yhat)
}else{
## The score value: RMSE of the out-of-sample residuals for the response
nm <- all.vars(as.formula(frml))[1]
val <- rmse(data[ieval,nm] - yhat)
## Trace the evaluated bandwidths and their score (useful during optim())
print(pst("h = c(",pst(names(h)," = ",h,collapse=", "),"), val = ",val))
##
return(val)
}
}
|
4039e1c2ef6407bee1789d34b68983087d7d27d2
|
877cca47bedf6725dda3fd99a97a9cc60f030beb
|
/scripts/dataframe_calendar.R
|
db5cd1ed100515f67e6d4056afac7d9f6cc674bf
|
[
"MIT"
] |
permissive
|
info-201a-au20/airbnb-covid-analysis
|
7a55bb8ec4851e0e08c70bb5b5b51152a012f65e
|
6985ec2d54ba740a731a37d912c6112b6a758858
|
refs/heads/master
| 2023-04-04T19:28:30.521964
| 2021-03-18T00:06:25
| 2021-03-18T00:06:25
| 312,765,685
| 0
| 1
|
MIT
| 2021-04-18T22:11:01
| 2020-11-14T07:10:19
|
R
|
UTF-8
|
R
| false
| false
| 229
|
r
|
dataframe_calendar.R
|
# will use next analysis
# seattle_calendar <- read.csv("../data/Seattle_2020_October/calendar.csv.gz", stringsAsFactors = FALSE)
# tokyo_calendar <- read.csv("data/Tokyo_2020_October/calendar.csv.gz", stringsAsFactors = FALSE)
|
b6fcd06ad289d84e5da6512d82d581f39e7baeed
|
8b23d55e966e7bc0b636374296d0077aa28a7d17
|
/Plots_for_Paper_vmat/scripts2/scriptforEDrepeat.R
|
b9829767bc9632d32075045b81195bdaadea5f6b
|
[] |
no_license
|
pcarbo/gtexresults_matrixash
|
74ab9f56a31fe4125ae5c368f3d21f638db0828e
|
4e9b14e836054178942c9d58c6405cc2ec0afbc8
|
refs/heads/master
| 2021-01-22T20:26:33.891110
| 2017-03-23T18:22:28
| 2017-03-23T18:22:28
| 85,109,000
| 0
| 0
| null | 2017-03-15T18:45:27
| 2017-03-15T18:45:27
| null |
UTF-8
|
R
| false
| false
| 1,282
|
r
|
scriptforEDrepeat.R
|
# Fit a K-component mixture of multivariate normals to GTEx max-z statistics
# with Extreme Deconvolution, starting from SFA-derived covariance guesses,
# and check the result against the packaged deconvolution.em.with.bovy().
library('mashr')
library('ExtremeDeconvolution')
# J x R matrix of z statistics (rows = SNPs, columns = 44 tissues)
t.stat=read.table("~/jul3/maxz.txt")
# NOTE(review): s.j and permsnp below are never used in this script.
s.j=matrix(rep(1,ncol(t.stat)*nrow(t.stat)),ncol=ncol(t.stat),nrow=nrow(t.stat))
# R x R residual covariance, replicated once per observation for ED's ycovar
v.mat=readRDS("~/test.train/vhat.RDS")
v.j=list()
for(i in 1:nrow(t.stat)){v.j[[i]]=v.mat}
# Component means fixed at zero (fixmean = TRUE below)
mean.mat=matrix(rep(0,ncol(t.stat)*nrow(t.stat)),ncol=ncol(t.stat),nrow=nrow(t.stat))
# SFA loadings and factors used to seed the component covariances
lambda.mat=as.matrix(read.table("~//jul3/zsfa_lambda.out"))
factor.mat=as.matrix(read.table("~//jul3/zsfa_F.out"))
permsnp=10
# K mixture components, P factors, R conditions (tissues)
K=3;P=3;R=44
init.cov=init.covmat(t.stat=t.stat,factor.mat = factor.mat,lambda.mat = lambda.mat,K=K,P=P)
init.cov.list=list()
for(i in 1:K){init.cov.list[[i]]=init.cov[i,,]}
head(init.cov.list)
# Assemble the extreme_deconvolution() inputs
ydata= t.stat
xamp= rep(1/K,K)
xcovar= init.cov.list
fixmean= TRUE
ycovar= v.j
xmean= mean.mat
# Identity projection for every observation
projection= list();for(l in 1:nrow(t.stat)){projection[[l]]=diag(1,R)}
# NOTE(review): fixmean=T here vs fixmean=TRUE above -- prefer TRUE (T is reassignable).
e=extreme_deconvolution(ydata=ydata,ycovar=ycovar,xamp=xamp,xmean=xmean,xcovar=init.cov.list,fixmean=T,projection=projection)
# Collect the fitted component covariances and mixture weights
true.covs=array(dim=c(K,R,R))
for(i in 1:K){true.covs[i,,]=e$xcovar[[i]]}
# NOTE(review): `pi` shadows base::pi in this session.
pi=e$xamp
max.step=list(true.covs=true.covs,pi=pi)
saveRDS(max.step,paste0("max.steps303",P,".rds"))
# Cross-check: the packaged wrapper should reproduce the manual ED call above
ms=deconvolution.em.with.bovy(t.stat,factor.mat,v.j,lambda.mat,K=3,P=3)
saveRDS(ms,"mswithfunction.rds")
all.equal(ms,max.step)
|
3b8fa575b00b86c38f7eeaa8cf0213db49de1854
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/DWreg/examples/dw.meanvar.Rd.R
|
806bc05f3f5eee4c7213d841eec518f6972dded7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 283
|
r
|
dw.meanvar.Rd.R
|
library(DWreg)
### Name: dw.meanvar
### Title: Mean and Variance of Discrete Weibull
### Aliases: dw.meanvar
### Keywords: dw.meanvar
### ** Examples
# Theoretical mean and variance of a discrete Weibull(q = 0.9, beta = 1.5)
dw.meanvar(q=0.9,beta=1.5)
#compare with sample mean/variance from a random sample
x<-rdw(1000,q=0.9,beta=1.5)
mean(x)
var(x)
|
671f61cc379b90cd607286db92040d9315d9a922
|
ba945c14ead9387327f06f36fd744b4be6c0fdba
|
/Sun/Sun_O5.R
|
56271c2627445c9ff13da309d6ad0eb370e081bd
|
[] |
no_license
|
wissebarkhof/hpc-matrix-multiplication
|
11b63543a4be259b65cdbe34a8044f7ded52cccb
|
760945a3a5c0731873cea5a13fe505e5f3a89a77
|
refs/heads/master
| 2020-04-15T16:29:01.721027
| 2019-01-11T14:43:05
| 2019-01-11T14:43:05
| 164,838,732
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,396
|
r
|
Sun_O5.R
|
# Load Sun O5 benchmark timing data and assemble three analysis data sets:
#   Sun_O5_data_loop    - the six loop-order permutations (kmn, knm, ...)
#   Sun_O5_data_lib_nat - library vs. native implementations
#   Sun_O5_data_blk     - blocked implementation (keeps its extra column)
# NOTE(review): setwd() in a script is fragile; kept for compatibility with
# the original workflow.
setwd("~/Google Drive/10.semester/02614 HPC/Assignment 1/GIT/Data")

# Read one variant's timing file from DataSunO5/ and tag every row with the
# variant name in a TYPE column. Replaces nine copy-pasted read.table blocks.
read_sun_o5 <- function(tag) {
  d <- read.table(file.path("DataSunO5", paste0("SunO5", tag, ".dat")),
                  quote = "\"", comment.char = "")
  d$TYPE <- tag
  d
}

# Six loop-order permutations, stacked in the original order.
loop_tags <- c("kmn", "knm", "mkn", "mnk", "nkm", "nmk")
Sun_O5_data_loop <- do.call(rbind, lapply(loop_tags, read_sun_o5))
Sun_O5_data_loop <- Sun_O5_data_loop[c(1:2, 5)]

# Library vs. native implementations.
Sun_O5_data_lib_nat <- rbind(read_sun_o5("lib"), read_sun_o5("nat"))
Sun_O5_data_lib_nat <- Sun_O5_data_lib_nat[c(1:2, 5)]

# Blocked implementation keeps an extra (block size) column.
Sun_O5_data_blk <- read_sun_o5("blk")[c(1:2, 5:6)]

library(gdata)
# Drop every intermediate object, keeping only the three assembled data sets
# (same final workspace as the original script).
keep(Sun_O5_data_blk, Sun_O5_data_lib_nat, Sun_O5_data_loop, sure = TRUE)
|
8d683bf45c96b507523ebaf11ee70cb986898b8b
|
2ee70a959208a50de0f96bef772b6f5f025b67c4
|
/CIS8695_NeuralNet_Basic.R
|
062a1a67c0dbf92ea4d86dc814c5d012100d97f4
|
[] |
no_license
|
DarshikaKesarwani/Project
|
16775923171f2705fec7aa0f0486409829d113a9
|
f669a7c2fbd7058cc12705ada6833938bcd2732a
|
refs/heads/master
| 2020-04-27T15:42:09.796593
| 2019-03-31T06:06:33
| 2019-03-31T06:06:33
| 174,456,361
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,040
|
r
|
CIS8695_NeuralNet_Basic.R
|
# Neural-network demo: predict accident severity (MAX_SEV_IR) from a few
# predictors using the neuralnet package; evaluate with caret confusion
# matrices and visualize with NeuralNetTools.
# NOTE(review): wiping the workspace and calling setwd()/install.packages()
# on every run is classroom-script style; avoid in reusable code.
rm(list = ls())
setwd("C:/Users/lxue5/Dropbox/2019Sp/CIS8695/5 NN AI")
install.packages ("neuralnet")
library(neuralnet)
library(nnet)   # class.ind() for one-hot (dummy) encoding
library(caret)  # confusionMatrix()
accidents.df <- read.csv("CSV_Accidents_new.csv")
# selected variables
vars <- c("ALCHL_I", "PROFIL_I_R", "VEH_INVL")
# partition the data
# Reproducible 60/40 train/validation split by row names.
set.seed(2)
training<-sample(row.names(accidents.df), dim(accidents.df)[1]*0.6)
validation<-setdiff(row.names(accidents.df), training)
# when y has multiple classes - need to dummify
# Predictors + one-hot SUR_COND (levels 1,2,3,4,9) + one-hot MAX_SEV_IR (0,1).
trainData <- cbind(accidents.df[training,c(vars)],
class.ind(accidents.df[training,]$SUR_COND),
class.ind(accidents.df[training,]$MAX_SEV_IR))
names(trainData) <- c(vars,
paste("SUR_COND_", c(1, 2, 3, 4, 9), sep=""), paste("MAX_SEV_IR_", c(0, 1), sep=""))
validData <- cbind(accidents.df[validation,c(vars)],
class.ind(accidents.df[validation,]$SUR_COND),
class.ind(accidents.df[validation,]$MAX_SEV_IR))
names(validData) <- c(vars, paste("SUR_COND_", c(1, 2, 3, 4, 9), sep=""), paste("MAX_SEV_IR_", c(0, 1), sep=""))
# run nn with 2 hidden nodes
# use hidden= with a vector of integers specifying number of hidden nodes in each layer
nn <- neuralnet(MAX_SEV_IR_0 + MAX_SEV_IR_1 ~
ALCHL_I + PROFIL_I_R + VEH_INVL + SUR_COND_1 + SUR_COND_2
+ SUR_COND_3 + SUR_COND_4, data = trainData, hidden = 2)
# Training-set predictions: -c(8:11) drops SUR_COND_9 (not in the model)
# and the two target columns; class = index of the strongest output unit - 1.
training.prediction <- compute(nn, trainData[,-c(8:11)])
training.class <- apply(training.prediction$net.result,1,which.max)-1
confusionMatrix(as.factor(training.class), as.factor(accidents.df[training,]$MAX_SEV_IR))
# Same evaluation on the validation set.
validation.prediction <- compute(nn, validData[,-c(8:11)])
validation.class <-apply(validation.prediction$net.result,1,which.max)-1
confusionMatrix(as.factor(validation.class), as.factor(accidents.df[validation,]$MAX_SEV_IR))
install.packages("NeuralNetTools")
library(NeuralNetTools)
# Plot neural net
par(mfcol=c(1,1))
plotnet(nn)
# get the neural weights
neuralweights(nn)
# Plot the importance
olden(nn)
|
c3b075b5cc8b3d59a311b10d3f384f4099338121
|
a980b2fa82b3c6ba0100457402c84d500923215a
|
/tests/testthat/test-wait.R
|
c841910babdcc19901261427e327b0bea75d4efa
|
[
"MIT"
] |
permissive
|
RLesur/crrri
|
b5bc761f9e0571012f5ef36e89a69503ec3e7cc7
|
69c54e657f117b9e30b6a5475604e1e0c6584150
|
refs/heads/master
| 2022-09-09T06:55:11.776063
| 2021-03-11T14:48:11
| 2021-03-11T14:48:11
| 157,903,442
| 167
| 13
|
NOASSERTION
| 2022-08-22T15:48:30
| 2018-11-16T18:00:50
|
R
|
UTF-8
|
R
| false
| false
| 1,206
|
r
|
test-wait.R
|
context("test-wait")

test_that("wait(): both pipes work with a promise as an argument", {
  expected <- runif(1)
  pr <- promises::promise_resolve(expected)

  # wait() must be pipeable with magrittr's %>% ...
  piped_magrittr <- pr %>% wait(0.1)
  expect_identical(hold(piped_magrittr), expected)

  # ... and with the promise pipe %...>%.
  piped_promises <- pr %...>% wait(0.1)
  expect_identical(hold(piped_promises), expected)
})

test_that("wait() also works with a non-promise object", {
  expected <- runif(1)
  res <- wait(expected, 0.1)
  expect_is(res, "promise")
  expect_identical(hold(res), expected)
})

test_that("timeout() works with a non promise argument", {
  expected <- runif(1)
  res <- timeout(x = expected, delay = 0.1)
  expect_is(res, "promise")
  # The rejection message is expected to mention the delay value.
  expect_error(hold(res), regexp = "0\\.1")
})

test_that("timeout() returns the value of the promise when it is fulfilled before the delay expires", {
  expected <- runif(1)
  res <- timeout(wait(x = expected, delay = 0.1), delay = 10)
  expect_is(res, "promise")
  expect_identical(hold(res), expected)
})

test_that("timeout() returns a promise which is rejected when the delay expires", {
  expected <- runif(1)
  res <- timeout(wait(x = expected, delay = 10), delay = 0.1)
  expect_is(res, "promise")
  expect_error(hold(res), regexp = "0\\.1")
})
|
96518eac783b29e46e093cbcec68342207d41d33
|
4fc2fbdb5adb83ecda830f3054dc019a9e3aba12
|
/R/plot.shapes.R
|
c77fc14f1659e8c6f8013ed008fd1887742713d1
|
[] |
no_license
|
briatte/rigraph
|
a8d520edd5aa727dac3ab4ea92b5efa4dfc7af07
|
7403e8a65bb99f1047b85f62e162a1c34e9d6137
|
refs/heads/dev
| 2021-01-22T11:27:40.977904
| 2017-05-03T16:08:28
| 2017-05-03T16:08:28
| 92,696,135
| 1
| 1
| null | 2017-05-29T00:49:22
| 2017-05-29T00:49:22
| null |
UTF-8
|
R
| false
| false
| 34,442
|
r
|
plot.shapes.R
|
# IGraph R package
# Copyright (C) 2003-2012 Gabor Csardi <csardi.gabor@gmail.com>
# 334 Harvard street, Cambridge, MA 02139 USA
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
#
###################################################################
###################################################################
## API design
##
## A vertex shape is defined by two functions: the clipping function and
## the plotting function.
##
## The clipping function is called to determine where to put the
## arrowhead of a potential (incoming) incident edge. Its signature is
## function(coords, el, params, end=c("both", "from", "to"))
## where the arguments are:
## coords A matrix with one row for each edge, and four columns.
## It contains the coordinates of the end points of all
## edges. The first two columns are the coordinates of the
## first end points (sources, if the graph is directed),
## the last two columns are for the other end points
## (targets if the graph is directed).
## el The edge list itself, with vertex ids.
## params A function object to query plotting parameters.
## end Which end points to calculate. "both" means both,
## "from" means the first end point, "to" the second.
## The clipping function must return the new version of "coords",
## modified according to the vertex sizes/shapes, with proper positions
## for the potential arrow heads. The positions are for the tips of the
## arrows.
##
## The plotting function plots the vertex. Its signature is
## function(coords, v=NULL, params)
## where the arguments are
## coords Two column matrix, the coordinates for the vertices to draw.
## v The vertex ids of the vertices to draw. If NULL, then all
## vertices are drawn.
## params A function object to query plotting parameters.
##
## shapes() - lists all vertex shapes
## shapes(shape) - returns the clipping and plotting functions
## for a given vertex shape
## add_shape() - adds a new vertex shape, the clipping and
## plotting functions must be given, and
## optionally the newly introduced plotting
## parameters. This function can also be used
## to overwrite a given vertex shape.
##
## Examples:
## add_shape("image", clip=image.clip, plot=image.plot,
## parameters=list(filename=NA))
##
## add_shape("triangle", clip=shapes("circle")$clip,
## plot=triangle.plot)
##
## add_shape("polygon", clip=shapes("circle")$clip,
## plot=polygon.plot)
##
###################################################################
#' Various vertex shapes when plotting igraph graphs
#'
#' Starting from version 0.5.1 igraph supports different
#' vertex shapes when plotting graphs.
#'
#' @details
#' In igraph a vertex shape is defined by two functions: 1) provides
#' information about the size of the shape for clipping the edges and 2)
#' plots the shape if requested. These functions are called \dQuote{shape
#' functions} in the rest of this manual page. The first one is the
#' clipping function and the second is the plotting function.
#'
#' The clipping function has the following arguments:
#' \describe{
#' \item{coords}{A matrix with four columns, it contains the
#' coordinates of the vertices for the edge list supplied in the
#' \code{el} argument.}
#' \item{el}{A matrix with two columns, the edges of which some end
#' points will be clipped. It should have the same number of rows as
#' \code{coords}.}
#' \item{params}{This is a function object that can be called to query
#' vertex/edge/plot graphical parameters. The first argument of the
#' function is \dQuote{\code{vertex}}, \dQuote{\code{edge}} or
#' \dQuote{\code{plot}} to decide the type of the parameter, the
#' second is a character string giving the name of the
#' parameter. E.g.
#' \preformatted{
#' params("vertex", "size")
#' }
#' }
#' \item{end}{Character string, it gives which end points will be
#' used. Possible values are \dQuote{\code{both}},
#' \dQuote{\code{from}} and \dQuote{\code{to}}. If
#' \dQuote{\code{from}} the function is expected to clip the
#' first column in the \code{el} edge list, \dQuote{\code{to}}
#' selects the second column, \dQuote{\code{both}} selects both.}
#' }
#'
#' The clipping function should return a matrix
#' with the same number of rows as the \code{el} arguments.
#' If \code{end} is \code{both} then the matrix must have four
#' columns, otherwise two. The matrix contains the modified coordinates,
#' with the clipping applied.
#'
#' The plotting function has the following arguments:
#' \describe{
#' \item{coords}{The coordinates of the vertices, a matrix with two
#' columns.}
#' \item{v}{The ids of the vertices to plot. It should match the number
#' of rows in the \code{coords} argument.}
#' \item{params}{The same as for the clipping function, see above.}
#' }
#'
#' The return value of the plotting function is not used.
#'
#' \code{shapes} can be used to list the names of all installed
#' vertex shapes, by calling it without arguments, or setting the
#' \code{shape} argument to \code{NULL}. If a shape name is given, then
#' the clipping and plotting functions of that shape are returned in a
#' named list.
#'
#' \code{add_shape} can be used to add new vertex shapes to
#' igraph. For this one must give the clipping and plotting functions of
#' the new shape. It is also possible to list the plot/vertex/edge
#' parameters, in the \code{parameters} argument, that the clipping
#' and/or plotting functions can make use of. An example would be a
#' generic regular polygon shape, which can have a parameter for the
#' number of sides.
#'
#' \code{shape_noclip} is a very simple clipping function that the
#' user can use in their own shape definitions. It does no clipping, the
#' edges will be drawn exactly until the listed vertex position
#' coordinates.
#'
#' \code{shape_noplot} is a very simple (and probably not very
#' useful) plotting function, that does not plot anything.
#'
#' @aliases add.vertex.shape igraph.shape.noclip igraph.shape.noplot
#' vertex.shapes igraph.vertex.shapes
#'
#' @param shape Character scalar, name of a vertex shape. If it is
#' \code{NULL} for \code{shapes}, then the names of all defined
#' vertex shapes are returned.
#' @param clip An R function object, the clipping function.
#' @param plot An R function object, the plotting function.
#' @param parameters Named list, additional plot/vertex/edge
#' parameters. The element named define the new parameters, and the
#' elements themselves define their default values.
#' Vertex parameters should have a prefix
#' \sQuote{\code{vertex.}}, edge parameters a prefix
#' \sQuote{\code{edge.}}. Other general plotting parameters should have
#' a prefix \sQuote{\code{plot.}}. See Details below.
#' @param coords,el,params,end,v See parameters of the clipping/plotting
#' functions below.
#' @return \code{shapes} returns a character vector if the
#' \code{shape} argument is \code{NULL}. It returns a named list with
#' entries named \sQuote{clip} and \sQuote{plot}, both of them R
#' functions.
#'
#' \code{add_shape} returns \code{TRUE}, invisibly.
#'
#' \code{shape_noclip} returns the appropriate columns of its
#' \code{coords} argument.
#' @export
#'
#' @examples
#' # all vertex shapes, minus "raster", that might not be available
#' shapes <- setdiff(shapes(), "")
#' g <- make_ring(length(shapes))
#' set.seed(42)
#' plot(g, vertex.shape=shapes, vertex.label=shapes, vertex.label.dist=1,
#' vertex.size=15, vertex.size2=15,
#' vertex.pie=lapply(shapes, function(x) if (x=="pie") 2:6 else 0),
#' vertex.pie.color=list(heat.colors(5)))
#'
#' # add new vertex shape, plot nothing with no clipping
#' add_shape("nil")
#' plot(g, vertex.shape="nil")
#'
#' #################################################################
#' # triangle vertex shape
#' mytriangle <- function(coords, v=NULL, params) {
#' vertex.color <- params("vertex", "color")
#' if (length(vertex.color) != 1 && !is.null(v)) {
#' vertex.color <- vertex.color[v]
#' }
#' vertex.size <- 1/200 * params("vertex", "size")
#' if (length(vertex.size) != 1 && !is.null(v)) {
#' vertex.size <- vertex.size[v]
#' }
#'
#' symbols(x=coords[,1], y=coords[,2], bg=vertex.color,
#' stars=cbind(vertex.size, vertex.size, vertex.size),
#' add=TRUE, inches=FALSE)
#' }
#' # clips as a circle
#' add_shape("triangle", clip=shapes("circle")$clip,
#' plot=mytriangle)
#' plot(g, vertex.shape="triangle", vertex.color=rainbow(vcount(g)),
#' vertex.size=seq(10,20,length=vcount(g)))
#'
#' #################################################################
#' # generic star vertex shape, with a parameter for number of rays
#' mystar <- function(coords, v=NULL, params) {
#' vertex.color <- params("vertex", "color")
#' if (length(vertex.color) != 1 && !is.null(v)) {
#' vertex.color <- vertex.color[v]
#' }
#' vertex.size <- 1/200 * params("vertex", "size")
#' if (length(vertex.size) != 1 && !is.null(v)) {
#' vertex.size <- vertex.size[v]
#' }
#' norays <- params("vertex", "norays")
#' if (length(norays) != 1 && !is.null(v)) {
#' norays <- norays[v]
#' }
#'
#' mapply(coords[,1], coords[,2], vertex.color, vertex.size, norays,
#' FUN=function(x, y, bg, size, nor) {
#' symbols(x=x, y=y, bg=bg,
#' stars=matrix(c(size,size/2), nrow=1, ncol=nor*2),
#' add=TRUE, inches=FALSE)
#' })
#' }
#' # no clipping, edges will be below the vertices anyway
#' add_shape("star", clip=shape_noclip,
#' plot=mystar, parameters=list(vertex.norays=5))
#' plot(g, vertex.shape="star", vertex.color=rainbow(vcount(g)),
#' vertex.size=seq(10,20,length=vcount(g)))
#' plot(g, vertex.shape="star", vertex.color=rainbow(vcount(g)),
#' vertex.size=seq(10,20,length=vcount(g)),
#' vertex.norays=rep(4:8, length=vcount(g)))
#'
#' #################################################################
#' # Pictures as vertices.
#' # Similar musicians from last.fm, we start from an artist and
#' # will query two levels. We will use the XML, png and jpeg packages
#' # for this, so these must be available. Otherwise the example is
#' # skipped
#'
#' loadIfYouCan <- function(pkg) suppressWarnings(do.call(require, list(pkg)))
#'
#' if (loadIfYouCan("XML") && loadIfYouCan("png") &&
#' loadIfYouCan("jpeg")) {
#' url <- paste(sep="",
#' 'http://ws.audioscrobbler.com/',
#' '2.0/?method=artist.getinfo&artist=%s',
#' '&api_key=1784468ada3f544faf9172ee8b99fca3')
#' getartist <- function(artist) {
#' cat("Downloading from last.fm. ... ")
#' txt <- readLines(sprintf(url, URLencode(artist)))
#' xml <- xmlTreeParse(txt, useInternal=TRUE)
#' img <- xpathSApply(xml, "/lfm/artist/image[@@size='medium'][1]",
#' xmlValue)
#' if (img != "") {
#' con <- url(img, open="rb")
#' bin <- readBin(con, what="raw", n=10^6)
#' close(con)
#' if (grepl("\\\\.png$", img)) {
#' rast <- readPNG(bin, native=TRUE)
#' } else if (grepl("\\\\.jpe?g$", img)) {
#' rast <- readJPEG(bin, native=TRUE)
#' } else {
#' rast <- as.raster(matrix())
#' }
#' } else {
#' rast <- as.raster(numeric())
#' }
#' sim <- xpathSApply(xml, "/lfm/artist/similar/artist/name", xmlValue)
#' cat("done.\\n")
#' list(name=artist, image=rast, similar=sim)
#' }
#'
#' ego <- getartist("Placebo")
#' similar <- lapply(ego$similar, getartist)
#'
#' edges1 <- cbind(ego$name, ego$similar)
#' edges2 <- lapply(similar, function(x) cbind(x$name, x$similar))
#' edges3 <- rbind(edges1, do.call(rbind, edges2))
#' edges <- edges3[ edges3[,1] %in% c(ego$name, ego$similar) &
#' edges3[,2] %in% c(ego$name, ego$similar), ]
#'
#' musnet <- simplify(graph_from_data_frame(edges, dir=FALSE,
#' vertices=data.frame(name=c(ego$name, ego$similar))))
#' print_all(musnet)
#'
#' V(musnet)$raster <- c(list(ego$image), lapply(similar, "[[", "image"))
#' plot(musnet, layout=layout_as_star, vertex.shape="raster",
#' vertex.label=V(musnet)$name, margin=.2,
#' vertex.size=50, vertex.size2=50,
#' vertex.label.dist=2, vertex.label.degree=0)
#' } else {
#' message("You need the `XML', `png' and `jpeg' packages to run this")
#' }
shapes <- function(shape=NULL) {
  # No shape requested: list the names of all registered vertex shapes.
  if (is.null(shape)) {
    return(ls(.igraph.shapes))
  }
  # Otherwise return the clip/plot function pair registered for this shape.
  .igraph.shapes[[shape]]
}
#' @rdname shapes
#' @export
shape_noclip <- function(coords, el, params,
                         end=c("both", "from", "to")) {
  # Trivial clipping: edges are drawn all the way to the vertex centres.
  # Returns the requested end-point columns of `coords` unchanged.
  end <- igraph.match.arg(end)
  switch(end,
         both = coords,
         from = coords[, 1:2, drop = FALSE],
         to   = coords[, 3:4, drop = FALSE])
}
#' @rdname shapes
#' @export
shape_noplot <- function(coords, v=NULL, params) {
  # Intentionally draws nothing; returns NULL invisibly.
  return(invisible(NULL))
}
#' @rdname shapes
#' @export
add_shape <- function(shape, clip=shape_noclip,
                      plot=shape_noplot,
                      parameters=list()) {
  ## TODO: validate arguments (scalar string, two functions, named list)
  # Register (or overwrite) the shape's clip/plot pair in the shape registry.
  .igraph.shapes[[shape]] <- list(clip=clip, plot=plot)
  # Declare any new plot/vertex/edge parameters with their defaults.
  do.call(igraph.options, parameters)
  invisible(TRUE)
}
## These are the predefined shapes
.igraph.shape.circle.clip <- function(coords, el, params,
                                      end=c("both", "from", "to")) {
  # Clip edge end points to the boundary of circular vertices.
  # `coords` holds one edge per row as (x0, y0, x1, y1); `el` is the
  # matching edge list (vertex ids), used to look up per-vertex sizes.
  end <- match.arg(end)
  if (length(coords) == 0) { return(coords) }
  vertex.size <- 1/200 * params("vertex", "size")

  # Radius of the vertex in the given edge-list column (recycle a scalar).
  radius_for <- function(col) {
    if (length(vertex.size) == 1) vertex.size else vertex.size[el[, col]]
  }

  # Direction of each edge, from the first end point towards the second.
  phi <- atan2(coords[, 4] - coords[, 2], coords[, 3] - coords[, 1])

  # Move the first end point forward along the edge by the source radius.
  clip_from <- function() {
    rad <- radius_for(1)
    cbind(coords[, 1] + rad * cos(phi),
          coords[, 2] + rad * sin(phi))
  }
  # Move the second end point back along the edge by the target radius.
  clip_to <- function() {
    len <- sqrt((coords[, 3] - coords[, 1])^2 +
                (coords[, 4] - coords[, 2])^2)
    rad <- radius_for(2)
    cbind(coords[, 1] + (len - rad) * cos(phi),
          coords[, 2] + (len - rad) * sin(phi))
  }

  switch(end,
         from = clip_from(),
         to   = clip_to(),
         both = cbind(clip_from(), clip_to()))
}
#' @importFrom graphics symbols
.igraph.shape.circle.plot <- function(coords, v=NULL, params) {
  # Draw circular vertices at `coords` (one row per vertex).
  # Fetch a vertex parameter, subsetting to the drawn vertices when the
  # parameter is vectorized (same lookup convention as the pie shape).
  getparam <- function(pname) {
    p <- params("vertex", pname)
    if (length(p) != 1 && !is.null(v)) {
      p <- p[v]
    }
    p
  }
  vertex.color <- getparam("color")
  vertex.frame.color <- getparam("frame.color")
  # Sizes are in 1/200ths of the plot range; recycle to one per vertex.
  vertex.size <- rep(1/200 * getparam("size"), length=nrow(coords))
  # One filled circle per vertex, drawn in plot units (inches=FALSE).
  symbols(x=coords[,1], y=coords[,2], bg=vertex.color, fg=vertex.frame.color,
          circles=vertex.size, add=TRUE, inches=FALSE)
}
.igraph.shape.square.clip <- function(coords, el, params,
                                      end=c("both", "from", "to")) {
  # Clip edge end points to the boundary of square vertices.
  # `coords` holds one edge per row as (x0, y0, x1, y1); `el` is the
  # matching edge list (vertex ids) used to look up per-vertex sizes.
  end <- match.arg(end)
  if (length(coords)==0) { return (coords) }
  # Half-side length, in 1/200ths of the plot range.
  vertex.size <- 1/200 * params("vertex", "size")
  # Move (x1, y1) to the intersection of the segment (x0,y0)-(x1,y1) with
  # the boundary of the square of half-side `vsize` centred on (x1, y1).
  square.shift <- function(x0, y0, x1, y1, vsize) {
    m <- (y0-y1)/(x0-x1)  # slope of the edge
    # Candidate intersections with the bottom, left, top and right side
    # lines (x/y offsets derived from the slope).
    l <- cbind(x1-vsize/m , y1-vsize,
               x1-vsize , y1-vsize*m,
               x1+vsize/m, y1+vsize,
               x1+vsize , y1+vsize*m )
    # A candidate counts only if it lies on the square itself (the side
    # lines extend beyond the corners).
    v <- cbind(x1-vsize <= l[,1] & l[,1] <= x1+vsize &
               y1-vsize <= l[,2] & l[,2] <= y1+vsize,
               x1-vsize <= l[,3] & l[,3] <= x1+vsize &
               y1-vsize <= l[,4] & l[,4] <= y1+vsize,
               x1-vsize <= l[,5] & l[,5] <= x1+vsize &
               y1-vsize <= l[,6] & l[,6] <= y1+vsize,
               x1-vsize <= l[,7] & l[,7] <= x1+vsize &
               y1-vsize <= l[,8] & l[,8] <= y1+vsize)
    # Squared distance of each candidate from the far end point (x0, y0).
    d <- cbind((l[,1]-x0)^2 + (l[,2]-y0)^2,
               (l[,3]-x0)^2 + (l[,4]-y0)^2,
               (l[,5]-x0)^2 + (l[,6]-y0)^2,
               (l[,7]-x0)^2 + (l[,8]-y0)^2)
    # Per edge: discard invalid candidates and keep the nearest one.
    t(sapply(seq(length=nrow(l)), function(x) {
      d[x,][!v[x,]] <- Inf
      m <- which.min(d[x,])
      l[x, c(m*2-1, m*2)]
    }))
  }
  # Clip the first end point of each edge.
  if (end %in% c("from", "both")) {
    vsize <- if (length(vertex.size)==1) {
      vertex.size
    } else {
      vertex.size[ el[,1] ]
    }
    res <- res1 <- square.shift(coords[,3], coords[,4], coords[,1], coords[,2],
                                vsize)
  }
  # Clip the second end point of each edge.
  if (end %in% c("to", "both")) {
    vsize <- if (length(vertex.size)==1) {
      vertex.size
    } else {
      vertex.size[ el[,2] ]
    }
    res <- res2 <- square.shift(coords[,1], coords[,2], coords[,3], coords[,4],
                                vsize)
  }
  if (end=="both") {
    res <- cbind(res1, res2)
  }
  res
}
#' @importFrom graphics symbols
.igraph.shape.square.plot <- function(coords, v=NULL, params) {
  # Draw square vertices at `coords` (one row per vertex).
  # Fetch a vertex parameter, subsetting to the drawn vertices when the
  # parameter is vectorized (same lookup convention as the pie shape).
  getparam <- function(pname) {
    p <- params("vertex", pname)
    if (length(p) != 1 && !is.null(v)) {
      p <- p[v]
    }
    p
  }
  vertex.color <- getparam("color")
  vertex.frame.color <- getparam("frame.color")
  # Half-side lengths in 1/200ths of the plot range; recycle per vertex.
  vertex.size <- rep(1/200 * getparam("size"), length=nrow(coords))
  # symbols() wants full side lengths, hence the factor of two.
  symbols(x=coords[,1], y=coords[,2], bg=vertex.color, fg=vertex.frame.color,
          squares=2*vertex.size, add=TRUE, inches=FALSE)
}
.igraph.shape.csquare.clip <- function(coords, el, params,
                                       end=c("both", "from", "to")) {
  # Like the square clipping function, but edges always attach at the
  # midpoint of one of the four sides (the "c" variant) instead of at the
  # exact boundary intersection.
  end <- match.arg(end)
  if (length(coords)==0) { return (coords) }
  # Half-side length, in 1/200ths of the plot range.
  vertex.size <- 1/200 * params("vertex", "size")
  # Candidates: midpoints of the bottom, left, top and right sides of the
  # square centred on (x1, y1); the one closest to (x0, y0) wins.
  square.shift <- function(x0, y0, x1, y1, vsize) {
    l <- cbind(x1, y1-vsize,
               x1-vsize, y1,
               x1, y1+vsize,
               x1+vsize, y1)
    # Squared distance of each candidate from the far end point.
    d <- cbind((l[,1]-x0)^2 + (l[,2]-y0)^2,
               (l[,3]-x0)^2 + (l[,4]-y0)^2,
               (l[,5]-x0)^2 + (l[,6]-y0)^2,
               (l[,7]-x0)^2 + (l[,8]-y0)^2)
    t(sapply(seq(length=nrow(l)), function(x) {
      m <- which.min(d[x,])
      l[x, c(m*2-1, m*2)]
    }))
  }
  # Clip the first end point of each edge.
  if (end %in% c("from", "both")) {
    vsize <- if (length(vertex.size)==1) {
      vertex.size
    } else {
      vertex.size[ el[,1] ]
    }
    res <- res1 <- square.shift(coords[,3], coords[,4], coords[,1], coords[,2],
                                vsize)
  }
  # Clip the second end point of each edge.
  if (end %in% c("to", "both")) {
    vsize <- if (length(vertex.size)==1) {
      vertex.size
    } else {
      vertex.size[ el[,2] ]
    }
    res <- res2 <- square.shift(coords[,1], coords[,2], coords[,3], coords[,4],
                                vsize)
  }
  if (end=="both") {
    res <- cbind(res1, res2)
  }
  res
}
# csquare shares the square drawing routine; only the clipping rule differs.
.igraph.shape.csquare.plot <- .igraph.shape.square.plot
.igraph.shape.rectangle.clip <- function(coords, el, params,
                                         end=c("both", "from", "to")) {
  # Clip edge end points to the boundary of rectangular vertices.
  # `coords` holds one edge per row as (x0, y0, x1, y1); `el` is the
  # matching edge list (vertex ids) used to look up per-vertex sizes.
  end <- match.arg(end)
  if (length(coords)==0) { return (coords) }
  # Half-width ("size") and half-height ("size2"), in 1/200ths of the
  # plot range.
  vertex.size <- 1/200 * params("vertex", "size")
  vertex.size2 <- 1/200 * params("vertex", "size2")
  # Move (x1, y1) to the intersection of the segment (x0,y0)-(x1,y1) with
  # the rectangle of half-width `vsize`, half-height `vsize2` centred on
  # (x1, y1).
  rec.shift <- function(x0, y0, x1, y1, vsize, vsize2) {
    m <- (y0-y1)/(x0-x1)  # slope of the edge
    # Candidate intersections with the bottom, left, top and right side
    # lines. BUG FIX: the bottom candidate previously used `x1-vsize/m`;
    # reaching y = y1-vsize2 along a line of slope m needs an x-offset of
    # vsize2/m, mirroring the top candidate `x1+vsize2/m` (the square
    # variant is symmetric in vsize, which hid this asymmetry).
    l <- cbind(x1-vsize2/m, y1-vsize2,
               x1-vsize, y1-vsize*m,
               x1+vsize2/m, y1+vsize2,
               x1+vsize, y1+vsize*m )
    # A candidate counts only if it lies on the rectangle itself (the side
    # lines extend beyond the corners).
    v <- cbind(x1-vsize <= l[,1] & l[,1] <= x1+vsize &
               y1-vsize2 <= l[,2] & l[,2] <= y1+vsize2,
               x1-vsize <= l[,3] & l[,3] <= x1+vsize &
               y1-vsize2 <= l[,4] & l[,4] <= y1+vsize2,
               x1-vsize <= l[,5] & l[,5] <= x1+vsize &
               y1-vsize2 <= l[,6] & l[,6] <= y1+vsize2,
               x1-vsize <= l[,7] & l[,7] <= x1+vsize &
               y1-vsize2 <= l[,8] & l[,8] <= y1+vsize2)
    # Squared distance of each candidate from the far end point (x0, y0).
    d <- cbind((l[,1]-x0)^2 + (l[,2]-y0)^2,
               (l[,3]-x0)^2 + (l[,4]-y0)^2,
               (l[,5]-x0)^2 + (l[,6]-y0)^2,
               (l[,7]-x0)^2 + (l[,8]-y0)^2)
    # Per edge: discard invalid candidates and keep the nearest one.
    t(sapply(seq(length=nrow(l)), function(x) {
      d[x,][!v[x,]] <- Inf
      m <- which.min(d[x,])
      l[x, c(m*2-1, m*2)]
    }))
  }
  # Clip the first end point of each edge.
  if (end %in% c("from", "both")) {
    vsize <- if (length(vertex.size)==1) {
      vertex.size
    } else {
      vertex.size[ el[,1] ]
    }
    vsize2 <- if (length(vertex.size2)==1) {
      vertex.size2
    } else {
      vertex.size2[ el[,1] ]
    }
    res <- res1 <- rec.shift(coords[,3], coords[,4], coords[,1], coords[,2],
                             vsize, vsize2)
  }
  # Clip the second end point of each edge.
  if (end %in% c("to", "both")) {
    vsize <- if (length(vertex.size)==1) {
      vertex.size
    } else {
      vertex.size[ el[,2] ]
    }
    vsize2 <- if (length(vertex.size2)==1) {
      vertex.size2
    } else {
      vertex.size2[ el[,2] ]
    }
    res <- res2 <- rec.shift(coords[,1], coords[,2], coords[,3], coords[,4],
                             vsize, vsize2)
  }
  if (end=="both") {
    res <- cbind(res1, res2)
  }
  res
}
#' @importFrom graphics symbols
.igraph.shape.rectangle.plot <- function(coords, v=NULL, params) {
  # Draw rectangular vertices at `coords` (one row per vertex).
  # Fetch a vertex parameter, subsetting to the drawn vertices when the
  # parameter is vectorized (same lookup convention as the pie shape).
  getparam <- function(pname) {
    p <- params("vertex", pname)
    if (length(p) != 1 && !is.null(v)) {
      p <- p[v]
    }
    p
  }
  vertex.color <- getparam("color")
  vertex.frame.color <- getparam("frame.color")
  # Half-width ("size") per vertex and half-height ("size2"), in 1/200ths
  # of the plot range; cbind() recycles size2 against the per-vertex widths.
  vertex.size <- rep(1/200 * getparam("size"), length=nrow(coords))
  vertex.size2 <- 1/200 * getparam("size2")
  dims <- cbind(vertex.size, vertex.size2)
  # symbols() wants full widths/heights, hence the factor of two.
  symbols(x=coords[,1], y=coords[,2], bg=vertex.color, fg=vertex.frame.color,
          rectangles=2*dims, add=TRUE, inches=FALSE)
}
.igraph.shape.crectangle.clip <- function(coords, el, params,
                                          end=c("both", "from", "to")) {
  # Like the rectangle clipping function, but edges always attach at the
  # midpoint of one of the four sides (the "c" variant) rather than at the
  # exact boundary intersection.
  end <- match.arg(end)
  if (length(coords)==0) { return (coords) }
  # Half-width ("size") and half-height ("size2"), in 1/200ths of the
  # plot range.
  vertex.size <- 1/200 * params("vertex", "size")
  vertex.size2 <- 1/200 * params("vertex", "size2")
  # Candidates: midpoints of the bottom, left, top and right sides of the
  # rectangle centred on (x1, y1); the one closest to (x0, y0) wins.
  rec.shift <- function(x0, y0, x1, y1, vsize, vsize2) {
    l <- cbind(x1, y1-vsize2,
               x1-vsize, y1,
               x1, y1+vsize2,
               x1+vsize, y1)
    # Squared distance of each candidate from the far end point.
    d <- cbind((l[,1]-x0)^2 + (l[,2]-y0)^2,
               (l[,3]-x0)^2 + (l[,4]-y0)^2,
               (l[,5]-x0)^2 + (l[,6]-y0)^2,
               (l[,7]-x0)^2 + (l[,8]-y0)^2)
    t(sapply(seq(length=nrow(l)), function(x) {
      m <- which.min(d[x,])
      l[x, c(m*2-1, m*2)]
    }))
  }
  # Clip the first end point of each edge.
  if (end %in% c("from", "both")) {
    vsize <- if (length(vertex.size)==1) {
      vertex.size
    } else {
      vertex.size[ el[,1] ]
    }
    vsize2 <- if (length(vertex.size2)==1) {
      vertex.size2
    } else {
      vertex.size2[ el[,1] ]
    }
    res <- res1 <- rec.shift(coords[,3], coords[,4], coords[,1], coords[,2],
                             vsize, vsize2)
  }
  # Clip the second end point of each edge.
  if (end %in% c("to", "both")) {
    vsize <- if (length(vertex.size)==1) {
      vertex.size
    } else {
      vertex.size[ el[,2] ]
    }
    vsize2 <- if (length(vertex.size2)==1) {
      vertex.size2
    } else {
      vertex.size2[ el[,2] ]
    }
    res <- res2 <- rec.shift(coords[,1], coords[,2], coords[,3], coords[,4],
                             vsize, vsize2)
  }
  if (end=="both") {
    res <- cbind(res1, res2)
  }
  res
}
# crectangle shares the rectangle drawing routine; only clipping differs.
.igraph.shape.crectangle.plot <- .igraph.shape.rectangle.plot
.igraph.shape.vrectangle.clip <- function(coords, el, params,
                                          end=c("both", "from", "to")) {
  # Rectangle variant where edges always attach at the midpoint of the
  # left or right side (whichever is closer to the far end point).
  end <- match.arg(end)
  if (length(coords)==0) { return (coords) }
  # Half-width ("size") and half-height ("size2"), in 1/200ths of the
  # plot range.
  vertex.size <- 1/200 * params("vertex", "size")
  vertex.size2 <- 1/200 * params("vertex", "size2")
  # Candidates: left and right side midpoints of the rectangle centred on
  # (x1, y1). NOTE(review): vsize2 is accepted but unused here -- edges
  # never attach to the top/bottom sides for this shape.
  rec.shift <- function(x0, y0, x1, y1, vsize, vsize2) {
    l <- cbind(x1-vsize, y1, x1+vsize, y1)
    # Squared distance of each candidate from the far end point.
    d <- cbind((l[,1]-x0)^2 + (l[,2]-y0)^2,
               (l[,3]-x0)^2 + (l[,4]-y0)^2)
    t(sapply(seq(length=nrow(l)), function(x) {
      m <- which.min(d[x,])
      l[x, c(m*2-1, m*2)]
    }))
  }
  # Clip the first end point of each edge.
  if (end %in% c("from", "both")) {
    vsize <- if (length(vertex.size)==1) {
      vertex.size
    } else {
      vertex.size[ el[,1] ]
    }
    vsize2 <- if (length(vertex.size2)==1) {
      vertex.size2
    } else {
      vertex.size2[ el[,1] ]
    }
    res <- res1 <- rec.shift(coords[,3], coords[,4], coords[,1], coords[,2],
                             vsize, vsize2)
  }
  # Clip the second end point of each edge.
  if (end %in% c("to", "both")) {
    vsize <- if (length(vertex.size)==1) {
      vertex.size
    } else {
      vertex.size[ el[,2] ]
    }
    vsize2 <- if (length(vertex.size2)==1) {
      vertex.size2
    } else {
      vertex.size2[ el[,2] ]
    }
    res <- res2 <- rec.shift(coords[,1], coords[,2], coords[,3], coords[,4],
                             vsize, vsize2)
  }
  if (end=="both") {
    res <- cbind(res1, res2)
  }
  res
}
# vrectangle shares the rectangle drawing routine; only clipping differs.
.igraph.shape.vrectangle.plot <- .igraph.shape.rectangle.plot
# "none": edges are clipped as for circles, but nothing is drawn.
.igraph.shape.none.clip <- .igraph.shape.circle.clip
.igraph.shape.none.plot <- function(coords, v=NULL, params) {
  ## does not plot anything at all
  invisible(NULL)
}
#' @importFrom graphics par polygon
mypie <- function(x, y, values, radius, edges=200, col=NULL, angle=45,
                  density=NULL, border=NULL, lty=NULL, init.angle=90, ...) {
  # Draw a pie chart centred on (x, y) with the given radius onto the
  # current plot (presumably adapted from graphics::pie, which always
  # starts a new plot). `values` are the unnormalized slice weights.
  values <- c(0, cumsum(values)/sum(values))  # cumulative slice fractions
  dx <- diff(values)
  nx <- length(dx)  # number of slices
  twopi <- 2 * pi
  # Default palette, or foreground colour when hatching (density) is used.
  if (is.null(col))
    col <- if (is.null(density))
      c("white", "lightblue", "mistyrose", "lightcyan",
        "lavender", "cornsilk")
    else par("fg")
  # Recycle per-slice graphical parameters to one value per slice.
  col <- rep(col, length.out = nx)
  border <- rep(border, length.out = nx)
  lty <- rep(lty, length.out = nx)
  angle <- rep(angle, length.out = nx)
  density <- rep(density, length.out = nx)
  # Map a cumulative fraction t to a point on the circle; slice 0 starts
  # at init.angle degrees (12 o'clock by default).
  t2xy <- function(t) {
    t2p <- twopi * t + init.angle * pi/180
    list(x = radius * cos(t2p), y = radius * sin(t2p))
  }
  for (i in 1:nx) {
    # Approximate the arc with at least 2 points (up to `edges` total).
    n <- max(2, floor(edges * dx[i]))
    P <- t2xy(seq.int(values[i], values[i + 1], length.out = n))
    # Close each wedge through the centre (x, y).
    polygon(x+c(P$x, 0), y+c(P$y, 0), density = density[i], angle = angle[i],
            border = border[i], col = col[i], lty = lty[i], ...)
  }
}
.igraph.shape.pie.clip <- function(coords, el, params,
                                   end=c("both", "from", "to")) {
  # Pie vertices are round, so edges are clipped exactly as for circles:
  # each end point is pulled in along the edge by the vertex radius.
  end <- match.arg(end)
  if (length(coords) == 0) { return(coords) }
  vertex.size <- 1/200 * params("vertex", "size")

  # Radius of the vertex in the given edge-list column (recycle a scalar).
  radius_for <- function(col) {
    if (length(vertex.size) == 1) vertex.size else vertex.size[el[, col]]
  }

  # Direction of each edge, from the first end point towards the second.
  phi <- atan2(coords[, 4] - coords[, 2], coords[, 3] - coords[, 1])

  # Move the first end point forward along the edge by the source radius.
  clip_from <- function() {
    rad <- radius_for(1)
    cbind(coords[, 1] + rad * cos(phi),
          coords[, 2] + rad * sin(phi))
  }
  # Move the second end point back along the edge by the target radius.
  clip_to <- function() {
    len <- sqrt((coords[, 3] - coords[, 1])^2 +
                (coords[, 4] - coords[, 2])^2)
    rad <- radius_for(2)
    cbind(coords[, 1] + (len - rad) * cos(phi),
          coords[, 2] + (len - rad) * sin(phi))
  }

  switch(end,
         from = clip_from(),
         to   = clip_to(),
         both = cbind(clip_from(), clip_to()))
}
#' @importFrom stats na.omit
## Plot function for the "pie" vertex shape: each vertex is drawn as a
## small pie chart whose slice sizes come from the per-vertex `pie`
## parameter, using mypie() defined above.
.igraph.shape.pie.plot <- function(coords, v=NULL, params) {
  ## Fetch a vertex parameter; when a per-vertex vector was supplied,
  ## subset it to the vertices actually being drawn.
  getparam <- function(pname) {
    p <- params("vertex", pname)
    if (length(p) != 1 && !is.null(v)) {
      p <- p[v]
    }
    p
  }
  vertex.color <- getparam("color")
  vertex.frame.color <- getparam("frame.color")
  ## Sizes use the same 1/200 scaling as the clip functions.
  vertex.size <- rep(1/200 * getparam("size"), length=nrow(coords))
  vertex.pie <- getparam("pie")
  vertex.pie.color <- getparam("pie.color")
  vertex.pie.angle <- getparam("pie.angle")
  vertex.pie.density <- getparam("pie.density")
  vertex.pie.lty <- getparam("pie.lty")
  for (i in seq_len(nrow(coords))) {
    ## A single-element list is recycled for all vertices; otherwise the
    ## i-th element applies.
    pie <- if(length(vertex.pie)==1) {
      vertex.pie[[1]]
    } else {
      vertex.pie[[i]]
    }
    col <- if (length(vertex.pie.color)==1) {
      vertex.pie.color[[1]]
    } else {
      vertex.pie.color[[i]]
    }
    ## The `[c(i, 1)]` + na.omit() idiom falls back to the first element
    ## when the i-th entry is NA (or the parameter is scalar).
    mypie(x=coords[i,1], y=coords[i,2], pie,
          radius=vertex.size[i], edges=200, col=col,
          angle=na.omit(vertex.pie.angle[c(i,1)])[1],
          density=na.omit(vertex.pie.density[c(i,1)])[1],
          border=na.omit(vertex.frame.color[c(i,1)])[1],
          lty=na.omit(vertex.pie.lty[c(i,1)])[1])
  }
}
## Spheres are clipped like circles (edges stop at the bounding circle).
.igraph.shape.sphere.clip <- .igraph.shape.circle.clip
#' @importFrom graphics rasterImage
#' @importFrom grDevices col2rgb as.raster
## Plot function for the "sphere" vertex shape: each vertex is drawn as
## a shaded sphere image generated by the C routine C_R_igraph_getsphere
## and placed on the plot with rasterImage().
.igraph.shape.sphere.plot <- function(coords, v=NULL, params) {
  ## Fetch a vertex parameter; subset per-vertex vectors to the drawn
  ## vertices.
  getparam <- function(pname) {
    p <- params("vertex", pname)
    if (length(p) != 1 && !is.null(v)) {
      p <- p[v]
    }
    p
  }
  vertex.color <- rep(getparam("color"), length=nrow(coords))
  vertex.size <- rep(1/200 * getparam("size"), length=nrow(coords))
  ## Need to create a separate image for every different vertex color
  ## (rendering the sphere raster is comparatively expensive, so it is
  ## done once per unique color, not once per vertex).
  allcols <- unique(vertex.color)
  images <- lapply(allcols, function(col) {
    img <- .Call(C_R_igraph_getsphere, pos=c(0.0,0.0,10.0), radius=7.0,
                 color=col2rgb(col)/255, bgcolor=c(0,0,0),
                 lightpos=list(c(-2,2,2)), lightcolor=list(c(1,1,1)),
                 width=100L, height=100L)
    as.raster(img)
  })
  ## Map every vertex back to its pre-rendered image.
  whichImage <- match(vertex.color, allcols)
  for (i in seq_len(nrow(coords))) {
    vsp2 <- vertex.size[i]
    rasterImage(images[[ whichImage[i] ]],
                coords[i,1]-vsp2, coords[i,2]-vsp2,
                coords[i,1]+vsp2, coords[i,2]+vsp2)
  }
}
## Raster vertices are rectangular, so rectangle clipping applies.
.igraph.shape.raster.clip <- .igraph.shape.rectangle.clip
#' @importFrom graphics rasterImage
## Plot function for the "raster" vertex shape: each vertex is drawn as
## a user-supplied raster image, `size` x `size2` in extent.
.igraph.shape.raster.plot <- function(coords, v=NULL, params) {
  ## Fetch a vertex parameter; note that only list-valued parameters are
  ## subset per vertex here (the raster itself is a list of images).
  getparam <- function(pname) {
    p <- params("vertex", pname)
    if (is.list(p) && length(p) != 1 && !is.null(v)) {
      p <- p[v]
    }
    p
  }
  size <- rep(1/200 * getparam("size"), length=nrow(coords))
  size2 <- rep(1/200 * getparam("size2"), length=nrow(coords))
  raster <- getparam("raster")
  for (i in seq_len(nrow(coords))) {
    ## A single raster (or non-list) is shared by all vertices.
    ras <- if (!is.list(raster) || length(raster)==1) raster else raster[[i]]
    rasterImage(ras, coords[i,1]-size[i], coords[i,2]-size2[i],
                coords[i,1]+size[i], coords[i,2]+size2[i])
  }
}
## Registry of the built-in vertex shapes.  Each entry pairs a clip
## function (moves edge endpoints onto the shape's boundary) with a plot
## function (draws the vertex itself).
.igraph.shapes <- new.env()
local({
  register <- function(name, clip, plot) {
    .igraph.shapes[[name]] <- list(clip = clip, plot = plot)
  }
  register("circle",     .igraph.shape.circle.clip,     .igraph.shape.circle.plot)
  register("square",     .igraph.shape.square.clip,     .igraph.shape.square.plot)
  register("csquare",    .igraph.shape.csquare.clip,    .igraph.shape.csquare.plot)
  register("rectangle",  .igraph.shape.rectangle.clip,  .igraph.shape.rectangle.plot)
  register("crectangle", .igraph.shape.crectangle.clip, .igraph.shape.crectangle.plot)
  register("vrectangle", .igraph.shape.vrectangle.clip, .igraph.shape.vrectangle.plot)
  register("none",       .igraph.shape.none.clip,       .igraph.shape.none.plot)
  register("pie",        .igraph.shape.pie.clip,        .igraph.shape.pie.plot)
  register("sphere",     .igraph.shape.sphere.clip,     .igraph.shape.sphere.plot)
  register("raster",     .igraph.shape.raster.clip,     .igraph.shape.raster.plot)
})
|
240effdcafee599b6ffbf0ae299cf93c9fb9ec64
|
7eb63399fa00e3c547e5933ffa4f47de515fe2c6
|
/man/print.fromXYZ.Rd
|
759b6e3f46bfc037e96d68f68b1f83a14da673e3
|
[] |
no_license
|
bentaylor1/lgcp
|
a5cda731f413fb30e1c40de1b3360be3a6a53f19
|
2343d88e5d25ecacd6dbe5d6fcc8ace9cae7b136
|
refs/heads/master
| 2021-01-10T14:11:38.067639
| 2015-11-19T13:22:19
| 2015-11-19T13:22:19
| 45,768,716
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 405
|
rd
|
print.fromXYZ.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/spatialAtRiskClassDef.R
\name{print.fromXYZ}
\alias{print.fromXYZ}
\title{print.fromXYZ function}
\usage{
\method{print}{fromXYZ}(x, ...)
}
\arguments{
\item{x}{an object of class \code{fromXYZ}}
\item{...}{additional arguments}
}
\value{
prints the object
}
\description{
Print method for objects of class fromXYZ.
}
|
548635dcc049774d36437394a0e2fff3776463ce
|
8b61baaf434ac01887c7de451078d4d618db77e2
|
/man/rm.na.Rd
|
1fadbe0c85dc13693f6f0931055d604212908f15
|
[] |
no_license
|
drmjc/mjcbase
|
d5c6100b6f2586f179ad3fc0acb07e2f26f5f517
|
96f707d07c0a473f97fd70ff1ff8053f34fa6488
|
refs/heads/master
| 2020-05-29T19:36:53.961692
| 2017-01-17T10:54:00
| 2017-01-17T10:54:00
| 12,447,080
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 387
|
rd
|
rm.na.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rm.na.R
\name{rm.na}
\alias{rm.na}
\title{remove NA's from an object}
\usage{
rm.na(x)
}
\arguments{
\item{x}{a 1D object}
}
\value{
an object with the \code{NA} values removed
}
\description{
remove NA's from an object
}
\note{
Defunct: \code{\link{na.omit}} is a better alternative.
}
\author{
Mark Cowley, 2012-07-16
}
|
d9d453bac0997144df6e06b1ec3f28b1f295276b
|
c784be70105a2f34f820295e1189b351e33a56dd
|
/R/queue.R
|
f25a9d84697f0ba992ba22b9e6515c92066b0e19
|
[] |
no_license
|
sumprain/dbMapR
|
80f39be8d1bcee5812ebe8b256b6f2ef7238fee8
|
9642a3c4c5e21f1344fdee6d7f0b71f44df741e2
|
refs/heads/master
| 2021-01-09T20:17:19.104350
| 2016-01-18T15:28:24
| 2016-01-18T15:28:24
| 42,582,541
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,739
|
r
|
queue.R
|
#' Create a fixed-capacity FIFO queue backed by an environment.
#'
#' Elements are stored under integer-valued string keys; the capacity is
#' kept in the "max_length" attribute and enforced by push().
#' @export
queue <- function(max_length = 5L, parent = emptyenv()) {
  store <- new.env(hash = TRUE, parent = parent)
  attr(store, "max_length") <- max_length
  structure(store, class = "queue")
}
## S3 generics for the queue API; the behavior lives in the *.queue
## methods below.
## Append a value, evicting the oldest element when full.
#' @export
push <- function(queue, value) {
  UseMethod("push")
}
## Remove all stored elements.
#' @export
empty <- function(queue) {
  UseMethod("empty")
}
## All element keys sorted in numeric (insertion) order.
#' @export
sorted_index <- function(queue) {
  UseMethod("sorted_index")
}
## Key of the oldest (numerically smallest) element.
#' @export
min_index <- function(queue) {
  UseMethod("min_index")
}
## Key of the newest (numerically largest) element.
#' @export
max_index <- function(queue) {
  UseMethod("max_index")
}
## Delete the element stored under `index`.
## NOTE(review): this generic masks base::remove() for code that
## attaches this package; remove.queue below works around it with
## base::remove().
#' @export
remove <- function(queue, index) {
  UseMethod("remove")
}
## Key under which the next pushed value will be stored.
#' @export
counter <- function(queue) {
  UseMethod("counter")
}
#' Push a value onto the queue, evicting the oldest element when the
#' queue is at its "max_length" capacity.  The value is stored under the
#' next counter key; the queue is returned invisibly.
#' @export
push.queue <- function(queue, value) {
  capacity <- attr(queue, "max_length")
  if (length(queue) >= capacity) {
    ## At capacity: drop the oldest entry first.
    remove(queue, min_index(queue))
  }
  queue[[counter(queue)]] <- value
  invisible(queue)
}
#' Remove every element from the queue; the environment itself (and its
#' attributes) are kept.  Returns the queue invisibly.
#' @export
empty.queue <- function(queue) {
  keys <- ls(envir = queue)
  if (length(keys) > 0) {
    rm(list = keys, envir = queue)
  }
  invisible(queue)
}
#' Number of elements currently stored in the queue.
#' @export
length.queue <- function(x) {
  length(ls(envir = x))
}
#' Element keys sorted in numeric order.  Keys are integer-valued
#' strings, so they are converted to integer before sorting to avoid
#' lexicographic ordering (e.g. "10" before "2").
#' @export
sorted_index.queue <- function(queue) {
  keys <- as.integer(ls(envir = queue))
  as.character(sort(keys))
}
#' Key of the oldest element (numerically smallest key).
#' @export
min_index.queue <- function(queue) {
  sorted_index(queue)[1L]
}
#' Key of the newest element (numerically largest key).
#' @export
max_index.queue <- function(queue) {
  all.keys <- sorted_index(queue)
  all.keys[length(queue)]
}
#' Delete the element stored under `index`.  base::remove() is called
#' explicitly because the `remove` generic above shadows it.  Returns
#' the queue invisibly.
#' @export
remove.queue <- function(queue, index) {
  base::remove(list = index, envir = queue)
  invisible(queue)
}
#' Key for the next pushed value: "1" for an empty queue, otherwise the
#' current maximum key plus one, as a string.
#' @export
counter.queue <- function(queue) {
  if (length(queue) == 0) {
    "1"
  } else {
    as.character(as.integer(max_index(queue)) + 1L)
  }
}
|
331c1d0aefc4e68f3457041351fe7e137c0d384d
|
fffa2b4bc248b745cb0f3986f57b78ab3036c58c
|
/rankhospital.r
|
f4a8425f50ee7aca9d65f3ed8576aa7785f1cffe
|
[] |
no_license
|
pmanickavelu/datasciencecoursera
|
3696b0859509d2a5db3f76c444b873eb3d3a740a
|
9f667e4fe5c03a189dee1e9d4f15fdeaa0b48d21
|
refs/heads/master
| 2021-01-11T02:48:28.366326
| 2016-10-21T11:10:43
| 2016-10-21T11:10:43
| 70,921,377
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,582
|
r
|
rankhospital.r
|
## Return the name of the hospital in `state` holding rank `num` for
## 30-day mortality on `outcome` ("heart attack", "heart failure" or
## "pneumonia").  `num` may be a rank number, "best" or "worst"; a
## numeric rank beyond the number of hospitals yields NA.  Ties are
## broken alphabetically by hospital name; hospitals without data sort
## last.
##
## Bug fix: invalid `state`/`outcome` used to cat() a message (naming
## the wrong function, "best") and then continue into a subscript
## error; they now abort with stop().
rankhospital <- function(state, outcome, num = "best") {
  outcome_data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Map the human-readable outcome onto the data set's column name.
  outcome_keys <- c("heart attack" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack",
                    "heart failure" = "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Failure",
                    "pneumonia" = "Hospital.30.Day.Death..Mortality..Rates.from.Pneumonia")
  if (!outcome %in% names(outcome_keys)) {
    stop("invalid outcome")
  }
  state.hospital <- outcome_data[outcome_data$State == state, ]
  if (nrow(state.hospital) == 0) {
    stop("invalid state")
  }
  ## Coerce the rate column to numeric; "Not Available" becomes NA
  ## (the coercion warning is expected, so it is suppressed).
  state.hospital[[outcome_keys[outcome]]] <- suppressWarnings(
    as.numeric(state.hospital[[outcome_keys[outcome]]]))
  ranked.hospitals <- state.hospital[order(state.hospital[[outcome_keys[outcome]]],
                                           state.hospital$Hospital.Name,
                                           na.last = TRUE), ]$Hospital.Name
  if (is.numeric(num)) {
    return(ranked.hospitals[num])
  } else if (num == "best") {
    return(ranked.hospitals[1])
  } else if (num == "worst") {
    return(ranked.hospitals[length(ranked.hospitals)])
  }
}
# rankhospital("TX", "heart failure", "best")
# rankhospital("TX", "heart failure", "worst")
# rankhospital("TX", "heart failure", 4)
# rankhospital("TX", "heart failure", 1000)
# rankhospital("TX", "heart failure", 100)
|
8e02e4a3bbaed3515f45e1a555268306ffa06328
|
7e3f188372012ed9635facb1a2a3b0bab71cef48
|
/R/compare_expr.r
|
15ac0220607d2e2f76fbbd28ae7d0667544f4270
|
[] |
no_license
|
skranz/RTutor
|
ae637262b72f48646b013b5c6f89bb414c43b04d
|
f2939b7082cc5639f4695e671d179da0283df89d
|
refs/heads/master
| 2023-07-10T03:44:55.203997
| 2023-06-23T05:33:07
| 2023-06-23T05:33:07
| 11,670,641
| 203
| 61
| null | 2020-06-17T16:11:34
| 2013-07-25T20:47:22
|
R
|
UTF-8
|
R
| false
| false
| 18,099
|
r
|
compare_expr.r
|
## Interactive examples for describe.call(); not called by package code.
examples.describe.call = function() {
  x = 1:5
  describe.call(runif(10,1,2))
  describe.call(2*3+4)
  describe.call(x[1:4])
  df = data.frame(x=1:100)
  describe.call(df %.% filter(x>80))
}
## NOTE(review): this redefines examples.describe.call and silently
## shadows the definition directly above; one of the two should be
## renamed.  Demonstrates describe.call() on a substituted
## (unevaluated) argument via the call.obj parameter.
examples.describe.call = function() {
  f = function(x) {
    y = substitute(x)
    describe.call(call.obj=y)
  }
  f(2*x)
  f(plot(1))
  f("Hi")
  f(x)
  f(3)
}
## Classify a call and return list(name, type, args).
##
## Exactly one way of supplying the call is used, in this order of
## precedence: `call.obj` (an already quoted call), `call.str` (source
## text, parsed here), or `call` (a live argument captured with
## substitute()).
##
## Types: "math" for arithmetic operators and "(", "formula" for ~,
## "chain" for %.% / %>% pipes (described recursively via
## describe.chain.call), "assign" for <- and =, "subset" for [ / $ /
## [[, "comp" for comparison operators, "var" for a bare symbol, the
## object's class for non-call literals, and "fun" for everything else.
##
## Bug fix: `call.str` was previously passed as parse()'s `file`
## argument (so R tried to open it as a file name) and the full
## expression vector was used instead of a single call; it is now
## parsed as text and the first expression is taken.
describe.call = function(call, call.obj=NULL, call.str=NULL) {
  if (!is.null(call.obj)) {
    call = call.obj
  } else if (!is.null(call.str)) {
    call = base::parse(text=call.str, srcfile=NULL)[[1]]
  } else {
    call = substitute(call)
  }
  restore.point("describe.call")
  na = name.of.call(call)
  type = "fun"
  ## Map the call's head symbol onto a coarse expression type.
  if (na %in% c("+","-","*","/","%*%","(")) {
    type="math"
  } else if (na == "~") {
    type = "formula"
  } else if (na == "%.%" | na == "%>%") {
    type="chain"
  } else if (na == "<-" | na =="=") {
    type="assign"
  } else if (na == "[" | na=="$" | na == "[[") {
    type="subset"
  } else if (na=="==" | na=="<" | na =="!=" | na=="<=" | na==">" | na==">=") {
    type="comp"
  } else if (is.name(call)) {
    type="var"
  } else if (!is.call(call)) {
    type=class(call)
  }
  ## Pipe chains get a recursive description of all chain elements.
  if (type=="chain") {
    return(describe.chain.call(call))
  }
  args = args.of.call(call)
  list(name=na,type=type, args = args)
}
## Describe a %.% / %>% pipe chain.
##
## With chain.operator = NULL the call is assumed to be a chain and its
## elements are collected recursively with recursive.args.of.call().
## With a chain.operator, the call is only expanded when its head
## matches that operator; otherwise it is wrapped as a single-element
## chain so callers can treat both cases uniformly.
describe.chain.call = function(call.obj, chain.operator=NULL) {
  restore.point("describe.chain.call")
  call = call.obj
  # The caller function has determined that we have a chain
  if (is.null(chain.operator)) {
    call = call.obj
    na = name.of.call(call)
    args = recursive.args.of.call(call, na)
    return(list(name=na,type="chain", args = args))
  }
  # We have a chain if the call is equal to chain.operator
  na = name.of.call(call)
  if (na==chain.operator) {
    return(describe.chain.call(call.obj, chain.operator=NULL))
  } else {
    # No true chain just a single element
    # For simplicity treat it as a chain
    args = list(describe.call(call.obj=call.obj))
    names(args)=na
    return(list(name=na,type=chain.operator, args = args))
  }
}
# Checks whether arguments of stud.call are correct given the specification in check.call.
# Used inside correctness tests
## Returns TRUE when every argument required by check.call is present in
## stud.call and all shared arguments agree, either as unevaluated code
## or (when compare.vals is TRUE) after evaluation in val.env.
## `ignore.arg` names arguments exempt from comparison;
## `allow.extra.arg` tolerates student arguments absent from the
## specification.  NOTE(review): `check.values` is accepted but unused.
check.call.args = function(stud.call, check.call, compare.vals = !is.null(val.env), val.env=NULL, allow.extra.arg=FALSE, ignore.arg=NULL, check.values=NULL) {
  #restore.point("check.call.args")
  sarg = args.of.call(stud.call, name.empty.arg=TRUE)
  carg = args.of.call(check.call, name.empty.arg=TRUE)
  ## Any required argument missing from the student call fails at once.
  missing.arg = setdiff(names(carg), c(names(sarg), ignore.arg))
  if (length(missing.arg)>0)
    return(FALSE)
  if (!allow.extra.arg) {
    extra.arg = setdiff(names(sarg), c(names(carg), ignore.arg))
    if (length(extra.arg)>0)
      return(FALSE)
  }
  overlap.arg = setdiff(intersect(names(sarg), names(carg)), ignore.arg)
  if (length(overlap.arg)==0)
    return(TRUE)
  ## First compare the unevaluated argument expressions...
  differs = sapply(overlap.arg, function(na) !identical(sarg[[na]],carg[[na]]))
  if (sum(differs)==0)
    return(TRUE)
  if (!compare.vals)
    return(FALSE)
  ## ...then fall back to comparing evaluated values in val.env; any
  ## evaluation error counts as a mismatch.
  differ.arg = overlap.arg[differs]
  if (compare.vals) {
    for (var in differ.arg) {
      stud.val = try(eval(sarg[[var]],val.env), silent=TRUE)
      check.val = try(eval(carg[[var]],val.env), silent=TRUE)
      if (is(stud.val,"try-error") | is(check.val,"try-error")) return(FALSE)
      if (!is.same(stud.val,check.val))
        return(FALSE)
    }
  }
  return(TRUE)
}
## Strip names, row names and column names from an object; any error
## (e.g. from objects that do not support dimnames) is ignored and the
## object is returned as far as it got.
##
## Bug fix: the column-name branch called the non-existent catnames()
## instead of colnames(); because the whole block is wrapped in
## try(silent=TRUE), the error was swallowed and column names were
## never removed.
remove.names = function(x) {
  try({
    if (!is.null(names(x)))
      names(x) = NULL
    if (!is.null(rownames(x)))
      rownames(x) = NULL
    if (!is.null(colnames(x)))
      colnames(x) = NULL
  }, silent=TRUE)
  x
}
# Compares selected columns of data frames
# Also allows to sort data frame before checking
## Returns TRUE when x and y agree on the columns in check.cols (all
## columns when NULL), optionally after sorting both frames by
## sort.cols.  Relies on dplyr's ungroup()/arrange_at() for the sort.
same.data.frame.cols = function(x,y, check.cols=NULL, sort.cols=NULL,tol=1e-9, check.names=FALSE, check.attributes=FALSE,...) {
  if (!is.data.frame(x) | !is.data.frame(y))
    return(FALSE)
  #restore.point("same.data.frame.cols")
  if (NROW(x) != NROW(y)) return(FALSE)
  if (!is.null(sort.cols)) {
    ## Both frames must contain every sort column.
    if (!all(sort.cols %in% colnames(x)))
      return(FALSE)
    if (!all(sort.cols %in% colnames(y)))
      return(FALSE)
    x = ungroup(x)
    y = ungroup(y)
    x = arrange_at(x, sort.cols)
    y = arrange_at(y, sort.cols)
  }
  ## Without an explicit column selection, defer to the lenient whole
  ## object comparison in is.same().
  if (is.null(check.cols))
    return(is.same(x,y, tol=tol, check.names=check.names, check.attributes = check.attributes,...))
  for (col in check.cols) {
    ok = isTRUE(all.equal(x[[col]],y[[col]],tol=tol, check.names=check.names, check.attributes=check.attributes))
    if (!ok) return(FALSE)
  }
  return(TRUE)
}
## Lenient equality used to compare student results with the sample
## solution: identical() first, then dplyr group equality for tbls,
## optional name checks, all.equal() with tolerance (column by column
## for data frames), and finally a pure numeric comparison within tol
## with matching NA patterns.
is.same = function(x,y, tol=1e-9, check.all.equal=TRUE, check.names=true(is.data.frame(x) | is.list(x)), check.attributes=FALSE, check.groups=TRUE, ignore.environment=TRUE) {
  #restore.point("is.same")
  if(identical(x,y,ignore.environment = ignore.environment))
    return(TRUE)
  if (length(x)!=length(y))
    return(FALSE)
  if (check.groups) {
    ## dplyr grouping must match for tbls; a try() error is treated as
    ## "could not compare" and ignored.
    if (is(x,"tbl") | is(y,"tbl")) {
      ret = try(identical(dplyr::groups(x),dplyr::groups(y)))
      if (identical(ret,FALSE)) return(FALSE)
    }
  }
  if (check.names) {
    ## NOTE(review): true() is a project helper, presumably an
    ## isTRUE-like wrapper -- confirm against its definition.
    if (!true(identical(names(x), names(y)))) {
      return(FALSE)
    }
  }
  if (check.all.equal) {
    if (is.data.frame(x) & is.data.frame(y)) {
      if ((NROW(x) != NROW(y)) | (NCOL(x) != NCOL(y)))
        return(FALSE)
      if (length(x)==0)
        return(TRUE)
      ## Compare column by column so attribute differences on the frame
      ## itself do not matter.
      eq = sapply(1:NCOL(x), function(i) isTRUE(all.equal(x[[i]],y[[i]],tol=tol, check.names=check.names, check.attributes=check.attributes) ))
      if (all(eq))
        return(TRUE)
    } else {
      if (isTRUE(all.equal(x,y, tol=tol, check.names=check.names, check.attributes=check.attributes)))
        return(TRUE)
    }
  }
  ## Numeric fallback: max absolute difference within tol and identical
  ## NA positions.
  if (is.numeric(x) & is.numeric(y)) {
    if (max(abs(x-y), na.rm=TRUE)>tol )
      return(FALSE)
    if (!identical(is.na(x),is.na(y)))
      return(FALSE)
    return(TRUE)
  }
  return(FALSE)
}
# Compare if two calls are the same
## Standardizes both calls with match.call.object() and compares them.
## Returns a list with `same` (overall verdict), `same.call` (FALSE when
## exactly one side is a bare symbol) plus, for call/call comparisons,
## the detailed argument diagnostics from compare.call.args().
compare.calls = function(stud.call, check.call, compare.vals = !is.null(val.env), val.env=NULL, ...) {
  stud.call = match.call.object(stud.call, ...)
  check.call = match.call.object(check.call, ...)
  restore.point("compare.calls")
  if (is.symbol(stud.call) & is.symbol(check.call)) {
    ## Two bare symbols: equal only if identical.
    if (identical(stud.call,check.call)) {
      return(nlist(same=TRUE, same.call=TRUE, descr=""))
    } else {
      return(nlist(same=FALSE, same.call=FALSE, descr=""))
    }
  } else if (is.symbol(stud.call) != is.symbol(check.call)) {
    return(nlist(same=FALSE, same.call=FALSE, descr=""))
  }
  res = compare.call.args(stud.call, check.call, compare.vals=compare.vals, val.env=val.env,...)
  same = length(res$differ.arg) == 0 & length(res$missing.arg) == 0 & length(res$extra.arg) == 0
  c(list(same=same, same.call=TRUE), res)
}
## Detailed argument comparison of two (standardized) calls.
##
## Returns a list with the argument names that differ / are missing /
## are extra / agree, the raw argument lists, and `descr`: a vector of
## human-readable hint messages used by RTutor's hint functions
## (including scrambled versions of the sample solution for formulas,
## math expressions and common dplyr verbs).
## `from.pipe` skips standardization when the call stems from a pipe
## chain that was already matched.
compare.call.args = function(stud.call, check.call, compare.vals = !is.null(val.env), val.env=NULL, from.pipe=FALSE, ...) {
  org.check.call = check.call
  if (!from.pipe) {
    stud.call = match.call.object(stud.call, ...)
    check.call = match.call.object(check.call, ...)
  }
  restore.point("compare.call.args")
  sarg = args.of.call(stud.call, name.empty.arg=TRUE)
  carg = args.of.call(check.call, name.empty.arg=TRUE)
  missing.arg = setdiff(names(carg), names(sarg))
  extra.arg = setdiff(names(sarg), names(carg))
  overlap.arg = intersect(names(sarg), names(carg))
  if (length(overlap.arg)>0) {
    ## Shared argument names whose unevaluated expressions differ.
    differs = sapply(overlap.arg, function(na) !identical(sarg[[na]],carg[[na]]))
    differ.arg = overlap.arg[differs]
  } else {
    differ.arg = same.arg = overlap.arg
  }
  if (length(differ.arg)>0) {
    ## Same set of arguments in a different order gets its own message.
    if (setequal(sarg,carg)) {
      return(nlist(differ.arg,differ.detail=NULL,missing.arg,extra.arg,same.arg=NULL, overlap.arg, stud.arg=sarg, check.arg=carg,setequal=TRUE, descr=paste0("You have the right function arguments, but in the wrong order. The call in the sample solution is:\n", deparse1(org.check.call))))
    }
    if (compare.vals) {
      ## Evaluate the differing arguments in val.env; arguments whose
      ## values turn out equal are dropped from differ.arg again.
      differ.detail = try(lapply(differ.arg, function(var) {
        stud.val = eval(sarg[[var]],val.env)
        check.val = eval(carg[[var]],val.env)
        paste0(compare.values(stud.val, check.val), collapse=", ")
      }),silent = TRUE)
      # We may in particular have a try error in
      # dplyr or tidyr functions.
      if (is(differ.detail, "try-error")) {
        differ.detail = NULL
      } else {
        names(differ.detail) = differ.arg
        differs = sapply(differ.detail, function(x) nchar(x)>0)
        differ.detail = unlist(differ.detail[differs])
        differ.arg = names(differ.detail)
      }
    } else {
      differ.detail = replicate(length(differ.arg),c("code"),simplify=FALSE)
      names(differ.detail) = differ.arg
    }
  } else {
    differ.detail = NULL
  }
  same.arg = setdiff(overlap.arg, differ.arg)
  ## Positional (unnamed) arguments carry integer-string names.
  is.num.differ.arg = suppressWarnings(!is.na(as.integer(differ.arg)))
  # Make a description that is used by hint functions.
  s = NULL
  if (length(differ.arg)>0) {
    check.call.name = name.of.call(check.call)
    s = sapply(seq_along(differ.arg), function(i) {
      arg.name = differ.arg[[i]]
      arg.txt = if (is.num.differ.arg[[i]]) {
        paste0("Your ", to_ordinal(arg.name), " argument ")
      } else {
        paste0("Your argument ", arg.name," = ")
      }
      ## Tailor the hint to the type of the sample-solution argument.
      cde = describe.call(call.obj = carg[[arg.name]])
      if (cde$type == "formula") {
        ok = FALSE
        if (check.call.name %in% c("lm","ivreg","glm")) {
          res = try(compare.regression.formula(sarg[[arg.name]],carg[[arg.name]], from=check.call.name), silent=TRUE)
          if (!is("res","try-error"))
            return(res$descr)
        }
        scramble = scramble.text(deparse1(carg[[arg.name]]),"?",0.5, keep.char=c(" ","~","|"))
        paste0(arg.txt, sarg[differ.arg[i]], " differs from my solution. Here is a scrambled version of my solution:\n  ",scramble)
      } else if (cde$type == "math") {
        scramble = scramble.text(deparse1(carg[[arg.name]]),"?",0.4, keep.char=" ")
        paste0(arg.txt, sarg[differ.arg[i]], " differs from my solution. Here is a scrambled version of my solution:\n  ",scramble)
      } else if (cde$type == "fun") {
        paste0(arg.txt, sarg[differ.arg[i]], " differs from my solution, where I call the function ", cde$name,".")
      } else if (isTRUE(is.null(differ.detail) | differ.detail[i] %in% c("values","code"))) {
        paste0(arg.txt, sarg[differ.arg[i]], " differs from my solution.")
      } else {
        paste0(arg.txt, sarg[differ.arg[i]], " differs from my solution", if(!is.null(differ.detail[i])) " in its ", differ.detail[i])
      }
    })
  }
  if (length(extra.arg)>0) {
    s = c(s,paste0("In my solution I don't use the argument ", extra.arg))
  }
  if (length(missing.arg)>0) {
    s = c(s,paste0("You don't use the argument ", missing.arg))
  }
  # Special message for particular calls
  ## group_by / arrange: argument order matters, so the deparsed
  ## argument text is compared directly.
  call.name = name.of.call(check.call)
  if (isTRUE(call.name %in% c("group_by","arrange"))) {
    if (length(sarg) > length(carg)) {
      s = paste0("The solution has fewer arguments in the ", call.name, " call.")
    } else if (length(sarg) < length(carg)) {
      s = paste0("The solution has more arguments in the ", call.name, " call.")
    } else {
      txt.sargs = unlist(lapply(sarg, deparse1))
      txt.cargs = unlist(lapply(carg, deparse1))
      sd1 = setdiff(txt.sargs, txt.cargs)
      if (length(sd1)>0) {
        s = paste0("The solution does not use the argument(s) ", paste0(sd1, collapse=","), " in the ", call.name, " call but some other variable(s).")
      }
      sd2 = setdiff(txt.cargs, txt.sargs)
      if (length(sd2)==0 & !identical(txt.cargs, txt.sargs)) {
        s = paste0("You should write the arguments in your ", call.name, " call in the order: ", paste0(txt.cargs, collapse=", "))
      }
    }
  }
  # For common dplyr verbs a scrambled version is often
  # better suited since we don't check nested calls inside
  if (isTRUE(call.name %in% c("mutate","transmute","summarize","summarise", "filter","select"))) {
    call.str = deparse1(check.call)
    call.str = gsub("(.data = ","(", call.str, fixed=TRUE)
    ## Keep the verb name readable and scramble only its arguments.
    if (startsWith(call.str,call.name)) {
      scramble = paste0(call.name,scramble.text(substring(call.str, nchar(call.name)+1),"?",0.5, keep.char=c(",", " ","\n",">","%","(",")","=", "\t")))
    } else {
      scramble = scramble.text(call.str,"?",0.5, keep.char=c(",", " ","\n",">","%","(",")","=", "\t"))
    }
    s = paste0("Your arguments are not correct. Below is a scrambled version of the sample solution:\n\n",scramble)
  }
  nlist(differ.arg,differ.detail,missing.arg,extra.arg,same.arg, overlap.arg, stud.arg=sarg, check.arg=carg, descr=s)
}
## Compare a student value against the sample-solution value and return
## a character vector naming what differs ("class", "length", "dim",
## "groups", "names", "values"), or NULL when they agree.  The checks
## short-circuit: once a coarser property (class, length, dim, groups)
## differs, finer checks are skipped.  With `details = TRUE` some
## entries carry an explanatory suffix.
compare.values = function(var.stud,var.sol, class=TRUE, length=TRUE, dim=TRUE, names=TRUE, values=TRUE, groups=TRUE, tol=1e-12, details = TRUE, check.all.equal=TRUE) {
  wrong = NULL
  if (is.same(var.stud, var.sol))
    return(NULL)
  if (class != FALSE) {
    ## integer and numeric are deliberately treated as the same class.
    class.stud = class(var.stud)[1]
    class.sol = class(var.sol)[1]
    if (class.stud == "integer") class.stud = "numeric"
    if (class.sol == "integer") class.sol = "numeric"
    if (class.stud != class.sol) {
      if (details) {
        wrong = c(wrong,paste0("class (is ", class.stud, " but shall be ", class.sol,")"))
      } else {
        wrong = c(wrong,"class")
      }
    }
  }
  if (!is.null(wrong))
    return(wrong)
  if (length != FALSE) {
    if (!length(var.stud)==length(var.sol)) {
      wrong = c(wrong,"length")
    }
  }
  if (!is.null(wrong))
    return(wrong)
  if (dim != FALSE) {
    if (!identical(dim(var.stud),dim(var.sol))) {
      wrong = c(wrong,"dim")
    }
  }
  if (!is.null(wrong))
    return(wrong)
  if (groups != FALSE) {
    ## For dplyr tbls both the set and the order of grouping variables
    ## are checked.
    if (is(var.sol,"tbl")) {
      gr.x = dplyr::groups(var.sol)
      gr.y = dplyr::groups(var.stud)
      if (!setequal(gr.x,gr.y)) {
        if (details) {
          wrong = c(wrong,"groups are wrong")
        } else {
          wrong = c(wrong,"groups")
        }
      } else if (!identical(gr.x,gr.y)) {
        if (details) {
          wrong = c(wrong,paste0("group order must be ",paste0(gr.x,collapse=", ")))
        } else {
          wrong = c(wrong,"groups_order")
        }
      }
    }
  }
  if (!is.null(wrong))
    return(wrong)
  if (names != FALSE) {
    if (!identical(names(var.stud),names(var.sol))) {
      wrong = c(wrong,"names")
    }
  }
  if (values != FALSE) {
    if (is.list(var.sol) | is.environment(var.sol)) {
      if (!identical(var.sol, var.stud, ignore.environment=TRUE)) {
        wrong = c(wrong,"values")
      }
    } else if (is.numeric(var.stud) & is.numeric(var.sol)) {
      ## Numeric values may differ by at most tol and must share the
      ## same NA pattern.
      if (max(abs(var.stud-var.sol), na.rm=TRUE)>tol ) {
        wrong = c(wrong,"values")
      } else if (!identical(is.na(var.stud),is.na(var.sol))) {
        wrong = c(wrong,"values")
      }
    } else {
      wrong = c(wrong,"values")
    }
  }
  wrong
}
## Interactive examples for match.call.object().
## NOTE(review): the trailing f() is undefined here and looks like a
## leftover from interactive experimentation.
examples.match.call.object = function() {
  match.call.object(quote(t.test(extra ~ group, data=sleep)),s3.method=stats:::t.test.formula)
  match.call.object(quote(t.test(extra ~ group, data=sleep)))
  match.call.object(quote(stats:::t.test.formula(extra ~ group, data=sleep)))
  match.call.object(quote(t.test(formula=extra ~ group, data=sleep)))
  f()
}
## Standardize a quoted call with match.call() so that all supplied
## arguments are named and in the formals' order.  `s3.method` may name
## the specific S3 method to match against (e.g.
## stats:::t.test.formula).  When matching fails -- e.g. the function
## cannot be found -- the original call is returned unchanged.
match.call.object = function(call, envir=parent.frame(), s3.method=NULL) {
  restore.point("match.call.object")
  #browser()
  ## Length-1 objects (bare symbols, literals) have no arguments to
  ## match.
  if (length(call)==1)
    return(call)
  ret = call
  ## Evaluate match.call() in a child environment that carries the call.
  env = new.env(parent=envir)
  env$call = call
  if (!is.null(s3.method)) {
    s3.method = substitute(s3.method)
    #restore.point("match.call.object2")
    match.expr = substitute(match.call(fun, call=call), list(fun=s3.method))
  } else {
    match.expr = substitute(match.call(fun, call=call), list(fun=call[[1]]))
  }
  ## A failed eval leaves ret as the unmodified call.
  try(ret <- eval(match.expr, envir=env), silent=TRUE)
  ret
}
## Head symbol of a call as a string; a bare symbol yields its own name,
## with the literal "NA" substituted when the symbol's character
## representation is NA.
name.of.call = function(call) {
  if (!is.symbol(call)) {
    return(as.character(call[[1]]))
  }
  nm = as.character(call)
  if (is.na(nm)) {
    return("NA")
  }
  nm
}
## Collect the arguments of a call into one flat list, recursively
## expanding any argument whose head symbol is listed in `expand.names`
## (used to flatten pipe chains like a %>% b %>% c).
recursive.args.of.call = function(call, expand.names=NULL) {
  call.args = args.of.call(call)
  head.names = sapply(call.args, name.of.call)
  pieces = lapply(seq_along(call.args), function(i) {
    if (head.names[i] %in% expand.names) {
      ## Nested occurrence of an expandable operator: flatten it too.
      recursive.args.of.call(call.args[[i]], expand.names)
    } else {
      call.args[i]
    }
  })
  do.call("c", pieces)
}
## Interactive examples for args.of.call() / match.call.object().
examples.args.of.call = function() {
  args.of.call(quote(t.test(extra ~ group, data=sleep)))
  match.call.object(quote(t.test(extra ~ group, data=sleep)))
}
## Arguments of a call as a list; NULL for a bare symbol.  With
## name.empty.arg = TRUE, unnamed (positional) arguments get synthetic
## names built from `prefix` plus their position, so every argument can
## be addressed by name.
args.of.call = function(call, name.empty.arg = FALSE, prefix="") {
  #restore.point("args.of.call")
  if (is.symbol(call)) {
    return(NULL)
  }
  arg.list = as.list(call)[-1]
  if (name.empty.arg && length(arg.list) > 0) {
    nm = names(arg.list)
    ## No names at all means every argument is positional.
    empty.pos = if (is.null(nm)) seq_along(arg.list) else which(nm == "")
    names(arg.list)[empty.pos] = paste0(prefix, empty.pos)
  }
  arg.list
}
## Interactive examples for find.matching.calls(); note that only the
## second assignment to call.str takes effect.
examples.code.has.call = function() {
  code.str = "
plot(5,y=3)
x*2
x$a
x[['a']]
"
  call.str = "plot(x=5,y=3,main='Hi')"
  call.str = 'x[["a"]]'
  find.matching.calls(code.str, call.str)
}
## Find all top-level expressions in `code.str` whose head symbol
## matches the name of the call given by `call.str` (or by `call`
## directly, which defaults to the first parsed expression of
## `call.str`).  Returns a list of matching call objects, or NULL when
## nothing matches.
##
## Bug fixes: a stray `call =` line chained into the following
## assignment and overwrote the `call` argument with the vector of code
## names, so the wrong name was searched for; the unreachable scratch
## code after the return has been removed.
find.matching.calls = function(code.str, call.str, call = parse(text=call.str, srcfile=NULL)[[1]]) {
  code.li = as.list(base::parse(text=code.str, srcfile=NULL))
  code.names = sapply(code.li, name.of.call)
  call.name = name.of.call(call)
  ind = which(code.names %in% call.name)
  if (length(ind)==0) {
    return(NULL)
  }
  code.li[ind]
}
|
702d664511509c26d7e42e0ccf1f37fdb553bee3
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#130.A#48.c#.w#5.s#41.asp/ctrl.e#1.a#3.E#130.A#48.c#.w#5.s#41.asp.R
|
59322560ce55ffa0ea261d86a122701949fabbe6
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 91
|
r
|
ctrl.e#1.a#3.E#130.A#48.c#.w#5.s#41.asp.R
|
41fe05f586958cb2c255a85f86f35705 ctrl.e#1.a#3.E#130.A#48.c#.w#5.s#41.asp.qdimacs 5377 15598
|
3bcb7d7ea8235fce8dc366e2099268879b962c7b
|
c5d3abd3a3913b4684328cfd0ac88c2f399efad2
|
/content/scholarGenerated/Zeppel2015.r
|
ab258affa3d280217a2b822b3af8d99f0de79779
|
[] |
no_license
|
douglask3/webpageGenerator
|
09a3ee3c46019c4c427d70b78c54ab66e1bf1eb3
|
c30b66a866513ad71f101ebf822cf3a34cc81279
|
refs/heads/master
| 2021-01-10T12:20:06.738656
| 2017-02-07T21:36:45
| 2017-02-07T21:36:45
| 43,382,723
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,074
|
r
|
Zeppel2015.r
|
# Example
## Pelican page header: a metadata block followed by the paper's
## abstract.  This exact text is written into the generated markdown
## page, so it must not be reformatted.
header = 'Title: Drought and resprouting plants
Date: 2015-01-01 00:00
Category: Publications
status: published
tags: Publications, Traits for Resistance and Recovery to Disturbance, LPX Dynamic Global Vegetation Model
Many species have the ability to resprout vegetatively after a substantial loss
of biomass induced by environmental stress, including drought. Many of the
regions characterised by ecosystems where resprouting is common are projected
to experience more frequent and intense drought during the 21st Century.
However, in assessments of ecosystem response to drought disturbance there has
been scant consideration of the resilience and post-drought recovery of
resprouting species. Systematic differences in hydraulic and allocation traits
suggest that resprouting species are more resilient to drought-stress than
nonresprouting species. Evidence suggests that ecosystems dominated by
resprouters recover from disturbance more quickly than ecosystems dominated by
nonresprouters. The ability of resprouters to avoid mortality and withstand
drought, coupled with their ability to recover rapidly, suggests that the
impact of increased drought stress in ecosystems dominated by these species may
be small. The strategy of resprouting needs to be modelled explicitly to improve
estimates of future climate-change impacts on the carbon cycle, but this will
require several important knowledge gaps to be filled before resprouting can be
properly implemented.
'
## HTML fragments controlling how the citation is wrapped on the page;
## each *Format pair is (opening markup, closing markup).
cnameExtra = '<hr> '
cnameFormat = c(' <h3 class = "publication">', '</h3> ')
pubSep = '<hr>'
titleFormat = c('<h3 class = "publication">', '</h3>')
citeFormat = c(' ', '')
yearFormat = c(' ', '')
textFormat = c('', '')
footer = ''
outputFile = 'content/publications/Zeppel2015.md'
## Build the publication page.
## NOTE(review): MakePublicationDocument() is defined elsewhere in this
## project; presumably the first argument maps an author name to a
## Google Scholar profile id and the final argument is the Scholar
## publication id -- confirm against its definition.
MakePublicationDocument(c('Douglas Kelley' = 'AJKyfI4AAAAJ'), NULL,
						header, cnameExtra, cnameFormat,
						pubSep, titleFormat, citeFormat, yearFormat, textFormat,
						footer, outputFile, 'u-x6o8ySG0sC')
|
c57264f8b720aba6d35d5c1aea7c2a26c94adcd1
|
013c1c5c3220764b25a7fc9d434c502a5c190b93
|
/IE7275 working/17jan.R
|
ca8c3f366b8eab707f3a2c6df469d3fb7cc5bb07
|
[] |
no_license
|
mashwinmuthiah/Making_sense_of_data
|
a0e06eadccd4a63feeb376b793358366e14e3ee3
|
31e9613673ccd8b014f776b0ced5219930217ae5
|
refs/heads/master
| 2020-04-20T10:08:11.027566
| 2019-02-26T15:59:12
| 2019-02-26T15:59:12
| 168,782,563
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,043
|
r
|
17jan.R
|
## Class notes (2019-01-17): exploring the usedcars and housing.df data
## sets (both are expected to be loaded in the session already).
## Fixed in review: matrix() was misspelled "martix"; gmodels was
## installed but never loaded before CrossTable(); the final barplot()
## call had a mismatched parenthesis and referenced "meanEDV" while the
## column had been (mis)named "MenaMEDV" -- both now use "MeanMEDV".
m <- matrix(c(1, 2, 3, 4, 5, 6), ncol = 2)
IQR(usedcars$price)
quantile(usedcars$price)
quantile(usedcars$price, probs = c(0.01, 0.99))
quantile(usedcars$price, seq(from = 0, to = 1, by = 0.2))
boxplot(usedcars$price, main = "box plot of used car price", ylab = "price($)")
hist(usedcars$price, main = "histogram of used car price", xlab = "price($)")
var(usedcars$price)
sd(usedcars$price)
table(usedcars$year)
table(usedcars$color)
prop.table(table(usedcars$model)) * 100
plot(x = usedcars$mileage, y = usedcars$price,
     main = "Scattterplot of price vs Milege",
     xlab = "Miledge(ml.)", ylab = "Price($)")
## Flag cars painted in a "conservative" color.
usedcars$conservative <- usedcars$color %in% c("black", "gray", "silver", "white")
table(usedcars$conservative)
install.packages("gmodels")
library(gmodels)  # CrossTable() lives in gmodels; it was never attached before
CrossTable(x = usedcars$model, y = usedcars$conservative)
library(ggplot2)
ggplot(housing.df) + geom_point(aes(x = LSTAT, y = MEDV), color = "navy", alpha = 0.7)
## Mean median home value (MEDV) by Charles-river indicator (CHAS).
data.for.plot <- aggregate(housing.df$MEDV, by = list(housing.df$CHAS), FUN = mean)
names(data.for.plot) <- c("CHAS", "MeanMEDV")
barplot(data.for.plot$MeanMEDV, names.arg = data.for.plot$CHAS)
|
e9226dac1698f1a4cddf323fc71a568dd8c0353b
|
0e7a066f410066e8533031a8fd23b0b6ad7d2258
|
/R/Ex_Files_Data_Wrangling_R/Exercise Files/Ch05/05_03/separate_complete.r
|
3e6cc1bfb5ad64fd171190a2faef2a4e2fa03cc6
|
[
"MIT"
] |
permissive
|
vvpn9/Handy-Tools
|
6560c02ef99cc6ef57168e36f744694bc79a69ad
|
5b8e59e80832985c352b7f6e578462e61fcbc300
|
refs/heads/main
| 2023-06-13T23:39:13.146882
| 2021-07-13T06:00:07
| 2021-07-13T06:00:07
| 341,105,420
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 956
|
r
|
separate_complete.r
|
# Data Wrangling in R
# 5.3 Breaking Apart Columns With Separate
#
# Load the tidyverse and read in the Medicare payments dataset
library(tidyverse)
## Column names and readr column types for the tab-separated file
## (c = character, i = integer, n = numeric); skip = 1 drops the
## original header row.
names <- c("DRG", "ProviderID", "Name", "Address", "City", "State", "ZIP", "Region", "Discharges", "AverageCharges", "AverageTotalPayments",
           "AverageMedicarePayments")
types = 'ccccccccinnn'
inpatient <- read_tsv('http://594442.youcanlearnit.net/inpatient.tsv', col_names = names, skip=1, col_types = types)
# Take a look at the diagnosis-related group unique values
unique(inpatient$DRG)
# Let's try separating this on the hyphen
inpatient_separate <- separate(inpatient,DRG,c('DRGcode','DRGdescription'),'-')
# What's going on with those warning rows? Let's look at row 45894
inpatient$DRG[45894]
# Let's separate with character position instead
## Splitting at a fixed position avoids breaking on hyphens that occur
## inside the description text (presumably the code occupies the first
## characters -- confirm against the data).
inpatient_separate <- separate(inpatient,DRG,c('DRGcode','DRGdescription'),4)
# And take a look at the data now
glimpse(inpatient_separate)
|
936e302542a6e7914597e0117c860c17e5264bfe
|
c06da00ce0c89f3a6323e895df9f0626111d215d
|
/man/to_size.Rd
|
062a652d6b6da2ba607d040a39e3d256537de3f2
|
[
"CC-BY-4.0"
] |
permissive
|
iPsych/webmorphR
|
41fc542b846a5592bae5374c6037715f9d4cb3ed
|
218f5be8b7b2a868aba023113d580421981a752c
|
refs/heads/master
| 2023-05-05T16:59:32.310237
| 2021-05-23T21:52:15
| 2021-05-23T21:52:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,230
|
rd
|
to_size.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/to_size.R
\name{to_size}
\alias{to_size}
\title{Resize and crop/pad images to a specified size}
\usage{
to_size(
stimuli,
width,
height = NULL,
fill = wm_opts("fill"),
patch = FALSE,
crop = FALSE,
keep_rels = FALSE
)
}
\arguments{
\item{stimuli}{of class stimlist}
\item{width}{the target width (or a vector of width and height)}
\item{height}{the target height (or null if width is dimensions)}
\item{fill}{background color if cropping goes outside the original image}
\item{patch}{whether to use the patch function to set the background color}
\item{crop}{whether to crop or pad images to make them the specified size}
\item{keep_rels}{whether to keep the size relationships between images in the set, or make all the maximum size}
}
\value{
stimlist with cropped tems and/or images
}
\description{
Resize and crop/pad images to a specified size
}
\examples{
# make images with different aspect ratios and sizes
stimuli <- demo_stim() \%>\% crop(c(0.8, 1.0)) \%>\% resize(c(1.0, 0.5))
to_size(stimuli, 300, 400, fill = "dodgerblue") \%>\% plot()
to_size(stimuli, 300, 400, fill = "dodgerblue", keep_rels = TRUE) \%>\% plot()
}
|
c7368c547021f46b82e8c4f6c9ddfce56c89c384
|
ef4eb23543224c14f4cae67190d1f82bd881a4a4
|
/dfg_for_kilimanjaro/stable_isotope_analysis/iso_ta200.R
|
3c6933f81e780b63b67884563343102206b27eae
|
[] |
no_license
|
environmentalinformatics-marburg/magic
|
33ed410de55a1ba6ff943090207b99b1a852a3ef
|
b45cf66f0f9aa94c7f11e84d2c559040be0a1cfb
|
refs/heads/master
| 2022-05-27T06:40:23.443801
| 2022-05-05T12:55:28
| 2022-05-05T12:55:28
| 9,035,494
| 6
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,704
|
r
|
iso_ta200.R
|
# Stable-isotope analysis vs. monthly air temperature (TA200) for the
# Kilimanjaro plots: aggregates isotope samples to monthly means, merges
# them with temperature, and arranges three ggplot panels into one figure.
library(ggplot2)
library(lubridate)
library(reshape)
library(gridExtra)
library(RColorBrewer)
# Fixed: the original also called require(RcolorBrewer) -- a misspelled,
# case-sensitive package name; RColorBrewer is already attached above.
# Fixed: ColorBrewer palette names are case-sensitive; "brBG" is invalid
# and made brewer.pal() error. The diverging palette is "BrBG".
col <- brewer.pal(8, "BrBG")
# set working directory
wd <- setwd("C:/Users/IOtte/Desktop/training/")
### load data
iso <- read.csv2("iso_calc_copy.csv", header = T)
ta200 <- read.csv("C:/Users/IOtte/Desktop/plot_air_temperatur/iso_ta200_monthly.csv", header = TRUE)
## Sort temperature data
# Wide -> long, then rebuild a "YYYY-MM" date key from fixed character
# positions of the original column labels (positions 26-29 = year,
# 31-32 = month -- tied to the input file's header format).
ta200 <- melt(ta200)
colnames(ta200) <- c("plotID", "date", "ta200")
ta200$year <- substr(ta200$date, 26,29)
ta200$mon <- substr(ta200$date, 31,32)
ta200 <- ta200[, -2]
ta200$date <- paste(ta200$year, ta200$mon, sep = "-")
ta200 <- ta200[, -3]
ta200 <- ta200[, -3]
## Aggregate iso plot data to monthly mean values
# build monthly mean values of d18-16, dD_H & dexcess
iso.mns <- aggregate(cbind(iso$d18_16, iso$dD_H, iso$d.excess),
by = list(substr(iso$date_sample, 1, 7),
iso[, 4], iso[, 5], iso[, 6]),
FUN = "mean", na.rm = TRUE)
colnames(iso.mns) <- c("date", "plotID", "type", "elevation","d18_16", "dD_H", "dexcess")
# build monthly sums of amount_mm
amnt.smm <- aggregate(iso$amount_mm,
by = list(substr(iso$date_sample, 1, 7),
iso[, 4], iso[, 5], iso[, 6]),
FUN = "sum", na.rm = TRUE)
colnames(amnt.smm) <- c("date", "plotID", "type", "elevation", "amount")
# merge monthly mean of d18-16 & dD_H and monthly sums of amount_mm
iso.mnth <- merge(iso.mns, amnt.smm)
## Merge iso.mns and ta200 to iso.ta200
iso.ta200 <- merge(iso.mnth, ta200)
## subsetting for better facility of instruction
#type <- lapply(types, function(i){
# sub <- subset(iso, iso$type == i)
#})
### build plot for presentation
### each plot seperately
# Colors/legends: 8 entries for the rain plots, 7 for the fog plots.
col.id.rn <- c("#3288bd", "#66c2a5", "#abdda4", "#e6f598", "#fee08b", "#fdae61",
"#f46d43", "#d53e4f")
leg.rn <- c("fer0", "fpd0", "fpo0", "foc0", "foc6", "flm1", "hom4", "sav5")
col.id.fg <- c("#3288bd", "#66c2a5", "#abdda4", "#e6f598", "#fee08b", "#fdae61",
"#f46d43")
leg.fg <- c("fer0", "fpd0", "fpo0", "foc0", "foc6", "flm1", "nkw1")
## d18O
# Fixed: this object was named iso.dD, but it plots d18_16 (the section is
# titled d18O) and the arrangeGrob() call below expects `iso.d18` -- the
# original errored with "object 'iso.d18' not found". The y label carried
# the same copy-paste slip (delta-2-D) and is corrected to delta-18-O.
iso.d18 <- ggplot(subset(iso.ta200, iso.ta200[, 3] == "rain"),
aes(x = date, y = d18_16, group = plotID,
colour = plotID)) +
geom_line() +
scale_color_manual(values = col.id.rn, limits = leg.rn, name = "Plot ID SP1") +
ylab( expression(delta^{18}*O ~ "\u2030")) +
xlab("") +
scale_x_discrete(labels = c("11", "12", "01", "02", "03", "04", "05", "06",
"07", "08", "09", "10", "11", "12", "01", "02",
"03", "04", "05", "06", "07", "08", "09", "10",
"11")) +
theme(
panel.grid.major = element_line(color = "lightgray", size = 0.01),
panel.background = element_rect(fill = NA),
panel.border = element_rect(color = "gray", fill = NA))
## dexcess
iso.dexcess <- ggplot(subset(iso.ta200, iso.ta200[, 3] == "rain"),
aes(x = date, y = dexcess, group = plotID,
colour = plotID)) +
geom_line() +
scale_color_manual(values = col.id.rn, limits = leg.rn, name = "Plot ID SP1") +
ylab( expression(dexcess ~ "\u2030")) +
xlab("") +
scale_y_reverse() +
scale_x_discrete(labels = c("11", "12", "01", "02", "03", "04", "05", "06",
"07", "08", "09", "10", "11", "12", "01", "02",
"03", "04", "05", "06", "07", "08", "09", "10",
"11")) +
theme(
panel.grid.major = element_line(color = "lightgray", size = 0.01),
panel.background = element_rect(fill = NA),
panel.border = element_rect(color = "gray", fill = NA))
## temperature
iso.ta.200 <- ggplot(subset(iso.ta200, iso.ta200[, 3] == "rain"),
aes(x = date, y = ta200, group = plotID,
colour = plotID)) +
geom_line() +
scale_color_manual(values = col.id.rn, limits = leg.rn, name = "Plot ID SP1") +
# Fixed: "??C" was a mojibake degree sign; use an explicit unicode escape.
ylab("ta200 [\u00B0C]") +
xlab("") +
scale_x_discrete(labels = c("11", "12", "01", "02", "03", "04", "05", "06",
"07", "08", "09", "10", "11", "12", "01", "02",
"03", "04", "05", "06", "07", "08", "09", "10",
"11")) +
theme(
panel.grid.major = element_line(color = "lightgray", size = 0.01),
panel.background = element_rect(fill = NA),
panel.border = element_rect(color = "gray", fill = NA))
## amount
amount <- ggplot(subset(iso.ta200, iso.ta200[, 3] == "fog"),
aes(x = date, y = amount, group = plotID,
colour = plotID)) +
geom_line() +
scale_color_manual(values = col.id.fg, limits = leg.fg, name = "Plot ID SP1") +
ylab("fog [mm]") +
xlab("") +
scale_x_discrete(labels = c("11", "12", "01", "02", "03", "04", "05", "06",
"07", "08", "09", "10", "11", "12", "01", "02",
"03", "04", "05", "06", "07", "08", "09", "10",
"11")) +
theme(
panel.grid.major = element_line(color = "lightgray", size = 0.01),
panel.background = element_rect(fill = NA),
panel.border = element_rect(color = "gray", fill = NA))
## wind direction
## afterwards merging
# Stack the d18O, d-excess and fog-amount panels vertically and write a PNG.
d18.dex.amn.fg <- arrangeGrob(iso.d18, iso.dexcess, amount, ncol = 1, nrow = 3)
# print "iso.mns.mnth.amnt.18O"
png("out/iso_d18_dex_amn_fg.png", width = 20, height = 30, units = "cm",
res = 300, pointsize = 15)
print(d18.dex.amn.fg)
dev.off()
|
a89ff31e10142e6c311c7ebab889c65570f65b9a
|
6fb453f3b45cad66751ee817f9b3463c89196972
|
/R/concordance_heatmap.R
|
580360e99e8804adfa464b3bf98659b7ed857d07
|
[] |
no_license
|
SamirRachidZaim/referenceNof1
|
838c74bdad9f99cc4144ed43368dc2b187403e26
|
65f63660a96022c202addef59888e2026faae24f
|
refs/heads/master
| 2022-12-31T22:02:44.858043
| 2020-10-20T15:16:46
| 2020-10-20T15:16:46
| 296,476,075
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,521
|
r
|
concordance_heatmap.R
|
#' Constructing robust reference standards for Nof1 studies for precision medicine
#'
#' \code{referenceNof1} is the R implementation of the reference biomarker algorithm by (Zaim 2020)
#'
#' @usage concordance_heatmap(jaccard_matrix)
#'
#' @param jaccard_matrix the concordance matrix used to create the heatmap
#'
#' @export
#'
#'
concordance_heatmap <- function(jaccard_matrix){
  # Reshape the square concordance matrix into long form (Var1, Var2, value).
  long_df <- data.frame(data.table::melt(jaccard_matrix))
  # Bin the raw Jaccard values into four fixed agreement bands.
  long_df$JaccardIndex <- arules::discretize(long_df$value, method = 'fixed',
                                             breaks = c(0, 0.5, 0.75, .9, 1))
  JaccardIndex <- long_df$JaccardIndex
  # One raster tile per (Var1, Var2) pair, coloured by agreement band.
  plt <- ggplot2::ggplot(long_df, ggplot2::aes(long_df[,'Var1'], long_df[,'Var2'])) +
    ggplot2::geom_raster(ggplot2::aes(fill = JaccardIndex))
  plt <- plt + ggplot2::theme_dark() +
    ggplot2::theme(
      axis.text.y = ggplot2::element_text(angle = 0, hjust = 1, size = ggplot2::rel(2)),
      title = ggplot2::element_text(angle = 0, hjust = 1, size = ggplot2::rel(1.5)))
  plt <- plt + ggplot2::ggtitle("Agreement After Expression Filter")
  # theme_light() deliberately replaces the dark theme above (kept from the
  # original call sequence, where later themes win).
  plt <- plt + ggplot2::theme_light() +
    ggplot2::theme(
      axis.text.x = ggplot2::element_text(angle = 90, hjust = 1, size = ggplot2::rel(2)),
      axis.text.y = ggplot2::element_text(angle = 0, hjust = 1, size = ggplot2::rel(2)),
      title = ggplot2::element_text(angle = 0, hjust = .5, size = ggplot2::rel(1.5)))
  plt <- plt + ggplot2::xlab('') +
    ggplot2::ylab('') +
    ggplot2::scale_fill_manual(values = c("grey", "yellow", "blue", "green"))
  print(plt)
}
|
0d2f6d323eab9146551a4718741f219023db4987
|
6594403b535237e0bc2137b3e929427df3a4b51f
|
/2011/RJ-2011-009.R
|
6b59b7523eee4537ff0030f54c11efe5e023ef45
|
[] |
no_license
|
MrDomani/WB2020
|
b42b7992493721fcfccef83ab29703b429e1a9b3
|
2612a9a3b5bfb0a09033aa56e3a008012b8df310
|
refs/heads/master
| 2022-07-13T03:32:29.863322
| 2020-05-18T17:25:43
| 2020-05-18T17:25:43
| 264,994,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,376
|
r
|
RJ-2011-009.R
|
# Code excerpts from an R Journal article on the ensembleBMA package
# (probabilistic weather forecasting). Requires the ensembleBMA package
# and its bundled example data.
data(srft)
members <- c("CMCG", "ETA", "GASP", "GFS","JMA", "NGPS", "TCWB", "UKMO")
srftData <-ensembleData(forecasts = srft[,members],dates = srft$date,observations = srft$obs,latitude = srft$lat,longitude = srft$lon,forecastHour = 48)
srftFit <-ensembleBMA(srftData, dates = "2004013100",model = "normal", trainingDays = 25)
plot(srftFit, srftData, dates = "2004013100")
# NOTE(review): srftGridData / prcpGridData are not defined in this excerpt;
# presumably loaded elsewhere in the article -- confirm before running.
srftGridForc <- quantileForecast(srftFit,
srftGridData, quantiles = c( .1, .5, .9))
probFreeze <- cdf(srftFit, srftGridData,
date = "2004013100", value = 273.15)
data(prcpFit)
prcpGridForc <- quantileForecast(
prcpFit, prcpGridData, date = "20030115",
q = c(0.5, 0.9))
probPrecip <- 1 - cdf(prcpFit, prcpGridData,
date = "20030115", values = c(0, 25))
srftForc <- quantileForecast(srftFit,
srftData, quantiles = c( .1, .5, .9))
CRPS(srftFit, srftData)
# ensemble BMA
# 1.945544 1.490496
MAE(srftFit, srftData)
# ensemble BMA
# 2.164045 2.042603
use <- ensembleValidDates(srftData) >=
"2004013000"
# Fixed: srftFitALL was defined *after* its first use in the extracted text;
# it must exist before pit() is called.
srftFitALL <- ensembleBMA(srftData,
trainingDays = 25)
# NOTE(review): `use` is computed but never applied below; the article
# presumably restricts the PIT to dates >= "2004013000" (e.g.
# pit(srftFitALL, srftData[use, ])) -- confirm against the original paper.
srftPIT <- pit(srftFitALL, srftData)
# Fixed: k was used but never defined; a k-member ensemble gives k+1 PIT bins.
k <- ensembleSize(srftData)
# Fixed: the hist() arguments were scattered across non-adjacent lines by the
# PDF-to-text extraction; reassembled into one call.
hist(srftPIT, breaks = (0:(k+1))/(k+1),
xlab = "", xaxt = "n", prob = TRUE,
main = "Probability Integral Transform")
axis(1, at = seq(0, to = 1, by = .2))
abline(h = 1/(ensembleSize(srftData)+1), lty = 2)
# NOTE(review): trailing fragments in the extracted source
# ("0.00 0.04 0.08 0.12 / Probability Density</div>" etc.) were figure axis
# labels captured by the extraction, not code; removed.
|
f88ed3fce5d6f7bba677237ea9564c38af0a3976
|
34517b52cf00b7203a96516b578fd6464501a74c
|
/man/impute_missing.Rd
|
7409bb53a2266244a7c7554fb548c130ba6d67b2
|
[
"MIT"
] |
permissive
|
yllz/hamr
|
a50aaf0a6f1e766295ead901980119044e876b11
|
b3815e194ef67ffaa76a1349cc18c9e662fc9c96
|
refs/heads/master
| 2020-03-16T00:13:27.623169
| 2018-03-18T15:48:30
| 2018-03-18T15:48:30
| 124,505,982
| 0
| 0
|
MIT
| 2018-03-09T07:42:46
| 2018-03-09T07:42:46
| null |
UTF-8
|
R
| false
| true
| 1,235
|
rd
|
impute_missing.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hamr.R
\name{impute_missing}
\alias{impute_missing}
\title{Impute missing values in a specified column of a data frame or a numerical matrix with three simple methods}
\usage{
impute_missing(dfm, col, method, missing_val_char)
}
\arguments{
\item{dfm}{A data frame or a numerical matrix}
\item{col}{A string of column name, if the input data is a matrix, this should be a string like "Vn" where n is an integer representing the index of column}
\item{method}{A string of a method name, should be one of "CC", "MIP" and "DIP"}
\item{missing_val_char}{A string of a missing value format, should be one of NA, NaN, "" and "?"}
}
\value{
A data frame having no missing values in the specified column
}
\description{
Impute missing values in a specified column of a data frame or a numerical matrix with three simple methods
}
\examples{
impute_missing(data.frame(ex = c(1, 2, 3), bf = c(6, 8, "")), "bf", "DIP", "")
impute_missing(matrix(c(1,2,3, 6,8,NA), nrow = 3, ncol = 2, byrow = FALSE), "V2", "DIP", NA)
}
\seealso{
\code{\link{na.omit}} for the complete case
Other aggregate functions: \code{\link{compare_model}}
}
\author{
Linsey Yao, March 2018
}
|
6b1c520265887c61d8169e258914d9590f6de652
|
90196f13726d5d9ad5b5c26d141836ef53d4745d
|
/R/r2.R
|
9ae6789ca911b55f5ce5cd04d8fd2be4d2614762
|
[] |
no_license
|
CharlesJB/manhattan
|
571d05613b297ea01f32d31fb421eb50fc98f405
|
ff4ccdb42fb3826c1b167db5167a256283407209
|
refs/heads/master
| 2021-01-01T16:56:17.779966
| 2015-06-02T19:04:22
| 2015-06-02T19:04:22
| 31,437,329
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,769
|
r
|
r2.R
|
#' Add r2 values to GRanges object
#'
#' This function will:
#' 1) Find the best SNPs in \code{gr_snps} using the \code{pvalue} parameter.
#' 2) Will extract relevant lines from \code{geno_ld}.
#' 3) Create \code{r2} vector.
#'
#' @param geno_ld The file *.geno.ld produced by `vcftools` or a
#' \code{data.frame} from the file loaded with read.table.
#' @param gr_snps A \code{GRanges} object.
#' @param pvalue The name of the column to use as pvalue. Default: NULL (the
#' first numerical metadata column will be used)
#' @param status Name of the column for the SNP status (Genotype or Imputed).
#' If value is \code{NULL}, all the SNPs will be used to find the best SNP,
#' otherwise only SNPs with \code{Genotyped} value will be used.
#' Default: NULL.
#'
#' @return A \code{numeric} vector with one entry per SNP in \code{gr_snps},
#' corresponding to the pairwise LD of the best SNP versus all the other
#' SNPs. The best SNP gets 1 (perfect LD with itself); SNPs with no pairwise
#' entry in \code{geno_ld} get \code{NA}.
#'
#' @export
# TODO: Example
add_r2_from_vcftools <- function(geno_ld, gr_snps, pvalue = NULL,
status = NULL) {
## 0. Name the metadata column correctly
if (is.null(pvalue)) {
# Default to the first numeric metadata column.
i <- min(which(vapply(GenomicRanges::mcols(gr_snps), is.numeric, logical(1))))
pvalue <- colnames(GenomicRanges::mcols(gr_snps))[i]
}
pvalue <- GenomicRanges::mcols(gr_snps)[[pvalue]]
if (!is.null(status)) {
status <- GenomicRanges::mcols(gr_snps)[[status]]
}
## 1. Find the best SNP (smallest p-value; genotyped-only when status given)
gr_sorted <- gr_snps
gr_sorted <- gr_sorted[order(pvalue)]
if (!is.null(status)) {
gr_sorted <- gr_snps[status == "Genotyped"]
}
# TODO: Should use findOverlaps instead of expecting a name metadata column
best_snp <- as.character(gr_sorted[1]$name)
i <- which(gr_snps$name == best_snp)
## 2. Load the R2 values
if (is.character(geno_ld)) {
geno_ld <- read.table(geno_ld, header = TRUE, stringsAsFactors=FALSE)
}
## 3. Subset R2 values
# The current SNP can be in POS1 or POS2 column
pos <- GenomicRanges::start(gr_snps)[i]
subset_R2 <- geno_ld[geno_ld$POS1 == pos | geno_ld$POS2 == pos,]
# To make it easier to convert to GRanges, we will make sure all POS1
# correspond to the best SNP position and all POS2 values correspond to the
# 2nd SNP
j <- subset_R2[["POS1"]] != pos
tmp <- subset_R2[["POS1"]][j]
subset_R2[["POS1"]][j] <- subset_R2[["POS2"]][j]
subset_R2[["POS2"]][j] <- tmp
# Convert to GRanges
subset_R2 <- GenomicRanges::GRanges(unique(GenomicRanges::seqnames(gr_snps)),
IRanges::IRanges(subset_R2[["POS2"]], width = 1), R2 = subset_R2[["R.2"]])
## 4. Add r2 value to gr_snps
overlaps <- GenomicRanges::findOverlaps(gr_snps, subset_R2)
j <- GenomicRanges::queryHits(overlaps)
k <- GenomicRanges::subjectHits(overlaps)
# Fixed: the original seeded `r2 <- 1` (a length-1 vector), so the result's
# length depended on the largest matched index, with NA holes and a stray
# leading 1. Allocate one slot per input SNP instead: NA = no LD data,
# and the best SNP itself is in perfect LD (1) with itself.
r2 <- rep(NA_real_, length(gr_snps))
r2[i] <- 1
r2[j] <- subset_R2$R2[k]
## 5. Return result
r2
}
|
e660b6e1355c10a71b56fcf6f108d997295efc44
|
786d5aea1eddb46aff1cbe34e4e3151f2d184acc
|
/tests/testthat/test-supreme-integration.R
|
da7ef444b0ea5d914380f0649f09d1d11e126ebf
|
[
"MIT"
] |
permissive
|
strboul/supreme
|
34ea5346f72f79b87c866e78d90f145ff536f6fd
|
715c0e1685f47094871eca625d109acd0fd0e6ae
|
refs/heads/master
| 2021-06-07T17:31:10.605554
| 2021-06-05T12:39:51
| 2021-06-05T12:40:31
| 169,471,133
| 61
| 2
|
NOASSERTION
| 2020-07-08T18:35:32
| 2019-02-06T20:22:40
|
R
|
UTF-8
|
R
| false
| false
| 7,031
|
r
|
test-supreme-integration.R
|
# Integration tests: build supreme model objects from fixture sources and
# compare the fully parsed structure against hand-written expectations.
context("test-supreme: Integration tests")
# integration-data paths
# Fixture scripts (.Rtest) containing Shiny module definitions of various shapes.
module_output <- file.path("integration-data", "module-output.Rtest")
multiple_server_definition <- file.path("integration-data", "multiple-server-definition.Rtest")
server_exprs_elems <- file.path("integration-data", "server-exprs-elems.Rtest")
without_any_calling_module <- file.path("integration-data", "without-any-calling-module.Rtest")
module_with_namespaced_fun <- file.path("integration-data", "module-with-namespaced-fun.Rtest")
server_without_session_arg <- file.path("integration-data", "server-without-session-arg.Rtest")
# src_yaml
cycle_modules <- file.path("integration-data", "cycle-modules.yaml")
test_that("supreme with src_file", {
# A module plus a server that calls it: inputs/outputs/returns are parsed.
expect_equal(supreme(src_file(module_output)),
structure(list(
data = list(
list(
name = "linkedScatter",
input = c("data",
"left", "right"),
output = c("plot1", "plot2"),
return = "dataWithSelection",
src = "module-output.Rtest"
),
list(
name = "server",
output = "summary",
calling_modules = list(list(linkedScatter = "scatters")),
src = "module-output.Rtest"
)
),
source_input = c("supreme_src_obj", "supreme_src_file")
), class = "supreme"))
# Two server definitions in one source must be rejected.
expect_error(
supreme(src_file(multiple_server_definition)),
regexp = "[supreme] duplicated module names in the source: 'server'",
fixed = TRUE
)
# Server calling many modules: all calls are collected in order.
expect_equal(supreme(src_file(server_exprs_elems)),
structure(list(
data = list(
list(
name = "server",
calling_modules = list(
list(SomeTabServer = "SomeTab"),
list(BarPlotPanelServer = "BarPlotPanel"),
list(CustomerListPanelServer = "CustomerListPanel"),
list(ObservedPanelServer = "ObservedPanel"),
list(ConditionalItemsServer = "ConditionalItems"),
list(ConditionalConditionalItems1Server = "ConditionalConditionalItems1"),
list(ConditionalConditionalItems2Server = "ConditionalConditionalItems2"),
list(DetailsButtonServer = "DetailsButton")
),
src = "server-exprs-elems.Rtest"
)
),
source_input = c("supreme_src_obj", "supreme_src_file")
), class = "supreme"))
# A lone module with no calling_modules entry at all.
expect_equal(supreme(src_file(without_any_calling_module)),
structure(list(
data = list(
list(
name = "main_table_server",
input = c("data",
"tbl.pageLength", "tbl.selection"),
output = "tbl",
return = "rv",
src = "without-any-calling-module.Rtest"
)
),
source_input = c("supreme_src_obj",
"supreme_src_file")
), class = "supreme"))
})
test_that("supreme with src_yaml", {
# YAML model with a cyclic module reference parses into the same structure
# that src_file would produce (minus the src field, which YAML lacks).
expect_equal(supreme(src_yaml(cycle_modules)),
structure(list(
data = list(
list(
name = "server",
input = c("ax",
"by", "cz"),
output = c("O1", "O2"),
return = "rv",
calling_modules = list(list(reusableModule = NULL))
),
list(
name = "reusableModule",
input = c("a", "b"),
output = c("OO1", "OO2", "OO3"),
return = c("RV1",
"RV2")
)
),
source_input = c("supreme_src_obj", "supreme_src_yaml")
), class = "supreme"))
})
test_that("supreme print methods", {
# print() output is compared as a single collapsed string; note the
# singular/plural "entity"/"entities" wording and the "..." truncation.
sp_yaml <- supreme(src_yaml(example_yaml()))
sp_file <- supreme(src_file(example_app_path()))
expect_equal(
trimws(paste(utils::capture.output(sp_yaml), collapse = " ")),
"A supreme model object 5 entities: server, customers_tab_module_server, items_tab_module_server, transactions_tab_module_server, ..."
)
expect_equal(
trimws(paste(utils::capture.output(sp_file), collapse = " ")),
"A supreme model object 5 entities: server, customers_tab_module_server, items_tab_module_server, transactions_tab_module_server, ..."
)
model1 <- '
- name: displayImages
'
s1 <- supreme(src_yaml(text = model1))
expect_equal(
trimws(paste(utils::capture.output(s1), collapse = " ")),
"A supreme model object 1 entity: displayImages"
)
model2 <- '
- name: displayImages
- name: checkInbox
'
s2 <- supreme(src_yaml(text = model2))
expect_equal(
trimws(paste(utils::capture.output(s2), collapse = " ")),
"A supreme model object 2 entities: displayImages, checkInbox"
)
})
# Snapshot-style checks: the generated nomnoml code is hashed with digest();
# any intentional change to graph generation requires updating these hashes.
# set.seed(2019) precedes each graph() call -- presumably the nomnoml layout
# involves randomness; confirm before removing.
test_that("graph supreme with src_file (test nomnoml code with hashing)", {
{set.seed(2019); graph_module_output <- graph(supreme(src_file(module_output)))}
expect_identical(
digest::digest(graph_module_output[["x"]][["code"]]),
"696db21a45f9dedc84524c8d28b7142c"
)
{set.seed(2019); graph_server_exprs_elems <- graph(supreme(src_file(server_exprs_elems)))}
expect_identical(
digest::digest(graph_server_exprs_elems[["x"]][["code"]]),
"542c09b280acf8048065b77d36f3557f"
)
{set.seed(2019); graph_without_any_calling_module <- graph(supreme(src_file(without_any_calling_module)))}
expect_identical(
digest::digest(graph_without_any_calling_module[["x"]][["code"]]),
"c16c3390c84bc187cf79d6a264c96746"
)
})
test_that("graph supreme with src_yaml (test nomnoml code with hashing)", {
{set.seed(2019); graph_cycle_modules <- graph(supreme(src_yaml(cycle_modules)))}
expect_identical(
digest::digest(graph_cycle_modules[["x"]][["code"]]),
"f4c657a99b2debecd55406471c765c83"
)
})
test_that("graph supreme with namespaced function (test nomnoml code with hashing)", {
{set.seed(2019); graph_namespaced_fun <- graph(supreme(src_file(module_with_namespaced_fun )))}
expect_identical(
digest::digest(graph_namespaced_fun[["x"]][["code"]]),
"72475a0144b2d66ddeb7633bbb6030e0"
)
})
# Error paths: non-coercible input and a file without a parseable server.
test_that("supreme error", {
expect_error(
supreme(1),
"[supreme] the provided input cannot be turned into a supreme object",
fixed = TRUE
)
})
test_that("supreme error - Shiny server module not found", {
expect_error(
src_file(server_without_session_arg),
"[supreme] cannot parse the file.",
fixed = TRUE
)
})
|
fb5ce8ce71ac938fb0f7c4c0c12a69594a38ae94
|
e4a86352f154b710fb2607e9cd8ef01232fec389
|
/R/fruit_avg_demo_code.R
|
220f7445f1cbd5617ecf66da04ca65b961f921e4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
snowdj/debugging
|
f33431903014c84cf6092b698f03f1c8e0d31c5e
|
2740b1f83382ade3f0a6687a9cc106b6065fcc60
|
refs/heads/master
| 2021-01-16T02:13:26.949642
| 2020-02-15T01:00:24
| 2020-02-15T01:00:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,557
|
r
|
fruit_avg_demo_code.R
|
# Live-demo script for a debugging talk: the repeated fruit_avg(dat, "black")
# calls below FAIL ON PURPOSE, to showcase traceback(), recover(), browser()
# and debug(). Requires the R/ fixture files sourced below.
# setup ----
options(width = 50)
options(error = rlang::entrace)
source("R/knitr-error-wrap.R")
dat <- read.csv("R/fruit.csv", strip.white = TRUE)
source("R/fruit_avg.R")
# first look at dat and fruit_avg(); see error ----
# series of 3 code slides
# in Keynote, position based on "black" (error) example
dat
fruit_avg(dat, pattern = "berry")
dat
fruit_avg(dat, pattern = "melon")
dat
fruit_avg(dat, pattern = "black")
# What just happened? ways to see the call stack ----
# series of 3 slides: base, rlang, RStudio
# in Keynote, position based on rlang example
fruit_avg(dat, pattern = "black")
traceback()
fruit_avg(dat, pattern = "black")
rlang::last_trace()
# Rstudio's Debug > On Error > Error Inspector
fruit_avg(dat, pattern = "black")
# screenshot!
options(error = rlang::entrace)
# How to see the state of the world(s) at the moment of failure ----
options(error = recover)
fruit_avg(dat, "black")
# interactive recover() work is beyond the reach of reprex
# have to fake these snippets by running, copy/pasting,
# tweaking text color in Keynote
# use a screenshot to convey what goes on in RStudio environment pane
# show power of browser() ----
# I recorded this with shift + command + 5, record selected part of screen
source("R/fruit_avg_browser.R")
fruit_avg(dat, "black")
# inspect mini_dat, add `drop = FALSE` on-the-fly, see success
# mention debug() ----
debug(fruit_avg)
fruit_avg(dat, "black")
# source a version of fruit_avg() that has `drop = FALSE`
source("R/fruit_avg_fixed.R")
fruit_avg(dat, "black")
|
832c1ecbe0ec37dc2b0f5f5de29099036bf79168
|
4d12b578bce8c6508101174c5bb5e5586b5673d4
|
/R/scatterplot.R
|
948c0161a1ad6c13b31ce6b0d2950178955818be
|
[] |
no_license
|
swang3/timestudy
|
7f00f508674cba20a81e0e43895fa6f3ead24589
|
93f70835fd3599400ca6043226683dd26564418f
|
refs/heads/master
| 2021-08-28T00:54:39.310244
| 2017-12-10T23:52:21
| 2017-12-10T23:52:21
| 107,064,174
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 282
|
r
|
scatterplot.R
|
# Build a ggplot2 scatterplot with custom axis labels and a centred title.
#
# Args:
#   data   : data frame passed to ggplot() (kept for ggplot's data slot;
#            x_data / y_data are the vectors actually plotted).
#   x_data : vector of x values.
#   y_data : vector of y values.
#   x_lab, y_lab, title : axis labels and plot title.
# Returns: a ggplot object (not printed).
scatterplot <- function(data, x_data, y_data, x_lab, y_lab, title) {
  # Fixed: requireNamespace() only loads the namespace -- it does NOT attach
  # ggplot2 -- so the original's unqualified ggplot()/aes()/... calls failed
  # unless the caller had already run library(ggplot2). All calls are now
  # namespace-qualified, and a missing package raises a clear error.
  if (!requireNamespace("ggplot2", quietly = TRUE)) {
    stop("Package 'ggplot2' is required for scatterplot()", call. = FALSE)
  }
  scatter <- ggplot2::ggplot(data, ggplot2::aes(x = x_data, y = y_data)) +
    ggplot2::geom_point(shape = 19) +
    ggplot2::xlab(x_lab) + ggplot2::ylab(y_lab) +
    ggplot2::ggtitle(title) +
    ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5))
  return(scatter)
}
|
253f4732330ab0643b9c2a71e359d7fe922ff207
|
e9665a99eda6af9bfb27777dc7c53a339f8b2d70
|
/man/normal.Rd
|
f205d6f0a8fb77bc1f5f4c3a3a258f012f55d077
|
[
"MIT"
] |
permissive
|
paulsharpeY/psy.phd
|
5bf45b8c8e5deff128c3d6c75312d70ce500bd90
|
9d5965369639650917eb14c45674b856ba116675
|
refs/heads/master
| 2023-07-02T18:51:01.215697
| 2021-07-26T16:53:08
| 2021-07-26T16:53:08
| 228,865,345
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 274
|
rd
|
normal.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psy.phd.R
\name{normal}
\alias{normal}
\title{Various tests for normality}
\usage{
normal(x)
}
\arguments{
\item{x}{Data frame}
}
\value{
Data frame
}
\description{
Various tests for normality.
}
|
6f40e0c330e6e733654560ea8475411382465910
|
3d97b24b8b05cefda925dcbd99f07e5e7abda0fb
|
/dataPrep.R
|
bc32e5039be778602c65e54a1e6a92241873b97b
|
[
"MIT"
] |
permissive
|
wurstmineberg/wurstminestats
|
8a653c1d3d694bbea4d233e0592af5d295d0bcd2
|
1bf0017812cd69b07c9e47a02097a7b0641a5f1e
|
refs/heads/master
| 2021-01-23T13:18:31.738429
| 2015-09-04T14:52:59
| 2015-09-04T14:52:59
| 16,962,610
| 1
| 0
| null | 2014-05-22T15:16:15
| 2014-02-18T20:27:35
|
R
|
UTF-8
|
R
| false
| false
| 3,900
|
r
|
dataPrep.R
|
#### Datapreparations ####
# Pulls player/session stats from the wurstmineberg API via the wurstmineR
# package, derives per-stat datasets, and caches the whole workspace.
## Startup
message("Making preparations…")
source("config/options.R")
source("functions.R")
# Checking if wurstmineR needs an update
message("Checking for wurstmineR")
# devtools::install_github("jemus42/wurstmineR", ref = "dev")
library("wurstmineR")
#Sys.setenv(TZ = "UTC") # Don't ask
# Get a close enough timestamp for the data age.
dataTime <- now(tzone = "UTC")
# Fixed: the log line called now() a second time, so the printed timestamp
# could differ from the stored dataTime; print the stored value instead.
message("Timestamped data at ", dataTime)
#### Get player stats from wurstmineberg API ####
message("Sucking data out of the API")
## Getting a people dataset from people.json ## (Also, deaths)
people <- get_people(urls = urls, size = "full")
people_active <- get_people(urls = urls, size = "active")
stats <- get_stats(urls = urls, strings = wurstmineR::strings, people = people)
#---------------------------------------------------#
#### Enhancing playerstats with some useful shit ####
#---------------------------------------------------#
# Get total distance column by summing up all *OneCm rows per player ##
#generalstats$distanceTraveled <- rowSums(generalstats[, grep("OneCm", colnames(generalstats))])
#--------------------------------#
#### Handle per stat datasets ####
#--------------------------------#
# Get a dataframe of item stat ID, item name and action ##
itemStats <- getItemStats(stats$items)
mobStats <- getMobStats(stats$entities)
#-----------------------------------------------------#
#### Getting sessions from /sessions/overview.json ####
#-----------------------------------------------------#
message("Now the session data…")
sessions <- get_sessions(urls = urls)
playerSessions <- get_player_sessions(sessions, splitByDay = T)
# We want play time per day, sooooo…
# playedPerDay <- getPlayedPerX(playerSessions, people = people, sumBy = "day")
#
# # We also want play time per day per person, so, well… ##
# playedPerPerson <- getPlayedPerX(playerSessions, people = people, sumBy = "person")
# # Getting per weekday stuff
# playedPerWeekday <- getPlayedPerX(playerSessions, people = people, sumBy = "weekday")
# avgPerWeekday <- mean(ddply(playedPerWeekday, .(wday), summarize, timePlayed=sum(timePlayed))$timePlayed)
# # Let's do a monthly one
# playedPerMonth <- getPlayedPerX(playerSessions, people = people, sumBy = "month")
# avgPerMonth <- mean(ddply(playedPerMonth, .(month), summarize, timePlayed=sum(timePlayed))$timePlayed)
# # Actually per person
# playtime.people <- ddply(playedPerPerson, "person", summarize, timePlayed = sum(timePlayed))
# # Now per year
# playedPerYear <- getPlayedPerX(playerSessions, people = people, sumBy = "year")
# # Now per months
# playedPerMonthYear <- ddply(playerSessions, .(year, month, person), summarize, playedMinutes = sum(playedMinutes))
# playedPerMonthYear$person <- factor(playedPerMonthYear$person, levels = people$id, labels = people$name, ordered = T)
# Fix playerSession person names
#playerSessions$person <- factor(playerSessions$person, levels = people$id, ordered = T)
#### Add lastseen data
lastseen <- get_lastseen(urls, people)
#### Cache some objects ####
message("So far soo good, caching data…")
save.image(file = "cache/workspace.RData")
#save(playerstats, file = paste0("cache/", "playerstats", ".rda"))
#save(generalstats, file = paste0("cache/", "generalstats", ".rda"))
#save(items, file = paste0("cache/", "items", ".rda"))
#save(achievements, file = paste0("cache/", "achievements", ".rda"))
#save(entities, file = paste0("cache/", "entities", ".rda"))
#save(activePeople, file = paste0("cache/", "activePeople", ".rda"))
#save(playerSessions, file = paste0("cache/", "playerSessions", ".rda"))
#save(itemStats, file = paste0("cache/", "itemStats", ".rda"))
#save(mobStats, file = paste0("cache/", "mobStats", ".rda"))
|
e58f97ade7e950b4dfa904385e642dca3f289c0a
|
54ab125482e07f85918a361407ac127c2128ef3b
|
/web scrapping.R
|
623644e874c9ce284d0c180c1aee803331c14033
|
[] |
no_license
|
vithika03/WebScrapping_in_R
|
8b40e2bdcbf1c44c6bb575a8ac0ae5a72aebbfe2
|
3e07ede6baab3c27f63c1af69bd564a5e29d0daf
|
refs/heads/master
| 2020-03-24T02:43:44.055738
| 2018-07-26T04:21:15
| 2018-07-26T04:21:15
| 142,388,153
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 941
|
r
|
web scrapping.R
|
# Scrape restaurant titles and ratings from Zomato delivery listing pages
# 160-175 and combine them into one data frame (big_data).
# Fixed: library() calls (and install.packages hints) were inside the loop,
# re-attaching the packages on every iteration; hoisted to the top. Dead
# interactive statements (bare `webpage`, `length(cast)` -- which do not
# auto-print inside a for loop anyway) were removed.
#install.packages('rvest')
library('rvest')
#install.packages('dplyr')
library('dplyr')
#install.packages("XML")
library('XML')
datalist <- list() #define the variable
for (i in 160:175) { #run loop for the number of pages in the website
url <- paste0('https://www.zomato.com/ncr/delivery-in-gurgaon?ref_page=zone&page=', i, '') #changes page for each iteration
webpage <- read_html(url) #read the html page
cast <- html_nodes(webpage, ".fontsize0") #pick the desired node from the webpage
Title_data <- as.data.frame(html_text(cast)) #convert it into a dataframe
cast2 <- html_nodes(webpage, "#orig-search-list .right")
Rating_data <- as.data.frame(html_text(cast2))
# NOTE(review): cbind() errors if the two selectors match a different number
# of nodes on a page -- confirm both always yield one entry per restaurant.
Gurgaon_data <- cbind(Rating_data, Title_data) #combine the columns of both the dataframes
Gurgaon_data$i <- i
# Indexing by page number leaves entries 1..159 NULL; do.call(rbind, ...)
# silently drops NULL elements, so the final bind still works.
datalist[[i]] <- Gurgaon_data
}
big_data <- do.call(rbind, datalist)
|
428a66db6782c2a907569a10e50627fa50e11145
|
7531afc00e390923ea286520b931face524a5b45
|
/A_Simulations/SimulateSubwise.R
|
f260679dd1015c21c542903d788c21704d222b20
|
[] |
no_license
|
NomisCiri/Social_Adolescence_Public
|
0cee432a2abef2662ff423606a50e0226e55f7b8
|
cf5151ebf6b1890f032715c3a69c78ca15dc5c75
|
refs/heads/master
| 2020-05-16T18:53:51.751435
| 2020-02-20T10:29:35
| 2020-02-20T10:29:35
| 183,242,759
| 1
| 1
| null | 2020-02-20T10:29:37
| 2019-04-24T14:11:28
|
R
|
UTF-8
|
R
| false
| false
| 22,721
|
r
|
SimulateSubwise.R
|
## This script is made so it can run many simulations from the command line.
## Positional CLI arguments (via commandArgs):
##   bashInput[1] = index of the subject whose trial structure is reused
##   bashInput[2] = model id (1 risk-seeking OCU, 2 common-currency OCU,
##                  3 noise/temperature, 4 null, 5 asymmetric, 6 null+ambiguity;
##                  see the save() paths in each branch below)
##   bashInput[3] = index into the group-level OCU parameter grid
##   bashInput[4] = selects the group-level risk/ambiguity attitude means
bashInput <- commandArgs(trailingOnly = TRUE)
#for debugging
#bashInput<-c(1,2)
#setwd(here())
load('simulate.RData')  # provides the `simulate` trial data frame
library(truncnorm)      # rtruncnorm() for bounded ambiguity-attitude draws
# Duplicate every trial once with the advice flipped so each gamble appears
# under both a "risky" and a "safe" advisor, then double the whole set again.
simulate$RiskSafeAdv<-1#risky advisor
simulateRev<-simulate
simulateRev$OtherChoseRisk<-as.numeric(!simulateRev$OtherChoseRisk)
simulateRev$RiskSafeAdv<-2#safe advisor
simulate<-rbind(simulate,simulateRev)
simulate<-rbind(simulate,simulate)
#parameter<-expand.grid(ocu=seq(-50,50,0.5),alpha=seq(1,1),theta=seq(1,1))
#parameter<-expand.grid(ocu=seq(0.1,5,1),alpha=seq(0.1,2,1),theta=seq(0.1,10,1))
#subset with the bash command.
# ---- Group-level hyperparameters (selected by the 4th CLI argument) ----
# FIX: the original assigned `betamu = 0` twice in a row; a single
# initialisation is kept (it is overwritten by the second switch anyway).
betamu <- 0
# Mean utility curvature (risk attitude) of the simulated population.
switch(bashInput[4],
       "1" = {rhomu <- 0.4},
       "2" = {rhomu <- 1},
       "3" = {rhomu <- 1.6}
       #"4"={rhomu=1.5}
)
# Mean ambiguity attitude (only consumed by the ambiguity branch, bashInput[2]==6).
switch(bashInput[4],
       "1" = {betamu <- -0.5},
       "2" = {betamu <- 0.5},
       "3" = {betamu <- 0}
       #"4"={rhomu=1.5}
)
# Grid of group-level OCU means; the two simulated groups draw subject-level
# OCU weights around the lower / upper half of this grid.
# NOTE(review): the halves share element 6 (indices 1:6 vs 6:12) -- presumably
# an intentional overlap at the grid midpoint; confirm before relying on it.
OCUParams <- seq(0, 5, length.out = 12)
G1 <- OCUParams[1:(length(OCUParams)/2)]                  # first half
G2 <- OCUParams[(length(OCUParams)/2):length(OCUParams)]  # second half
# 50 simulated subjects per group: OCU weight, curvature alpha, temperature theta.
parameterG1 <- data.frame(ocuSafe = rnorm(50, abs(G1[(1 + as.numeric(bashInput[3]))]), 0.1),
                          alpha   = abs(rnorm(50, rhomu, 0.3)),
                          theta   = rnorm(50, 0.8, 0.1))
parameterG2 <- data.frame(ocuSafe = rnorm(50, abs(G2[(1 + as.numeric(bashInput[3]))]), 0.1),
                          alpha   = abs(rnorm(50, rhomu, 0.3)),
                          theta   = rnorm(50, 0.8, 0.1))
howManySubs <- length(unique(simulate$subject))
subarray    <- unique(simulate$subject)  # subject ids, used below for subsetting
if (bashInput[2]==1){
#########################################################
#########################################################
######### Thats the "Risk Seeking" OCU Model.############
#########################################################
#########################################################
#
#this wierd model just adds some value to the risky option irrespective of the quality of the information.
#parameter<-expand.grid(ocu=seq(-10,10,0.5),alpha=seq(1,1),theta=seq(0.3,10))
  # Risk-seeking OCU model: whenever ANY advice is present, the constant bonus
  # `ocu` is added to the utility of the RISKY option, irrespective of whether
  # the advisor recommended risky or safe (this is the model-defining quirk).
  #
  # v : numeric vector c(ocu, alpha, theta)
  #   ocu   - other-conferred-utility bonus on the risky option
  #   alpha - utility curvature (risk attitude)
  #   theta - softmax inverse temperature
  # Reads the global data frame `simulateSubset` (one subject's trials) and
  # returns a modified copy with Util_Sure/Util_Risk/ProbChooseRisk, a
  # simulated Bernoulli choice per trial, and the parameters used.
  # NOTE: rbinom() is drawn once per trial inside the loop, so output depends
  # on the RNG state and the trial order.
  MakeOCUDataRisk <- function(v) {
    ocu<-v[1]
    alpha<-v[2]
    theta<-v[3]
    ##Utilities for each choice
    for (i in 1:nrow(simulateSubset)){
      #get the social choices.
      if (is.na(simulateSubset$OtherChoseRisk[i])){ # no advice: plain expected utility
        simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
        simulateSubset$Util_Risk[i] <- ((simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha)) #risk
      }else
        if (simulateSubset$OtherChoseRisk[i]==1){ # advice is risky: bonus on risky option
          simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
          simulateSubset$Util_Risk[i] <- ((simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha))+ocu #risk
        }
      else
        if (simulateSubset$OtherChoseRisk[i]==0){# advice is safe: bonus STILL on the risky option
          simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
          simulateSubset$Util_Risk[i] <- (simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha)+ocu
        }
      # Softmax choice probability, then a single Bernoulli draw per trial.
      simulateSubset$ProbChooseRisk[i] <- 1/(1 + exp(-(( simulateSubset$Util_Risk[i] - simulateSubset$Util_Sure[i])*(theta))))
      simulateSubset$ChooseRisk[i]<-rbinom(1,1,simulateSubset$ProbChooseRisk[i])
    }# end per-trial loop
    #save the parameters alongside the simulated data.
    simulateSubset$alpha<-alpha
    simulateSubset$theta<-theta
    simulateSubset$ocu<-ocu
    return(simulateSubset)
  }
#simulate<-simulate[simulate$subject==subarray[bashInput[1]],]
#bashInput<-1
  # Driver for the risk-seeking model: pick one subject's trials, simulate 50
  # synthetic subjects per group from parameterG1/parameterG2, and save.
  # (Branches 2-6 below repeat this same pattern with their own model function.)
  simulateSubset<-simulate[simulate$subject==subarray[as.numeric(bashInput[1])],]
  # Dummy call just to shape the output lists before the loops fill them.
  dummytomakelists<-MakeOCUDataRisk(c(parameterG1[1,1],parameterG1[1,2],parameterG1[1,3]))
  Simulations1=as.list(dummytomakelists)
  Simulations2=as.list(dummytomakelists)
  #make data of the FIRST group.
  # NOTE(review): parameterG1 has no `ocu` column -- `parameterG1$ocu` relies on
  # data.frame partial matching to hit `ocuSafe` (length 50). Works, but fragile.
  for (i in 1:length(parameterG1$ocu)){
    dummytomakelists<-MakeOCUDataRisk(c(parameterG1[i,1],parameterG1[i,2],parameterG1[i,3]))
    Simulations1[[i]]<-as.list(dummytomakelists)
    Simulations1[[i]]$group<-1# i make a new entry here and tell the simulations flag that this is a new group.
  }#end first
  #make data of the SECOND group:
  for (i in 1:length(parameterG1$ocu)){
    dummytomakelists<-MakeOCUDataRisk(c(parameterG2[i,1],parameterG2[i,2],parameterG2[i,3]))
    Simulations2[[i]]<-as.list(dummytomakelists)
    Simulations2[[i]]$group<-2# i make a new entry here and tell the simulations flag that this is a new group.
  }
  #combine the lists
  Simulations<-do.call(c, list(Simulations1, Simulations2))
  #rnorm(n=as.numeric(bashInput[1]), mean=as.numeric(bashInput[2]))
  save(Simulations,file=paste0("ModelRisk/",as.character(bashInput[4]),"RiskSocial",as.character(bashInput[3]),as.character(bashInput[1]),".RData"))
}else if (bashInput[2]==2){
  # Common-currency OCU model: the bonus `ocu` follows the ADVICE -- it is
  # added to the risky utility after risky advice, and to the safe utility
  # after safe advice. No advice means plain expected utility.
  #
  # v : numeric vector c(ocu, alpha, theta); see MakeOCUDataRisk for meanings.
  # Reads the global `simulateSubset` and returns a modified copy (utilities,
  # choice probabilities, one rbinom draw per trial, parameters attached).
  MakeOCUDataInfo <- function(v) {
    ocu<-v[1]
    alpha<-v[2]
    theta<-v[3]
    ##Utilities for each choice
    for (i in 1:nrow(simulateSubset)){
      #get the social choices.
      if (is.na(simulateSubset$OtherChoseRisk[i])){ # NoAdvice.
        simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
        simulateSubset$Util_Risk[i] <- ((simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha)) #risk
      }else
        if (simulateSubset$OtherChoseRisk[i]==1){ # advice is risky: bonus on risky option
          simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
          simulateSubset$Util_Risk[i] <- ((simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha))+ocu #risk
        }
      else
        if (simulateSubset$OtherChoseRisk[i]==0){# advice is safe: bonus on safe option
          simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)+ocu
          simulateSubset$Util_Risk[i] <- (simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha)
        }
      # Softmax choice probability, then a single Bernoulli draw per trial.
      simulateSubset$ProbChooseRisk[i] <- 1/(1 + exp(-((simulateSubset$Util_Risk[i] - simulateSubset$Util_Sure[i])*(theta))))
      simulateSubset$ChooseRisk[i]<-rbinom(1,1,simulateSubset$ProbChooseRisk[i])
    }# end per-trial loop
    #save the parameters.
    simulateSubset$alpha<-alpha
    simulateSubset$theta<-theta
    simulateSubset$ocu<-ocu
    return(simulateSubset)
  }
# bashInput<-1
simulateSubset<-simulate[simulate$subject==subarray[as.numeric(bashInput[1])],]
dummytomakelists<-MakeOCUDataInfo(c(parameterG1[1,1],parameterG1[1,2],parameterG1[1,3]))
Simulations1=as.list(dummytomakelists)
Simulations2=as.list(dummytomakelists)
#make data of the FIRST group.
for (i in 1:length(parameterG1$ocu)){
dummytomakelists<-MakeOCUDataInfo(c(parameterG1[i,1],parameterG1[i,2],parameterG1[i,3]))
Simulations1[[i]]<-as.list(dummytomakelists)
Simulations1[[i]]$group<-1# i make a new entry here and tell the simulations flag that this is a new group.
}#end first
#make data of the SECOND group:
for (i in 1:length(parameterG1$ocu)){
dummytomakelists<-MakeOCUDataInfo(c(parameterG2[i,1],parameterG2[i,2],parameterG2[i,3]))
Simulations2[[i]]<-as.list(dummytomakelists)
Simulations2[[i]]$group<-2# i make a new entry here and tell the simulations flag that this is a new group.
}
#combine the lists
Simulations<-do.call(c, list(Simulations1, Simulations2))
save(Simulations,file=paste0("ModelInfo/",as.character(bashInput[4]),"InfoSocial",as.character(bashInput[3]),as.character(bashInput[1]),".RData"))
}else if (bashInput[2]==3){
# Here i make the values for the Noise OCU Model. I make a list of 12 values that lie between 0 (no noise) and 1 (complete Randomness) and use these values
# for my groups.
NoiseParams<-seq(0, 0.8, length.out=12)# Prevent the Values from becoming 0 with abs
G1<-NoiseParams[1:(length(NoiseParams)/2)]#FirstHalf
G2<-NoiseParams[(length(NoiseParams)/2):length(NoiseParams)]#SecondHalf
parameterG1<-data.frame(ocuSafe=abs(rnorm(50,G1[(1+as.numeric(bashInput[3]))],0.1)),alpha=abs(rnorm(50,rhomu,0.3)),theta=rnorm(50,0.8,0.1))
parameterG2<-data.frame(ocuSafe=abs(rnorm(50,G2[(1+as.numeric(bashInput[3]))],0.1)),alpha=abs(rnorm(50,rhomu,0.3)),theta=rnorm(50,0.8,0.1))
########################################################
########################################################
######### Thats the a "Distraction" OCU Model.##########
########################################################
########################################################
  # Distraction / temperature model: advice never shifts utilities. Instead,
  # on advised trials the softmax choice probability is mixed with a coin
  # flip:  p' = (1 - |ocu|) * softmax + |ocu|/2,  so ocu in [0,1] scales how
  # much advice randomises behaviour (ocu=0 -> unchanged, ocu=1 -> pure 50/50).
  #
  # v : numeric vector c(ocu, alpha, theta); reads global `simulateSubset`,
  # returns a modified copy (one rbinom draw per trial; RNG-order dependent).
  MakeOCUDataNoise <- function(v) {
    ocu<-v[1]
    alpha<-v[2]
    theta<-v[3]
    ##Utilities for each choice
    for (i in 1:nrow(simulateSubset)){
      # Utilities are advice-independent in this model.
      simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
      simulateSubset$Util_Risk[i] <- (simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha)
      if (is.na(simulateSubset$OtherChoseRisk[i])){ # if there is no Advice: plain softmax
        simulateSubset$ProbChooseRisk[i] <- 1/(1 + exp(-((simulateSubset$Util_Risk[i] - simulateSubset$Util_Sure[i])*(theta))))
      }else { # advice present: flatten the choice rule towards chance
        simulateSubset$ProbChooseRisk[i] <-(1-abs(ocu)) * (1/(1 + exp(-((simulateSubset$Util_Risk[i] - simulateSubset$Util_Sure[i])*(theta))))) + (abs(ocu)/2)
      }
      simulateSubset$ChooseRisk[i]<-rbinom(1,1,simulateSubset$ProbChooseRisk[i])
    }# end per-trial loop
    #save the parameters.
    simulateSubset$alpha<-alpha
    simulateSubset$theta<-theta
    simulateSubset$ocu<-ocu
    # simulateSubset$ChooseRisk[i]<-rbinom(1,1,simulateSubset$ProbChooseRisk[i])
    return(simulateSubset)
  }
# if you want to do this not on the cluster you have to put this into a loop.
# here i pick the subject that shall be used for simulation the data.
simulateSubset<-simulate[simulate$subject==subarray[as.numeric(bashInput[1])],]
dummytomakelists<-MakeOCUDataNoise(c(parameterG1[1,1],parameterG1[1,2],parameterG1[1,3]))
Simulations1=as.list(dummytomakelists)
Simulations2=as.list(dummytomakelists)
#for (i in 1:length(parameter$ocu)){# go through this
#make data of the FIRST group.
for (i in 1:length(parameterG1$ocu)){
dummytomakelists<-MakeOCUDataNoise(c(parameterG1[i,1],parameterG1[i,2],parameterG1[i,3]))
Simulations1[[i]]<-as.list(dummytomakelists)
Simulations1[[i]]$group<-1# i make a new entry here and tell the simulations flag that this is a new group.
}#end first
#make data of the SECOND group:
for (i in 1:length(parameterG1$ocu)){
dummytomakelists<-MakeOCUDataNoise(c(parameterG2[i,1],parameterG2[i,2],parameterG2[i,3]))
Simulations2[[i]]<-as.list(dummytomakelists)
Simulations2[[i]]$group<-2# i make a new entry here and tell the simulations flag that this is a new group.
}
#combine the lists
Simulations<-do.call(c, list(Simulations1, Simulations2))
save(Simulations,file=paste0("ModelTemperature/",as.character(bashInput[4]),"TemperatureSocial",as.character(bashInput[3]),as.character(bashInput[1]),".RData"))
} else if (bashInput[2]==4){
  # Null model: `ocu` is hard-coded to 0, so advice has NO effect on choice.
  # The +ocu term in the safe-advice branch is kept for structural symmetry
  # with the other models (it adds zero). v[1] is therefore ignored.
  #
  # v : numeric vector c(<ignored>, alpha, theta); reads global
  # `simulateSubset`, returns a modified copy (one rbinom draw per trial).
  MakeOCUDataNull <- function(v) {
    ocu<-0  # null model: advice weight fixed at zero
    alpha<-v[2]
    theta<-v[3]
    # beta<-v[4]
    ##Utilities for each choice
    for (i in 1:nrow(simulateSubset)){
      #get the social choices.
      if (is.na(simulateSubset$OtherChoseRisk[i])){ # NoAdvice.
        simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
        simulateSubset$Util_Risk[i] <- ((simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha)) #risk
      }else
        if (simulateSubset$OtherChoseRisk[i]==1){ # advice is risky (no effect here)
          simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
          simulateSubset$Util_Risk[i] <- ((simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha))
        }
      else
        if (simulateSubset$OtherChoseRisk[i]==0){# advice is safe (+ocu adds 0)
          simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)+ocu
          simulateSubset$Util_Risk[i] <- (simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha)
        }
      # Softmax choice probability, then a single Bernoulli draw per trial.
      simulateSubset$ProbChooseRisk[i] <- 1/(1 + exp(-(( simulateSubset$Util_Risk[i] - simulateSubset$Util_Sure[i])*(theta))))
      simulateSubset$ChooseRisk[i]<-rbinom(1,1,simulateSubset$ProbChooseRisk[i])
    }# end per-trial loop
    #save the parameters.
    simulateSubset$alpha<-alpha
    simulateSubset$theta<-theta
    simulateSubset$ocu<-ocu
    return(simulateSubset)
  }
# bashInput<-1
simulateSubset<-simulate[simulate$subject==subarray[as.numeric(bashInput[1])],]
dummytomakelists<-MakeOCUDataNull(c(parameterG1[1,1],parameterG1[1,2],parameterG1[1,3]))
Simulations1=as.list(dummytomakelists)
Simulations2=as.list(dummytomakelists)
#make data of the FIRST group.
for (i in 1:length(parameterG1$ocu)){
dummytomakelists<-MakeOCUDataNull(c(parameterG1[i,1],parameterG1[i,2],parameterG1[i,3]))
Simulations1[[i]]<-as.list(dummytomakelists)
Simulations1[[i]]$group<-1# i make a new entry here and tell the simulations flag that this is a new group.
}#end first
#make data of the SECOND group:
for (i in 1:length(parameterG1$ocu)){
dummytomakelists<-MakeOCUDataNull(c(parameterG2[i,1],parameterG2[i,2],parameterG2[i,3]))
Simulations2[[i]]<-as.list(dummytomakelists)
Simulations2[[i]]$group<-2# i make a new entry here and tell the simulations flag that this is a new group.
}
#combine the lists
Simulations<-do.call(c, list(Simulations1, Simulations2))
save(Simulations,file=paste0("ModelNull/",as.character(bashInput[4]),"NullSocial",as.character(bashInput[3]),as.character(bashInput[1]),".RData"))
}else if(bashInput[2]==5){
# to decorrelate OCU safe and Risk for my simulations.
# This sequence as been generated by calling SepParams<-sample(abs(seq(0, 5, length.out=12)),12), and adding a 0 to the end and
# partitioning the Vector into 2.
#Decorrelate the values.
#Prevent the Values from becoming 0 with abs
SepParamsRisk<-c( 1.3636364, 2.2727273, 2.7272727, 5.0000000, 3.6363636, 0.4545455, 1.8181818, 0.0000000, 4.0909091, 0.9090909, 3.1818182, 4.5454545)
SepParamsSafe<-c(4.5454545, 2.7272727, 2.2727273, 3.6363636, 1.8181818, 0.9090909, 3.1818182, 4.0909091, 0.4545455, 0.000000, 5.0000000, 1.3636364)
G1<-SepParamsRisk[1:(length(SepParamsRisk)/2)]#FirstHalf
G2<-SepParamsRisk[(length(SepParamsRisk)/2):length(SepParamsRisk)]#SecondHalf
G3<-SepParamsSafe[1:(length(SepParamsSafe)/2)]#FirstHalf
G3<-SepParamsSafe[(length(SepParamsSafe)/2):length(SepParamsSafe)]#SecondHalf
parameterG1<-data.frame(ocuSafe=abs(rnorm(50,G1[(1+as.numeric(bashInput[3]))],0.3)),ocuRisk=abs(rnorm(50,G3[(1+as.numeric(bashInput[3]))],0.3)),alpha=abs(rnorm(50,rhomu,0.3)),theta=rnorm(50,0.8,0.1))
parameterG2<-data.frame(ocuSafe=abs(rnorm(50,G2[(1+as.numeric(bashInput[3]))],0.3)),ocuRisk=abs(rnorm(50,G4[(1+as.numeric(bashInput[3]))],0.3)),alpha=abs(rnorm(50,rhomu,0.3)),theta=rnorm(50,0.8,0.1))
  # Asymmetric OCU model: SEPARATE bonuses for the two advice directions --
  # `ocuRisk` is added to the risky utility after risky advice, `ocuSafe`
  # to the safe utility after safe advice.
  #
  # v : numeric vector c(ocuSafe, ocuRisk, alpha, theta)
  # Reads the global `simulateSubset` and returns a modified copy (one
  # rbinom draw per trial; RNG-order dependent).
  MakeOCUDataSep <- function(v) {
    ocuSafe<-v[1]
    ocuRisk<-v[2]
    alpha<-v[3]
    theta<-v[4]
    ##Utilities for each choice
    for (i in 1:nrow(simulateSubset)){
      #get the social choices.
      if (is.na(simulateSubset$OtherChoseRisk[i])){ # NoAdvice.
        simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
        simulateSubset$Util_Risk[i] <- ((simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha)) #risk
      }else
        if (simulateSubset$OtherChoseRisk[i]==1){ # advice is risky: ocuRisk bonus
          simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
          simulateSubset$Util_Risk[i] <- ((simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha))+ocuRisk #risk
        }
      else
        if (simulateSubset$OtherChoseRisk[i]==0){# advice is safe: ocuSafe bonus
          simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)+ocuSafe
          simulateSubset$Util_Risk[i] <- (simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha)
        }
      # Softmax choice probability, then a single Bernoulli draw per trial.
      simulateSubset$ProbChooseRisk[i] <- 1/(1 + exp(-((simulateSubset$Util_Risk[i] - simulateSubset$Util_Sure[i])*(theta))))
      simulateSubset$ChooseRisk[i]<-rbinom(1,1,simulateSubset$ProbChooseRisk[i])
    }# end per-trial loop
    #save the parameters.
    simulateSubset$alpha<-alpha
    simulateSubset$theta<-theta
    simulateSubset$ocuSafe<-ocuSafe
    simulateSubset$ocuRisk<-ocuRisk
    return(simulateSubset)
  }
# bashInput<-1
simulateSubset<-simulate[simulate$subject==subarray[as.numeric(bashInput[1])],]
dummytomakelists<-MakeOCUDataSep(c(parameterG1[1,1],parameterG1[1,2],parameterG1[1,3],parameterG1[1,4]))
Simulations1=as.list(dummytomakelists)
Simulations2=as.list(dummytomakelists)
#make data of the FIRST group.
for (i in 1:length(parameterG1$ocuSafe)){
dummytomakelists<-MakeOCUDataSep( c(parameterG1[i,1],parameterG1[i,2],parameterG1[i,3],parameterG1[i,4]))
Simulations1[[i]]<-as.list(dummytomakelists)
Simulations1[[i]]$group<-1# i make a new entry here and tell the simulations flag that this is a new group.
}#end first
#make data of the SECOND group:
for (i in 1:length(parameterG1$ocuSafe)){
dummytomakelists<-MakeOCUDataSep(c(parameterG2[i,1],parameterG2[i,2],parameterG2[i,3],parameterG2[i,4]))
Simulations2[[i]]<-as.list(dummytomakelists)
Simulations2[[i]]$group<-2# i make a new entry here and tell the simulations flag that this is a new group.
}
#combine the lists
Simulations<-do.call(c, list(Simulations1, Simulations2))
save(Simulations,file=paste0("ModelSep/",as.character(bashInput[4]),"SepSocial",as.character(bashInput[3]),as.character(bashInput[1]),".RData"))
}else if(bashInput[2]==6){
simulateR=simulate$Risk1Ambig0=1
simulateA=simulate$Risk1Ambig0=0
simulate<-rbind(simulateR,simulateA)
# to decorrelate OCU safe and Risk for my simulations.
# This sequence as been generated by calling SepParams<-sample(abs(seq(0, 5, length.out=12)),12), and adding a 0 to the end and
# partitioning the Vector into 2.
#Decorrelate the values.
#Prevent the Values from becoming 0 with abs
SepParamsSafe<-c(4.5454545, 2.7272727, 2.2727273, 3.6363636, 1.8181818, 0.9090909, 3.1818182, 4.0909091, 0.4545455, 0.000000, 5.0000000, 1.3636364)
G1<-SepParamsSafe[1:(length(SepParamsSafe)/2)]#FirstHalf
G2<-SepParamsSafe[(length(SepParamsSafe)/2):length(SepParamsSafe)]#SecondHalf
#G3<-SepParamsSafe[1:(length(SepParamsSafe)/2)]#FirstHalf
#G3<-SepParamsSafe[(length(SepParamsSafe)/2):length(SepParamsSafe)]#SecondHalf
parameterG1<-data.frame(ocuSafe=abs(rnorm(50,G1[(1+as.numeric(bashInput[3]))],0.3)),alpha=abs(rnorm(50,rhomu,0.3)),theta=rnorm(50,0.8,0.1),beta=rtruncnorm(1, a=-1, b=1, mean = betamu, sd = 0.3))
parameterG2<-data.frame(ocuSafe=abs(rnorm(50,G2[(1+as.numeric(bashInput[3]))],0.3)),alpha=abs(rnorm(50,rhomu,0.3)),theta=rnorm(50,0.8,0.1),beta=rtruncnorm(1, a=-1, b=1, mean = betamu, sd = 0.3))
  # Null model for the risk+ambiguity variant ("Tymula" banner): `ocu` is
  # fixed at 0, so advice has no effect; utilities are the same expected-
  # utility form as the other models.
  # NOTE(review): `beta <- v[4]` is read but never used in the computation,
  # and the call sites below pass only three elements, so beta is NA --
  # presumably the ambiguity-attitude term was never wired in; confirm.
  #
  # v : numeric vector (only v[2]=alpha and v[3]=theta are used).
  # Reads the global `simulateSubset`, returns a modified copy.
  MakeDataNullAmbig <- function(v) {
    ocu<-0  # null model: advice weight fixed at zero
    alpha<-v[2]
    theta<-v[3]
    beta<-v[4]  # unused; NA with current callers
    ##Utilities for each choice
    for (i in 1:nrow(simulateSubset)){
      #get the social choices.
      if (is.na(simulateSubset$OtherChoseRisk[i])){ # NoAdvice.
        simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
        simulateSubset$Util_Risk[i] <- ((simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha)) #risk
      }else
        if (simulateSubset$OtherChoseRisk[i]==1){ # advice is risky (no effect here)
          simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)
          simulateSubset$Util_Risk[i] <- ((simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha))
        }
      else
        if (simulateSubset$OtherChoseRisk[i]==0){# advice is safe (+ocu adds 0)
          simulateSubset$Util_Sure[i] <- (simulateSubset$valueSure[i]^alpha)+ocu
          simulateSubset$Util_Risk[i] <- (simulateSubset$probGamble[i])*(simulateSubset$valueGamble[i]^alpha)
        }
      # Softmax choice probability, then a single Bernoulli draw per trial.
      simulateSubset$ProbChooseRisk[i] <- 1/(1 + exp(-(( simulateSubset$Util_Risk[i] - simulateSubset$Util_Sure[i])*(theta))))
      simulateSubset$ChooseRisk[i]<-rbinom(1,1,simulateSubset$ProbChooseRisk[i])
    }# end per-trial loop
    #save the parameters.
    simulateSubset$alpha<-alpha
    simulateSubset$theta<-theta
    simulateSubset$ocu<-ocu
    return(simulateSubset)
  }
# bashInput<-1
simulateSubset<-simulate[simulate$subject==subarray[as.numeric(bashInput[1])],]
dummytomakelists<-MakeDataNullAmbig(c(parameterG1[1,1],parameterG1[1,2],parameterG1[1,3]))
Simulations1=as.list(dummytomakelists)
Simulations2=as.list(dummytomakelists)
#make data of the FIRST group.
for (i in 1:length(parameterG1$ocu)){
dummytomakelists<-MakeDataNullAmbig(c(parameterG1[i,1],parameterG1[i,2],parameterG1[i,3]))
Simulations1[[i]]<-as.list(dummytomakelists)
Simulations1[[i]]$group<-1# i make a new entry here and tell the simulations flag that this is a new group.
}#end first
#make data of the SECOND group:
for (i in 1:length(parameterG1$ocu)){
dummytomakelists<-MakeDataNullAmbig(c(parameterG2[i,1],parameterG2[i,2],parameterG2[i,3]))
Simulations2[[i]]<-as.list(dummytomakelists)
Simulations2[[i]]$group<-2# i make a new entry here and tell the simulations flag that this is a new group.
}
#combine the lists
Simulations<-do.call(c, list(Simulations1, Simulations2))
save(Simulations,file=paste0("ModelNullAmbig/",as.character(bashInput[4]),"NullSocial",as.character(bashInput[3]),as.character(bashInput[1]),".RData"))
}
|
a4d14c588d1d30c219baca0ce25bd267b3dc06f5
|
9bf4135a0dc4eda6c14159616e53f8bd87d404c2
|
/man/add_eventscale.Rd
|
e5aae4b51cfecd967d0745a56c19064a5b306079
|
[] |
no_license
|
adeze/events
|
2c0c7ebab319dea9c3d13f7ffdc42453f6c39f46
|
c245f52726adac5eb529bcf52b92b29b3e2d29ff
|
refs/heads/master
| 2021-01-22T13:07:58.819709
| 2014-03-16T20:46:30
| 2014-03-16T20:46:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 455
|
rd
|
add_eventscale.Rd
|
\name{add_eventscale}
\alias{add_eventscale}
\title{Apply eventscale to event data}
\usage{
add_eventscale(edo, sc)
}
\arguments{
\item{edo}{Event data}
\item{sc}{scale}
}
\value{
Event data with a scaling
}
\description{
Applies an eventscale to event data
}
\details{
Applies an eventscale to event data. This adds a new field
in the event data with the same name as the eventscale.
Add as many as you want to keep around.
}
\author{
Will Lowe
}
|
12bbbd7b05e1fb43f4e4299722afd7ed326fd1e6
|
389568d389710a27a5210c837095262fbe85ca9e
|
/inst/extdata/archive/src/vpathToEpath.R
|
336cdade8777d66e179820b38ce54d02177c8ecb
|
[] |
no_license
|
CBIIT/geneSetPathwayAnalysis
|
5fe72e6e58d8cc91f1089c4d8e7152a64cbc3698
|
ecdbd6ae8acb4968834c523fa4fbc1a9bc10cd09
|
refs/heads/master
| 2023-02-08T02:00:11.638585
| 2023-01-31T15:13:27
| 2023-01-31T15:13:27
| 178,019,545
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,839
|
r
|
vpathToEpath.R
|
# lapply(all_shortest_paths(g, aIdx, bIdx)$res, function(x) {
# cat("IT: ", get.edge.attribute(g, "interactionType", E(g, path=x)), "\n")
# E(g, path=x)
# })
## Get Network
library(paxtoolsr)
library(rcellminer)
library(igraph)
sif <- downloadPc2("PathwayCommons.8.All.EXTENDED_BINARY_SIF.hgnc.txt.gz")
#sif <- downloadPc2("PathwayCommons.8.drugbank.BINARY_SIF.hgnc.txt.sif.gz")
x <- getSifInteractionCategories()
excludeInteractions <- c("in-complex-with")
sifX <- filterSif(sif$edges, setdiff(c(x$BetweenProteins, x$BetweenProteinSmallMolecule), excludeInteractions))
ignoreDb <- "CTD"
t1 <- lapply(sifX$INTERACTION_DATA_SOURCE, function(x) {
if(ignoreDb %in% x && length(x) == 1) {
return(FALSE)
} else {
return(TRUE)
}
})
t2 <- unlist(t1)
sifY <- sifX[t2, ]
g1 <- loadSifInIgraph(sifY, directed=TRUE)
g <- loadSifInIgraph(sifY, directed=FALSE)
idA <- "GEN1"
idB <- "CCND1"
aIdx <- match(idA, V(g)$name)
bIdx <- match(idB, V(g)$name)
mode <- "all"
weights <- NULL
targetCor <- "609699"
db <- list()
db[["exp"]] <- getAllFeatureData(rcellminerData::molData)[["exp"]]
db[["act"]] <- exprs(getAct(rcellminerData::drugData))
m1 <- getShortestPathSif(g1, idA, idB, mode=mode, weights=weights, filterFun=filterFun, db, targetCor)
# s2 <- all_shortest_paths(g, aIdx, bIdx, mode=mode, weights=weights)
# s2$res
#
# s2$res[[1]]$name
#
# vpaths <- s2$res
#
# i <- 1
# j <- 1
# Score each candidate shortest path and return the best one.
#
# vpaths    : list of vertex paths; element i has $name, the vertex ids.
# db        : list with matrices db[["exp"]] (expression, genes in rownames)
#             and db[["act"]] (activity, targetCor in rownames).
# targetCor : rowname of db[["act"]] whose profile is correlated against each
#             gene on a path.
#
# A path's score is the sum of |Pearson r| between the target activity profile
# and the expression profile of every path member found in db[["exp"]].
# Paths containing a CHEBI (small-molecule) node are excluded.
#
# FIX: the original grew `results` only for non-skipped paths, so
# which.max(results) misindexed `vpaths` whenever a CHEBI path appeared before
# the best path. Scores are now kept in a slot-per-path vector; skipped paths
# stay NA, which which.max() ignores.
filterFun <- function(vpaths, db, targetCor) {
  pathLength <- length(vpaths[[1]]$name)  # shortest paths share one length
  results <- rep(NA_real_, length(vpaths))
  for (i in seq_along(vpaths)) {
    path <- vpaths[[i]]$name
    if (any(grepl("^CHEBI", path))) {
      next  # drop paths routed through small molecules
    }
    tmpResults <- 0
    for (id in path) {
      if (id %in% rownames(db[["exp"]])) {
        x <- db[["act"]][targetCor, ]
        y <- db[["exp"]][id, ]
        t1 <- cor.test(x, y)
        tmpResults <- tmpResults + abs(t1$estimate)
      }
    }
    results[i] <- tmpResults
  }
  # Diagnostic output: per-path mean |r| and the candidate paths themselves.
  n1 <- unlist(lapply(vpaths, function(x) paste(names(x), collapse = ":")))
  cat("CORR: ", paste(results / pathLength, collapse = ", "), "\n")
  cat("PATHS: ", paste(n1, collapse = ", "), "\n")
  vpaths[[which.max(results)]]
}
# y <- filterFun(vpaths, db, targetCor)
# e <- E(g, path=y) # Works for undirected
# e
# e$interactionType
#
# gX <- make_ring(10, directed=TRUE)
# V(gX)$name <- letters[1:10]
# yX <- all_shortest_paths(gX, 1, 6)$res[[1]]
# E(gX, path=yX)
#
# are.connected(g1, "RB1", "GEN1") # T
# are.connected(g1, "TP53", "RB1") # T
#
# are.connected(g1, "GEN1", "RB1") # F
# are.connected(g1, "RB1", "TP53") # F
#
# # Evidence it has issues if the path is not ordered corrected
# E(g1, P=c("RB1", "GEN1"))
# #+ 1/381041 edge (vertex names):
# # [1] RB1->GEN1
# E(g1, P=c("GEN1", "RB1"))
# #Error in .Call("R_igraph_es_pairs", graph, as.igraph.vs(graph, P) - 1, :
# # At type_indexededgelist.c:1173 : Cannot get edge id, no such edge, Invalid value
#
# are.connected(g1, "RELA", "GEN1") # T
# are.connected(g1, "TP53", "RELA") # F
#
# are.connected(g1, "GEN1", "RELA") # F
# are.connected(g1, "RELA", "TP53") # T
#
# r1 <- E(g1, P=c("RB1", "GEN1"))
# r2 <- E(g1, P=c("RELA", "TP53"))
#
# E(g1)[c(r1, r2)]
#
# v1 <- y$name
# v1 <- s2$res[[5]]$name
# e1 <- NULL
#
# for(i in 1:(length(v1)-1)) {
# if(are.connected(g1, v1[i], v1[i+1])) {
# r1 <- E(g1, P=c(v1[i], v1[i+1]))
# } else {
# r1 <- E(g1, P=c(v1[i+1], v1[i]))
# }
#
# e1 <- c(e1, r1)
# }
#
# E(g1)[e1]
# E(g1)[e1]$interactionType
#
# m1 <- getShortestPathSif(g1, idA, idB, mode="all", weights=NULL, filterFun=filterFun, db, targetCor)
|
a6f5eb19be92363dce5253deba0a74fe277f69ac
|
30ec93c0d3a45feeaa6ea38e73c3c660d374b2be
|
/app.R
|
b3f5425de0881cad532b468b872c45fc9939f29b
|
[] |
no_license
|
rgs212/HackathonTeam2
|
6b9450666d63ddb182a38164d7a7be74e96d2a1c
|
de03d7be9925dd19e859bda62d9994430dc8154e
|
refs/heads/master
| 2022-10-02T17:11:18.948956
| 2020-06-08T09:51:40
| 2020-06-08T09:51:40
| 267,627,092
| 0
| 2
| null | 2020-06-08T09:51:42
| 2020-05-28T15:30:48
|
R
|
UTF-8
|
R
| false
| false
| 8,593
|
r
|
app.R
|
### notes
# Shiny front end for the group's dataset browser: a header with upload
# dialogs, then a "Human Data" tab (filter sidebar + table/plot main panel)
# and a placeholder "Mouse Data" tab.
library(shiny)
library(DT)
library(shinyalert)
library(shinyBS)
library(dplyr)
# Define UI ----
# NOTE(review): absolute home-directory path -- breaks on other machines.
Human <- read.csv("~/Hackathon/Example.csv")[,-1]
ui <- fluidPage(
  # ---- Header: logos, title, and the upload modal ----
  titlePanel(p(column(2,img(src = "Exeter.png", height = 100)),
               column(8,h1(strong("Complex Disease Epigenetics Group Database", align = "center")),
                      p(h3(helpText("The interactive database to browse all available datasets of the group"))), align = "center"),
               column(2,p(img(src = "CDEG.png", height = 100)),
                      p(actionButton("UploadPopup", "Upload Data",
                                     style="color: #000000; background-color: #8bbbee; border-color: #000000", width = "100%"),
                        align = "left")),
               bsModal("UploadModal","Upload new Data into Database","UploadPopup",
                       h3("Upload Human Data"),
                       h5(helpText("Please refer to the Upload Guidelines:"),a("humandataguideline.weblink.ac.uk")),
                       fileInput("UploadHuman", h5("Choose CSV File"), multiple = FALSE,
                                 accept = c("text/csv", "text/comma-separated-values,text/plain", ".csv")),
                       h3("Upload Mouse Data"),
                       h5(helpText("Please refer to the Upload Guidelines:"),a("mousedataguideline.weblink.ac.uk")),
                       fileInput("UploadMouse", h5("Choose CSV File"), multiple = FALSE,
                                 accept = c("text/csv", "text/comma-separated-values,text/plain", ".csv"))))),
  tabsetPanel(
    tabPanel("Human Data",
             sidebarLayout(
               # ---- Sidebar: filters feeding the df() reactive in server ----
               sidebarPanel(width = 3,
                            helpText(h2("Filter database")),
                            selectizeInput("hDatatype", label = "Data Type", sort(unique(Human$DataType)), selected = NULL, multiple = TRUE,
                                           options = NULL),
                            selectizeInput("hPlatform", label = "Platform", sort(unique(Human$Platform)), selected = NULL, multiple = TRUE,
                                           options = NULL),
                            selectizeInput("hProject", label = "Project", sort(unique(Human$Project)), selected = NULL, multiple = TRUE,
                                           options = NULL),
                            selectizeInput("hCohort", label = "Cohort", sort(unique(Human$HumanCohort)), selected = NULL, multiple = TRUE,
                                           options = NULL),
                            checkboxGroupInput("hSex", label = "Sex",
                                               choices = sort(unique(Human$Sex)),
                                               selected = sort(unique(Human$Sex))),
                            checkboxGroupInput("hTissue", label = "Tissue",
                                               choices = sort(unique(Human$Tissue)),
                                               selected = sort(unique(Human$Tissue))),
                            sliderInput("hAge", label = ("Age"), min = min(na.omit(Human$Human_AgeYears)),
                                        max = max(na.omit(Human$Human_AgeYears)), value = c(min(na.omit(Human$Human_AgeYears)),
                                                                                            max(na.omit(Human$Human_AgeYears)))),
                            # Secondary filters live in a modal to keep the sidebar short.
                            actionButton("hExtraFilter", "More",style="color: #000000; background-color: #D3D3D3; border-color: #000000"),
                            bsModal("ExtraModal","Apply more filter","hExtraFilter",
                                    selectizeInput("hCellType", label = "Cell Type", sort(unique(Human$CellType)), selected = NULL, multiple = TRUE,
                                                   options = NULL),
                                    selectizeInput("hRegionFraction", label = "Brain Region or Blood Fraction", sort(unique(Human$BrainRegion_BloodFraction)), selected = NULL, multiple = TRUE,
                                                   options = NULL),
                                    h4(helpText("and more!"))),
                            br(),
                            br(),
                            br(),
                            actionButton("hResetFilter", "Reset Filter",style = "color: #000000; background-color: #f88282; border-color: #000000")
               ),
               # ---- Main panel: data table, download/report actions, plot ----
               mainPanel(
                 column(10,
                        br(),
                        DT::dataTableOutput("data", width = 850) ,
                        br(),
                        p(actionButton(inputId = "gen_report", label = "Generate Report",style="color: #000000; background-color: #D3D3D3; border-color: #000000"), align = "right",
                          actionButton(inputId = "DownloadButton", label = strong("Download Data"),style="color: #000000; background-color: #a9a9a9; border-color: #000000")),
                        p(helpText("Generate QC Report or Download Selected Data"), align = "right"),
                        h2("Plots"),
                        helpText("Select Variables for Plotting"),
                        p(column(3,selectInput("hX","X-axis",choices = sort(names(Human)), selected = "Sex")),
                          column(3,selectInput("hY","Y-axis",choices = sort(names(Human)), selected = "Human_AgeYears"))),
                        plotOutput("myplot", width = 850),
                        br(),br(), br(),br(),br(),br(),br(),
                        # Easter-egg button left in by the hackathon team.
                        actionButton("REDBUTTON", strong("Press here, Jon"), style="color: #ffffff; background-color: #ff0000; border-color: #ffffff"),
                        bsModal("JonModal", "Oh no, that's not good!", "REDBUTTON",
                                img(src = "muh.jpg", height = 425),
                                p(h3("The cow has eaten all the data. It is gone forever!"))),
                        br(),br()
                 ),
                 column(2,
                        br(),
                        actionButton("hColumnButton", "Column Selection",style="color: #000000; background-color: #D3D3D3; border-color: #000000"),
                        bsModal("ColumnModal","Columns to be display","hColumnButton",
                                checkboxGroupInput("hColumn", label = "Columns", choices = colnames(Human),
                                                   selected = colnames(Human)),
                                actionButton("hResetColumn", "Reset",style="color: #000000; background-color: #f88282; border-color: #000000")),
                        p(helpText("Define shown columns")),
                        br(), br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),br(),
                 )
               )
             )
    ),
    tabPanel("Mouse Data", h3("Ctrl+C - Ctrl+V "))
  )
)
# Define server logic ----
# ---- Server logic ----
server <- function(input, output, session) {

  # Reactive subset of `Human` honouring every sidebar filter. Mandatory
  # filters (sex, age, tissue) are applied first; each selectize filter is
  # applied only when the user selected at least one value.
  df <- reactive({
    req(input$hSex)
    req(input$hAge)
    Human <- filter(Human, Sex %in% input$hSex &
                      between(Human_AgeYears, input$hAge[1], input$hAge[2]) &
                      Tissue %in% input$hTissue)
    # FIX: the original tested `length(input$x > 0)` -- the length of a logical
    # vector -- which only worked because length(v > 0) == length(v). The
    # intended condition `length(input$x) > 0` is used below.
    # Data Type filters on v$hDatatype (a reactiveValues mirror) so the reset
    # button can restore it; see the observeEvent handlers further down.
    if (length(input$hDatatype) > 0) Human <- filter(Human, DataType %in% v$hDatatype)
    if (length(input$hPlatform) > 0) Human <- filter(Human, Platform %in% input$hPlatform)
    if (length(input$hProject) > 0) Human <- filter(Human, Project %in% input$hProject)
    if (length(input$hCohort) > 0) Human <- filter(Human, HumanCohort %in% input$hCohort)
    if (length(input$hCellType) > 0) Human <- filter(Human, CellType %in% input$hCellType)
    if (length(input$hRegionFraction) > 0) Human <- filter(Human, BrainRegion_BloodFraction %in% input$hRegionFraction)
    Human  # explicit return value (a trailing FALSE `if` would yield NULL)
  })

  # Mirror of the Data Type selection so "Reset Filter" can overwrite it.
  v <- reactiveValues(data = NULL)
  observeEvent(input$hDatatype, {
    v$hDatatype <- input$hDatatype
  })
  observeEvent(input$hResetFilter, {
    v$hDatatype <- sort(unique(Human$DataType))
  })

  # Filtered table, restricted to the user-selected columns.
  output$data <- DT::renderDataTable({
    DT::datatable(df()[, input$hColumn, drop = FALSE], options = list(scrollX = TRUE))
  })

  # Exploratory plot of the selected X/Y variables (always the full dataset).
  # NOTE(review): `is.numeric(input$hX)` tests the column NAME (a string), so
  # it is always FALSE and the boxplot branch is unreachable; plot() with a
  # factor X draws boxplots anyway. Presumably the author meant
  # is.numeric(Human[[input$hX]]) -- confirm intent before changing.
  output$myplot <-
    renderPlot({if(is.numeric(input$hX) == "TRUE"){
      boxplot(get(input$hY) ~ get(input$hX) , data=Human)
    }else{
      plot(get(input$hY) ~ get(input$hX) , data=Human)
    }
    })
  { "example second tab" }  # placeholder left from development
}
# Run the app ----
# Entry point: wires the `ui` definition above to the `server` function.
shinyApp(ui = ui, server = server)
|
9535195088274b57b79924a223a1cfb710b96eb3
|
fb5a4392c02428ef7171e757e9dced834f6b72e1
|
/07.PCI.computation.R
|
faf497752c85bfacc0c771e615b170a6d5f93d10
|
[] |
no_license
|
pdicarl3/BiolPsychiatry_2019
|
99d05e44fdf56a3cc109543d4672343725b80ff5
|
82de7fe00e7237b44b6e4b7ebb0957efe9914c69
|
refs/heads/master
| 2020-04-24T19:52:04.835367
| 2019-07-03T13:59:21
| 2019-07-03T13:59:21
| 172,226,048
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,531
|
r
|
07.PCI.computation.R
|
#########################################################################################
### POLYGENIC CO-EXPRESSION INDEX (PCI) computation #####################################
#########################################################################################
### Load the helper that computes A' (aprime), used as the SNP-genotype weight.
source("PCI_function.R")
### Labels used only to build the output object/file names at the bottom.
database = ""
PCI = "PCI"
nameData = ""
### Trait vector (e.g. a WGCNA module eigengene): one co-expression value per
### subject in the sample.
### NOTE(review): `module_eigengene` must already exist in the workspace.
expr <- module_eigengene
### SNP genotype matrix [samples x SNPs], plink allelic-dosage coding
### (0/1/2 = number of copies of the minor allele); the top 150 independent
### co-eQTLs are used to calculate the weights.
### NOTE(review): `gen` must already exist in the workspace.
gen <- data.frame(gen)
### PCI computation -------------------------------------------------------
### Step 1: compute one weight per (SNP, genotype) pair.  For each genotype
### level, A' compares the trait distribution of that genotype against the
### major-allele-homozygote ("0") reference distribution.  Output: a
### data.frame with columns (weight, SNP, genotype) used below to build PCIs.
AA <- NULL
for(i in colnames(gen)) {
  for(l in levels(as.factor(gen[,i]))) {
    ## "hit" rate: the reference-group mean evaluated under the current
    ## genotype's trait distribution.
    hit <- pnorm(mean(expr[as.character(gen[,i]) == "0"], na.rm=TRUE),
                 mean = mean(expr[gen[,i]==l], na.rm=TRUE),
                 sd = sd(expr[gen[,i]==l], na.rm=TRUE)
    )
    ## "false alarm" rate: the reference-group mean under its own
    ## distribution (always 0.5 by construction).
    fa <- pnorm(mean(expr[as.character(gen[,i]) == "0"], na.rm=TRUE),
                mean = mean(expr[as.character(gen[,i]) == "0"], na.rm=TRUE),
                sd = sd(expr[as.character(gen[,i]) == "0"], na.rm=TRUE)
    )
    a <- aprime(hit, fa) ## here, use the sourced function
    names(a) <- paste(i, l, sep=" ")
    AA <- c(AA, a)
  }
}
### Collect weights; row names "SNP genotype" are split back into columns.
AA <- data.frame(AA)
y <- strsplit(row.names(AA), " ")
AA$snp <- sapply(y, "[", 1)
AA$genotype <- sapply(y, "[", 2)
### Step 2: REPLACE GENOTYPES WITH THEIR WEIGHTS.
### This can also be used to score an independent dataset (be careful: check
### reference-allele concordance in the independent dataset first).
GS_score <- data.frame(apply(gen, 2, function(d) as.factor(d)))
for(i in colnames(GS_score)) {
  ## Relies on the factor levels being ordered exactly as they were when the
  ## weights were generated (levels(as.factor(...)) above).
  levels(GS_score[,i]) <- AA[AA[,2]==i, 1]
}
### Numeric weight matrix [samples x SNPs].
GS_score <- data.frame(apply(GS_score, 2, as.double))
row.names(GS_score) <- row.names(gen)
######### !!!!!!!
### This script computes the PCI with ALL the SNPs selected in the input
### (e.g. the 150 top-ranked co-eQTLs).  To compute PCIs with any other
### number of SNPs, simply subset the columns of "GS_score" (top 2, top 3,
### top 4, etc.).
#########
### Step 3: the PCI is the per-subject (row) mean of the SNP-genotype
### weights; as defined by the custom function it indexes co-expression
### NEGATIVELY (negatively correlated with the module eigengene).
GS_score <- transform(GS_score, GS_neg = rowMeans(GS_score, na.rm=TRUE))
### Sign-flip to obtain a PCI that indexes co-expression positively.
GS_score$GS_pos <- GS_score$GS_neg*(-1)
### Save the scores, genotypes and weights under dataset-specific names.
assign(paste(PCI, database, nameData, sep="."), GS_score)
assign(paste("geno", database, nameData, sep="."), gen)
assign(paste("AA", database, nameData, sep="."), AA)
save(list = c(paste(PCI, database, nameData, sep="."),
              paste("geno", database, nameData, sep="."),
              paste("AA", database, nameData, sep=".")),
     file = paste0(PCI, "_", database, "_", nameData, ".RData"))
|
8a14c905f2eb02bbea3498047b5ab5513d27c285
|
2f2cc4176842b864f287c1b3198bf90990734bcf
|
/create_dfm.R
|
8ca2bde180f15bbe1261adef24900f14f27bdd11
|
[] |
no_license
|
m4n0v31/DS_Cap
|
1c7315c4133c60980d3bfced8c015cedcff4e535
|
0ab89e87b8254d124bc933a6d5014afdbd7e56b9
|
refs/heads/master
| 2021-01-10T07:45:19.650755
| 2016-01-12T17:39:18
| 2016-01-12T17:39:18
| 48,003,254
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,905
|
r
|
create_dfm.R
|
# Build and cache document-feature matrices (1- to 4-grams) from the training
# corpus; each dfm is computed only if its .RData cache file is missing.
library(ggplot2)
library(knitr)
library(tm)
library(quanteda)
# Corpus of all "*train.txt" files under data/.
corpora <- corpus(textfile("data/*train.txt"))
# Unigram dfm, cached to data/dfm.RData.
if(!file.exists("data/dfm.RData")){
  dfm <- dfm(corpora)
  save(dfm, file="data/dfm.RData")
  rm(dfm)  # free memory immediately after caching
}
# 2-, 3- and 4-gram dfms, each cached as data/dfm_<i>.RData under the
# matching object name dfm_<i>.
for(i in 2:4){
  if(!file.exists(paste("data/dfm_", i, ".RData", sep=""))){
    assign(paste("dfm_",i,sep=""), dfm(corpora, ngrams = i, concatenator = " "))
    save(list = paste("dfm_",i,sep=""), file=paste("data/dfm_", i, ".RData", sep=""))
    rm(list = paste("dfm_",i,sep=""))
  }
}
#
# # load 1-grams
# load("data/dfm.RData")
# vec <- topfeatures(dfm, n = length(features(dfm)), decreasing = TRUE)
# data <- data.frame(keyName = names(vec), count = vec, row.names=NULL)
# rm(dfm)
# # barplot with 20 most occuring
# ggplot(data[1:20,], aes(x = reorder(keyName, -count), y = count)) + geom_bar(stat = "identity")
# # Calculate 1-grams coverage of the dataset
# data$cumsum <- cumsum(data$count)
# stats <- c(sum(data$cumsum < sum(data$count)*50/100), sum(data$cumsum < sum(data$count)*90/100), sum(data$cumsum < sum(data$count)*95/100), nrow(data))
# stats <- data.frame(as.list(stats))
# names(stats) <- c("50%", "90%", "95%", "100%")
# knitr::kable((stats))
#
# # load 2-grams
# load("data/dfm_2.RData")
# vec <- topfeatures(dfm_2, n = length(features(dfm_2)), decreasing = TRUE)
# data <- data.frame(keyName = names(vec), count = vec, row.names=NULL)
# rm(dfm_2)
# # barplot with 20 most occuring
# ggplot(data[1:20,], aes(x = reorder(keyName, -count), y = count)) + geom_bar(stat = "identity")
# # Calculate 2-grams coverage of the dataset
# data$cumsum <- cumsum(data$count)
# stats <- c(sum(data$cumsum < sum(data$count)*50/100), sum(data$cumsum < sum(data$count)*90/100), sum(data$cumsum < sum(data$count)*95/100), nrow(data))
# stats <- data.frame(as.list(stats))
# names(stats) <- c("50%", "90%", "95%", "100%")
# knitr::kable((stats))
|
d3b0d56d57d27ca6a0ea77ddac0bf985acfbb6a5
|
d96320b1375d32f0c7a84ddf621d8e1c2501e478
|
/valence_trust.R
|
8a78fb3e8d3be69e587742c33770eac5a33a4f11
|
[
"MIT"
] |
permissive
|
louismaiden/blogg
|
e1e0b21019167e220597f892ff929eadece8cada
|
446fbad93d86e302e0687967e9f24b840c97a367
|
refs/heads/master
| 2021-01-24T03:56:52.427976
| 2020-09-25T13:24:03
| 2020-09-25T13:24:03
| 121,855,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,781
|
r
|
valence_trust.R
|
# Regress mean song valence per release year against mean public-trust survey
# responses -- one linear model per survey question/sheet.
library(dplyr)
library(ggplot2)
library(hrbrthemes)
library(plotly)
library(highcharter)
library(moderndive)
library(readxl)
library(tidyverse)
library(stargazer)
# Local inputs: a saved tracks subset plus yearly survey/economic spreadsheets.
load("C:/Users/nyulo/Documents/R/blogg/tracks_subset")
party <- read_xlsx("C:/Users/nyulo/Documents/R/blogg/political_party_by_year.xlsx")
gdp <- read_xlsx("C:/Users/nyulo/Documents/R/blogg/gdp_pc_growth.xlsx")
trust <- read_xlsx("C:/Users/nyulo/Documents/R/blogg/trust_by_year.xlsx")
trust_dom <- read_xlsx("C:/Users/nyulo/Documents/R/blogg/trust_by_year.xlsx", sheet = "dom")
media <- read_xlsx("C:/Users/nyulo/Documents/R/blogg/trust_by_year.xlsx", sheet = "media")
politicians <- read_xlsx("C:/Users/nyulo/Documents/R/blogg/trust_by_year.xlsx", sheet = "politicians")
americans <- read_xlsx("C:/Users/nyulo/Documents/R/blogg/trust_by_year.xlsx", sheet = "americans")
wash_do_right <- read_xlsx("C:/Users/nyulo/Documents/R/blogg/trust_by_year.xlsx", sheet = "wash_do_right")
crooked <- read_xlsx("C:/Users/nyulo/Documents/R/blogg/trust_by_year.xlsx", sheet = "crooked")
state_gov <- read_xlsx("C:/Users/nyulo/Documents/R/blogg/trust_by_year.xlsx", sheet = "state_gov")
local_gov <- read_xlsx("C:/Users/nyulo/Documents/R/blogg/trust_by_year.xlsx", sheet = "local_gov")
# NOTE(review): party, gdp, trust_dom, politicians, state_gov and local_gov
# are loaded but never used below.
#--------------------international problems -------------------------------
# Each section: derive release_year from the first four characters of the
# question column, join with the tracks, average valence and trust per year,
# rescale trust (trust2 = min-max, trust3 = trust/100) and fit
# trust ~ valence.
trust_by_year <- trust %>%
  mutate(release_year = as.numeric(substr(`International problems`,1,4))) %>%
  select(-`International problems`)
tracks_subset %>%
  inner_join(trust_by_year) %>%
  group_by(release_year) %>%
  summarize(valence = mean(valence),
            trust = mean(trust)) %>%
  mutate(trust2 = (trust-min(trust))/(max(trust)-min(trust)),
         trust3 = trust / 100) %>%
  lm(trust3 ~ valence, data = .) %>%
  stargazer(type = 'html') %>% View
#---------------------media------------------------------------------------
# NOTE(review): this section fits trust2 (min-max scaled) while the others
# fit trust3 (trust/100) -- confirm which scaling is intended.
trust_by_year <- media %>%
  mutate(release_year = as.numeric(substr(media,1,4))) %>%
  select(-media)
tracks_subset %>%
  inner_join(trust_by_year) %>%
  group_by(release_year) %>%
  summarize(valence = mean(valence),
            trust = mean(trust)) %>%
  mutate(trust2 = (trust-min(trust))/(max(trust)-min(trust)),
         trust3 = trust / 100) %>%
  lm(trust2 ~ valence, data = .) %>%
  summary
#---------------------american people---------------------------------------------
trust_by_year <- americans %>%
  mutate(release_year = as.numeric(substr(americans,1,4))) %>%
  select(-americans)
tracks_subset %>%
  inner_join(trust_by_year) %>%
  group_by(release_year) %>%
  summarize(valence = mean(valence),
            trust = mean(trust)) %>%
  mutate(trust2 = (trust-min(trust))/(max(trust)-min(trust)),
         trust3 = trust / 100) %>%
  lm(trust3 ~ valence, data = .) %>%
  summary
#---------------------wash_do_right---------------------------------------------
trust_by_year <- wash_do_right %>%
  mutate(release_year = as.numeric(substr(wash_do_right,1,4))) %>%
  select(-wash_do_right)
tracks_subset %>%
  inner_join(trust_by_year) %>%
  group_by(release_year) %>%
  summarize(valence = mean(valence),
            trust = mean(trust)) %>%
  mutate(trust2 = (trust-min(trust))/(max(trust)-min(trust)),
         trust3 = trust / 100) %>%
  lm(trust3 ~ valence, data = .) %>%
  summary
#---------------------crooked---------------------------------------------
trust_by_year <- crooked %>%
  mutate(release_year = as.numeric(substr(crooked,1,4))) %>%
  select(-crooked)
tracks_subset %>%
  inner_join(trust_by_year) %>%
  group_by(release_year) %>%
  summarize(valence = mean(valence),
            trust = mean(trust)) %>%
  mutate(trust2 = (trust-min(trust))/(max(trust)-min(trust)),
         trust3 = trust / 100) %>%
  lm(trust3 ~ valence, data = .) %>%
  summary
|
e1616c7ecc06c2c5034ddeca181abf36f762684f
|
5f0cc64d3fc8b19504074004c481a6c3f3ea22a8
|
/webtool/scripts/db_operations.R
|
d496b0b1f7e51b569a942256628306a24543c7dc
|
[
"MIT"
] |
permissive
|
Parivesh123/rain-fp7
|
ba3d7fb650d385d145db450349f1c7e481556518
|
81f0adc91019c6b45a2242dcadeb37368d7900a5
|
refs/heads/master
| 2020-05-24T21:27:20.787593
| 2017-06-09T17:40:47
| 2017-06-09T17:40:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,859
|
r
|
db_operations.R
|
# -------------------------------------------------------------------------
# Risk Assessment Tool
#
# Tool developed within the "Risk Analysis of Infrastructure Networks in
# response to extreme weather" (RAIN) FP7 European project (rain-project.eu)
# by Grupo AIA (www.aia.es).
#
# Authors:
# -- xclotet (clotetx@aia.es) @xclotet
# -- mhalat (halatm@aia.es)
#
# -------------------------------------------------------------------------
#
#######################################################
# scripts/db_operations.R
#
#
#######################################################
# Ensure the database handle exists before defining the helpers below.
# db_access.R creates `dbRAIN` (and presumably the t.* lazy table handles
# used by the get.* functions -- verify in scripts/db_access.R).
if (!exists("dbRAIN")) {
  source("scripts/db_access.R")
}
# Available Tables --------------------------------------------------------
#
# _Config
# t.grid
# t.grid.stations
# t.grid.stations.susc
# t.grid.stations.status
# t.grid.lines
# t.grid.towers
# t.grid.towers.status
# t.grid.towers.susc
# t.trans.elements
# t.trans.elements.status
# t.trans.elements.susc
#
# t.region.map
# t.region.landslide
# t.type_element
# t.elements
# t.eng.measures
#
# _EWEs
# t.ewes
#
# _Cases
# t.cases
# t.cases.ewe
# t.cases.towers.status
# Libraries ---------------------------------------------------------------
# Bug fix: use library() instead of require() for hard dependencies --
# require() merely returns FALSE when a package is missing, deferring the
# failure to a confusing error later on.
library(dplyr)
library(data.table)
library(tools)
library(DBI)
# General functions -------------------------------------------------------
# connection <- dbRAIN$con
# Insert function
db.insert <- function(connection, table, variables, values, verbose = FALSE) {
  # Build and run "INSERT INTO <table> (<variables>) VALUES (<values>)".
  # `values` must already be SQL-ready (character values single-quoted by
  # the caller).
  # WARNING: the statement is assembled by string pasting, so this must
  # never be fed untrusted input (SQL injection risk).
  query <- paste0("INSERT INTO ", table,
                  " (", paste(variables, collapse = ","), ")",
                  " VALUES ")
  vals <- paste0("(", paste0(values, collapse = ","), ")")
  # Bug fix: dbExecute() replaces dbSendQuery(), which returned a result
  # object that was never cleared (DBI leaks/warns about pending results).
  dbExecute(connection, paste0(query, vals))
  if (verbose) {
    cat("-------------------------------------------\n")
    cat(paste0("Properly inserted into DB (", table, ")\n"))
  }
}
# test function:
# db.insert(dbRAIN$con, "grid", "name", "'Alpine Case'")
db.get.last.id <- function(connection, table) {
  # Fetch the largest primary key currently stored in `table`.
  result <- dbGetQuery(connection, sprintf("SELECT MAX(id) FROM %s;", table))
  result$max
}
# test function:
# db.get.last.id(dbRAIN$con, "grid")
# Set ---------------------------------------------------------------------
insert.grid <- function(connection, grid_name){
  # Register a new grid row; the name is single-quoted as an SQL literal.
  quoted_name <- paste0("'", grid_name, "'")
  db.insert(connection, "grid", "name", quoted_name)
}
insert.grid.stations <- function(connection, stations_data, grid_id = NULL) {
  # Insert one row per station into alpine.grid_stations.  Text columns must
  # be single-quoted SQL literals; grid_id defaults to the newest grid.
  table. <- "alpine.grid_stations"
  variables <- c("id_grid", "name", "name_extra", "group_", "x", "y",
                 "color_background", "color_border")
  st_data <- copy(stations_data)  # copy() so `:=` does not mutate the caller
  st_data[, group := paste0("'", group, "'")]
  st_data[, id := paste0("'", id, "'")]
  st_data[, name := paste0("'", name, "'")]
  # Bug fix: the colour columns are character values but were pasted into
  # the INSERT unquoted (invalid SQL).  Quote them the same way
  # update.grid.stations.color() does.
  st_data[, color.background := paste0("'", color.background, "'")]
  st_data[, color.border := paste0("'", color.border, "'")]
  if (is.null(grid_id)) {
    grid_id <- db.get.last.id(connection, "grid")
  }
  for (i in seq_len(nrow(st_data))) {
    values <- paste0(c(grid_id,
                       st_data[i,
                               .SD,
                               .SDcols = c("id", "name", "group", "X", "Y",
                                           "color.background", "color.border")]),
                     collapse = ",")
    db.insert(connection, table., variables, values)
  }
}
insert.grid.stations.status <- function(connection, grid.stations,
                                        grid_id = NULL) {
  # Insert the initial status row ("station", element type 1) for each
  # station id of a grid.
  table. <- "grid_stations_status"
  variables <- c("id_grid", "id_station", "type_", "id_type_element")
  # Bug fix: id_grid was hard-coded to 1, silently ignoring the grid_id
  # argument; fall back to the newest grid like the sibling inserters.
  if (is.null(grid_id)) {
    grid_id <- db.get.last.id(connection, "grid")
  }
  for (id in grid.stations$id) {
    values <- c(grid_id, as.integer(id), "'station'", 1)
    db.insert(connection, table., variables, values)
  }
}
update.grid.stations.color <- function(connection, stations_data, grid_id = NULL) {
  # Update the two colour columns of alpine.grid_stations one station at a
  # time, matching rows by (quoted) station name.
  table. <- "alpine.grid_stations"
  st_data <- copy(stations_data)  # copy() so `:=` does not mutate the caller
  st_data[, id := paste0("'", id, "'")]
  st_data[, color.background := paste0("'", color.background, "'")]
  st_data[, color.border := paste0("'", color.border, "'")]
  if (is.null(grid_id)) {
    grid_id <- db.get.last.id(connection, "grid")
  }
  # NOTE(review): grid_id is resolved but not used in the UPDATE statements.
  for (i in seq_len(nrow(st_data))) {
    name <- st_data[i, id]
    values.1 <- st_data[i, color.background]
    values.2 <- st_data[i, color.border]
    # Bug fix: dbExecute() for data-modifying statements; dbSendQuery()
    # leaks an unfetched result set that must be cleared manually.
    dbExecute(connection, paste0("UPDATE ", table., " SET color_background = ",
                                 values.1, " WHERE name = ", name))
    dbExecute(connection, paste0("UPDATE ", table., " SET color_border = ",
                                 values.2, " WHERE name = ", name))
  }
}
insert.grid.lines <- function(connection, lines_data, grid_id = NULL) {
  # Insert one row per grid line; text columns become single-quoted SQL
  # literals, and grid_id defaults to the most recently inserted grid.
  table. <- "alpine.grid_lines"
  variables <- c("id_grid", "label", "group_", "from_", "to_")
  quoted <- copy(lines_data)
  for (col in c("group", "label", "from", "to")) {
    quoted[[col]] <- paste0("'", quoted[[col]], "'")
  }
  if (is.null(grid_id)) {
    grid_id <- db.get.last.id(connection, "grid")
  }
  for (row in seq_len(nrow(quoted))) {
    row_values <- paste0(c(grid_id,
                           quoted[row,
                                  .SD,
                                  .SDcols = c("label", "group", "from", "to")]),
                         collapse = ",")
    db.insert(connection, table., variables, row_values)
  }
}
insert.grid.towers.all <- function(connection, towers_data, grid_id = NULL) {
  # For each tower, insert three linked rows: the tower itself
  # (alpine.grid_towers), its status (grid_towers_status, element type 2)
  # and its landslide susceptibility (grid_towers_susc, map 1).  The freshly
  # generated tower id is read back after each insert to link the rows.
  table. <- "alpine.grid_towers"
  variables <- c("id_grid", "label_line", "group_", "x", "y",
                 "height", "slope", "land_type", "id_osm")
  table.status <- "alpine.grid_towers_status"
  variables.status <- c("id_grid", "id_tower", "id_type_element", "type_")
  table.susc <- "alpine.grid_towers_susc"
  variables.susc <- c("id_grid", "id_tower", "id_map", "susc")
  # copy() so the `:=` quoting below does not mutate the caller's table.
  t_data <- copy(towers_data)
  t_data[, group := paste0("'", group, "'")]
  t_data[, power := paste0("'", power, "'")]
  t_data[, full_id := paste0("'", full_id, "'")]
  t_data[, landType := paste0("'", landType, "'")]
  t_data[, region := paste0("'", region, "'")]
  type_element <- 2  # towers' id_type_element
  id_map <- 1        # susceptibility rows refer to map 1
  if (is.null(grid_id)) {
    grid_id <- db.get.last.id(connection, "grid")  # default: newest grid
  }
  for (i in seq(1, dim(t_data)[1])) {
    values <- paste0(c(grid_id,
                       t_data[i,
                              .SD,
                              .SDcols = c("region", "group", "XCOORD",
                                          "YCOORD", "X30n000e_20",
                                          "malborghet", "landType",
                                          "osm_id")]),
                     collapse = ",")
    db.insert(connection, table., variables, values)
    # Id of the row just inserted, used as foreign key in the other tables.
    id_tower <- db.get.last.id(connection, table.)
    values.status <- paste0(c(grid_id, id_tower, type_element,
                              t_data[i, power]),
                            collapse = ",")
    db.insert(connection, table.status, variables.status, values.status)
    # Missing susceptibility values default to 0.
    values.susc <- paste0(c(grid_id, id_tower, id_map,
                            t_data[i, ifelse(is.na(susceptibi),
                                             0, susceptibi)]),
                          collapse = ",")
    db.insert(connection, table.susc, variables.susc, values.susc)
  }
}
update.grid.towers.all <- function(connection, towers_data, grid_id = NULL) {
  # Back-fill the id_osm column of alpine.grid_towers from `towers_data`.
  # NOTE(review): rows are matched by "WHERE id = i", i.e. the loop index --
  # this assumes tower ids are exactly 1..nrow(towers_data) in insertion
  # order; verify before reuse.
  table. <- "alpine.grid_towers"
  t_data <- copy(towers_data)  # copy() so `:=` does not mutate the caller
  t_data[, full_id := paste0("'", full_id, "'")]
  if (is.null(grid_id)) {
    grid_id <- db.get.last.id(connection, "grid")
  }
  # NOTE(review): grid_id is resolved but never used in the UPDATE below.
  for (i in seq(1, dim(t_data)[1])) {
    values <- t_data[i,
                     .SD,
                     .SDcols = c("full_id")]
    query <- paste0("UPDATE ", table., " SET id_osm = ", values,
                    " WHERE id = ", i)
    # NOTE(review): dbSendQuery() leaves an uncleared result set here;
    # dbExecute() is the usual DBI call for UPDATE statements.
    dbSendQuery(connection, query)
  }
}
insert.region.landslide <- function(connection, filename, name = NULL){
  # Register a landslide susceptibility map.  `name` defaults to the bare
  # filename with its extension stripped; both values are quoted for SQL.
  base <- file_path_sans_ext(filename)
  if (is.null(name)) {
    name <- base
  }
  db.insert(connection,
            "region_landslide",
            c("name", "filename"),
            c(paste0("'", name, "'"), paste0("'", base, "'")))
}
insert.trans.elements <- function(connection, land.elements,
                                  land.transportation.id = NULL) {
  # Insert one row per land-transportation element; the network id defaults
  # to the most recently inserted land_transportation row.
  table. <- "alpine.trans_elements"
  variables <- c("id_land_transportation", "name", "way_name",
                 "basic_element_type", "means_transportation", "segment_name")
  # copy() so quoting does not mutate the caller; every column is wrapped
  # in single quotes as an SQL string literal.
  t_data <- copy(land.elements)
  t_data[, names(t_data) := lapply(.SD, function(x) paste0("'", x, "'"))]
  if (is.null(land.transportation.id)) {
    land.transportation.id <- db.get.last.id(connection, "land_transportation")
  }
  for (i in seq(1, dim(t_data)[1])) {
    values <- paste0(c(land.transportation.id,
                       t_data[i,
                              .SD,
                              .SDcols = c("name", "way_name",
                                          "basic_element_type",
                                          "means_transportation",
                                          "segment_name")]),
                     collapse = ",")
    db.insert(connection, table., variables, values)
  }
}
insert.trans.elements.status <- function(connection, land.elements.status,
                                         land.transportation.id = NULL) {
  # Insert element -> element-type status rows for one land-transportation
  # network; the network id defaults to the most recently inserted one.
  table. <- "alpine.trans_elements_status"
  variables <- c("id_land_transportation", "id_trans_elements", "id_type_element")
  t_data <- copy(land.elements.status)
  if (is.null(land.transportation.id)) {
    land.transportation.id <- db.get.last.id(connection, "land_transportation")
  }
  for (i in seq(1, dim(t_data)[1])) {
    values <- paste0(c(land.transportation.id,
                       t_data[i,
                              .SD,
                              .SDcols = c("id_trans_elements",
                                          "id_type_element")]),
                     collapse = ",")
    db.insert(connection, table., variables, values)
  }
}
insert.trans.elements.susc <- function(connection, land.elements.susc,
                                       land.transportation.id = NULL) {
  # Insert per-element susceptibility values (one row per element and map)
  # for one land-transportation network; the network id defaults to the most
  # recently inserted one.
  table. <- "alpine.trans_elements_susc"
  variables <- c("id_land_transportation", "id_trans_elements", "id_map", "susc")
  t_data <- copy(land.elements.susc)
  if (is.null(land.transportation.id)) {
    land.transportation.id <- db.get.last.id(connection, "land_transportation")
  }
  for (i in seq(1, dim(t_data)[1])) {
    values <- paste0(c(land.transportation.id,
                       t_data[i,
                              .SD,
                              .SDcols = c("id_trans_elements",
                                          "id_map", "susc")]),
                     collapse = ",")
    db.insert(connection, table., variables, values)
  }
}
# Example fixture for insert.eng.measures() below.
# NOTE(review): this is defined at the top level, so it is (re)created every
# time the script is sourced -- consider moving it into a test file.
eng.measures <- data.table(id_element = 1,
                           id_type_element = 0,
                           name = "Test Measure Station",
                           description = "Superinvestment in any kind of station.",
                           improvement = 0.5,
                           cost_improvement = 100000)
insert.eng.measures <- function(connection, eng.measures) {
  # Insert engineering-measure rows into eng_measures; name and description
  # are single-quoted as SQL string literals.
  table. <- "eng_measures"
  variables <- c("id_element", "id_type_element",
                 "name", "description",
                 "improvement", "cost_improvement")
  # Bug fix: operate on a copy -- `:=` modifies a data.table by reference,
  # so the original silently re-quoted the caller's object on every call
  # (the sibling insert.* helpers all copy() first).
  eng.measures <- copy(eng.measures)
  eng.measures[, name := paste0("'", name, "'")]
  eng.measures[, description := paste0("'", description, "'")]
  for (i in seq_len(nrow(eng.measures))) {
    values <- paste0(c(eng.measures[i ,
                                    .SD,
                                    .SDcols = c("id_element", "id_type_element",
                                                "name", "description",
                                                "improvement",
                                                "cost_improvement")]),
                     collapse = ",")
    db.insert(connection, table., variables, values)
  }
}
# Get ---------------------------------------------------------------------
# get by grid_id ....
# Lines of one grid.
# NOTE(review): this definition is immediately shadowed by a second
# get.grid.lines() defined a few lines below (which drops the `.` argument
# suffix convention); only the later definition is ever callable.
get.grid.lines <- function(id_grid. = 1){
  return(as.data.table(collect(filter(t.grid.lines, id_grid == id_grid.))))
}
get.grid.stations <- function(id_grid){
  # Stations of one grid.  Bug fix: in the original,
  # filter(t.grid.stations, id_grid == id_grid) compared the column with
  # itself (always TRUE) because the argument shadows the column name; a
  # differently named local restores the intended filter.
  wanted_grid <- id_grid
  return(as.data.table(collect(filter(t.grid.stations,
                                      id_grid == wanted_grid))))
}
get.grid.stations.susc <- function(id_grid, id_map){
  # Station susceptibility values for one grid and map.  Bug fix: both
  # original filter conditions compared a column with itself (argument names
  # shadowed by column names), so no filtering happened at all.
  wanted_grid <- id_grid
  wanted_map <- id_map
  return(as.data.table(collect(filter(t.grid.stations.susc,
                                      id_grid == wanted_grid,
                                      id_map == wanted_map))))
}
get.grid.stations.status <- function(id_grid, id_map){
  # Station status rows for one grid.  Bug fix: `id_grid == id_grid`
  # compared the column with itself (always TRUE) because the argument
  # shadows the column name.
  # NOTE(review): id_map is accepted but never used -- confirm intent.
  wanted_grid <- id_grid
  return(as.data.table(collect(filter(t.grid.stations.status,
                                      id_grid == wanted_grid))))
}
get.grid.lines <- function(id_grid){
  # Lines of one grid.  Bug fix: `id_grid == id_grid` compared the column
  # with itself (always TRUE) because the argument shadows the column name;
  # a differently named local restores the intended filter.
  wanted_grid <- id_grid
  return(as.data.table(collect(filter(t.grid.lines, id_grid == wanted_grid))))
}
get.grid.towers <- function(id_grid. = 1, label_line. = NULL){
  # Towers of one grid, optionally restricted to one or several lines.
  selected <- filter(t.grid.towers, id_grid == id_grid.)
  if (!is.null(label_line.)) {
    if (length(label_line.) == 1) {
      selected <- dplyr::filter(selected, label_line == label_line.)
    } else {
      selected <- dplyr::filter(selected, label_line %in% label_line.)
    }
  }
  as.data.table(collect(selected))
}
get.grid.towers.per.line <- function(id_grid. = 1){
  # Number of towers per transmission line within one grid.
  tower_lines <- filter(t.grid.towers, id_grid == id_grid.) %>%
    select(label_line) %>%
    collect()
  as.data.table(count(tower_lines, label_line))
}
get.grid.towers.status <- function(id_grid., id_case = NULL){
  # Tower status rows: the initial condition when id_case is NULL, otherwise
  # the status recorded for a specific case.
  if (is.null(id_case)) {
    return(as.data.table(collect(dplyr::filter(t.grid.towers.status,
                                               id_grid == id_grid.))))
  } else {
    # Bug fix: `id_case == id_case` compared the column with itself (always
    # TRUE) because the argument shadows the column name.
    wanted_case <- id_case
    return(as.data.table(collect(dplyr::filter(t.cases.towers.status,
                                               id_grid == id_grid.,
                                               id_case == wanted_case))))
  }
}
get.grid.towers.susc <- function(id_grid, id_map){
  # Tower susceptibility values for one grid and map.  Bug fix: both
  # original filter conditions compared a column with itself (argument names
  # shadowed by column names), so no filtering happened at all.
  wanted_grid <- id_grid
  wanted_map <- id_map
  return(as.data.table(collect(filter(t.grid.towers.susc,
                                      id_grid == wanted_grid,
                                      id_map == wanted_map))))
}
get.grid.towers.to.improve <- function(id_grid = 1, id_case = NULL, id_map = 1,
                                       label_line = NULL){
  # Towers (optionally of specific lines) joined with their susceptibility
  # and status, stripped of geometry/metadata columns -- the slim table used
  # when selecting candidates for engineering improvements.
  # NOTE(review): id_case is accepted but never used here.
  grid.towers <- get.grid.towers(id_grid. = id_grid, label_line. = label_line)[, id_tower := id]
  setkey(grid.towers, id_tower)
  grid.towers.status <- get.grid.towers.status(id_grid)[, id_grid := NULL]
  grid.towers.susc <- get.grid.towers.susc(id_grid, id_map)[, id_grid := NULL]
  # merge() on the id_tower key set above.
  grid.towers <- merge(grid.towers, grid.towers.susc)
  grid.towers <- merge(grid.towers, grid.towers.status)
  # Drop columns not needed for the improvement selection.
  grid.towers[, c("x", "y", "height", "slope", "land_type", "id_osm",
                  "id_map", "type_") := NULL]
  return(grid.towers)
}
get.grid.towers.all <- function(id_grid = 1, id_case, id_map = 1){
  # Full tower table of one grid joined with status and susceptibility rows.
  # NOTE(review): id_case is accepted but never used here.
  grid.towers <- get.grid.towers(id_grid)[, id_tower := id]
  setkey(grid.towers, id_tower)
  grid.towers.status <- get.grid.towers.status(id_grid)[, id_grid := NULL]
  grid.towers.susc <- get.grid.towers.susc(id_grid, id_map)[, id_grid := NULL]
  # merge() on the id_tower key set above.
  grid.towers <- merge(grid.towers, grid.towers.susc)
  grid.towers <- merge(grid.towers, grid.towers.status)
  return(grid.towers)
}
get.grid.towers.initial <- function(id_grid, id_map = 1){
  # Initial (pre-event) tower table: the full join with no case applied.
  get.grid.towers.all(id_grid, id_case = NULL, id_map = id_map)
}
get.elements <- function(id. = NULL) {
  # Elements catalogue, optionally restricted to one or several ids.
  if (is.null(id.)) {
    return(as.data.table(collect(t.elements)))
  }
  picked <- if (length(id.) > 1) {
    dplyr::filter(t.elements, id %in% id.)
  } else {
    dplyr::filter(t.elements, id == id.)
  }
  as.data.table(collect(picked))
}
get.type.element <- function(id. = NULL) {
  # Element-type lookup table, optionally restricted to one or several ids.
  if (is.null(id.)) {
    return(as.data.table(collect(t.type.element)))
  }
  picked <- if (length(id.) > 1) {
    dplyr::filter(t.type.element, id %in% id.)
  } else {
    dplyr::filter(t.type.element, id == id.)
  }
  as.data.table(collect(picked))
}
get.trans.elements <- function(id_land_transportation){
  # Elements of one land-transportation network.  Bug fix: the original
  # filter compared the column with itself (argument shadows the column
  # name), so every network's elements were returned.
  wanted_lt <- id_land_transportation
  return(as.data.table(collect(filter(t.trans.elements,
                                      id_land_transportation == wanted_lt))))
}
get.trans.elements.susc <- function(id_land_transportation, id_map){
  # Susceptibility rows for one network and map.  Bug fix: both original
  # filter conditions compared a column with itself (argument names
  # shadowed by column names), so no filtering happened at all.
  wanted_lt <- id_land_transportation
  wanted_map <- id_map
  return(as.data.table(collect(filter(t.trans.elements.susc,
                                      id_land_transportation == wanted_lt,
                                      id_map == wanted_map))))
}
get.trans.elements.status <- function(id_land_transportation, id_map){
  # Status rows for one network.  Bug fix: the original filter compared the
  # column with itself (argument shadows the column name).
  # NOTE(review): id_map is accepted but never used -- confirm intent.
  wanted_lt <- id_land_transportation
  return(as.data.table(collect(filter(t.trans.elements.status,
                                      id_land_transportation == wanted_lt))))
}
get.eng.measures <- function(id_element. = NULL) {
  # Engineering measures, optionally restricted to given element id(s).
  if (is.null(id_element.)) {
    return(as.data.table(collect(t.eng.measures)))
  }
  picked <- if (length(id_element.) > 1) {
    dplyr::filter(t.eng.measures, id_element %in% id_element.)
  } else {
    dplyr::filter(t.eng.measures, id_element == id_element.)
  }
  as.data.table(collect(picked))
}
|
2a643b3fa4818ad854b3960aa55146fc97e0771b
|
965348fc1c33ebbce0e9f802bd4213a24eaf44c5
|
/man/spectrophoto_boxplot.Rd
|
9bb35eec056af109035d9a2727faea4843bc86b5
|
[
"MIT"
] |
permissive
|
IreneSchimmel/funkyfigs
|
38dfabbd0bc77377f1a422d7dd64187dbb8a2ab5
|
19a789de9f70b3f5a3723bf52bd7b35b52d9bdbb
|
refs/heads/main
| 2023-05-10T09:25:09.624368
| 2021-06-18T13:06:48
| 2021-06-18T13:06:48
| 377,165,566
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 707
|
rd
|
spectrophoto_boxplot.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spectrophoto_boxplot.R
\name{spectrophoto_boxplot}
\alias{spectrophoto_boxplot}
\title{Spectrophotography boxplot}
\usage{
spectrophoto_boxplot(data, group, y)
}
\arguments{
\item{data}{the desired dataset}
\item{group}{the desired group}
\item{y}{the extinction data}
}
\value{
boxplot
}
\description{
The spectrophoto_boxplot function makes it easier to quickly generate a boxplot on your spectrophotography data.
}
\details{
Note: for the example, no actual spectrophotography data was used.
It's just an example of what the output should look like.
}
\examples{
spectrophoto_boxplot(iris, iris$Species, iris$Sepal.Length)
}
|
3534d4361dc4f05a587fd9b560260b358307ff12
|
e5ec2b5cd54b930e6a8303536d10deb291964e87
|
/Protocol_Simulation/Protocol_Simulation/Unit_Tests/testthat/test_markov_states_sum_to_one.R
|
cb0c30cc40b5650ae7bb5a42f3d4b457275d9d60
|
[] |
no_license
|
bridder/BHM_PS_2020
|
df6cd496d9838b770c2be8cab5f7ca8ca01a47eb
|
5f6f2ef1388765347eb23f20a8a37a5021212f14
|
refs/heads/master
| 2020-12-27T21:07:24.600615
| 2020-02-03T20:16:45
| 2020-02-03T20:16:45
| 238,053,473
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,796
|
r
|
test_markov_states_sum_to_one.R
|
# Unit tests for markov_states_sum_to_one(stateDataFrame, sumTol): every row
# of a Markov state data frame must sum to 1 within the numeric tolerance.
context("Testing markov_states_sum_to_one.R")
# Non-data.frame first arguments must be rejected.
test_that("Expect errors if stateDataFrame is not a dataframe.",
          {
            expect_error(markov_states_sum_to_one("a",1e-6))
            expect_error(markov_states_sum_to_one(23L,1e-6))
            expect_error(markov_states_sum_to_one(1.5423,1e-6))
            expect_error(markov_states_sum_to_one(-5.293,1e-6))
            expect_error(markov_states_sum_to_one(0,1e-6))
          })
# Valid fixture: a single state distribution whose entries sum to exactly 1.
stateDataFrame <- data.frame(IC1 = 0.5,
                             IC2 = 0.25,
                             C1 = 0.25,
                             C2 = 0,
                             O = 0,
                             IO = 0,
                             IObound = 0,
                             Obound = 0,
                             Cbound = 0)
# The tolerance must be a single real number.
test_that("Expect errors if sumTol is not a numeric single number.",
          {
            expect_error(markov_states_sum_to_one(stateDataFrame,"a"))
            expect_error(markov_states_sum_to_one(stateDataFrame,23+56i))
            expect_error(markov_states_sum_to_one(stateDataFrame,c(1,2)))
          })
# Valid fixture: five rows that each sum to 1 (the last within 1e-9 of 1,
# i.e. inside the 1e-6 tolerance used below).
stateDataFrameLong <- data.frame(IC1 = c(0.50,1,0,0,1 - 1e-9),
                                 IC2 = c(0.25,0,0.01,0,0),
                                 C1 = c(0.25,0,0.99,0,0),
                                 C2 = c(0,0,0,0.1,0),
                                 O = c(0,0,0,0.1,0),
                                 IO = c(0,0,0,0.1,0),
                                 IObound = c(0,0,0,0.1,0),
                                 Obound = c(0,0,0,0.1,0),
                                 Cbound = c(0,0,0,0.5,0)
                                 )
test_that("Expect TRUE if all rows sum to 1 within sumTol.",
          {
            expect_true(markov_states_sum_to_one(stateDataFrameLong,1e-6))
          })
# Invalid fixtures: first row off by 0.1 (PoorData1), last row off by 1e-3 --
# larger than the 1e-6 tolerance -- (PoorData2), and wildly off (PoorData3).
stateDataFramePoorData1 <- data.frame(IC1 = c(0.50,1,0,0,1 - 1e-9),
                                      IC2 = c(0.25,0,0.01,0,0),
                                      C1 = c(0.25,0,0.99,0,0),
                                      C2 = c(0.1,0,0,0.1,0),
                                      O = c(0,0,0,0.1,0),
                                      IO = c(0,0,0,0.1,0),
                                      IObound = c(0,0,0,0.1,0),
                                      Obound = c(0,0,0,0.1,0),
                                      Cbound = c(0,0,0,0.5,0)
                                      )
stateDataFramePoorData2 <- data.frame(IC1 = c(0.50,1,0,0,1 - 1e-3),
                                      IC2 = c(0.25,0,0.01,0,0),
                                      C1 = c(0.25,0,0.99,0,0),
                                      C2 = c(0.0,0,0,0.1,0),
                                      O = c(0,0,0,0.1,0),
                                      IO = c(0,0,0,0.1,0),
                                      IObound = c(0,0,0,0.1,0),
                                      Obound = c(0,0,0,0.1,0),
                                      Cbound = c(0,0,0,0.5,0)
                                      )
stateDataFramePoorData3 <- data.frame(IC1 = 1,
                                      IC2 = 2,
                                      C1 = 3,
                                      C2 = 4,
                                      O = 5,
                                      IO = 6,
                                      IObound = 7,
                                      Obound = 8,
                                      Cbound = 9
                                      )
test_that("Expect FALSE if any row does not sum to 1 within sumTol.",
          {
            expect_false(markov_states_sum_to_one(stateDataFramePoorData1,1e-6))
            expect_false(markov_states_sum_to_one(stateDataFramePoorData2,1e-6))
            expect_false(markov_states_sum_to_one(stateDataFramePoorData3,1e-6))
          })
|
e22ffc544eb2cc1c5efcfc48c590ec2329a32467
|
f0f133124e46a821abc1b2a8cfd02ea8987340b7
|
/all_season_leaflet.R
|
3bb52011118436dd71cab12e76ad4c60c1a5e865
|
[] |
no_license
|
erinwalls/erhs_535_final
|
ca9bfd55eac201eb0484facec5a7be5f43b866c4
|
c9df7a845e608687b092ec945cf4dbe74912108b
|
refs/heads/master
| 2020-09-09T18:09:26.578879
| 2019-12-14T06:16:25
| 2019-12-14T06:16:25
| 221,520,482
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,956
|
r
|
all_season_leaflet.R
|
#all seasons
# Choropleth map: per-country appearance count (and mean value) drawn on a
# Leaflet map with a continuous green palette and an HTML popup per country.
library(tidyverse)
library(rworldmap)
library(sf)
library(leaflet)
library("dplyr")
library(viridisLite)
library(janitor)
# NOTE(review): trailing comma after the filename leaves read_csv's second
# argument missing -- harmless but probably unintended.
country_data_all <- read_csv("country_all_iso_all.csv", )
# Country polygons from rworldmap, converted to sf for leaflet.
countriesLow <- countriesLow %>%
  st_as_sf
#Temporarily removing air date (not sure how to animate/facet/etc. in Leaflet)
# Per-country summary: mean of `value` and row count, keyed by ISO3 code.
country_geom_full<- country_data_all %>%
  left_join(countryExData, by = c("iso3" = "ISO3V10")) %>%
  group_by(iso3) %>%
  mutate(mean_value = mean(value)) %>%
  add_tally(name = "count") %>%
  ungroup() %>%
  select(country, count, mean_value, iso3) %>%
  distinct() %>%
  mutate(iso3 = toupper(iso3)) %>%
  rename(ISO3 = iso3)
# Attach polygon geometry to the summary.
country_geom_map_data <- country_geom_full %>%
  mutate(ISO3 = as.factor(ISO3)) %>%
  dplyr::full_join(countriesLow) %>%
  clean_names() %>%
  st_as_sf
# Continuous colour scale over the count column.
pal <- colorNumeric(
  palette = "Greens",
  domain = country_geom_map_data$count)
# HTML popup shown when a polygon is clicked.
popup_info<- paste0("<b>Country:</b> ",
                    country_geom_map_data$name, "<br/>",
                    "<b>Population:</b>",
                    country_geom_map_data$pop_est, "<br/>",
                    "<b>Count:</b>",
                    country_geom_map_data$count, "<br/>",
                    "<b>Mean Value:</b> ",
                    round(country_geom_map_data$mean_value))
leaflet(country_geom_map_data) %>%
  addTiles() %>%
  addPolygons(color = ~pal(count), popup = popup_info)
# --------- second, revised version of the same map follows ---------
# Revised version of the map above: same data pipeline, but with a quantile
# colour scale and a legend added to the Leaflet map.
library(tidyverse)
library(rworldmap)
library(sf)
library(leaflet)
library("dplyr")
library(viridisLite)
library(janitor)
library(lubridate)
library(broom)
library(plotly)
# NOTE(review): trailing comma after the filename leaves read_csv's second
# argument missing -- harmless but probably unintended.
country_data_all <-read_csv("country_all_iso_all.csv", )
countriesLow <- countriesLow %>%
  st_as_sf
#Temporarily removing air date (not sure how to animate/facet/etc. in Leaflet)
# Per-country summary: mean of `value` and row count, keyed by ISO3 code.
country_geom_full<- country_data_all %>%
  left_join(countryExData, by = c("iso3" = "ISO3V10")) %>%
  group_by(iso3) %>%
  mutate(mean_value = mean(value)) %>%
  add_tally(name = "count") %>%
  ungroup() %>%
  select(country, count, mean_value, iso3) %>%
  distinct() %>%
  mutate(iso3 = toupper(iso3)) %>%
  rename(ISO3 = iso3)
# Attach polygon geometry to the summary.
country_geom_map_data <- country_geom_full %>%
  mutate(ISO3 = as.factor(ISO3)) %>%
  dplyr::full_join(countriesLow) %>%
  clean_names() %>%
  st_as_sf
# Quantile (binned) colour scale over the count column.
pal <- colorQuantile(
  palette = "Greens",
  domain = country_geom_map_data$count)
# HTML popup shown when a polygon is clicked.
popup_info<- paste0("<b>Country:</b> ",
                    country_geom_map_data$name, "<br/>",
                    "<b>Population:</b>",
                    country_geom_map_data$pop_est, "<br/>",
                    "<b>Count:</b>",
                    country_geom_map_data$count, "<br/>",
                    "<b>Mean Value:</b> ",
                    round(country_geom_map_data$mean_value))
#
leaflet(country_geom_map_data) %>%
  addTiles() %>%
  addPolygons(color = ~pal(count), popup = popup_info) %>%
  addLegend(pal = pal, values = ~count, position = 'topright', opacity = 1, title = "Quantile Count")
|
8a2305245be03f437285f0c96be7e7db7e964cef
|
b6e31cfa3c6bb42f2f7f32ab2c49b372149b4924
|
/R/genotype_conf.R
|
219f6bd816c7f99eb51242ba4a5b8163954d78c0
|
[] |
no_license
|
marcjwilliams1/Alleloscope
|
b2b2020b800d1ad2e63154b130ae2626d0a13e61
|
dbf1b7773c197affedac540b49ea9c4a3a6c0e20
|
refs/heads/main
| 2023-08-07T14:30:27.923798
| 2021-09-27T03:06:33
| 2021-09-27T03:06:33
| 394,618,929
| 1
| 0
| null | 2021-08-10T10:57:18
| 2021-08-10T10:57:17
| null |
UTF-8
|
R
| false
| false
| 2,935
|
r
|
genotype_conf.R
|
#' Compute confidence scores based on posterior probability for each cell in a region.
#'
#' For every cell, the posterior probability of its assigned allele-specific
#' copy-number state is computed under a per-state Gaussian model in the
#' (rho_hat, theta_hat) plane: state means are fixed at canonical points and
#' standard deviations are estimated per cluster, capped at preset maxima.
#'
#' @param X A ncell by 2 dataframe. Column 1: normalized coverage (rho_hat); Column 2: theta_hat
#' @param gt A vector of length ncell. The numbers (indices 1-27 into the
#'   canonical state table) represent cell-level allele-specific copy number states.
#'
#' @return A numeric vector of length ncell with posterior-probability
#'   confidence scores, one per cell (numerator state / sum over all states).
#'
#' @import ggplot2
#' @import pheatmap
#' @import cluster
#' @export
genotype_conf=function(X=NULL, gt=NULL){
  # Canonical coordinates for the 27 states:
  # col 2 = expected normalized coverage, col 3 = expected allele fraction.
  canonicalPoints=cbind(c(1:27),
                        c(0.5,0.5, 1,1,1,1.5,1.5,1.5,1.5,2,2,2,2,2,2.5,2.5,2.5,2.5,2.5,2.5,3,3,3,3,3,3,3),
                        c(0,1,0,0.5,1,0,1/3,2/3,1,0,1/4,2/4,3/4,1,0,1/5,2/5,3/5,4/5,1,0,1/6,2/6,3/6,4/6,5/6,1))
  ncells=nrow(X)
  #nregions=ncol(Obj_filtered$genotypes)
  # Per-state upper bounds on the cluster SDs; state 4 (normal 1/0.5) is
  # allowed to be wider.
  max.sdrho=rep(0.07, nrow(canonicalPoints))
  max.sdtheta=rep(0.06, nrow(canonicalPoints))
  max.sdrho[4] = 0.15
  max.sdtheta[4] = 0.1
  #posteriorConfidence=matrix(nrow=ncells, ncol=nregions, data=NA)
  posteriorConfidence=rep(0,ncells )
  # Commented-out block below is legacy multi-region code kept for reference.
  #for(regionid in 1:nregions){
  #rhohat=Obj_filtered$genotype_values[,2*(regionid-1)+1]
  #thetahat=Obj_filtered$genotype_values[,2*(regionid-1)+2]
  #genotype=Obj_filtered$genotypes[,regionid]
  rhohat=X[,1]
  thetahat=X[,2]
  genotype=gt
  # Mixture proportions: fraction of cells assigned to each observed state.
  possible.genotypes=unique(genotype)
  cluster.props=rep(0, length(possible.genotypes))
  for(i in 1:length(possible.genotypes)) cluster.props[i]=sum(genotype==possible.genotypes[i])
  cluster.props=cluster.props/sum(cluster.props)
  # Per-state means (from the canonical table) and SDs (empirical, capped).
  mu.rho=rep(0, length(possible.genotypes))
  mu.theta=rep(0, length(possible.genotypes))
  sd.rho=rep(0, length(possible.genotypes))
  sd.theta=rep(0, length(possible.genotypes))
  for(i in 1:length(possible.genotypes)){
    cellids = which(genotype==possible.genotypes[i])
    mu.rho[i]=canonicalPoints[possible.genotypes[i],2]
    mu.theta[i]=canonicalPoints[possible.genotypes[i],3]
    sd.rho[i]=sd(rhohat[cellids])
    sd.theta[i]=sd(thetahat[cellids])
    # Singleton clusters give sd() == NA; fall back to the cap.
    if(is.na(sd.rho[i])) sd.rho[i] = max.sdrho[possible.genotypes[i]]
    if(is.na(sd.theta[i])) sd.theta[i] = max.sdtheta[possible.genotypes[i]]
  }
  sd.rho=pmin(sd.rho, max.sdrho[possible.genotypes])
  sd.theta=pmin(sd.theta,max.sdtheta[possible.genotypes])
  # mm[i]: index of cell i's state within possible.genotypes.
  mm=match(genotype, possible.genotypes)
  numerator=rep(NA,ncells)
  denominator=rep(NA,ncells)
  # Posterior = prop * N(rho) * N(theta) for the assigned state, normalized
  # over all observed states.
  # NOTE(review): dnorm((x - mu)/sd) omits the 1/sd density factor; with
  # state-specific sds the ratio is therefore not a textbook posterior —
  # confirm this is intentional.
  for(i in 1:ncells){
    numerator[i]=cluster.props[mm[i]]*dnorm((rhohat[i]-mu.rho[mm[i]])/sd.rho[mm[i]])*dnorm((thetahat[i]-mu.theta[mm[i]])/sd.theta[mm[i]])
    denominator[i]=0
    for(j in 1:length(possible.genotypes)){
      denominator[i]=denominator[i]+cluster.props[j]*dnorm((rhohat[i]-mu.rho[j])/sd.rho[j])*dnorm((thetahat[i]-mu.theta[j])/sd.theta[j])
    }
    posteriorConfidence[i]=numerator[i]/denominator[i]
  }
  #}
  return(posteriorConfidence)
}
|
7936def5e8ca8d2eced0a3f202b9fe5a9147dde3
|
4eb0befe400a36a027196d6ff10840e276e2fb0a
|
/man/crossover.Rd
|
21c7d6a12a09b6c934d2d7dd5ea246334d262a22
|
[] |
no_license
|
zhrlin/GA
|
736a11499b02a7dd1fdaa29c887dcc3c6cc999b7
|
556810b18ff501a63f4116f0411a745eb57fe7c3
|
refs/heads/master
| 2020-11-26T23:48:25.893431
| 2019-12-20T09:36:23
| 2019-12-20T09:36:23
| 229,234,330
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 796
|
rd
|
crossover.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crossover.R
\name{crossover}
\alias{crossover}
\title{Crossover}
\usage{
crossover(chromo_1, chromo_2)
}
\arguments{
\item{chromo_1, chromo_2}{Numeric vectors with binary 0/1 elements. Should be of same length.}
}
\description{
\code{crossover} is to perform the genetic operator crossover between two parent chromosomes, and return one offspring chromosome.
}
\details{
First, select a random position between two adjacent loci,
and split both parent chromosomes at this position.
Then glue the left chromosome segment from one parent to the right segment
from the other parent to form an offspring chromosome
(the remaining segments are also combined to form a second offspring, but then discarded).
}
|
ad89866224bf813dd17ec6a1374765e06fb0d6b8
|
2d34708b03cdf802018f17d0ba150df6772b6897
|
/googlecomputebeta.auto/man/InstanceReference.Rd
|
e15ba5a53577a8bb3c0668ab3498e3273e84ef8c
|
[
"MIT"
] |
permissive
|
GVersteeg/autoGoogleAPI
|
8b3dda19fae2f012e11b3a18a330a4d0da474921
|
f4850822230ef2f5552c9a5f42e397d9ae027a18
|
refs/heads/master
| 2020-09-28T20:20:58.023495
| 2017-03-05T19:50:39
| 2017-03-05T19:50:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 542
|
rd
|
InstanceReference.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute_objects.R
\name{InstanceReference}
\alias{InstanceReference}
\title{InstanceReference Object}
\usage{
InstanceReference(instance = NULL)
}
\arguments{
\item{instance}{The URL for a specific instance}
}
\value{
InstanceReference object
}
\description{
InstanceReference Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
\seealso{
Other InstanceReference functions: \code{\link{targetPools.getHealth}}
}
|
fab2e3726ecf7d990d072cec56003dc5a07de27a
|
ad020fd7242dff50644fdf40f4a42dc5796fb9e9
|
/R/gss.R
|
f7b5b8a2d7fd6ea2e77424a6a6e7ff92ec47d738
|
[] |
no_license
|
ktargows/htmllayout
|
875c55b457d2d9d6c0f2e913df09dbe958148e0d
|
9a57c03c0e1a9e974c17bc2519c4c06bbd153c1b
|
refs/heads/master
| 2021-01-20T00:20:28.193579
| 2015-09-25T02:25:38
| 2015-09-25T02:25:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,126
|
r
|
gss.R
|
#' HTML dependency bundling the GSS constraint engine.
#'
#' Attaches gss.min.js from this package's assets and boots the engine on
#' the document via an inline head script.
#' @export
gssDependency = function() {
  asset_dir = system.file('assets', 'gss', package = getPackageName())
  boot_script = '<script type="text/javascript">window.engine = new GSS(document);</script>'
  htmltools::htmlDependency(
    'gss', '2.0.0', asset_dir,
    script = 'gss.min.js',
    head = boot_script
  )
}
#' Generate GSS layout constraints from a matrix of element ids.
#'
#' Cells on the matrix border are pinned to the matching window edge;
#' horizontally/vertically adjacent cells with different ids are glued
#' edge-to-edge. Duplicate constraints are removed (first occurrence kept).
#'
#' @param mat A matrix (or object coercible to one) of element ids.
#' @param prefix String prepended to every id before use.
#' @return A character vector of unique GSS constraint strings.
#' @export
gssMatrix = function(mat, prefix = '') {
  grid = as.matrix(mat)
  nr = nrow(grid)
  nc = ncol(grid)
  grid = matrix(paste0(prefix, grid), nrow = nr, ncol = nc)
  rules = list()
  for (r in seq_len(nr)) {
    for (k in seq_len(nc)) {
      cell = grid[r, k]
      if (r == 1) {
        rules[[length(rules) + 1]] = sprintf('#%s[top] == ::window[top];', cell)
      }
      if (k == 1) {
        rules[[length(rules) + 1]] = sprintf('#%s[left] == ::window[left];', cell)
      }
      if (r == nr) {
        rules[[length(rules) + 1]] = sprintf('#%s[bottom] == ::window[bottom];', cell)
      }
      if (k == nc) {
        rules[[length(rules) + 1]] = sprintf('#%s[right] == ::window[right];', cell)
      }
      if (r < nr && cell != grid[r + 1, k]) {
        rules[[length(rules) + 1]] = sprintf('#%s[bottom] == #%s[top];', cell, grid[r + 1, k])
      }
      if (k < nc && cell != grid[r, k + 1]) {
        rules[[length(rules) + 1]] = sprintf('#%s[right] == #%s[left];', cell, grid[r, k + 1])
      }
    }
  }
  unique(unlist(rules))
}
# Build GSS size constraints from two-sided formulae.
#
# Each argument must be a formula `elem ~ expr`: the LHS names the target
# element, the RHS is an arithmetic expression over other element names that
# replaceRHSVars() rewrites into GSS `#name[type]` references.
# `_type` selects the dimension ('width' or 'height'); the underscore-prefixed
# name avoids colliding with user formulae captured by `...`.
gssSize = function(..., `_type` = c('width', 'height')) {
  type = match.arg(`_type`)
  unlist(lapply(list(...), function(s) {
    if (!inherits(s, 'formula')) {
      str(s)  # print the offending object before erroring, to aid debugging
      stop('All arguments must be two-sided formulae')
    }
    sprintf('#%s[%s] == %s;', deparse(s[[2]]), type, replaceRHSVars(s, type))
  }))
}
# Rewrite the RHS of a formula so every variable `v` becomes `#v[type]`.
# Removing form[[2]] turns `lhs ~ rhs` into `~ rhs`, so all.vars() then only
# reports RHS variables. substituteDirect() splices the replacement strings
# into the expression; deparsing quotes them, so the quotes are stripped last.
replaceRHSVars = function(form, type) {
  RHS = form[[3]]
  form[[2]] = NULL
  vars0 = all.vars(form)
  vars1 = sprintf('#%s[%s]', vars0, type)
  RHS = substituteDirect(RHS, as.list(setNames(vars1, vars0)))
  gsub('"', '', paste(deparse(RHS, width.cutoff = 500), collapse = ''))
}
#' Build GSS width constraints from `element ~ expression` formulae.
#' Thin wrapper: forwards to gssSize with the dimension fixed to width.
#' @export
gssWidth = function(...) {
  gssSize(..., `_type` = 'width')
}
#' Build GSS height constraints from `element ~ expression` formulae.
#' Thin wrapper: forwards to gssSize with the dimension fixed to height.
#' @export
gssHeight = function(...) {
  gssSize(..., `_type` = 'height')
}
#' Wrap GSS rules in a <style type="text/gss"> tag.
#'
#' @param style Character vector of GSS constraint strings; joined with
#'   newlines into a single stylesheet.
#' @export
gssStyle = function(style) {
  sheet = paste(style, collapse = '\n')
  tags$style(type = 'text/gss', sheet)
}
|
b36861fa0a18f86af48d572f93dec93e1be486e5
|
25098c7f80c40414f18bf25a7d9a12bf64d1fa94
|
/plot1.R
|
113a3bd9ddef6562985bf2c87fb9bc69c73dc89d
|
[] |
no_license
|
dansum15/ExData_Plotting1
|
adde5b3219a3227121828fec11b2ca1ad31c83a8
|
4dab7f90cd0370c6ee09d5374d159c43caaa0f8f
|
refs/heads/master
| 2021-01-18T00:13:06.286589
| 2016-01-11T04:34:28
| 2016-01-11T04:34:28
| 49,393,000
| 0
| 0
| null | 2016-01-11T00:49:30
| 2016-01-11T00:49:30
| null |
UTF-8
|
R
| false
| false
| 1,220
|
r
|
plot1.R
|
# plot1.R: histogram of Global Active Power for 2007-02-01 and 2007-02-02.

zip_name <- "exdata-data-household_power_consumption.zip"

# Download the archive once, under the same name the guard checks.
# (The original downloaded to a tempfile, unzipped, and deleted it inside the
# if-branch, so `file` was undefined on any run where the zip already
# existed and read.table() failed.)
if (!file.exists(zip_name)) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
                zip_name)
}
file <- unzip(zip_name)

data <- read.table(file, header = TRUE, sep = ";")
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")

# Keep only the two days of interest.
subset.data <- data[(data$Date == "2007-02-01") | (data$Date == "2007-02-02"), ]

# Measurement columns arrive as character; coerce through character to be
# safe under both character and factor input.
subset.data$Global_active_power <- as.numeric(as.character(subset.data$Global_active_power))
subset.data$Global_reactive_power <- as.numeric(as.character(subset.data$Global_reactive_power))
subset.data$Voltage <- as.numeric(as.character(subset.data$Voltage))

# Combined date-time column. Date is already a Date, so paste() yields
# "YYYY-MM-DD HH:MM:SS". (The original passed "%d/%m/%Y %H:%M:%S" as a stray
# unnamed argument to transform(), where it had no effect — and it was the
# wrong format anyway.)
subset.data <- transform(subset.data,
                         timestamp = as.POSIXct(paste(Date, Time),
                                                format = "%Y-%m-%d %H:%M:%S"))
subset.data$Sub_metering_1 <- as.numeric(as.character(subset.data$Sub_metering_1))
subset.data$Sub_metering_2 <- as.numeric(as.character(subset.data$Sub_metering_2))
subset.data$Sub_metering_3 <- as.numeric(as.character(subset.data$Sub_metering_3))

# Plot on screen, then copy to a 480x480 PNG as required by the assignment.
hist(subset.data$Global_active_power, main = paste("Global Active Power"),
     col = "red", xlab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()
|
18051b3c9c10e996a75acf6189e9a49df8ea3eb3
|
ebb9a0c747fad779aba3a92266551f0a7c445d8a
|
/man/tidy_genorm.Rd
|
809573110347268a76a490098e6dd829e64d9953
|
[] |
no_license
|
dhammarstrom/generefer
|
ce0215e1ec01b19a9b6d32f7bae28921b04d849d
|
5e4532a49a07b32f1a8fec72eb2ac8ed507cafa1
|
refs/heads/master
| 2021-05-10T18:08:02.275722
| 2020-09-04T13:18:13
| 2020-09-04T13:18:13
| 118,621,326
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,741
|
rd
|
tidy_genorm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_genorm.R
\name{tidy_genorm}
\alias{tidy_genorm}
\title{Calculate reference gene stability}
\usage{
tidy_genorm(dat, sample = "sample", gene = "gene",
expression = "expression", log = FALSE)
}
\arguments{
\item{dat}{Name of the data frame, supplied to the function in a tidy way.
A column for sample id ("sample"), a column for gene id ("gene") and a column
containing expression values ("expression").}
\item{sample}{Character column name of column containing sample id's}
\item{gene}{Character column name of column containing gene id's}
\item{expression}{Character column name of column containing sample expression values}
\item{log}{Logical, is data on the log scale?}
}
\value{
A list containing: M, a data frame with stability measures, average stability measures per gene after step-wise
exclusion of genes; Normalisation factors for n >= 2 genes per sample, and; Pairwise variations of normalisation factors.
}
\description{
Calculates M values for n >= 2 genes. When there are more than two genes in the data set,
the function calculates M for the full data set, removes the least stable gene (largest M) and
calculates M again until only two genes remains. The remaining two genes cannot be ranked.
Based on gene ranking normalisation factors (NF) are calculated based on two to n genes and pairwise
variations between NFn and NFn+1 are calculated which can be used to determine the optimal
number of reference genes to include in the experiment.
}
\references{
Vandesompele, J., et al. (2002). "Accurate normalization of real-time quantitative
RT-PCR data by geometric averaging of multiple internal control genes." Genome Biol 3(7)
}
|
3aebf8e2115c8fd5a9066e9dda6ce94673cd207a
|
ecd0188879f984eba2455fba5c5ad24688737502
|
/man/quantile_normalize_cols.Rd
|
50e46372a67c7d155f21941a9682fa5b01bc9307
|
[
"Apache-2.0"
] |
permissive
|
davidaknowles/suez
|
71e0be678dcab61e671b3283d3b19a5245754702
|
8b96529a867a23635027c1d185e68a7add3c9913
|
refs/heads/master
| 2021-05-07T16:40:01.683115
| 2021-03-18T19:20:18
| 2021-03-18T19:20:18
| 108,589,583
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 282
|
rd
|
quantile_normalize_cols.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/map_interaction_qtl.R
\name{quantile_normalize_cols}
\alias{quantile_normalize_cols}
\title{Quantile normalize columns}
\usage{
quantile_normalize_cols(input)
}
\description{
Quantile normalize columns
}
|
f2a1f62f69d6402624b3567a025177b45497d1f7
|
9463229d7d9cc5902971aed9c9c4efe2f4083c50
|
/tests/testthat.R
|
6ca7a5557f8eda924f383cf2e3c87a12fbd7a559
|
[] |
no_license
|
sapfluxnet/sapfluxnetQC1
|
df66fb4c8282c14aa8921e668f671928c73e300a
|
c66115798d0c0814f399b1df6505f146594b6bdf
|
refs/heads/master
| 2021-05-01T12:53:11.380754
| 2019-03-01T09:15:47
| 2019-03-01T09:15:47
| 52,454,181
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
testthat.R
|
# Test runner entry point: loads the package and executes all testthat specs
# under tests/testthat/.
library(testthat)
library(sapfluxnetQC1)
test_check("sapfluxnetQC1")
|
9088f787ffcfbe67ec285b723e29c695bc760a18
|
249c82f3755ca232fc7e86ffc7416f9f3b1abf77
|
/inception.R
|
9396ee09d3c045a84acbe14676cc38dd09a2c8f8
|
[] |
no_license
|
ashlizjosh/Miniproject
|
a501d86f38832d757d13ec0f977ebf5373564ed0
|
94ee6bb30abac15d9384d5cb9bb5a84121690b13
|
refs/heads/master
| 2023-03-06T03:13:26.328774
| 2021-02-12T10:25:37
| 2021-02-12T10:25:37
| 299,181,426
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 528
|
r
|
inception.R
|
# Plot Inception-V3 classification accuracy for three paddy-crop stresses.
# Accuracy (%) per class, in axis order below.
acc <- c(97, 98, 97.5)
y_range <- range(0, acc)
# Draw points+lines without default axes/annotation; they are added manually.
plot(acc, type = "o", col = "blue", ylim = y_range, axes = FALSE, ann = FALSE)
axis(1, at = 1:3, labels = c("Bacterial leaf blight", "Brown spot", "leaf smut"))
# NOTE(review): `:` binds tighter than `*`, so ticks are 20*(0:98); only
# those inside ylim are drawn — confirm seq(0, max, 20) wasn't intended.
axis(2, las = 1, at = 20 * 0:y_range[2])
box()
title(main = "Inception-V3 Results",
      xlab = "Paddy crop stresses", ylab = "Accuracy",
      col.lab = rgb(0, 0.5, 0))
|
d93cb45dc8757bdbb336b5803fe3fbddda73edce
|
0a1c607003a2a773bcbb68d3889909f35af0d758
|
/SB_060718/SB_msh2_wt/regioneR/defineMaskedRegions.R
|
efc93c1e052eb3baa1453de72a75902291806c83
|
[] |
no_license
|
ajtock/GBS_CO
|
5fed369a6e739fe163ebe490fdb504f9a5b43f0d
|
c2ff2ab01736060a390e0eceb74504a9cadfbd82
|
refs/heads/master
| 2021-12-03T17:35:46.211069
| 2021-10-25T14:56:36
| 2021-10-25T14:56:36
| 146,581,321
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,518
|
r
|
defineMaskedRegions.R
|
#!/applications/R/R-3.3.2/bin/Rscript
# Create and save regions to mask in overlap analyses.
# Each mask is a GRanges object of intervals to exclude for a given cross
# (presumably problematic/structurally divergent regions — TODO confirm).
library(regioneR)
outDir <- "/projects/ajt200/GBS_CO/SB_060718/SB_msh2_wt/regioneR/"
# Col/Ler mask: one Chr3 interval plus three Chr4 intervals.
maskGR <- toGRanges(data.frame(chr = c("Chr3",
                                       rep("Chr4", times = 3)),
                               start = c(3000000,
                                         131000, 170000, 7800500),
                               end = c(9000000,
                                       140000, 179000, 8030000)))
save(maskGR,
     file = paste0(outDir, "SB_ColLer_F2_2018_maskGR.RData"))
maskGR <- NULL  # reset before building the next mask
# Col/Cvi mask: intervals on Chr1-Chr4.
maskGR <- toGRanges(data.frame(chr = c(rep("Chr1", times = 3),
                                       rep("Chr2", times = 5),
                                       "Chr3",
                                       rep("Chr4", times = 2)),
                               start = c(3460000, 4250000, 11303000,
                                         446500, 527100, 6500000, 16190000, 18870000,
                                         4000000,
                                         3125000, 4171000),
                               end = c(3750000, 4650000, 11330000,
                                       457300, 538500, 10300000, 16205000, 18880000,
                                       9500000,
                                       3400000, 4802000)))
save(maskGR,
     file = paste0(outDir, "SB_ColCvi_F2_2018_maskGR.RData"))
maskGR <- NULL
|
4c71b2ec02f185aec25dd56580a701b9a058f78b
|
bd4d459aca02be3900cbccd5f1f8a4abe1d30314
|
/tests/testthat/test-haplotypes.R
|
7c49ed5e7414eddce2c0fe18356da78dc7f9507a
|
[] |
no_license
|
augusto-garcia/onemap
|
98ff53e2825c8cf626cff6c433acd78311ac5fa3
|
d71d3c4800ddb00848a15a8635e08f8f1428bd1d
|
refs/heads/master
| 2023-07-07T02:23:26.351957
| 2022-11-25T19:27:50
| 2022-11-25T19:27:50
| 31,918,564
| 35
| 27
| null | 2023-06-28T16:57:49
| 2015-03-09T19:37:05
|
R
|
UTF-8
|
R
| false
| false
| 974
|
r
|
test-haplotypes.R
|
context("test plot haplotypes")
test_that("ordering and HMM parallel", {
  # Helper: for one example data set, build a map for the given linkage group
  # and check (a) total map length and (b) progeny haplotype counts.
  # bquote()/eval() substitutes the dataset name literally into data()/get()
  # and into the final expectation, so failures report the actual dataset.
  test_haplo <- function(example_data, which.group, sum.counts){
    eval(bquote(data(.(example_data))))
    onemap_mis <- eval(bquote(filter_missing(get(.(example_data)), 0.15)))
    twopt <- rf_2pts(onemap_mis)
    all_mark <- make_seq(twopt,"all")
    lod_sug <- suggest_lod(all_mark)  # NOTE(review): computed but never used
    groups <- group_upgma(all_mark, expected.groups = 2, inter = F)
    LG <- eval(bquote(make_seq(groups, .(which.group))))
    map1 <- onemap::map(LG)
    dist <- cumsum(kosambi(map1$seq.rf))
    expect_equal(dist[length(dist)], 100, tolerance = 5) # The simulated distance of Chr01 is 100
    counts <- progeny_haplotypes_counts(x = progeny_haplotypes(map1, most_likely = T, ind = "all"))
    eval(bquote(expect_equal(sum(counts$counts), .(sum.counts))))
  }
  # One call per population type (backcross, outcross, F2).
  test_haplo("simu_example_bc", 1, 126)
  test_haplo("simu_example_out", 1, 347)
  test_haplo("simu_example_f2", 1, 216)
})
|
5cdcb496ce95471530787738faa1cede17a34b26
|
f549ca13cfe755675c45105c5bfeba89af836c14
|
/SACS_measuresNavon.R
|
570f99710ab43a5f9dfdece2c9aaef5cbec15558
|
[] |
no_license
|
TeresaWenhart/Navon
|
d682d44d4717ea9a64e6017928bf7b628fe12396
|
99a7f2783d97edaa03a1dac7f4598142e535897a
|
refs/heads/master
| 2021-09-22T19:21:36.979538
| 2018-09-14T11:30:19
| 2018-09-14T11:30:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,642
|
r
|
SACS_measuresNavon.R
|
# Compute Speed-Accuracy Composite Scores (SACS) from the Navon-task results
# table `Navonresults` (expected to exist in the calling environment).
#   SACS = z(accuracy %) - z(reaction time)
#   high score -> efficient performance, low score -> poor performance
# Scores are computed per congruency/level condition and over all trials.
# NOTE(review): dplyr is not used below; the original loaded it via require()
# with a hard-coded lib.loc ("/usr/local/lib/R/site-library"), which is
# fragile across machines — loading from the default library path instead.
library("dplyr")

Navonresults <- data.frame(Navonresults)

# z-standardised accuracy minus z-standardised reaction time.
# Returns an n x 1 matrix (as scale() does), matching the original columns.
sacs <- function(acc, rt) {
  scale(acc, center = TRUE, scale = TRUE) - scale(rt, center = TRUE, scale = TRUE)
}

n_trials <- 40  # trials per condition; 4 conditions -> 160 trials in total

# Columns come in (accuracy, RT) pairs per condition:
# 1/2 local-congruent, 3/4 local-incongruent,
# 5/6 global-congruent, 7/8 global-incongruent.
SACS_Lcon <- sacs(Navonresults[, 1] / n_trials, Navonresults[, 2])
SACS_Linc <- sacs(Navonresults[, 3] / n_trials, Navonresults[, 4])
SACS_Gcon <- sacs(Navonresults[, 5] / n_trials, Navonresults[, 6])
SACS_Ginc <- sacs(Navonresults[, 7] / n_trials, Navonresults[, 8])

# All trials: pooled accuracy over 160 trials, mean RT over the 4 conditions.
SACS_all <- sacs(
  (Navonresults[, 1] + Navonresults[, 3] + Navonresults[, 5] + Navonresults[, 7]) / (4 * n_trials),
  (Navonresults[, 2] + Navonresults[, 4] + Navonresults[, 6] + Navonresults[, 8]) / 4
)

Navon_SACS <- cbind(SACS_Lcon, SACS_Linc, SACS_Gcon, SACS_Ginc, SACS_all)
Navon_SACS <- data.frame(Navon_SACS)
colnames(Navon_SACS) <- c("vis_SACS_Lcon","vis_SACS_Linc","vis_SACS_Gcon","vis_SACS_Ginc","vis_SACS_all")
rownames(Navon_SACS) <- rownames(Navonresults)

# Append the composite scores to the results table.
Navonresults <- cbind(Navonresults, Navon_SACS)
|
88e7d0290e5d3cae3091d4d58ff50378efcaa00b
|
b5dd41686cf7a4c1cc141b3b77064b981ee8e3f1
|
/code/group 31_project.R
|
87fb87e7172d7247ef13b0a5abb9ed8269615f60
|
[] |
no_license
|
miftahulridwan/Airbnb-price-and-superhost
|
dcf5e1d09fa3ab1a527774315f46b3a9be7bf282
|
bfa5942e796113c9c430be454d862601bdc16329
|
refs/heads/master
| 2022-11-10T08:40:28.240408
| 2020-06-10T19:13:36
| 2020-06-10T19:13:36
| 270,871,264
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,376
|
r
|
group 31_project.R
|
### Title : Research Skills: Programming with R
### Group Number : 31
### Group Member :
### 1. Adrian Konja (SNR: 2037123 | ANR: u242159)
### 2. Fernando Catalan Resquin (SNR: 2048042 | ANR: u270800)
### 3. George Satyridis (SNR: 2046944 | ANR: u836891)
### 4. Konstantinos Soiledis (SNR: 2037001 | ANR: u226393)
### 5. Miftahul Ridwan (SNR: 2040778 | ANR: u989303)
### Created : 14 - Dec - 2019
### Last Modified : 09 - Jan - 2020
## Loading the Necessary Library -----------------------------------------------
require(dplyr) # Used primarily in data cleaning
require(tidyr) # Used primarily in data cleaning
require(ggplot2) # Used to create graphs, primarily in EDA
require(caret) # Used to implement Machine learning algorithms for prediction
require(mice) # Used for imputation
require(ranger) # Used for feature selection with Random forests
## Loading the Dataset ---------------------------------------------------------
# NOTE(review): setwd(getwd()) is a no-op; paths below are already relative
# to the current working directory.
wd <- getwd()
setwd(wd)
# Read listings; treat empty strings (and "NA") as missing and keep strings
# as character for later cleaning.
hawai <- read.csv("./listings.csv", na.strings = c("", "NA"),
                  stringsAsFactors = FALSE)
## Data Cleaning ---------------------------------------------------------------
### Define Necessary Function ---------------------------------------------- ###
# Normalize a vector of names/values: underscores become dots; the literal
# characters "$", "%", "*" and the uppercase substring "NA" are stripped;
# the result is upper-cased. Note the "NA" strip runs BEFORE upper-casing,
# so lower-case "na" inside words survives (e.g. "banana" -> "BANANA").
reformat <- function(strings){
  dotted <- gsub("_", ".", strings)
  stripped <- gsub("[$]|[%]|NA|[*]", "", dotted)
  toupper(stripped)
}
### Tidying the Data ------------------------------------------------------- ###
# NOTE(review): select_all()/funs()/mutate_at() are superseded in modern
# dplyr (use across()); left as-is to preserve behavior with the dplyr
# version this script was written against.
hawai_col <- hawai %>%
  # Select specific columns by position in the raw listings file
  select(c(23, 26, 27, 29, 37, 45, 52, 53:58, 61, 66:68, 85, 87:93, 99, 102,
           106)) %>%
  # Apply the predefined reformat() to column names and to every value
  select_all(funs(reformat)) %>%
  mutate_all(funs(reformat)) %>%
  # Change column type to integer
  mutate_at(c('REVIEW.SCORES.RATING','BATHROOMS', 'EXTRA.PEOPLE',
              'HOST.RESPONSE.RATE', 'PRICE', 'REVIEWS.PER.MONTH',
              'CALCULATED.HOST.LISTINGS.COUNT'),
            as.integer) %>%
  # Change column type to numeric
  mutate_at(c('BEDS', 'BEDROOMS','GUESTS.INCLUDED', 'MINIMUM.NIGHTS',
              'REVIEW.SCORES.RATING', 'REVIEW.SCORES.ACCURACY',
              'ACCOMMODATES', 'REVIEW.SCORES.CLEANLINESS',
              'REVIEW.SCORES.CHECKIN', 'REVIEW.SCORES.LOCATION',
              'REVIEW.SCORES.COMMUNICATION', 'REVIEW.SCORES.VALUE'),
            as.numeric) %>%
  # Factorize column
  mutate_at(c('FIRST.REVIEW', 'MARKET', 'PROPERTY.TYPE',
              'HOST.RESPONSE.TIME', 'CANCELLATION.POLICY'),
            as.factor) %>%
  # Drop rows with missing key fields; trailing numbers are presumably the
  # NA counts observed by the authors at time of writing.
  filter(!is.na(BATHROOMS), # 11
         !is.na(MARKET), #14
         !is.na(BEDS), # 44
         !is.na(PRICE), # 1029
         !is.na(HOST.SINCE), #276 NA 's !
         !is.na(HOST.RESPONSE.TIME), # 276 !
         !is.na(HOST.RESPONSE.RATE), # 1476 !
         !is.na(BEDROOMS), # 19
         !is.na(HOST.IS.SUPERHOST), # 276!
         !is.na(HOST.IDENTITY.VERIFIED), #276 NA as well
         !is.na(HOST.IDENTITY.VERIFIED)) #276 (NOTE: duplicate condition)
### Missing Data Imputation ------------------------------------------------ ###
miss_method <- rep("", ncol(hawai_col))
names(miss_method) <- names(hawai_col)
miss_method["FIRST.REVIEW"] <- "pmm"
miss_method[c("REVIEW.SCORES.RATING", "REVIEW.SCORES.ACCURACY",
"REVIEW.SCORES.CLEANLINESS", "REVIEW.SCORES.CHECKIN",
"REVIEW.SCORES.COMMUNICATION", "REVIEW.SCORES.LOCATION",
"REVIEW.SCORES.VALUE", "REVIEWS.PER.MONTH")] <- "norm"
miss_pred <- quickpred(hawai_col, mincor = 0.25)
miceOut <- mice(data = hawai_col,
m = 5,
maxit = 10,
method = miss_method,
predictorMatrix = miss_pred,
seed = 42)
clean_hawai <- complete(miceOut)
clean_hawai <- clean_hawai %>%
mutate(REVIEW.SCORES.RATING = ifelse(REVIEW.SCORES.RATING >= 100, 10,
REVIEW.SCORES.RATING/10)) %>%
mutate_at(c('HOST.SINCE', 'FIRST.REVIEW'), as.Date, format = '%Y-%m-%d') %>%
mutate(HOST.SINCE = as.numeric(format(HOST.SINCE,'%Y'))) %>%
mutate(FIRST.REVIEW = as.numeric(format(FIRST.REVIEW, '%Y')),
FIRST.REVIEW = 2019 - FIRST.REVIEW) %>%
mutate(YEARS.HOST = as.integer(2019 - HOST.SINCE)) %>%
mutate_if(is.numeric, as.integer) %>%
mutate_if(is.character, as.factor) %>%
select(-HOST.SINCE) %>%
droplevels() #Dropping unused factor levels
str(clean_hawai)
## Exploratory Data Analysis ---------------------------------------------------
# MARKET is the main geographical variable; keep only the four Hawaiian
# island markets, dropping sparse / non-Hawaii locations.
# (The original held this note in a floating multi-line string literal —
# a parsed-and-discarded expression, not a comment — fixed here.)
clean_hawai_EDA <- clean_hawai %>%
  filter(MARKET %in% c("OAHU", "MAUI", "KAUAI", "THE BIG ISLAND"))
### -------------------- Section Property Features --------------------- ###
### Fig 01: Nr. of Listings per Market ------------------------------------- ###
# A bar chart is appropriate for counting values
fig01 <- ggplot(data = clean_hawai_EDA, aes(x = MARKET)) +
  geom_bar() +
  scale_x_discrete(name = "Market") +
  scale_y_continuous(name = "Nr. of Listings",
                     breaks = c(0, 1000, 2000, 3000, 4000, 5000, 6000, 7000)) +
  labs (title = "Fig 01: Nr. of Listings per Market") +
  theme(plot.title = element_text(hjust = 0.5))  # centre the title
### Fig 02: Nr. of Listings per Property Type ------------------------------ ###
fig02 <- ggplot(data = clean_hawai_EDA, aes(x = PROPERTY.TYPE)) +
  geom_bar() +
  scale_x_discrete(name = "Property Type") +
  scale_y_continuous(name = "Nr. of Listings") +
  labs (title = "Fig 02: Nr. of Listings per Property Type") +
  theme(plot.title = element_text(hjust = 0.5),
        axis.text.x = element_text(angle = 90, hjust = 1))  # rotate crowded labels
# Aggregate the many less frequent property types into "OTHER".
# BUG FIX: PROPERTY.TYPE is a factor at this point, and assigning the new
# label 'OTHER' inside within() on a factor produces NAs with an
# "invalid factor level" warning. Recode via character and re-factor instead.
hawai_property_grouped <- clean_hawai_EDA %>%
  mutate(PROPERTY.TYPE = as.character(PROPERTY.TYPE)) %>%
  mutate(PROPERTY.TYPE = replace(PROPERTY.TYPE,
                                 !(PROPERTY.TYPE %in% c('APARTMENT', 'CONDOMINIUM', 'HOUSE')),
                                 'OTHER')) %>%
  mutate(PROPERTY.TYPE = factor(PROPERTY.TYPE))
fig03 <- ggplot(data = hawai_property_grouped, aes(x = PROPERTY.TYPE)) +
geom_bar() +
scale_x_discrete(name = "Property Type") +
scale_y_continuous(name = "Nr. of Listings") +
labs (title = "Fig 03: Nr. of Listings per Main Property Type") +
theme(plot.title = element_text(hjust = 0.5))
### Fig 04: Mix of Main Property Type per Market --------------------------- ###
# A stacked 100% bar chart is appropriate for showing the percentage split of a
#variable within another
fig04 <- ggplot(data = hawai_property_grouped, aes(x = MARKET)) +
geom_bar(aes(fill = PROPERTY.TYPE), position = "fill") +
scale_x_discrete(name = "Market") +
scale_y_continuous(name = "% of Listings", labels = scales::percent) +
scale_fill_discrete(name = "Property Type") +
labs (title = "Fig 04: Mix of Main Property Types per Market") +
theme(plot.title = element_text(hjust = 0.5))
### Fig 05: Nr. of Listings per Room Type ---------------------------------- ###
fig05 <- ggplot(data = clean_hawai_EDA, aes(x = ROOM.TYPE)) +
geom_bar() +
scale_x_discrete(name = "Room Type") +
scale_y_continuous(name = "Nr. of Listings",
breaks = c(0, 2500, 5000, 7500, 10000, 12500, 15000,
17500, 20000)) +
labs (title = "Fig 05: Nr. of Listings per Room Type") +
theme(plot.title = element_text(hjust = 0.5))
### Fig 06: Median Minimum Nights per Market ------------------------------- ###
# To decrease the influence of outliers in an unlimited scale, use median
# instead of mean. A point chart is appropriate for showing specific calculated
# measures.
market_nights_medians <- clean_hawai_EDA %>%
  group_by(MARKET) %>%
  dplyr::summarise(median.nights = median(MINIMUM.NIGHTS))
# Markets ordered by descending median so the chart reads left-to-right
fig06 <- ggplot(data = market_nights_medians,
                aes(x = reorder(MARKET, -median.nights), y = median.nights)) +
  geom_point(stat="identity", size = 5) +
  scale_x_discrete(name = "Market") +
  scale_y_continuous(name = "Median Minimum Nights", limits = c(0, NA),
                     breaks = c(0, 1, 2, 3)) +
  labs (title = "Fig 06: Median Minimum Nights per Market") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 07: Median Minimum Nights per Main Property Type ------------------- ###
# Same median-per-group pattern, but over the consolidated property types
property_nights_medians <- hawai_property_grouped %>%
  group_by(PROPERTY.TYPE) %>%
  dplyr::summarise(median.nights = median(MINIMUM.NIGHTS))
fig07 <- ggplot(data = property_nights_medians, aes(x = reorder(PROPERTY.TYPE,
                                                                -median.nights),
                                                    y = median.nights)) +
  geom_point(stat="identity", size = 5) +
  scale_x_discrete(name = "Property Type") +
  scale_y_continuous(name = "Median Minimum Nights",
                     limits = c(0, NA), breaks = c(0, 1, 2, 3)) +
  labs (title = "Fig 07: Median Minimum Nights per Main Property Type") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 08: Median Minimum Nights per Room Type ---------------------------- ###
room_nights_medians <- clean_hawai_EDA %>%
  group_by(ROOM.TYPE) %>%
  dplyr::summarise(median.nights = median(MINIMUM.NIGHTS))
fig08 <- ggplot(data = room_nights_medians,
                aes(x = reorder(ROOM.TYPE, -median.nights),
                    y = median.nights)) +
  geom_point(stat="identity", size = 5) +
  scale_x_discrete(name = "Room Type") +
  scale_y_continuous(name = "Median Minimum Nights", limits = c(0, NA),
                     breaks = c(0, 1, 2, 3)) +
  labs (title = "Fig 08: Median Minimum Nights per Room Type") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 09: Median Nr. of Guests Accommodated per Market ------------------- ###
# NOTE(review): this aggregates from hawai_property_grouped (only PROPERTY.TYPE
# differs from clean_hawai_EDA), so the MARKET medians are the same either way.
market_guests_medians <- hawai_property_grouped %>%
  group_by(MARKET) %>%
  dplyr::summarise(median.guests = median(ACCOMMODATES))
fig09 <- ggplot(data = market_guests_medians,
                aes(x = reorder(MARKET, -median.guests),
                    y = median.guests)) +
  geom_point(stat="identity", size = 5) +
  scale_x_discrete(name = "Market") +
  scale_y_continuous(name = "Median Nr. of Guests Accommodated",
                     limits = c(0, NA)) +
  labs (title = "Fig 09: Median Nr. of Guests Accommodated per Market") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 10: Median Nr. of Guests Accommodated per Main Property Type ------- ###
property_guests_medians <- hawai_property_grouped %>%
  group_by(PROPERTY.TYPE) %>%
  dplyr::summarise(median.guests = median(ACCOMMODATES))
fig10 <- ggplot(data = property_guests_medians,
                aes(x = reorder(PROPERTY.TYPE, -median.guests),
                    y = median.guests)) +
  geom_point(stat="identity", size = 5) +
  scale_x_discrete(name = "Property Type") +
  scale_y_continuous(name = "Median Nr. of Guests Accommodated",
                     limits = c(0, NA)) +
  # Title string spans two source lines; the line break is rendered literally
  labs (title = "Fig 10: Median Nr. of Guests Accommodated per
        Main Property Type") +
  theme(plot.title = element_text(hjust = 0.5))
### ---------------------- Section Host Features ----------------------- ###
### Fig 11: Mean Response Rate per Market ---------------------------------- ###
# Mean host response rate (a bounded 0-100 scale, so the mean is acceptable)
RR_market_means <- clean_hawai_EDA %>%
  group_by(MARKET) %>%
  dplyr::summarise(mean.RR = mean(HOST.RESPONSE.RATE))
fig11 <- ggplot(data = RR_market_means, aes(x = MARKET, y = mean.RR)) +
  geom_bar(stat ="identity") +
  scale_x_discrete(name = "Market") +
  scale_y_continuous(name = "Mean Response Rate (%)",
                     breaks = c(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100)) +
  labs (title = "Fig 11: Mean Response Rate per Market") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 12: Mean Review Scores Rating per Market --------------------------- ###
# Mean review score per market on a fixed 0-10 axis for comparability
review_market_means <- clean_hawai_EDA %>%
  group_by(MARKET) %>%
  dplyr::summarise(mean.review = mean(REVIEW.SCORES.RATING))
fig12 <- ggplot(data = review_market_means, aes(x = MARKET, y = mean.review)) +
  geom_point(stat ="identity", size = 5) +
  scale_x_discrete(name = "Market") +
  scale_y_continuous(name = "Mean Review Scores Rating",
                     breaks = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
                     limits = c(0, 10)) +
  labs (title = "Fig 12: Mean Review Scores Rating per Market") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 13: Mix of Cancellation Policy Types per Market -------------------- ###
# 100% stacked bars: share of each cancellation policy within each market
fig13 <- ggplot(data = clean_hawai_EDA, aes(x = MARKET)) +
  geom_bar(aes(fill = CANCELLATION.POLICY), position = "fill") +
  scale_x_discrete(name = "Market") +
  scale_y_continuous(name = "% of Listings", labels = scales::percent) +
  scale_fill_discrete(name = "Cancellation Policy Type") +
  labs (title = "Fig 13: Mix of Cancellation Policy Types per Market") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 14: Mix of Response Times per Superhost Status --------------------- ###
# 100% stacked bars: share of each response-time bucket for superhosts vs
# non-superhosts. HOST.IS.SUPERHOST is coded "T"/"F"; relabel to Yes/No.
# Fix: title previously read "Superthost" (typo).
fig14 <- ggplot(data = clean_hawai_EDA, aes(x = HOST.IS.SUPERHOST)) +
  geom_bar(aes(fill = HOST.RESPONSE.TIME), position = "fill") +
  scale_x_discrete(name = "Superhost Status", limits = c("T", "F"),
                   labels = c("Yes", "No")) +
  scale_y_continuous(name = "% of Listings", labels = scales::percent) +
  scale_fill_discrete(name = "Response Time") +
  labs(title = "Fig 14: Mix of Response Times per Superhost Status") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 15: Mean Response Rate per Superhost Status ------------------------ ###
# Compare mean response rate between superhosts ("T") and non-superhosts ("F")
RR_superhost_means <- clean_hawai_EDA %>%
  group_by(HOST.IS.SUPERHOST) %>%
  dplyr::summarise(mean.RR = mean(HOST.RESPONSE.RATE))
fig15 <- ggplot(data = RR_superhost_means,
                aes(x = HOST.IS.SUPERHOST, y = mean.RR)) +
  geom_bar(stat ="identity") +
  scale_x_discrete(name = "Superhost Status", limits = c("T", "F"),
                   labels = c("Yes", "No")) +
  scale_y_continuous(name = "Mean Response Rate (%)",
                     breaks = c(0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100)) +
  labs (title = "Fig 15: Mean Response Rate per Superhost Status") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 16: Mean Review Scores Rating per Superhost Status ----------------- ###
# Mean review score by superhost status, fixed 0-10 axis
review_superhost_means <- clean_hawai_EDA %>%
  group_by(HOST.IS.SUPERHOST) %>%
  dplyr::summarise(mean.review = mean(REVIEW.SCORES.RATING))
fig16 <- ggplot(data = review_superhost_means, aes(x = HOST.IS.SUPERHOST,
                                                   y = mean.review)) +
  geom_point(stat ="identity", size = 5) +
  scale_x_discrete(name = "Superhost Status", limits = c("T", "F"),
                   labels = c("Yes", "No")) +
  scale_y_continuous(name = "Mean Review Scores Rating",
                     breaks = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
                     limits = c(0, 10)) +
  labs (title = "Fig 16: Mean Review Scores Rating per Superhost Status") +
  theme(plot.title = element_text(hjust = 0.5))
### --------------------- Section Price Analysis ----------------------- ###
### Fig 17: Median Price per Market----------------------------------------- ###
# Median (outlier-robust) nightly price per market, descending
market_price_medians <- hawai_property_grouped %>%
  group_by(MARKET) %>%
  dplyr::summarise(median.price = median(PRICE))
fig17 <- ggplot(data = market_price_medians,
                aes(x = reorder(MARKET, -median.price), y = median.price)) +
  geom_point(stat="identity", size = 5) +
  scale_x_discrete(name = "Market") +
  scale_y_continuous(name = "Median Price", limits = c(0, NA),
                     breaks = c(0, 50, 100, 150, 200, 250, 300)) +
  labs (title = "Fig 17: Median Price per Market") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 18: Median Price per Property Type---------------------------------- ###
# Deliberately uses the full (ungrouped) property-type list, hence the
# rotated x labels to fit the many categories.
property_price_medians <- clean_hawai_EDA %>%
  group_by(PROPERTY.TYPE) %>%
  dplyr::summarise(median.price = median(PRICE))
fig18 <- ggplot(data = property_price_medians,
                aes(x = reorder(PROPERTY.TYPE, -median.price),
                    y = median.price)) +
  geom_point(stat="identity", size = 3) +
  scale_x_discrete(name = "Property Type") +
  scale_y_continuous(name = "Median Price", limits = c(0, NA)) +
  labs (title = "Fig 18: Median Price per Property Type") +
  theme(plot.title = element_text(hjust = 0.5),
        axis.text.x = element_text(angle = 90, hjust = 1))
### Fig 19: Median Price per Main Property Type----------------------------- ###
# Same statistic over the four consolidated (grouped) property types
main_property_price_medians <- hawai_property_grouped %>%
  group_by(PROPERTY.TYPE) %>%
  dplyr::summarise(median.price = median(PRICE))
fig19 <- ggplot(data = main_property_price_medians,
                aes(x = reorder(PROPERTY.TYPE, -median.price), y = median.price)) +
  geom_point(stat="identity", size = 5) +
  scale_x_discrete(name = "Main Property Type") +
  scale_y_continuous(name = "Median Price", limits = c(0, NA),
                     breaks = c(0, 50, 100, 150, 200, 250)) +
  labs (title = "Fig 19: Median Price per Main Property Type") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 20: Median Price per Room Type-------------------------------------- ###
room_price_medians <- clean_hawai_EDA %>%
  group_by(ROOM.TYPE) %>%
  dplyr::summarise(median.price = median(PRICE))
fig20 <- ggplot(data = room_price_medians,
                aes(x = reorder(ROOM.TYPE, -median.price), y = median.price)) +
  geom_point(stat="identity", size = 5) +
  scale_x_discrete(name = "Room Type") +
  scale_y_continuous(name = "Median Price", limits = c(0, NA),
                     breaks = c(0, 50, 100, 150, 200, 250)) +
  labs (title = "Fig 20: Median Price per Room Type") +
  theme(plot.title = element_text(hjust = 0.5))
### Fig 21: Median Price per Nr. of Guest Accommodated --------------------- ###
# Median price for each distinct capacity value
guest_price_medians <- clean_hawai_EDA %>%
  group_by(ACCOMMODATES) %>%
  dplyr::summarise(median.price = median(PRICE))
# Since both axis carry continuous variables,
# we can fit a locally fitted regression line (geom_smooth default loess)
fig21 <- ggplot(data = guest_price_medians,
                aes(x = ACCOMMODATES, y = median.price)) +
  geom_point(stat="identity", size = 4) +
  geom_smooth() +
  scale_x_continuous(name = "Nr. of Guests Accommodated",
                     breaks = c(1, 2, 3, 4, 5, 6, 7, 8, 9,
                                10, 11, 12, 13, 14, 15, 16)) +
  scale_y_continuous(name = "Median Price", limits = c(0, NA),
                     breaks = c(0, 100, 200, 300, 400, 500, 600, 700)) +
  labs (title = "Fig 21: Median Price per Nr. of Guests Accommodated") +
  theme(plot.title = element_text(hjust = 0.5))
### ---------------------------- End of EDA ---------------------------- ###
## Feature selection------------------------------------------------------------
'The function below performs fearute selection using Random forests algorithm
It returns a matrix with accuracy scores and corresponding number of variables
It also returns an object to be used in later classification algorithms called
final_vars'
# feature_selection: recursive-elimination wrapper around ranger random forests.
#   dataset   - data.frame including the class column
#   rf_class  - character scalar: name of the class column
#   eval_type - 1 selects Accuracy, 2 selects Kappa (indexes into the
#               confusionMatrix()$overall vector, whose first two entries are
#               Accuracy and Kappa)
# Prints the (metric, nr-of-variables) matrix and returns a formula
# "<rf_class> ~ v1 + v2 + ..." built from the best-scoring variable subset.
feature_selection <- function(dataset, rf_class, eval_type){
  set.seed(123)
  hawai_final_rf <- dataset
  #Splitting data (stratified 80/20 on the class column)
  ind = createDataPartition(get(rf_class, dataset), p = 0.8, list = FALSE)
  train_X = hawai_final_rf[ind,]
  train_Y = get(rf_class, hawai_final_rf)[ind]
  test_X = hawai_final_rf[-ind,]
  test_Y = get(rf_class, hawai_final_rf)[-ind]
  #Delete class column from X
  # NOTE(review): passing the character variable rf_class straight into
  # select() is ambiguous in modern dplyr; select(-all_of(rf_class)) is the
  # explicit equivalent — TODO confirm no column is literally named "rf_class".
  train_X <- select(train_X, -rf_class)
  test_X <- select(test_X, -rf_class)
  #Preparing matrix for model evaluation
  # One row per model: all predictors down to 2 predictors
  variable_names <- colnames(hawai_final_rf)
  results_rf <- matrix(nrow = length(train_X) - 1 , ncol = 3)
  #Conditional statement for eval_type.
  if (eval_type == 1){
    colnames(results_rf) <- c("Accuracy", "No Variables", "Variables")
  } else if (eval_type == 2){
    colnames(results_rf) <- c("Kappa", "No Variables", "Variables")
  } else {
    return(print("Wrong eval_type"))
  }
  #Creating a copy for the while loop (shrinks by one column per iteration)
  train_X2 <- train_X
  i <- 1
  #Train, Predict, Evaluating model
  while (i < length(variable_names) - 1) {#Excluding models with only 1 variable
    # mtry = 1 so impurity importance is comparable across subset sizes
    rf <- ranger(train_Y ~ ., data = train_X2,
                 importance = "impurity", mtry = 1)
    predicted_Y <- predict(object = rf, data = test_X)
    # NOTE(review): caret's convention is confusionMatrix(data = predictions,
    # reference = truth); arguments are swapped here. Accuracy and Kappa are
    # symmetric so the selected metric is unaffected, but per-class stats
    # from this object would be transposed.
    evaluate <- confusionMatrix(test_Y, predicted_Y$predictions)
    results_rf[i, 1] = as.numeric(round(evaluate$overall[eval_type], 4))
    results_rf[i, 2] = as.numeric(rf$num.independent.variables)
    results_rf[i, 3] = paste(names(sort(rf$variable.importance,
                                        decreasing = T)), collapse = " , ")
    # Drop the least important predictor before the next iteration
    togo <- which.min(rf$variable.importance)
    train_X2 <- select(train_X2,-togo)
    i = i + 1
  }
  #Getting the most important variables according to evaluation metric
  place <- which.max(results_rf[, 1])
  # Fragile but works: the extracted matrix element keeps its column name
  # "Variables", which strsplit propagates, making cols$Variables resolvable.
  cols <- strsplit(results_rf[place, 3], split = " , ")
  final_vars <- as.formula(paste(paste0(rf_class, " ~ "),
                                 paste0(cols$Variables, collapse = " + ")))
  print(results_rf[, 1:2])
  return(final_vars)
}
## Logistic regression----------------------------------------------------------
#Creating Test and train set (stratified 80/20 on the superhost label)
set.seed(23)
trn_index = createDataPartition(y = clean_hawai$HOST.IS.SUPERHOST, p = 0.80,
                                list = FALSE)
trn_data = clean_hawai[trn_index, ]
tst_data = clean_hawai[-trn_index, ]
### Logistic Regression Baseline Model ------------------------------------- ###
# Train baseline model: all predictors, 10-fold CV via caret
set.seed(42)
'We set warnings off for the moment, since warnings are produced from very few
observations in specific levels of some factors. That, combined with CV, forces
LogReg to produce NA coefficients. These coefficients however are of low impor-
tance and rarely contribute, so instead of discarding the whole factor, we dec-
ided to keep it as it is. After all, multicollinearity does not affect predict-
ion resutls.'
options(warn = -1)
host_lgr_baseline <- train(HOST.IS.SUPERHOST ~ .,
                           method = "glm",
                           family = binomial(link = "logit" ),
                           data = trn_data,
                           trControl = trainControl(method = 'cv',
                                                    number = 10))
#Checking coefficients
summary(host_lgr_baseline)
# Testing the performance on the test set
predicted_outcome_bl <- predict(host_lgr_baseline, tst_data)
# Confusion Matrix
host_confM_bl <- confusionMatrix(table(predicted_outcome_bl,
                                       tst_data$HOST.IS.SUPERHOST))
# Restore warning reporting after the warning-heavy training step
options(warn = 1)
# Recall: 0.8159393
host_confM_bl$byClass["Recall"]
# Precision: 0.7750541
host_confM_bl$byClass["Precision"]
# Accuracy: 0.7393655
host_confM_bl$overall["Accuracy"]
####------------------Plotting variable importance---------------------------###
#Calculating the importance of the variables
var_importance_b <- varImp(host_lgr_baseline)
#Plot of the importance of each variable sorted by importance
fig22 <- plot(var_importance_b, ylab = 'Variables', asp = 2, type = 's',
              main = "Fig 22. Variable importance - Baseline model")
### Logistic Regression Embedded Model ------------------------------------- ###
# Select predictors with the random-forest wrapper; eval_type = 2 uses Kappa
log_reg_vars <- feature_selection(clean_hawai, "HOST.IS.SUPERHOST", 2)
# Train the Embedded Model on the selected-variable formula
set.seed(42)
options(warn = -1) #Same as above
host_lgr_embedded <- train(log_reg_vars,
                           method = "glm",
                           family = binomial(link = "logit" ),
                           data = trn_data,
                           trControl = trainControl(method = 'cv', number = 10))
#Checking coefficients
summary(host_lgr_embedded)
# Testing the performance on the test set
predicted_outcome_emb <- predict(host_lgr_embedded, tst_data)
# Confusion Matrix
host_confM_emb <- confusionMatrix(table(predicted_outcome_emb,
                                        tst_data$HOST.IS.SUPERHOST))
options(warn = 1)
# Recall: 0.8136622
host_confM_emb$byClass["Recall"]
# Precision: 0.7715005
host_confM_emb$byClass["Precision"]
# Accuracy: 0.7353702
host_confM_emb$overall["Accuracy"]
####------------------Plotting variable importance---------------------------###
#Calculating the importance of the variables
var_importance_e <- varImp(host_lgr_embedded)
#Plot of the importance of each variable sorted by importance
fig23 <- plot(var_importance_e, ylab = 'Variables', asp = 2, type = 's',
              main = "Fig 23. Variable importance - Embedded method model")
## k - Nearest Neighbours-------------------------------------------------------
'Since we are using classification algorithm, we discretize price variable into
4 clusters, namely LOW, MEDIUM.LOW, MEDIUM.HIGH, HIGH'
# Histogram to motivate the discretization of PRICE
fig_24 <- hist(clean_hawai$PRICE,
               main ="Fig 24. Histogram of Price",
               xlab = "Hawaiian Airbnb Prices")
# Creating dataset for k-NN: cut PRICE into quartile bins, then relabel.
# NOTE(review): the string comparisons assume cut_number() produces exactly the
# labels "[0,125]", "(125,195]", "(195,305]", "(305,999]"; if the data changes,
# every row silently falls through to "" — TODO confirm against current data.
clean_hawai_knn <- clean_hawai %>%
  mutate(PRICE = cut_number(PRICE, n = 4),
         PRICE = ifelse(PRICE == "[0,125]", "LOW",
                        ifelse(PRICE == "(125,195]", "MEDIUM.LOW",
                               ifelse(PRICE == "(195,305]", "MEDIUM.HIGH",
                                      ifelse(PRICE == "(305,999]", "HIGH", "")
                               )))) %>%
  mutate(PRICE = factor(PRICE, levels = c("LOW", "MEDIUM.LOW", "MEDIUM.HIGH",
                                          "HIGH")))
# Price Category after discretization: sanity-check bar chart of bin sizes
fig_25 <- clean_hawai_knn %>%
  group_by(PRICE) %>%
  summarise(NUMBER = n()) %>%
  ggplot(aes(x = PRICE, y = NUMBER)) +
  geom_bar(stat = "identity") +
  scale_x_discrete(name = "Price Category",
                   limits = c("LOW", "MEDIUM.LOW", "MEDIUM.HIGH", "HIGH"),
                   labels = c("LOW", "MEDIUM - LOW", "MEDIUM - HIGH", "HIGH")) +
  scale_y_continuous(name = "Number of Listing") +
  ggtitle("Fig 25. Number of Listing per Price Category") +
  theme(plot.title = element_text(hjust = 0.5, face = "bold"))
### Split train-test data -------------------------------------------------- ###
# First subsample 80% of rows (speed), then a stratified 80/20 split of that,
# so the effective training set is ~64% of the full data.
set.seed(42)
clean_hawai_frac = sample_frac(clean_hawai_knn, size = 0.8, replace = FALSE)
knn_idx <- createDataPartition(clean_hawai_frac$PRICE, p = 0.8, list = FALSE)
trn_knn <- clean_hawai_frac[knn_idx, ]
tst_knn <- clean_hawai_frac[-knn_idx, ]
### k-NN for Baseline Model ------------------------------------------------ ###
set.seed(42)
# Train Baseline Model: all predictors, 5-fold CV, tuning k over 4..8
baseline_knn <- train(PRICE ~ .,
                      method = "knn",
                      data = trn_knn,
                      trControl = trainControl(method = "cv", number = 5),
                      metric = "Accuracy",
                      tuneGrid = expand.grid(k = 4:8))
# Plotting Variable Importance
# NOTE(review): geom_histogram(stat = "identity") is superseded; geom_col()
# is the modern equivalent for pre-computed heights.
fig_26 <- ggplot(varImp(baseline_knn)) +
  geom_histogram(stat = "identity") +
  ggtitle(
    "Fig 26. Variable Importance in Predicting Price using baseline k-NN model"
  ) +
  theme(plot.title = element_text(hjust = 0.5, face = "bold"))
# Testing the performance on the test set
baseline_knn_pred <- predict(baseline_knn, tst_knn)
baseline_knn_cm <- confusionMatrix(baseline_knn_pred, tst_knn$PRICE)
# Accuracy = 0.5994
baseline_knn_cm$overall["Accuracy"]
### k-NN Embedded Model ---------------------------------------------------- ###
# Feature selection with eval_type = 1 (Accuracy)
knn_vars <- feature_selection(clean_hawai_frac, "PRICE", 1)
# Train Embedded Model on the reduced formula
set.seed(42)
embedded_knn <- train(knn_vars,
                      method = "knn",
                      data = trn_knn,
                      trControl = trainControl(method = "cv", number = 5),
                      metric = "Accuracy",
                      tuneGrid = expand.grid(k = 4:8))
# Plotting Variable Importance
fig_27 <- ggplot(varImp(embedded_knn)) +
  geom_histogram(stat = "identity") +
  ggtitle(
    "Fig 27. Variable Importance in Predicting Price using embedded k-NN model"
  ) +
  theme(plot.title = element_text(hjust = 0.5, face = "bold"))
# Testing the performance on the test set
embedded_knn_pred <- predict(embedded_knn, tst_knn)
embedded_knn_cm <- confusionMatrix(embedded_knn_pred, tst_knn$PRICE)
# Accuracy = 0.613
embedded_knn_cm$overall["Accuracy"]
|
1ce717edb4405c9f712d3703d440b3c8a58fdc78
|
79b319935938bcd2cea0b936f0225dbcbd133070
|
/cachematrix.R
|
bc78a32791e34948a4fe57827b306c5ad3bd5642
|
[] |
no_license
|
majeedfarooq/ProgrammingAssignment2
|
91cf52141285a331e6ce73ec07342abee9e61b65
|
1c42bc0e119c877452f07f8252d908685f0b0af0
|
refs/heads/master
| 2021-01-16T19:35:46.151164
| 2016-07-02T06:03:00
| 2016-07-02T06:03:00
| 62,430,558
| 0
| 0
| null | 2016-07-02T02:41:44
| 2016-07-02T02:41:44
| null |
UTF-8
|
R
| false
| false
| 1,274
|
r
|
cachematrix.R
|
## Stores any invertible matrix and its inverse in a cache.
## If the inverse of the same matrix is requested again, the cache is checked
## first; when a cached inverse is found it is returned directly instead of
## being re-calculated.
## makeCacheMatrix takes a matrix as input and stores it in a cached variable.
## It creates set/get functions that write and read this cached matrix, and
## set_inverse/get_inverse functions that write and read the cached inverse
## of the matrix.
# Build a caching wrapper around a matrix: the returned list exposes
# set/get for the matrix itself and set_inverse/get_inverse for its
# cached inverse. Replacing the matrix via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # store the new matrix and drop any stale inverse
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    set_inverse = function(inv) cached_inverse <<- inv,
    get_inverse = function() cached_inverse
  )
}
## This function calculates the inverse of an invertible matrix created
## through the makeCacheMatrix function, returning the cached inverse when
## one is already available instead of recomputing it.
## x   : a cache object as returned by makeCacheMatrix
## ... : further arguments passed on to solve() (previously dropped; now
##       forwarded so callers can e.g. set the tolerance)
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$get_inverse()
  if (!is.null(m)) {
    # cache hit: skip the (potentially expensive) solve()
    return(m)
  }
  data <- x$get()
  m1 <- solve(data, ...)
  x$set_inverse(m1)
  m1
}
|
c98a405371e485341284ce75e91d5e6c6b76c5db
|
16cad2b04656d88d3a76c6dd70660862a87aef1e
|
/Part 4 - Clustering/Section 25 - Hierarchical Clustering/HierarchicalClustering.R
|
89f2e6057434ebb4a60171a9f1e5dd2b3e885ccc
|
[] |
no_license
|
ytnvj2/Machine_Learning_AZ
|
7d4bfdd01eb7aa5269b5521e575204a960afc0db
|
2bc3c0803b48642b4e0290f9df2cf46308a5b2a5
|
refs/heads/master
| 2020-03-11T11:01:39.372379
| 2018-04-24T10:37:44
| 2018-04-24T10:37:44
| 129,955,508
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 450
|
r
|
HierarchicalClustering.R
|
# Hierarchical clustering of mall customers on columns 4:5 of
# Mall_Customers.csv (the two numeric features used for segmentation).
dataset <- read.csv('Mall_Customers.csv')
X <- dataset[4:5]
# dendrogram: Ward linkage on Euclidean distances, plotted to choose k by eye
dendrogram <- hclust(dist(X, method = 'euclidean'), method = 'ward.D')
plot(dendrogram, main = paste('Dendrogram'), xlab = 'Customers',
     ylab = 'Euclidean Distance')
# fitting X to HC: the model is identical to the dendrogram tree, so reuse it
# instead of recomputing hclust() a second time
hc <- dendrogram
y_hc <- cutree(hc, k = 5)
# visualizing HC result
library(cluster)
clusplot(X, y_hc, lines = 0, shade = TRUE, color = TRUE, plotchar = FALSE,
         span = TRUE)
|
f108edd76057cf88fc375fce29c97bebfa004e5f
|
d65e2884871291bc757d619383998761884938e7
|
/script.R
|
d540e037b7b2814bbe3db41d3b452571049e31bc
|
[] |
no_license
|
schepens83/rasp-pi-fitbit
|
a923656493ef1579300b62625f9471c1b7884521
|
a1078167c221753e6c05761130eea4a59368667a
|
refs/heads/master
| 2021-05-13T21:26:11.030638
| 2019-06-28T18:17:58
| 2019-06-28T18:17:58
| 116,463,321
| 0
| 0
| null | 2020-07-28T04:11:45
| 2018-01-06T07:26:47
|
R
|
UTF-8
|
R
| false
| false
| 7,696
|
r
|
script.R
|
# Load data frames (daily, intraday, sleep_*) and helpers; presumably defined
# in init.R — TODO confirm.
source("init.R")
# GLOBAL FILTERS ----------------------------------------------------------
# Keep only the last `mnth` months of data in every table
mnth = 4
daily <- daily %>%
  filter(date > Sys.Date() - months(mnth))
# sleep_by_hr <- sleep_by_hr %>%
#   filter(sleepdate > Sys.Date() - months(mnth))
sleep_detailed <- sleep_detailed %>%
  filter(sleepdate > Sys.Date() - months(mnth))
sleep_summaries <- sleep_summaries %>%
  filter(dateOfSleep > Sys.Date() - months(mnth))
# CHARTS INTRADAY ------------------------------------------------------------------
# intraday steps: cumulative step area + raw step line, faceted by weekday.
# The update(..., year/month/day = const) trick collapses all days onto one
# 24h time axis so days are comparable.
intraday %>%
  filter(as.Date(datetime) > today() - days(3)) %>%
  mutate(Date = as.character(as.Date(datetime))) %>%
  ggplot() +
  geom_area(aes(update(datetime, year = 2020, month = 1, day = 1), cum_steps, alpha = Date), fill = step_color, color = "black", position = "dodge") +
  geom_line(aes(update(datetime, year = 2020, month = 1, day = 1), steps), color = "black", position = "dodge", size = 0.3) +
  labs(title = ("Steps Last 3 Days"), x = "Time (hrs)", y = "Steps") +
  scale_x_datetime(breaks=date_breaks("6 hour"), labels=date_format("%H:%M")) +
  facet_wrap(~ reorder(format(as.Date(datetime), "%A"), datetime)) +
  theme_light() +
  theme(legend.position = "bottom")
ggsave("charts/steps-intraday.png", device = "png", width = 155 * chart_magnifier, height = 93 * chart_magnifier, units = "mm")
# intraday calories (same layout as the steps chart, calories only)
# label <- intraday %>%
#   summarise(
#     datetime = max(update(datetime, year = 2020, month = 1, day = 1)),
#     calories = max(calories),
#     label = "test"
#   )
intraday %>%
  filter(as.Date(datetime) > today() - days(3)) %>%
  mutate(Date = as.character(as.Date(datetime))) %>%
  ggplot() +
  # geom_area(aes(update(datetime, year = 2020, month = 1, day = 1), cum_calories, alpha = Date), fill = calory_color, color = "black", position = "dodge") +
  # geom_line(aes(update(datetime, year = 2020, month = 1, day = 1), calories), color = "black", position = "dodge", size = 0.3) +
  geom_area(aes(update(datetime, year = 2020, month = 1, day = 1), calories, alpha = Date), color = "black", fill = calory_color, position = "dodge") +
  facet_wrap(~ reorder(format(as.Date(datetime), "%A"), datetime)) +
  # geom_text(aes(label = label), data = label, vjust = "top", hjust = "right") +
  scale_x_datetime(breaks=date_breaks("6 hour"), labels=date_format("%H:%M")) +
  theme_light() +
  theme(legend.position = "bottom") +
  labs(title = ("Calories Spent Last 3 Days"), x = "Time (hrs)", y = "Calories")
ggsave("charts/cal-intraday.png", device = "png", width = 155 * chart_magnifier, height = 93 * chart_magnifier, units = "mm")
# on top of each other: cumulative calories for the last 4 days overlaid on a
# single 24h axis, day distinguished by alpha
intraday %>%
  filter(as.Date(datetime) > today() - days(4)) %>%
  mutate(Date = as.character(as.Date(datetime))) %>%
  ggplot() +
  geom_line(aes(update(datetime, year = 2020, month = 1, day = 1), cum_calories, alpha = Date), color = calory_color, position = "dodge", size = 2) +
  # geom_line(aes(update(datetime, year = 2020, month = 1, day = 1), calories), color = "black", position = "dodge", size = 0.3) +
  # geom_area(aes(update(datetime, year = 2020, month = 1, day = 1), calories, alpha = Date), color = "black", fill = calory_color, position = "dodge") +
  # facet_wrap(~ reorder(format(as.Date(datetime), "%A"), datetime)) +
  # geom_text(aes(label = label), data = label, vjust = "top", hjust = "right") +
  scale_x_datetime(breaks=date_breaks("6 hour"), labels=date_format("%H:%M")) +
  scale_y_continuous(position = "right", breaks = extended_breaks(15)) +
  theme_light() +
  theme(legend.position = "bottom") +
  labs(title = ("Calories over the Day - Last 4 Days"), x = "Time (hrs)", y = "Calories")
ggsave("charts/cal-intraday-cum.png", device = "png", width = 155 * chart_magnifier, height = 93 * chart_magnifier, units = "mm")
# CHARTS DAILY ------------------------------------------------------
# multi-month calories over time with a loess trend.
# NOTE(review): `today` here is a bare variable (not lubridate::today());
# presumably defined in init.R — verify, otherwise this filter errors.
daily %>%
  filter(date != today) %>%
  ggplot(aes(date, calories)) +
  geom_point(aes(color = calories, shape = workday), size = 2) +
  geom_line(alpha = 1/3) +
  # NOTE(review): this theme() call is overridden by theme_light() below;
  # only the second legend.position call takes effect.
  theme(legend.position = "bottom") +
  # scale_color_continuous("BrBG") +
  # scale_color_gradient_tableau("BrBG") +
  scale_color_gradient(low="brown", high="Green", guide = "none") +
  geom_smooth(se = FALSE, method = "loess") +
  theme_light() +
  theme(legend.position = "bottom") +
  labs(title = "Calories per Day", x = "Date", y = "Calories")
ggsave("charts/cal-date.png", device = "png", width = 155 * chart_magnifier, height = 93 * chart_magnifier, units = "mm")
# Per-month violin of daily calories with jittered points by workday/vacation
daily %>%
  filter(date != today) %>%
  ggplot(aes(x = reorder(format(date, "%b"), date), calories)) +
  geom_violin(alpha = 1/4, fill = calory_color) +
  geom_jitter(aes(color = workday, shape = vacation), alpha = 2/4) +
  labs(title = "Calories spent per day", x = "Time", y = "Calories") +
  scale_color_calc() +
  theme_light()
ggsave("charts/cal-day2.png", device = "png", width = 155 * chart_magnifier, height = 93 * chart_magnifier, units = "mm")
# CHARTS SLEEP SUMMARIES------------------------------------------------------------
# Nights with "stages" sleep data and more than 5h asleep; columns = hours
# asleep, red line = hours awake, plus loess trend.
# NOTE(review): dateOfSleep is compared to the string "2017-12-01" — works if
# dateOfSleep is Date (coerced), verify the column type.
sleep_summaries %>%
  filter(type == "stages") %>%
  filter(hoursAsleep > 5 ) %>%
  filter(dateOfSleep > "2017-12-01") %>%
  ggplot(aes(dateOfSleep, hoursAsleep)) +
  geom_col(aes(fill = hoursAsleep)) +
  geom_line(aes(y = hoursAwake, color = hoursAwake), size = 1) +
  scale_fill_continuous_tableau(c("Blue")) +
  scale_color_continuous_tableau("Red") +
  geom_smooth(se = FALSE, method = "loess", color = trend_color, size = 0.5, alpha = 0.2) +
  theme_light() +
  theme(legend.position = "bottom") +
  labs(title = "Sleep Trends", x = "Time", y = "Hours", color = "Hours Awake", fill = "Hours Asleep")
ggsave("charts/sleep-multiday.png", device = "png", width = 155 * chart_magnifier, height = 93 * chart_magnifier, units = "mm")
# Hours awake per night as columns with a loess trend
sleep_summaries %>%
  filter(type == "stages") %>%
  filter(hoursAsleep > 5 ) %>%
  ggplot(aes(dateOfSleep, hoursAwake)) +
  geom_col(aes(fill = hoursAwake)) +
  scale_fill_continuous_tableau(c("Red")) +
  geom_smooth(se = FALSE, method = "loess", color = trend_color, size = 1) +
  theme_light() +
  theme(legend.position = "bottom") +
  labs(title = "Hours awake during the night", x = "time", y = "Hours Awake", color = "Hours Awake")
ggsave("charts/sleep-awake-multiday.png", device = "png", width = 155 * chart_magnifier, height = 93 * chart_magnifier, units = "mm")
# Percentage of the night spent awake, as points + faint line + loess trend
sleep_summaries %>%
  filter(type == "stages") %>%
  filter(hoursAsleep > 5 ) %>%
  ggplot(aes(dateOfSleep, perc_awake)) +
  geom_point(aes(color = perc_awake), size = 2.5, alpha = 0.8) +
  geom_line(aes(color = perc_awake), alpha = 1/4, size = 2.5) +
  scale_color_continuous_tableau("Red") +
  geom_smooth(se = FALSE, method = "loess", color = trend_color) +
  theme_light() +
  theme(legend.position = "bottom") +
  labs(title = "Percentage awake", x = "time", y = "% Awake", color = "% Awake")
ggsave("charts/sleep-perc-awake-multiday.png", device = "png", width = 155 * chart_magnifier, height = 93 * chart_magnifier, units = "mm")
# CHARTS SLEEP DETAILED -------------------------------------------------------
# Timeline of sleep stages for the last 3 nights: one horizontal segment per
# (stage, interval), one facet row per night (most recent on top via fct_rev).
sleep_detailed %>%
  filter(as.Date(sleepdate) > today() - days(3)) %>%
  ggplot(aes(fix_start, level)) +
  geom_segment(aes(xend = fix_end, yend = level, color = level), size = 7) +
  facet_grid(fct_rev(as_factor(reorder(format(sleepdate, "%A"), sleepdate))) ~ .) +
  theme_light() +
  scale_color_brewer(palette = "RdYlBu", direction = -1) +
  scale_x_datetime(breaks=date_breaks("2 hour"), labels=date_format("%H:%M")) +
  labs(title = "Sleep Last 3 Nights", x = "Time", y = "")
ggsave("charts/sleep-3nights.png", device = "png", width = 155 * chart_magnifier, height = 93 * chart_magnifier, units = "mm")
#
|
bcc1a2b03336902a4d96348e72e0809d1ce95cb4
|
6fb04083c9d4ee38349fc04f499a4bf83f6b32c9
|
/tests/limitations/test_subset.R
|
9b5b5d777c1e62782fefc0dc05133490ff4c4855
|
[] |
no_license
|
phani-srikar/AdapteR
|
39c6995853198f01d17a85ac60f319de47637f89
|
81c481df487f3cbb3d5d8b3787441ba1f8a96580
|
refs/heads/master
| 2020-08-09T10:33:28.096123
| 2017-09-07T09:39:25
| 2017-09-07T09:39:25
| 214,069,176
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,469
|
r
|
test_subset.R
|
## testing M_Subtraction with different length vectors
## Cases with dates failing
# Pulls two FL vectors of different lengths (8 and 10 elements) from the demo
# table and checks that database-side subtraction (with recycling) matches the
# equivalent computation on their in-memory R copies.
test_that("check FLVector subtraction",
{
    flt <- FLTable("FL_DEMO.finequityreturns","txndate")
    flv1 <- flt[1:8,"equityreturn"]
    flv <- flt[1:10,"equityreturn"]
    # Materialize R-side reference vectors
    flv1R <- as.vector(flv1)
    flvR <- as.vector(flv)
    FLexpect_equal(flv-flv1,flvR-flv1R,check.attributes=FALSE)
})
# Mirror a fresh R environment into an FL environment; eval_expect_equal()
# evaluates the same expression in both and compares the results.
Renv <- new.env(parent = globalenv())
FLenv <- as.FL(Renv)
# subset() with a filter condition and a positive column selection
test_that(
    "Testing subset",
    {
        result1=eval_expect_equal({test1<-subset(airquality, Temp > 80, select = c(Ozone, Temp))},Renv,FLenv)
        ## print(result1)
    })
# subset() with a negative (dropped-column) selection
test_that(
    "Testing subset",
    {
        result2=eval_expect_equal({test2<-subset(airquality, Day == 1, select = -Temp)},Renv,FLenv)
        ## print(result2)
    })
# subset() with a column range and no row filter
test_that(
    "Testing subset",
    {
        result3=eval_expect_equal({test3<-subset(airquality, select = Ozone:Wind)},Renv,FLenv)
        ## print(result3)
    })
# Row selection via a precomputed logical vector on a matrix
test_that(
    "Testing subset",
    {
        result4=eval_expect_equal({
            nm <- rownames(state.x77)
            start_with_M <- nm %in% grep("^M", nm, value = TRUE)
            test4<-subset(state.x77, start_with_M, Illiteracy:Murder)},Renv,FLenv)
        ## print(result4)
    })
# Same selection expressed inline with grepl(); nm/start_with_M recomputed
# on purpose so each evaluated expression is self-contained in both envs.
test_that(
    "Testing subset",
    {
        result5=eval_expect_equal({
            nm <- rownames(state.x77)
            start_with_M <- nm %in% grep("^M", nm, value = TRUE)
            test5<-subset(state.x77, grepl("^M", nm), Illiteracy:Murder)},Renv,FLenv)
        ## print(result5)
    })
|
7c8d42134a7fa6891e8659492441f9cd112460b5
|
9ba49020ab6aadecbf45469709b7db6d4a9fb5fd
|
/R/IDETECT.R
|
b0ac77f40bd3fa9b2297d9a0c8ee6096fd41f386
|
[] |
no_license
|
cran/breakfast
|
938d3c35ffd5f8f8f0d3e30b4f831fd935d7b515
|
b6db9f5933d7de57db0712f590366cd7ce45674e
|
refs/heads/master
| 2022-10-24T01:37:23.643946
| 2022-10-18T12:45:08
| 2022-10-18T12:45:08
| 92,512,136
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,675
|
r
|
IDETECT.R
|
#' @title Solution path generation via the Isolate-Detect method
#' @description This function arranges all possible change-points in the mean of the input vector in the order of importance, via the Isolate-Detect (ID) method.
#' It is developed to be used with the sdll and information criterion (ic) model selection rules.
#' @details
#' The Isolate-Detect method and its algorithm is described in
#' "Detecting multiple generalized change-points by isolating single ones", A. Anastasiou & P. Fryzlewicz (2021), Metrika, https://doi.org/10.1007/s00184-021-00821-6.
#'
#' @param x A numeric vector containing the data to be processed.
#' @param thr_ic A positive real number with default value equal to 0.9. It is used to create the solution path. The lower the value, the larger the solution path vector.
#' @param points A positive integer with default value equal to 3. It defines the distance between two consecutive end- or start-points of the right- or
#' left-expanding intervals, as described in the Isolate-Detect methodology.
#' @return An S3 object of class \code{cptpath}, which contains the following fields:
#' \item{solutions.nested}{\code{TRUE}, i.e., the change-point outputs are nested}
#' \item{solution.path}{Locations of possible change-points in the mean of \code{x}, arranged in decreasing order of change-point importance}
#' \item{solution.set}{Empty list}
#' \item{x}{Input vector \code{x}}
#' \item{cands}{Matrix of dimensions length(\code{x}) - 1 by 4. The first two columns are (start, end)-points of the detection intervals of the corresponding possible change-point location in the third column. The fourth column is a measure of strength of the corresponding possible change-point. The order of the rows is the same as the order returned in \code{solution.path}}
#' \item{method}{The method used, which has value "idetect" here}
#' @seealso \code{\link{sol.idetect_seq}}, \code{\link{sol.not}}, \code{\link{sol.wbs}}, \code{\link{sol.wbs2}}, \code{\link{sol.tguh}},
#' @references A. Anastasiou & P. Fryzlewicz (2021). Detecting multiple generalized change-points by isolating single ones. \emph{Metrika}, https://doi.org/10.1007/s00184-021-00821-6.
#' @examples
#' r3 <- rnorm(1000) + c(rep(0,300), rep(2,200), rep(-4,300), rep(0,200))
#' sol.idetect(r3)
#' @export
sol.idetect <- function(x, thr_ic = 0.9, points = 3) {
  #veryfing the input parameters - x
  x <- as.numeric(x)
  storage.mode(x) <- "double"
  lx <- length(x)
  check.input(x)
  # ID yields a nested solution path, so solution.set is always empty here
  solutions.nested <- TRUE
  solution.set <- list()
  # cands rows: (interval start, interval end, candidate location, strength)
  cands <- matrix(NA, 0, 4)
  if (lx < points) {solution.path <- integer()}
  else{
    points <- as.integer(points)
    # Stage 1: over-detect candidate change-points via the ID threshold rule
    step1 <- window.idetect.th(x, thr_con = thr_ic, w_points = points)
    s1 <- as.matrix(step1$full_information)
    # A single candidate arrives as a column vector; reshape to one row
    if (dim(s1)[2] == 1) {s1 <- t(s1)}
    cpt_lower <- step1[[1]]
    if (length(cpt_lower) == 0){solution.path <- integer()}
    else{
      # Order candidates by decreasing strength
      # (fix: TRUE instead of the reassignable shorthand T)
      ord <- order(s1[,4], decreasing = TRUE)
      cands <- s1[ord, ,drop=FALSE]
      lcpt_ic <- length(cpt_lower)
      seb_set <- c(0, cpt_lower, lx)
      lseb_set <- length(seb_set)
      min_C <- numeric()
      CS <- matrix(cpt_lower,1,lcpt_ic)
      # Stage 2: repeatedly drop the weakest remaining change-point (smallest
      # CUSUM over its enclosing interval); removal order gives importance.
      while (lseb_set >= 3) {
        Rs <- IDetect_cusum_one(x, seb_set[1:(lseb_set - 2)] + 1, seb_set[3:(lseb_set)],
                                seb_set[2:(lseb_set - 1)])
        indic <- which.min(Rs)
        # Candidates already removed in earlier iterations get a 0 CUSUM row
        s1 <- setdiff(cpt_lower, seb_set[2:(lseb_set - 1)])
        d <- numeric(lcpt_ic)
        if(length(s1) == 0){d <- Rs}
        else{
          indic2 <- match(s1, cpt_lower)
          d[-indic2] <- Rs}
        CS <- rbind(CS,d)
        m_rs <- min(Rs)
        seb_set_temp <- seb_set[2:(lseb_set-1)]
        min_Rs <- seb_set_temp[indic]
        cands <- rbind(cands, c(seb_set[indic], seb_set[indic + 2], min_Rs, m_rs))
        min_C <- c(min_C, min_Rs)
        if (seb_set_temp[indic] == 1){
          seb_set <- seb_set[-2]
        } else{
          seb_set <- seb_set[-which(seb_set == min_Rs)]}
        lseb_set <- lseb_set - 1
      }
      # Reverse the removal order: the last-removed point is the most important
      solution.path <- min_C[length(min_C):1]
      # Strength of each original candidate = max CUSUM it attained during
      # the elimination (skip the first CS row, which holds the locations)
      cusum_m <- apply(CS[-1,,drop = FALSE],2,max)
      indic3 <- match(cpt_lower, cands[,3])
      cands[indic3,4] <- cusum_m
      ord <- order(cands[,4], decreasing = TRUE)
      cands <- cands[ord, ,drop=FALSE]#[-(length(solution.path)+1), ,drop = FALSE]
      # Deduplicate locations and drop zero-strength rows
      cands <- cands[!duplicated(cands[,3]),,drop = FALSE]
      cands <- cands[!cands[,4] == 0,,drop = FALSE]
      if(is.na(solution.path[1])){solution.path <- integer(0)}}}
  ret = list(solutions.nested = solutions.nested, solution.path = solution.path, solution.set = solution.set, x = x, cands = cands, method = "idetect")
  class(ret) <- "cptpath"
  ret
}
# Thresholded Isolate-Detect on the subinterval [s, e] of x.
#
# Scans right-expanding intervals [s, s + points], [s, s + 2*points], ... and
# left-expanding intervals [e - points, e], [e - 2*points, e], ... in an
# interleaved fashion; the first interval whose maximum CUSUM exceeds thr_fin
# isolates a change-point, after which the function recurses on the part of the
# data not yet explained.  k_l / k_r carry the current left/right expansion
# counters across recursive calls so already-scanned intervals are not redone.
#
# Args:
#   x         numeric data vector (the full series; s, e delimit the part scanned).
#   sigma     noise-level estimate (MAD of scaled first differences by default).
#   thr_const multiplicative constant for the default threshold.
#   thr_fin   detection threshold applied to the CUSUM values.
#   s, e      start/end indices of the interval under consideration.
#   points    step between consecutive interval end-/start-points.
#   k_l, k_r  left/right expansion counters (internal; used in recursion).
#
# Returns a list with:
#   changepoints     sorted detected change-point locations,
#   full_information matrix with rows (interval start, interval end,
#                    candidate location, CUSUM value),
#   y                cumulative sums of x, prepended with 0.
idetect.th <- function(x, sigma = stats::mad(diff(x) / sqrt(2)), thr_const = 1,
                    thr_fin = sigma * thr_const * sqrt(2 * log(length(x))),
                    s = 1, e = length(x), points = 3, k_l = 1, k_r = 1) {
  # Cumulative sums with a leading 0, so CUSUMs can be formed by differencing.
  y <- c(0, cumsum(x))
  l <- length(x)
  if (sigma == 0) {
    # Noiseless data: every level shift is a change-point; no thresholding needed.
    s0 <- all.shifts.are.cpts(x)
    cpt <- s0$cpts
    l.cpt <- length(cpt)
    if(l.cpt == 0){
      cpt <- integer(0)
      Res_fin <- matrix(0, 1, 4)
    }
    else{
      if(l.cpt == 1){
        CS <- IDetect_cusum_one(x, s = 1, e = l, b = cpt)
        Res_fin <- cbind(1,l,cpt,CS)
      }
      else{
      # Each change-point's CUSUM is computed on the stretch between its
      # neighbouring change-points.
      s1 <- c(1,cpt[-l.cpt]+1)
      e1 <- c(cpt[2:l.cpt], l)
      CS <- IDetect_cusum_one(x, s = s1, e = e1, b = cpt)
      Res_fin <- cbind(s1,e1,cpt,CS)
      }
    }
  }
  else{
    Res <- matrix(0, 1, 4)
    points <- as.integer(points)
    # Candidate end-points of right-expanding and start-points of
    # left-expanding intervals, on the full-series index scale.
    r_e_points <- seq(points, l, points)
    l_e_points <- seq(l - points + 1, 1, -points)
    chp <- 0
    if (e - s < 2) {
      # Interval too short to contain a detectable change-point.
      Res_fin <- matrix(0, 1, 4)
      cpt <- integer(0)
    } else {
      pos_r <- numeric()
      CUSUM_r <- numeric()
      pos_l <- numeric()
      CUSUM_l <- numeric()
      # Restrict the candidate points to [s, e].
      moving_points <- start_end_points(r_e_points, l_e_points, s, e)
      right_points <- moving_points[[1]]
      left_points <- moving_points[[2]]
      lur <- length(left_points)
      rur <- length(right_points)
      # Catch-up phase: if a previous recursion left the right counter behind
      # the left one, scan right-expanding intervals until the counters meet
      # (or a change-point is found).
      if (k_r < k_l) {
        while ( (chp == 0) & (k_r < min(k_l, rur))) {
          ind <- c(s, right_points[k_r])
          tmp <- max.cusum(ind, y)       # (argmax location, max CUSUM) on ind
          pos_r[k_r] <- tmp[1]
          CUSUM_r[k_r] <- tmp[2]
          Res <- rbind(Res, c(s,right_points[k_r],pos_r[k_r],CUSUM_r[k_r]))
          if (CUSUM_r[k_r] > thr_fin) {
            chp <- pos_r[k_r]
            indic <- 0                   # 0: change-point found from the right
          } else {
            k_r <- k_r + 1
          }
        }
      }
      # Symmetric catch-up for the left counter.
      if (k_l < k_r) {
        while ( (chp == 0) & (k_l < min(k_r, lur))) {
          ind <- c(left_points[k_l], e)
          tmp <- max.cusum(ind, y)
          pos_l[k_l] <- tmp[1]
          CUSUM_l[k_l] <- tmp[2]
          Res <- rbind(Res, c(left_points[k_l], e, pos_l[k_l], CUSUM_l[k_l]))
          if (CUSUM_l[k_l] > thr_fin) {
            chp <- pos_l[k_l]
            indic <- 1                   # 1: change-point found from the left
          } else {
            k_l <- k_l + 1
          }
        }
      }
      # Main phase: alternate one right-expanding and one left-expanding
      # interval per iteration until a CUSUM exceeds the threshold or the
      # interval is exhausted.
      if (chp == 0) {
        while ( (chp == 0) & (k_l <= lur) & (k_r <= rur)) {
          ind <- c(s, right_points[k_r])
          tmp <- max.cusum(ind, y)
          pos_r[k_r] <- tmp[1]
          CUSUM_r[k_r] <- tmp[2]
          Res <- rbind(Res, c(s,right_points[k_r],pos_r[k_r],CUSUM_r[k_r]))
          if (CUSUM_r[k_r] > thr_fin) {
            chp <- pos_r[k_r]
            indic <- 0
          } else {
            ind <- c(left_points[k_l], e)
            tmp <- max.cusum(ind, y)
            pos_l[k_l] <- tmp[1]
            CUSUM_l[k_l] <- tmp[2]
            Res <- rbind(Res, c(left_points[k_l], e, pos_l[k_l], CUSUM_l[k_l]))
            if (CUSUM_l[k_l] > thr_fin) {
              chp <- pos_l[k_l]
              indic <- 1
            } else {
              k_r <- k_r + 1
              k_l <- k_l + 1
            }
          }
        }
      }
      if (chp != 0) {
        # A change-point was isolated: recurse on the side not yet explained,
        # passing the expansion counters along so work is not repeated.
        if (indic == 1) {
          r <- idetect.th(x, s = s, e = chp, points = points,
                          thr_fin = thr_fin, k_r = k_r, k_l = 1)
        } else {
          r <- idetect.th(x, s = chp + 1, e = e, points = points,
                          thr_fin = thr_fin, k_r = 1, k_l = max(1, k_l - 1))
        }
        cpt <- c(chp, r[[1]])
        Res_fin <- rbind(Res, r[[2]])
      } else {
        cpt <- chp
        Res_fin <- Res
      }
    }
    # Drop the zero placeholders introduced when nothing was detected.
    cpt <- cpt[cpt != 0]
    Res_fin <- Res_fin[which(Res_fin[,3] != 0), , drop = FALSE]
  }
  return(list(changepoints = sort(cpt), full_information = Res_fin, y = y))
}
# Windowed driver for idetect.th.
#
# For series longer than c_win observations, runs the thresholded
# Isolate-Detect pass on consecutive windows of length c_win (the threshold is
# computed once from the full series length), plus an extra overlap run of
# width 20 * w_points around each window boundary so change-points falling
# near a boundary are not missed.  All indices in the per-window results are
# shifted back to the full-series scale.
#
# Returns the same structure as idetect.th: list(changepoints,
# full_information, y = c(0, cumsum(xd))).
window.idetect.th <- function(xd, sigma = stats::mad(diff(xd) / sqrt(2)), thr_con = 1,
                              c_win = 5000, w_points = 3) {
  lg <- length(xd)
  w_points <- as.integer(w_points)
  c_win <- min(lg, c_win)
  c_win <- as.integer(c_win)
  # Threshold based on the FULL series length, shared by all windows.
  t <- sigma * thr_con * sqrt(2 * log(lg))
  if (lg <= c_win) {
    # Short series: a single direct pass suffices.
    u <- idetect.th(x = xd, thr_const = thr_con, points = w_points)
    return(u)
  } else {
    K <- ceiling(lg / c_win)      # number of windows
    tsm <- list()                 # per-window data
    u <- list()                   # per-window idetect.th results
    ufin <- list()
    uaddition <- list()           # boundary-overlap results
    # First window.
    tsm[[1]] <- xd[1:c_win]
    ufin <- idetect.th(tsm[[1]], thr_fin = t, points = w_points)
    # Overlap run around the first boundary (points = 2 for a finer scan).
    uaddition[[1]] <- list()
    uaddition[[1]] <- idetect.th(x = xd[(max(1, c_win - (10 * w_points) + 1)):min( (c_win + (10 * w_points)), lg)], thr_fin = t, points = 2)
    # Shift overlap-run indices back to the full-series scale.
    uaddition[[1]][[1]] <- uaddition[[1]][[1]] + c_win - (10 * w_points)
    uaddition[[1]][[2]][,1] <- uaddition[[1]][[2]][,1] + c_win - (10 * w_points)
    # NOTE(review): min() reduces the whole shifted end-point column to a
    # single scalar, so every row receives the same end-point; pmin() would
    # cap each row individually.  Confirm whether this is intended (same
    # pattern recurs in the loop below).
    uaddition[[1]][[2]][,2] <- min(uaddition[[1]][[2]][,2] + c_win - (10 * w_points),min( (c_win + (10 * w_points)), lg))
    uaddition[[1]][[2]][,3] <- uaddition[[1]][[2]][,3] + c_win - (10 * w_points)
    ufin[[1]] <- c(ufin[[1]], uaddition[[1]][[1]])
    # Interior windows 2 .. K-1, each followed by its boundary overlap run.
    i <- 2
    while (i < K) {
      tsm[[i]] <- xd[( (i - 1) * c_win + 1):(i * c_win)]
      u[[i]] <- list()
      u[[i]] <- idetect.th(x = tsm[[i]], thr_fin = t, points = w_points)
      # Shift window results to the full-series scale.
      u[[i]][[1]] <- u[[i]][[1]] + (i - 1) * c_win
      u[[i]][[2]][,1] <- u[[i]][[2]][,1] + (i - 1) * c_win
      u[[i]][[2]][,2] <- u[[i]][[2]][,2] + (i - 1) * c_win
      u[[i]][[2]][,3] <- u[[i]][[2]][,3] + (i - 1) * c_win
      uaddition[[i]] <- list()
      uaddition[[i]] <- idetect.th(x = xd[(max(1, i * c_win - (10 * w_points) + 1)):(min(i * c_win + (10 * w_points), lg))], thr_fin = t, points = 2)
      uaddition[[i]][[1]] <- uaddition[[i]][[1]] + i * c_win - (10 * w_points)
      uaddition[[i]][[2]][,1] <- uaddition[[i]][[2]][,1] + i * c_win - (10 * w_points)
      # NOTE(review): same min()-vs-pmin() question as above.
      uaddition[[i]][[2]][,2] <- min(uaddition[[i]][[2]][,2] + i * c_win - (10 * w_points), min(i * c_win + (10 * w_points), lg))
      uaddition[[i]][[2]][,3] <- uaddition[[i]][[2]][,3] + i * c_win - (10 * w_points)
      ufin[[1]] <- c(ufin[[1]],u[[i]][[1]], uaddition[[i]][[1]])
      i <- i + 1
    }
    # Last (possibly shorter) window; no trailing overlap run needed.
    tsm[[K]] <- xd[( (K - 1) * c_win + 1):lg]
    u[[K]] <- list()
    u[[K]] <- idetect.th(tsm[[K]], thr_fin = t, points = w_points)
    u[[K]][[1]] <- u[[K]][[1]] + (K - 1) * c_win
    u[[K]][[2]][,1] <- u[[K]][[2]][,1] + (K - 1) * c_win
    u[[K]][[2]][,2] <- u[[K]][[2]][,2] + (K - 1) * c_win
    u[[K]][[2]][,3] <- u[[K]][[2]][,3] + (K - 1) * c_win
    ufin_cpt <- c(ufin[[1]], u[[K]][[1]])
    # Assemble the full_information matrix from all windows and overlap runs,
    # dropping the zero placeholder rows.
    Res_fin <- matrix(0, 1, 4)
    Res_fin <- rbind(Res_fin, ufin[[2]], uaddition[[1]][[2]])
    if (K > 2){
      for (i in 2:(K-1)){Res_fin <- rbind(Res_fin,u[[i]][[2]], uaddition[[i]][[2]])}}
    Res_fin <- rbind(Res_fin, u[[K]][[2]])
    Res_fin <- Res_fin[which(Res_fin[,3] != 0),]
    return(list(changepoints = unique(sort(ufin_cpt)), full_information = Res_fin, y = c(0, cumsum(xd))))
    #return(list(changepoints = sort(ufin_cpt), full_information = Res_fin, y = c(0, cumsum(xd))))
  }
}
|
8da668b6347eb9e9d96d36771a8bfd57779fed46
|
f1de1e557c0013509bfa93ebec936e81549232fe
|
/Chapter 4/Chapter4_Problem13.R
|
f8dd8657e1e8364164ef9191505f9af15d9605fb
|
[] |
no_license
|
BassJohn09/Introduction-to-Statistical-Learning---Solution
|
f79036df36e4e741b2d2df79cec35f0a4b84cf55
|
499587a5bb836f3d6486b294d7144f983636979b
|
refs/heads/master
| 2020-04-24T10:40:52.559098
| 2019-02-26T23:06:32
| 2019-02-26T23:06:32
| 171,902,010
| 0
| 0
| null | 2019-02-26T23:06:33
| 2019-02-21T16:00:22
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 2,238
|
r
|
Chapter4_Problem13.R
|
# Chapter 4 - Problem 13
# Using the Boston data set, fit classification models in order to predict
# whether a given suburb has a crime rate above or below the median.
# Explore logistic regression, LDA, and KNN models using various subsets
# of the predictors. Describe your findings.

# Load all dependencies up front.  library() errors on failure (require()
# only warns), and MASS must be attached before Boston is first used: the
# Boston data set lives in MASS, not ISLR.
library(ISLR)
library(MASS)   # Boston, lda(), qda()
library(class)  # knn()

set.seed(1)  # reproducible train/test split

# Binary response: 1 if crim is above its median, 0 otherwise.
crim01 <- ifelse(Boston$crim > median(Boston$crim), 1, 0)
Boston$crim01 <- crim01
cor(Boston)
boxplot(medv ~ crim01, data = Boston)
# Important variables: zn+indus+nox+age+dis+rad+tax+lstat+medv
# Not important: chas+rm+black+ptratio
# indus - nox - age are somewhat correlated
# tax and rad are highly correlated
# lstat and medv are correlated

# Sample WITHOUT replacement so the 76 test rows (~15% of the data) are
# distinct and the training set is exactly their complement; with
# replace = TRUE duplicated test rows would also shrink the holdout.
test.selection <- sample(seq_len(nrow(Boston)), 76, replace = FALSE)
test.set <- Boston[test.selection, ]
training.set <- Boston[-test.selection, ]

# (a) Logistic regression.
# family = binomial is required: glm()'s default (gaussian) would fit a
# linear probability model, not logistic regression.
logit.model <- glm(crim01 ~ indus + rad + age + medv,
                   data = training.set, family = binomial)
summary(logit.model)
logit.prob <- predict(logit.model, newdata = test.set, type = "response")
logit.pred <- rep(0, nrow(test.set))
logit.pred[logit.prob > 0.5] <- 1
logit.cm <- table(logit.pred, test.set$crim01)
logit.error <- mean(logit.pred != test.set$crim01)

# (b) LDA
lda.model <- lda(crim01 ~ nox + rad + age + medv, data = training.set)
lda.model
lda.pred <- predict(lda.model, newdata = test.set, type = "response")
lda.cm <- table(lda.pred$class, test.set$crim01)
lda.error <- mean(lda.pred$class != test.set$crim01)

# (c) QDA
qda.model <- qda(crim01 ~ nox + rad + age + medv, data = training.set)
qda.model
qda.pred <- predict(qda.model, newdata = test.set, type = "response")
qda.cm <- table(qda.pred$class, test.set$crim01)
qda.error <- mean(qda.pred$class != test.set$crim01)

# (d) KNN on the same four predictors (k = 8).
training.set.knn <- cbind(training.set$nox, training.set$rad,
                          training.set$age, training.set$medv)
test.set.knn <- cbind(test.set$nox, test.set$rad,
                      test.set$age, test.set$medv)
training.response.knn <- as.factor(training.set$crim01)
test.response.knn <- as.factor(test.set$crim01)
knn.pred <- knn(training.set.knn, test.set.knn, training.response.knn, k = 8)
knn.cm <- table(knn.pred, test.response.knn)
knn.error <- mean(knn.pred != test.response.knn)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.