blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
08168ee5d8a90e949a2a7ead112b3cee05973924
|
aaaed74b1a9ce40ddb025d41788852f207c51c81
|
/00_nulldata/02_2nd_stab/s6/00_results.R
|
9c1cf3a5b78f579c67072afa571e5996b9b9c08c
|
[] |
no_license
|
NeuroStat/cluster-stability-m
|
3fc31d71c2d91e2cbf486afe93e8d42a367b2b7c
|
15ebe541c904edb16eccb3a164c6feb8708dfe15
|
refs/heads/master
| 2020-12-18T18:24:30.408260
| 2020-01-22T02:20:20
| 2020-01-22T02:20:20
| 235,483,509
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,686
|
r
|
00_results.R
|
# script to output only the final result table
# Command-line arguments: one or more stability-map basenames (without the
# .nii.gz extension); each is processed by the loop further down this script.
input <- commandArgs(TRUE)
# functions
# Expand a cluster-labelled NIfTI image into a per-voxel table.
# nifti.file: path to a NIfTI file whose voxel values are integer cluster
#             labels (0 = background).
# Returns a matrix with one row per non-zero voxel and 4 columns:
#   [,1] cluster ID, [,2:4] the voxel's (x, y, z) array indices.
# NOTE(review): the loop assumes labels run contiguously from
# range(tmp)[1]+1 to range(tmp)[2] (i.e. minimum value 0, no gaps);
# if any ID in that range is absent, its rows stay NA -- confirm inputs.
from_cluster_to_voxels <- function(nifti.file){
require(oro.nifti)
tmp <- readNIfTI(nifti.file)
raCl <- range(tmp)
# total number of non-background voxels = number of output rows
TOT <- length(which(tmp!=0))
OUT <- matrix(NA, ncol=4, nrow=TOT)
strt <-1
# fill OUT block-wise: rows strt..strt1 hold all voxels of cluster i
for(i in seq(raCl[1]+1, raCl[2])){
strt1<-length(which(tmp==i))+strt -1
#cat(strt, " to ")
#cat(strt1, "\n")
OUT[strt:strt1,1] <- i
OUT[strt:strt1,2:4] <- which(tmp==i, arr.ind=TRUE )
strt <- strt1+1
}
rm(tmp)
OUT
}
library(Hmisc)
library(oro.nifti)
# For each input basename K: combine the cluster labels (cl.nii.gz) with the
# stability map K.nii.gz and the tab-separated cluster table "filecon", then
# write one summary table STAB_<K>.txt with per-cluster mean/SD stability,
# voxel count and the original cluster info.
cat("start analysing results \n")
for(K in input){
# fixed input file names expected in the working directory
nfile <- "cl.nii.gz"
stab.file <- paste(K, ".nii.gz", sep="")
fcon <- "filecon"
cat(stab.file, " .. read in .. \n")
voxMatrix <- from_cluster_to_voxels(nfile)
tmp<-readNIfTI(stab.file)
# append each voxel's stability value, looked up by its (x,y,z) index
voxMatrix <- cbind(voxMatrix,tmp[voxMatrix[,2:4]])
voxM <- data.frame(voxMatrix)
names(voxM) <- c("ID", "x", "y","z", "stab")
voxM$ID <- as.factor(voxM$ID)
# per-cluster mean and standard deviation of stability
clS <- aggregate(stab ~ ID, data=voxM, mean)
clS_sd <- aggregate(stab ~ ID, data=voxM, sd)
names(clS) <- c("ID", "Stability")
names(clS_sd) <- c("ID", "sd")
clS$sd <- clS_sd$sd
# cluster sizes (voxel counts) recounted from the label image
tmp <- readNIfTI(nfile)
tmpseq <- as.numeric(as.character(clS$ID))
for(i in tmpseq){
clS$size[tmpseq==i]<-length(which(tmp==i))
}
rm(tmp)
# Read in the original cluster file
tmp <-read.table(fcon, sep="\t", header=TRUE)
names(tmp)[1] <- "ID"
out <- merge(x = clS, y = tmp, by ="ID", all.x=TRUE)
out<- out[,]
# sort so the highest cluster IDs come first
out<-out[order(out$ID, decreasing=TRUE),]
out$ID <- as.integer(as.character(out$ID))
cat("\t", "write output", "\n")
write.table(out, file=paste("STAB_", K,".txt", sep=""), row.names=FALSE)
cat("\t . \n")
}
cat("done. \n")
|
d79e25eb78d73d39a7b686d488f5974fcff319d7
|
c8762334dcf5c843168f8b3ea65e7fa6446eb027
|
/man/profile_plot.Rd
|
82d3f7030ab736d9c8e706a8a7cc9d29fabb74cc
|
[] |
no_license
|
spgolden/EEAR
|
cd92b6ee6b16eee07a77b88c66d224b12592be2e
|
f4cfff1d7de58d9b0f0edba587a3621d594274eb
|
refs/heads/master
| 2020-12-30T20:58:57.139664
| 2014-05-15T22:55:34
| 2014-05-15T22:55:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,338
|
rd
|
profile_plot.Rd
|
\name{profile_plot}
\alias{profile_plot}
\alias{profile_prepare}
\title{Plot Usage Profile Lines}
\usage{
profile_plot(x, line = c("weekday", "workweek", "day", "month", "season",
"year"), facets = NULL, legend = TRUE, title = "",
xlab = "Time of Day", ylab = "kWh", plot = TRUE, ...)
profile_prepare(x, line, facets, ...)
}
\arguments{
\item{x}{a data table}
\item{line}{The type of line to be drawn. Default day of
week. Possible values include every `day`, `month`,
`season`, and `year`.}
\item{facets}{The type of facets to panel the profiles
by. The available facets depend upon which line is chosen.
Day profiles can be plotted for each week, month,
quarter, season, or year. Weekday profiles for all but
week. Month for quarter, season, or year. Season by
year. Year takes no facets.}
\item{legend}{Logical. Should the legend be included in
plot.}
\item{title}{The title on the plot.}
\item{xlab}{The label for the x-axis. Default is "Time of
Day".}
\item{ylab}{The label for the y-axis. Default is "kWh".}
\item{plot}{Logical. Should the plot be drawn.}
\item{\dots}{Further arguments passed to data
preparation.}
}
\value{
Invisibly returns the ggplot plotting object.
}
\description{
Generates profile plots, averaged for the given line
period, paneled for the given facets.
}
|
0d1d815462f88c38860122a56b49db33f156edd7
|
6d42def2aa82e2b20b7467e02bfade4740200d18
|
/Data_Analysis_Visualization/dir.R
|
0f767d5c6e69a55eb2369b17a2fcc2f5f8a44850
|
[] |
no_license
|
Hz-Lin/wur_bioinfomatics
|
f7d88aea294f1e75d8c49d86352810d47bd133e3
|
fe970d029e11a6d32f6e266ddc8f8399eb36c4e0
|
refs/heads/master
| 2021-09-10T17:48:36.601979
| 2018-03-30T11:33:35
| 2018-03-30T11:33:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
dir.R
|
# Print the current working directory, then switch to the project checkout.
getwd()
# NOTE(review): hard-coded absolute path -- only valid on the original
# author's machine; adjust before running elsewhere.
setwd("/home/evelina/projects/github/wur_bioinfomatics")
|
c3ffedf88ac2d9cd0cd9de91516749aead545536
|
907c7acdaa598d6bc1fee3843f660bd0afd62520
|
/R/MTGS.mrce.R
|
59c8d5cf4f0a034d2b39448f0b193167b204e011
|
[] |
no_license
|
cran/MTGS
|
25bcabaf14cb0a2d19a3c874ae392a391c771561
|
c3a0dcd95f93d0ec4a315c8884b20b2aa9310aca
|
refs/heads/master
| 2020-12-22T01:26:32.510743
| 2019-10-16T10:50:09
| 2019-10-16T10:50:09
| 236,629,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 715
|
r
|
MTGS.mrce.R
|
#####################
#library("MRCE")
# Multi-trait genomic selection via MRCE (multivariate regression with
# covariance estimation).
# X: n x p predictor (marker) matrix
# Y: n x q response (trait) matrix
# r: fraction of rows held out as a test set each round (m = round(n*r))
# Returns a list with the first-stage coefficients (Bhat), intercepts
# (muhat) and predictions for the full X matrix (Pred).
# NOTE(review): each of the 25 iterations overwrites fit/refit/Pred, so only
# the last random split determines the returned values; Pred1 and the
# summary(refit) result are computed but never used, and the returned Bhat
# comes from `fit` while Pred uses `refit`'s Bhat -- confirm intended.
# NOTE(review): requireNamespace() loads but does not attach MRCE, so the
# unqualified mrce() call relies on the package NAMESPACE importing it.
MTGS.mrce<-function(X, Y, r){
requireNamespace("MRCE")
n<-nrow(X)
p<-ncol(X)
q<-ncol(Y)
m<-round(n*r)
for (k in 1:25){
# random train/test split of the rows
tst<-sample(1:n,size=m,replace=FALSE)
XTRN<-X[-tst,] ; YTRN<-Y[-tst,]
XTST<-X[tst,] ; YTST<-Y[tst,]
# first MRCE fit with fixed penalties
fit=mrce(Y=YTRN, X=XTRN, lam1=0.25, lam2=1e-5, method="single")
# re-fit keeping the zero pattern of Bhat (huge penalty on zero entries)
lam2.mat=1000*(fit$Bhat==0)
refit=mrce(Y=YTRN, X=XTRN, lam2=lam2.mat, method="fixed.omega", omega=fit$omega, tol.in=1e-12)
summary(refit)
Bhat<-refit$Bhat
XTST<-as.matrix(XTST)
X<-as.matrix(X)
# predictions from the re-fitted coefficients
Pred<-X%*%Bhat
Pred1<-XTST%*%Bhat
}
return(list("Bhat"=fit$Bhat, "muhat"=fit$muhat, "Pred"=Pred))
}
|
84e1605e18649183ce60c8cba060d118454d211a
|
1a23d38841cfc47b1e0178a62b129298fddd00d9
|
/plot3.r
|
a95664aaf7691890f5882318d50125c4e125f2b8
|
[] |
no_license
|
yeokc2369/exploratorydataanalysis2
|
8b8e2b26c53935d2a3c20dc251de34364ffac6f9
|
475de8ea3d458ec42c593bba5ee10d470ed89c8f
|
refs/heads/master
| 2021-01-10T01:12:39.173183
| 2015-10-25T22:58:25
| 2015-10-25T22:58:25
| 44,934,054
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 999
|
r
|
plot3.r
|
## Question 3:
## Of the four types of sources indicated by the type (point, nonpoint, onroad, nonroad)
## variable, which of these four sources have seen decreases in emissions from 1999–2008
## for Baltimore City? Which have seen increases in emissions from 1999–2008?
## Use the ggplot2 plotting system to make a plot answer this question.
## Call the relevant libraries
library(plyr)
library(ggplot2)
## Read the data file (takes a few seconds)
NEI <- readRDS("summarySCC_PM25.rds")
## Keep only Baltimore City records (fips == "24510")
NEII <- NEI[NEI$fips == "24510",]
## Sum emissions per (year, type) combination
TypePM25 <- ddply(NEII, .(year, type), function(x) sum(x$Emissions))
colnames(TypePM25)[3] <- "Emissions"
## initiates the PNG graphics device to save to plot3.png
png("plot3.png")
## produce line graphs
## NOTE(review): the plot relies on top-level auto-printing; when run via
## source() (rather than Rscript) it will not be drawn -- wrap in print() if so.
qplot(year, Emissions, data=TypePM25, color=type, geom="line") +
ggtitle(expression("Baltimore City PM2.5 Emissions by Source Type and Year")) +
xlab("Year") +
ylab(expression("Total PM2.5 Emissions (tons)"))
## closes the PNG graphics device
dev.off()
|
0fad45ee4d3003eeeb7ed46244f54ab30f316c0d
|
2a2b58ecc90d0cfc5843641dce103a68ff4910cf
|
/R/iteration.R
|
6e2932562f34f5f667e7b72250d83dcb72bbb411
|
[] |
no_license
|
cran/deSolve
|
feac6b8d4adab866268ed36a903c024677f9127e
|
560bba8ed670b12277ba3483bdd15db91ec12dc9
|
refs/heads/master
| 2023-07-07T18:40:26.379280
| 2023-07-01T15:00:02
| 2023-07-01T15:00:02
| 17,695,430
| 12
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,523
|
r
|
iteration.R
|
### ============================================================================
### Interface to C code for Euler's ODE solver
### with fixed step size and without interpolation, see helpfile for details.
### ============================================================================
### iteration() advances the state over equally spaced output times by
### repeated fixed-step iteration, delegating the stepping to the compiled
### routine "call_iteration" in the deSolve shared library.
### y     : initial state vector
### times : output times; must be equally spaced
### func  : an R function, the name of a compiled routine, or a list
###         bundling func/initfunc/initforc together
### hini  : internal step size; defaults to the output interval and must be
###         an integer fraction of it (recalculated with a warning if not)
### Remaining arguments mirror the other deSolve solvers.
iteration <- function(y, times, func, parms, hini = NULL,
verbose = FALSE, ynames = TRUE,
dllname = NULL, initfunc = dllname, initpar = parms,
rpar = NULL, ipar = NULL, nout = 0, outnames = NULL, forcings = NULL,
initforc = NULL, fcontrol = NULL, ...) {
# a list-valued 'func' bundles func/initfunc/initforc; unpack it and refuse
# conflicting explicit arguments
if (is.list(func)) { ### IF a list
if (!is.null(initfunc) & "initfunc" %in% names(func))
stop("If 'func' is a list that contains initfunc, argument 'initfunc' should be NULL")
if (!is.null(initforc) & "initforc" %in% names(func))
stop("If 'func' is a list that contains initforc, argument 'initforc' should be NULL")
initfunc <- func$initfunc
initforc <- func$initforc
func <- func$func
}
# the fixed-step scheme requires equidistant output times
if (abs(diff(range(diff(times)))) > 1e-10)
stop (" times should be equally spaced")
dt <- diff(times[1:2])
if (is.null(hini)) hini <- dt
# number of internal steps per output interval
nsteps <- as.integer(dt / hini)
if (nsteps == 0)
stop (" hini should be smaller than times interval ")
if (nsteps * hini != dt)
warning(" hini recalculated as integer fraction of times interval ",dt/nsteps)
## check input
checkInputEuler(y, times, func, dllname)
n <- length(y)
## Model as shared object (DLL)?
Ynames <- attr(y, "names")
Initfunc <- NULL
flist <-list(fmat = 0, tmat = 0, imat = 0, ModelForc = NULL)
Nstates <- length(y) # assume length of states is correct
if (is.character(func) | inherits(func, "CFunc")) {
# compiled model: resolve the routine, initializer and optional forcings
DLL <- checkDLL(func, NULL, dllname,
initfunc, verbose, nout, outnames)
Initfunc <- DLL$ModelInit
Func <- DLL$Func
Nglobal <- DLL$Nglobal
Nmtot <- DLL$Nmtot
if (! is.null(forcings))
flist <- checkforcings(forcings, times, dllname, initforc, verbose, fcontrol)
rho <- NULL
if (is.null(ipar)) ipar <- 0
if (is.null(rpar)) rpar <- 0
} else {
initpar <- NULL # parameter initialisation not needed if function is not a DLL
rho <- environment(func)
## func and jac are overruled, either including ynames, or not
## This allows to pass the "..." arguments and the parameters
if(ynames) {
Func <- function(time, state, parms) {
attr(state, "names") <- Ynames
func (time, state, parms, ...)
}
} else { # no ynames ...
Func <- function(time, state, parms)
func (time, state, parms, ...)
}
## Call func once to figure out whether and how many "global"
## results it wants to return and some other safety checks
FF <- checkFuncEuler(Func, times, y, parms, rho, Nstates)
Nglobal <- FF$Nglobal
Nmtot <- FF$Nmtot
}
## the CALL to the integrator
on.exit(.C("unlock_solver"))
out <- .Call("call_iteration", as.double(y), as.double(times), nsteps,
Func, Initfunc, parms, as.integer(Nglobal), rho, as.integer(verbose),
as.double(rpar), as.integer(ipar), flist, PACKAGE = "deSolve")
## saving results
# reshape raw solver output into the standard deSolve result matrix
out <- saveOutrk(out, y, n, Nglobal, Nmtot,
iin = c(1, 12, 13, 15), iout = c(1:3, 18))
attr(out, "type") <- "iteration"
if (verbose) diagnostics(out)
out
}
|
23d01feeb2f537176e48047f776e30aff333c76b
|
c5ef6312b01eb79e977978f7a72d87c8f386ad32
|
/S0 Assemble Scripts.R
|
04cb968d603262bffa559bff83bd0e696626c322
|
[] |
no_license
|
dlill/Simulations
|
bec6ea865616c6486dfa99f83b30f6d201ae1067
|
9a8fb0eb0717ae38d9d74a388314cd6a30211b6a
|
refs/heads/master
| 2022-05-05T11:58:50.350345
| 2022-03-23T22:47:07
| 2022-03-23T22:47:07
| 137,068,479
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,458
|
r
|
S0 Assemble Scripts.R
|
# Collect the analysis scripts from the model directories into the paper's
# Code/ folder, rebuild the MRAr source package, and zip everything up.
library(conveniencefunctions)
# destination (paper code folder) and source (model scripts) directories
dir_final <- "~/Promotion/Writing/Papers/2017 02 MRA optimization procedure Paper/Code/"
dir_models <- "~/Promotion/Projects/MRA/Simulations/Models/"
setwd(dir_models)
# names = destination file names in the paper's Scripts/ folder,
# values = source paths relative to dir_models
file_index <- c(Cascade.Rmd = "Cascade/Cascade.Rmd",
`Cascade with cxz complex.Rmd` = "Cascade/Cascade with cxz different perturbations.Rmd",
`Cascade with zy feedback.Rmd` = "Cascade/Cascade with gainlf different perturbations.Rmd",
`Cascade with zx feedback.Rmd` = "Cascade/Cascade with gainuf different perturbations.Rmd",
Hub.Rmd = "Hub/Hub.Rmd",
`Hub in cascade.Rmd` = "HubInCascade/HubInCascade.Rmd",
Phosphatase.Rmd = "Phosphatase/Phosphatase.Rmd",
Prabakaran.Rmd = "Prabakaran/Prabakaran.Rmd",
`Prabakaran_model_com_spec.nb` = "Prabakaran/Mathematica/Prabakaran_model_com_spec.nb",
`Cascade-noise.Rmd` = "Cascade/Noise/Cascade-Noise.Rmd",
`Cascade-noise-3replicates` = "Cascade/Noise3replicates/Cascade-Noise-3replicates.Rmd",
`Cascade-noise-3replicates_many_alphas` = "Cascade/Noise3replicates/manyalphas/Cascade-Noise-3replicates-manyalphas.Rmd"
)
# copy each script to its destination name (.x = source path, .y = dest name)
iwalk(file_index, ~file.copy(.x, paste0(dir_final, "/Scripts/",.y), overwrite = T))
# rebuild the MRAr source tarball and copy it alongside the scripts
setwd("~/Promotion/Projects/MRA/")
system("R CMD build MRAr")
setwd("~/Promotion/Projects/MRA/")
file.copy("MRAr_0.1.0.tar.gz", paste0(dir_final, "MRAr_0.1.0.tar.gz"), overwrite = T)
# zip the assembled Code folder next to the paper
setwd("~/Promotion/Writing/Papers/2017 02 MRA optimization procedure Paper/")
system("zip -r Code.zip Code")
|
af03a261576340968b7d73f77a837a39451806ce
|
bd17e9f7aafe9273d6418a1f4710c9a0dbd69ccf
|
/make_table1.R
|
78c88b12af3c38db110df0e7727ef9d76939a3fb
|
[
"MIT"
] |
permissive
|
JohannesNE/gitt-after-braininjury
|
cbb74dfe5b8e820a318b3347e58c565cfd816e5b
|
5df353f6c4c0e8f179901ade7ce0262e01027b68
|
refs/heads/master
| 2021-04-29T21:38:26.370235
| 2018-02-15T11:42:27
| 2018-02-15T11:42:27
| 121,617,493
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,940
|
r
|
make_table1.R
|
### Generates table 1 tex file
library("xtable")
library("Gmisc")
library("Hmisc")
# Summarise one column of a data frame as a formatted statistic using the
# Gmisc describe* helpers.
# varname:  column name (string) to summarise
# digits:   number of digits passed on to the describe* helper
# type:     one of "mean", "prop", "factor", "median" or "n"
# data_df:  data frame to take the column from
#           (NOTE(review): defaults to the global df_pt -- must exist)
# use_html: render HTML (TRUE) or plain/LaTeX (FALSE) output
# Returns the formatted summary; for an unrecognised type it returns the
# string "error: Type does not exist" rather than raising an error, so
# callers must check for it.
getStats <- function(varname, digits=0, type = "mean", data_df=df_pt, use_html = FALSE){
if(type == "mean"){
return(describeMean(data_df[[varname]],
html = use_html,
digits = digits,
plusmin_str = ""
))
}
if(type == "prop"){
return(describeProp(data_df[[varname]],
html = use_html,
digits = digits
))
}
if(type == "factor"){
return(describeFactors(data_df[[varname]],
html = use_html,
digits = digits
))
}
if(type == "median"){
return(describeMedian(data_df[[varname]],
html = use_html,
digits = digits
))
}
# "n" branch: a plain observation count, named "n" (returned implicitly
# as the value of the if/else, since it is the function's last statement)
if(type == "n"){
counts <- length(data_df[[varname]])
names(counts) <- "n"
counts
}
else return("error: Type does not exist")
}
#Vectorized version of getStats(). The second argument lists the function
#arguments that are vectorized (recycled in parallel across calls).
vec_getStats <- Vectorize(getStats, c("varname", "type", "digits"))
## Build a named list of formatted summary entries from parallel vectors of
## variable names and summary types. `...` forwards additional arguments
## (e.g. use_html, digits) to vec_getStats().
make_list_data <- function(var_vector, type_vector, name_vector = var_vector,
                           use_html = FALSE, ...){
  stats <- list()
  stats[name_vector] <- vec_getStats(var_vector,
                                     type = type_vector, use_html = use_html,
                                     ...)
  # matrices are easier to stack/print than the raw describe* output
  stats <- lapply(stats, as.matrix)
  if (!use_html) {
    return(stats)
  }
  # HTML output: every entry needs the same column name so that
  # mergeDesc() can combine them
  lapply(stats, function(m) {
    colnames(m) <- "Statistics"
    m
  })
}
# Table 1 layout for the patient group (pt): columns, display names and
# summary type per variable (parallel vectors).
vars_in_table_pt <- c("anon_id", "sex", "age", "days_since_inj", "fim", "inj_cat",
"use_laxative", "gitt")
names_in_table_pt <- c("Total", "Sex", "Age", "Days since injury", "FIM score",
"Type of injury", "Laxative use", "Total GITT")
types_in_table_pt <- c("n","factor", "median", "median", "median",
"factor", "factor", "mean")
# plain/LaTeX version of the patient summaries
table_data_pt <- make_list_data(vars_in_table_pt,
types_in_table_pt,
names_in_table_pt,
data_df = df_pt,
digits = c(0, 0, 1, 0, 0, 0, 0, 2),
use_html = FALSE)
# HTML version of the same summaries
table_data_html_pt <- make_list_data(vars_in_table_pt,
types_in_table_pt,
names_in_table_pt,
data_df = df_pt,
digits = c(0, 0, 1, 0, 0, 0, 0, 2),
use_html = TRUE)
# Control group (cnt) layout: "sex" appears twice on purpose -- once as a
# plain count ("n" -> "Total") and once as a factor breakdown ("Sex").
vars_in_table_cnt <- c("sex", "sex", "age", "use_laxative", "gitt")
names_in_table_cnt <- c("Total", "Sex", "Age","Laxative use", "Total GITT")
types_in_table_cnt <- c("n","factor", "median", "factor", "mean")
table_data_cnt <- make_list_data(vars_in_table_cnt,
types_in_table_cnt,
names_in_table_cnt,
data_df = df_cnt,
digits = c(0,0,1,0,2),
use_html = FALSE)
table_data_html_cnt <- make_list_data(vars_in_table_cnt,
types_in_table_cnt,
names_in_table_cnt,
data_df = df_cnt,
digits = c(0,0,1,0,2),
use_html = TRUE)
|
eef84ca7c22f94a172dd5a93623d3d0b047bad93
|
c6d41e51bbfe2669c4fbdc916949bba9bb62cf30
|
/R/summary.R
|
eb3817eb0a0c9d1effc0f8f217299b453757b331
|
[] |
no_license
|
DonjetaR/blm
|
2a88a0adcc15ea90e7a3615209d435d58beccf88
|
f8cb30e86fb79416fc951ecf0c87cff294e2d9ca
|
refs/heads/master
| 2020-12-24T18:12:44.011365
| 2017-01-16T03:31:10
| 2017-01-16T03:31:10
| 76,725,862
| 0
| 0
| null | 2016-12-17T13:33:58
| 2016-12-17T13:33:58
| null |
UTF-8
|
R
| false
| false
| 496
|
r
|
summary.R
|
#' Print a report for a Bayesian linear model fit.
#'
#' Writes the call, coefficients, residuals and deviance of a fitted model
#' to the console, one labelled section each.
#'
#' @param x A fitted model object exposing a `func_call` element and working
#'   `coefficients()`, `residuals()` and `deviance()` accessors.
#' @param ... Currently unused.
#'
#' @export
summary <- function(x, ...){
  # helper: print a section header followed by the printed value
  show_section <- function(header, value){
    cat(header)
    print(value)
  }
  show_section("\nCall:\n", x$func_call)
  show_section("\nCoefficients:\n", coefficients(x))
  show_section("\nResiduals:\n", residuals(x))
  show_section("\nDeviance:\n", deviance(x))
}
|
a4d75c5c11c9d680a7be6129f1bc32a6c2f9156b
|
bc882e24dd0a08aa93b9a5faa968e1016674d6a2
|
/man/modeltime_wfs_bestmodel.Rd
|
16348e1e04f84841da83d7ee282d785bacb3f93d
|
[
"MIT"
] |
permissive
|
dedenistiawan/sknifedatar
|
350ecb9725f8819914a1635f306a9c34bb97a167
|
af29fe7e755ba926f4c6203bcb936dc225813310
|
refs/heads/master
| 2023-08-15T23:24:43.467326
| 2021-07-18T16:35:20
| 2021-07-18T16:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,152
|
rd
|
modeltime_wfs_bestmodel.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modeltime_wfs_bestmodel.R
\name{modeltime_wfs_bestmodel}
\alias{modeltime_wfs_bestmodel}
\title{Modeltime best workflow from a set of models}
\usage{
modeltime_wfs_bestmodel(
.wfs_results,
.model = NULL,
.metric = "rmse",
.minimize = TRUE
)
}
\arguments{
\item{.wfs_results}{a tibble generated from the \code{modeltime_wfs_fit()} function.}
\item{.model}{string or number, It can be supplied as follows: “top n,” “Top n” or “tOp n”, where n is the number
of best models to select; n, where n is the number of best models to select; name of the
workflow or workflows to select.}
\item{.metric}{metric to get best model from ('mae', 'mape','mase','smape','rmse','rsq')}
\item{.minimize}{a boolean indicating whether to minimize (TRUE) or maximize (FALSE) the metric.}
}
\value{
a tibble containing the best model based on the selected metric.
}
\description{
get best workflows generated from the \code{modeltime_wfs_fit()} function output.
}
\details{
the best model is selected based on a specific metric ('mae', 'mape','mase','smape','rmse','rsq').
The default is to minimize the metric. However, if the model is being selected based on rsq
minimize should be FALSE.
}
\examples{
library(dplyr)
library(earth)
data <- sknifedatar::data_avellaneda \%>\% mutate(date=as.Date(date)) \%>\% filter(date<'2012-06-01')
recipe_date <- recipes::recipe(value ~ ., data = data) \%>\%
recipes::step_date(date, features = c('dow','doy','week','month','year'))
mars <- parsnip::mars(mode = 'regression') \%>\%
parsnip::set_engine('earth')
wfsets <- workflowsets::workflow_set(
preproc = list(
R_date = recipe_date),
models = list(M_mars = mars),
cross = TRUE)
wffits <- sknifedatar::modeltime_wfs_fit(.wfsets = wfsets,
.split_prop = 0.8,
.serie=data)
sknifedatar::modeltime_wfs_bestmodel(.wfs_results = wffits,
.metric='rsq',
.minimize = FALSE)
}
|
ec1bade4cb292e29b738baff8e7dfb243bcfb113
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.machine.learning/man/translate_list_terminologies.Rd
|
05f1e5050e5d1261a4ef86ebbf4f7d2c5f932695
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 787
|
rd
|
translate_list_terminologies.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/translate_operations.R
\name{translate_list_terminologies}
\alias{translate_list_terminologies}
\title{Provides a list of custom terminologies associated with your account}
\usage{
translate_list_terminologies(NextToken = NULL, MaxResults = NULL)
}
\arguments{
\item{NextToken}{If the result of the request to ListTerminologies was truncated, include
the NextToken to fetch the next group of custom terminologies.}
\item{MaxResults}{The maximum number of custom terminologies returned per list request.}
}
\description{
Provides a list of custom terminologies associated with your account.
See \url{https://www.paws-r-sdk.com/docs/translate_list_terminologies/} for full documentation.
}
\keyword{internal}
|
1304c6973b0ca8a7aeed606c0409af5e84ede3da
|
a2c19b12165936b6f38edcdc657cf64e985becf3
|
/man/panorama.Rd
|
69e01ee4accf303aedfef3773bcc6e77b41015a5
|
[] |
no_license
|
cran/vetools
|
fcfb882851ac667562bcee832cf860619ac09b73
|
945e115cc46ee4ee07684d2535d4ea60d1dccb5e
|
refs/heads/master
| 2021-03-12T21:55:17.754628
| 2013-08-01T00:00:00
| 2013-08-01T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,153
|
rd
|
panorama.Rd
|
\name{panorama}
\alias{panorama}
\alias{panomapa}
\title{
Overview of a \code{collection} of stations
}
\description{
These functions present an overview of the data quality for a
\code{collection} of meteorological stations in a temporal or
spatial perspective.
}
\usage{
panorama(collection, main, cut, ylab.push.factor = 10, cut.col = "darkred",
cut.lty = 1, cut.lwd = 2, col = "RoyalBlue", col.ramp = c("red",
"pink", "blue"), col.line = "gray30", mar = c(5, 4 +
ylab.push.factor, 3, 2), cex.axis = 0.8, cex.yaxis = 0.7,
xlab = "Year", color.by.data = FALSE, ...)
panomapa(collection, main, axis = TRUE, xlab = "Long",
ylab = "Lat", lab.col = "black", bg = NA, map.bg = NA,
map.col = "black", col.ramp = c("Green3", "darkorange1",
"red"), arrow.cex = 4.5, arrow.plot = TRUE,
pt.col = rgb(0, 0, 0, 0.75), pt.cex = 4.5, pt.pch = 21,
leg.pt.bg = pt.bg, leg.bg = NA, leg.title = "Lengevity\n(years)",
leg.offset = c(0, 0), leg.y.intersp = 1.75)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{arrow.cex}{Magnification passed to \code{arrow.plot}, defaults to 4.5}
\item{arrow.plot}{Logical flag to indicate if to call \code{arrow.plot}, defaults to TRUE.}
\item{axis}{Logical flag to indicate if to plot the axes, defaults to TRUE}
\item{bg}{Background color for the map, defaults to NA}
\item{cex.axis}{Magnification for axis, defaults to 0.8}
\item{cex.yaxis}{Magnification for y-axis, defaults to 0.7}
\item{col}{\code{col} from \code{par}, defaults to "RoyalBlue"}
\item{col.line}{Color for lines, defaults to "gray30"}
\item{col.ramp}{Color for the color ramp, defaults to \code{c("red", "pink", "blue")} for \code{panorama} and to \code{c("Green3", "darkorange1", "red")} for \code{panomapa}}
\item{color.by.data}{Logical flag to use \code{collection$data} to color
the plotted boxes. This implies that all elements of \code{data} are between zero and one.
Defaults to FALSE.}
\item{collection}{An collection of stations. Object of class \code{Catalog}}
\item{cut}{A concatenation of dates for which to trace a vertical line}
\item{cut.col}{Color to the \code{cut} line(s), defaults to "darkred". Can be a list}
\item{cut.lty}{Line type for the \code{cut} line(s), defaults to 1. Can be a list}
\item{cut.lwd}{Line width for the \code{cut} line(s), defaults to 2. Can be a list}
\item{lab.col}{Color for the labels, defaults to "black"}
\item{leg.bg}{Legend box background color, defaults to NA}
\item{leg.offset}{Legend offset, defaults to \code{c(0, 0)}}
\item{leg.pt.bg}{Legend points background color, defaults to \code{pt.bg}}
\item{leg.title}{Legend title, defaults to "Lengevity\\n(years)"}
\item{leg.y.intersp}{Legend y interspace, is passed to \code{legend} and defaults to 1.75}
\item{main}{Main title}
\item{map.bg}{Map background color, defaults to NA}
\item{map.col}{map lines color, defaults to "black"}
\item{mar}{\code{par()$mar}, defaults to \code{c(5, 4 + ylab.push.factor, 3, 2)}}
\item{pt.cex}{Points magnification in map, defaults to 4.5}
\item{pt.col}{Points color in map, defaults to \code{rgb(0, 0, 0, 0.75)}}
\item{pt.pch}{Points \code{pch} in map, defaults to 21}
\item{xlab}{for \code{panorama} defaults to "Year" and for \code{panomapa} to "Long".}
\item{ylab}{y-axes label, defaults to "Lat"}
\item{ylab.push.factor}{Factor in which to push the labels in \code{panorama}, defaults to 10}
\item{...}{Any valid parametres for \code{par()}}
}
\value{
These functions do not return anything.
}
%\references{
%% ~put references to the literature/web site here ~
%}
\author{
A.M. Sajo-Castelli
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\pkg{\link{vetools}},
\link[=CatalogConvention]{Catalog Convention},
\link[=summary.Catalog]{summary}.
}
\examples{\dontrun{
panorama(collection)
collection
panomapa(collection)
plot(collection)}}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ panorama }
\keyword{ panomapa }
\keyword{ overview }
% __ONLY ONE__ keyword per line
|
3acd3f6f87f5b24c92925e5d6949383e739f44c7
|
e0d5629bb3d960b87b710e13b34d8f3be212d433
|
/University/Brexit Investigation/Scripts/Solutionv3.R
|
06d31e44e43aa1b2920520b8eb692d26dbc444da
|
[] |
no_license
|
michaelfilletti/myrepository
|
3af31a48a9977c6fdac8d60c33e671c74245b0f2
|
48d9cb7b8800d08b7f508f5b8663b3049d98029d
|
refs/heads/master
| 2021-07-15T05:39:12.034572
| 2020-06-08T08:48:53
| 2020-06-08T08:48:53
| 163,402,804
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,630
|
r
|
Solutionv3.R
|
#----------------------------------------------------------------------
#This script involves two main steps:
#Step 1: Extracts the data from the FOREX files & News files
#Step 2a: Cleaning and merging the ToM data and the currency data
#Step 2b: Testing whether weekends should be included
#----------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------------------------
#Installing relevant packages and setting the working directory
#--------------------------------------------------------------------------------------------------------------------------------------------
#NOTE: WHEN RUNNING ON UBUNTU 18.04
#Installing the following libraries may require XML2 and GSL
library(gtools) #Installed
library(chron) #Installed
library(ggplot2) #Installed
library(stringr) #Installed
library(data.table) #Installed
library(tidyr) #Installed
library(xts) #Installed - zoo package will be installed with it
library(tm) #Should install and call NLP with it
library(dplyr)
library(tidytext)
library(topicmodels)
#library(Quandl) #Required for the Quandl functions, however returning some issues on Ubuntu
#library(tidyverse) #Giving serious issues on my version of Ubuntu. Required to run complete() function.
#library(erer) #Is this needed?
#library(rprojroot) #Not needed
#library(here) #Not needed
#Setting the working directory to be that of the file
# NOTE(review): sys.frame(1)$ofile is only set when this file is run via
# source(); under Rscript or interactive paste this line errors.
setwd(dirname(sys.frame(1)$ofile)) #Should work for Ubuntu
#----------------------------------------------------------------------
#1. Data Extraction
#----------------------------------------------------------------------
#Extract Financial Data
# (CSV files without an extension under Data/; one per currency pair)
GBPEUR=read.csv('Data/GBPEUR')
EURUSD=read.csv('Data/EURUSD')
GBPUSD=read.csv('Data/GBPUSD')
XRPUSD=read.csv('Data/XRPUSD')
BTCUSD=read.csv('Data/BTCUSD')
LTCUSD=read.csv('Data/LTCUSD')
#Selecting relevant currency data
#Set the value for each dataset
# NOTE(review): only GBPUSD/XRPUSD/BTCUSD/LTCUSD are reduced to Date/Value
# here; GBPEUR and EURUSD keep their raw columns -- confirm intended.
GBPUSD = GBPUSD[,c("Date","Settle")]
colnames(GBPUSD)=c("Date","Value")
XRPUSD = XRPUSD[,c("Date","Mid")]
colnames(XRPUSD)=c("Date","Value")
BTCUSD = BTCUSD[,c("Date","Mid")]
colnames(BTCUSD)=c("Date","Value")
LTCUSD = LTCUSD[,c("Date","Mid")]
colnames(LTCUSD)=c("Date","Value")
#Extracting Brexit Data
#This data is already from 1 January onwards so we do not need to set it
BrexitData=read.csv("Data/BrexitNewsData.csv")
#Cleaning any further punctuation
BrexitData$Tokens=stringr::str_replace_all(BrexitData$Tokens,"[^a-zA-Z\\s]","") #Keep only letters and whitespace
#----------------------------------------------------------------------
#2a. Data Cleaning & Transforming
#----------------------------------------------------------------------
#Cleaning & Transforming News Article Data
#Count number of times Brexit is mentioned in each article
BrexitDataToM=BrexitData[BrexitData$Source=='Times of Malta',]
#Searching the number of times per row the word Brexit was mentioned
# NOTE(review): matches lowercase "brexit" only -- assumes Tokens were
# lower-cased upstream; confirm.
RawBrexitCounter=data.frame("Date"=BrexitDataToM$Date,"Count"=str_count(BrexitDataToM$Tokens, "brexit"))
#Grouping data and summing counts by date
DT <- as.data.table(RawBrexitCounter)
BrexitCounter =data.frame(DT[ , lapply(.SD, sum), by = "Date"])
#Ordering by date
BrexitCounter <- BrexitCounter[order(BrexitCounter$Date),]
#Converting to date type from string
BrexitCounter $Date=as.Date(BrexitCounter $Date)
#Filling empty dates (for days when we have no Brexit articles published)
# (left-join against the full calendar between min and max dates)
BrexitCounter<-merge(data.frame(Date= as.Date(min(BrexitCounter $Date):max(BrexitCounter $Date),"1970-01-01")),
BrexitCounter, by = "Date", all = TRUE)
BrexitCounter[is.na(BrexitCounter)] <- 0
colnames(BrexitCounter)=c("Date","WordCount")
#Alternative to carry out the above is to make use of the tidyverse/dplyr package, however this has some trouble on Linux
#BrexitCounter = BrexitCounter %>% complete(Date = seq(Date[1], Sys.Date(), by = "1 day"),fill = list(Count = 0))
#Removing index (affected due to sorting)
rownames(BrexitCounter) <- NULL
#Cleaning & Transforming Financial Data (Function since we want to apply this to multiple currencies)
# Merge one currency's price series with the daily Brexit word counts and
# the daily article counts, restricted to dates from 2016-01-01 onwards.
# dataset: data frame with at least Date and the price columns.
# Returns the merged data frame (invisibly, since the last statement is an
# assignment).
# NOTE(review): relies on the globals BrexitCounter and BrexitDataToM built
# by the script above -- confirm before reusing elsewhere.
CurrencyDataClean=function(dataset){
#Order data by date
dataset <- dataset[order(dataset$Date),]
#Select data January 2016 onwards (when news articles first began to appear)
dataset <- dataset[as.Date(dataset $Date)>=as.Date('2016-01-01'),]
#Converting columns to date
#BrexitCounter$Date=as.Date(BrexitCounter$Date) #Is this necessary?
dataset$Date= as.Date(dataset$Date)
#Obtaining the number of articles per day
ArticleCountDF=data.frame(table(BrexitDataToM$Date))
colnames(ArticleCountDF)=c("Date","ArticleCount")
ArticleCountDF $Date =as.Date(ArticleCountDF $Date)
#Filling empty dates (for days when we have no Brexit articles published)
ArticleCountDF <-merge(data.frame(Date= as.Date(min(ArticleCountDF $Date):max(ArticleCountDF $Date),"1970-01-01")),
ArticleCountDF, by = "Date", all = TRUE)
ArticleCountDF[is.na(ArticleCountDF)] <- 0
#Alternative to carry out the above is to make use of the tidyverse/dplyr package, however this has some trouble on Linux
#ArticleCountDF = ArticleCountDF %>% complete(Var1 = seq(Var1[1], Sys.Date(), by = "1 day"),fill = list(Count = 0)) #Setting days with no articles to 0
#Merging the final data
FinalData=merge(x = merge(x = dataset, y = data.frame(BrexitCounter), by.x = "Date",by.y="Date"), y = data.frame(ArticleCountDF), by = "Date")
}
#FinalData=CurrencyDataClean(GBPEUR)
#----------------------------------------------------------------------
#2b. Should weekends be included?
#----------------------------------------------------------------------
#SCENARIO A - EXCLUDING WEEKENDS
#Average number of times Brexit is mentioned on weekend days
#(is.weekend() comes from the chron package loaded above)
wkndcount=mean(BrexitCounter $WordCount[is.weekend(BrexitCounter $Date)])
print('Average Times Brexit is mentioned during weekend')
print(wkndcount)
#SCENARIO B - INCLUDING WEEKENDS
#Average number of times Brexit is mentioned on weekdays
weekcount=mean(BrexitCounter $WordCount[!is.weekend(BrexitCounter $Date)])
print('Average Times Brexit is mentioned during weekday')
print(weekcount)
#The results indicate that there are a fairly high amount of articles released over the weekend (similar to during the week). Since these will influence correlation (as whatever happens with the number of articles the rate will remain the same), we decide to use scenario A and EXCLUDE weekends.
|
127a409bfea645a414eef25270b65df6b0c9ccc2
|
25c63cd1d9ef691adfae8a1c6a1c752e1aef1aa2
|
/R/spectrum.arma.R
|
bf6e6e1e0bad5f09c6c798d234b937f8f69ef201
|
[] |
no_license
|
cran/afmtools
|
068da611bc7318d6160a0e251f84300867bff0b2
|
10801682a6c0419a23f28279e5fca8f288bd25f1
|
refs/heads/master
| 2020-12-30T10:36:50.591497
| 2011-04-15T00:00:00
| 2011-04-15T00:00:00
| 17,718,425
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 739
|
r
|
spectrum.arma.R
|
# Spectral density of an ARMA process.
#
# Args:
#   ar:       vector of AR coefficients (0 or NA means "no AR part").
#   ma:       vector of MA coefficients (0 or NA means "no MA part").
#   sd.innov: standard deviation of the innovations.
#
# Returns, invisibly, a function of lambda evaluating
#   sd.innov^2 / (2*pi) * |Theta(e^{-i lambda}) / Phi(e^{-i lambda})|^2.
spectrum.arma <- function(ar = 0, ma = 0, sd.innov = 1) {
  if (is.na(ar[1])) {
    ar <- 0
  }
  if (is.na(ma[1])) {
    ma <- 0
  }
  n.ar <- length(ar)
  n.ma <- length(ma)
  if (n.ar == 1 && ar == 0) n.ar <- 0
  if (n.ma == 1 && ma == 0) n.ma <- 0
  # Parameter sanity check (helper defined elsewhere in this package).
  M <- check.parameters.arfima(d = 0, ar = ar, ma = ma)
  if (!M$Total.OK) cat("WARNING: Model is not OK.")
  ar.poly <- c(1, -ar)
  ma.poly <- c(1, ma)
  z.ar <- polyroot(ar.poly)  # roots computed as in the original (not used below)
  z.ma <- polyroot(ma.poly)
  Phi.z <- as.function(polynomial(coef = ar.poly))
  Theta.z <- as.function(polynomial(coef = ma.poly))
  # Transfer function; a pure AR model has no MA numerator.
  k <- if (n.ma == 0) {
    function(lambda) 1 / Phi.z(exp(-1i * lambda))
  } else {
    function(lambda) Theta.z(exp(-1i * lambda)) / Phi.z(exp(-1i * lambda))
  }
  spec <- function(lambda) (Mod(k(lambda))^2) * sd.innov^2 / (2 * pi)
  invisible(spec)
}
|
5fa39dcd845d173c096aa4649b3c8de9c5155e0a
|
386abca575985e19b96c754d3b379b2b37840e03
|
/LongestFasta.r
|
64e6cd94e81d9a64547d1ba2da9d9895ac3ee0f7
|
[] |
no_license
|
ebuchbe/Elisa_Montse
|
76a0ed0ce3943df8220cebe077311b05732d4657
|
61503160b81addd5192e6b2dea5eb6662ab6b970
|
refs/heads/master
| 2021-01-18T17:04:27.166434
| 2015-08-05T14:01:00
| 2015-08-05T14:01:00
| 39,561,969
| 0
| 0
| null | 2015-07-23T10:50:23
| 2015-07-23T10:50:22
| null |
UTF-8
|
R
| false
| false
| 1,080
|
r
|
LongestFasta.r
|
#install.packages("seqinr") ###this package is important for handling fasta files
#library("seqinr") ### NOTE(review): seqinr must be attached for read.fasta() -- confirm
# Report the longest sequence in a fasta file given on the command line.
#
# BUG FIX: the original did `result[which.max(result[[2]])]`, but result[[2]]
# is a single length value, so which.max() always returned 1 and the FIRST
# sequence was reported regardless of its length.
# BUG FIX: `args` was never defined; populate it from commandArgs().
args <- commandArgs(trailingOnly = TRUE)
input <- args[1]            ##this is the input fasta file
fasta <- read.fasta(input)  ##this reads the input fasta file (list of sequences)
# Length of every sequence, without growing vectors inside a loop.
seq_lengths <- vapply(fasta, length, integer(1))
longest_idx <- which.max(seq_lengths)  # index of the longest sequence
maximum <- fasta[longest_idx]          # the longest sequence (kept as a named sublist)
longest <- seq_lengths[longest_idx]    # its length in nucleotides
print (c("the longest sequence is", names(maximum), "with a length of", longest, "nucleotides"))
|
11b63ed679d3df17941a0ab1cc2a8c9c2e294ce9
|
7b17bffea97d078734c2dbd05b11be7d0428b67b
|
/R/cumlativeMonthlyDownloadStats/registryConnection.R
|
62f77fb4108b2e72d22f5e9ba3e4f6cfbc19e9cd
|
[
"Apache-2.0"
] |
permissive
|
jhnwllr/analytics
|
d8f9d2d417fe0706e077b4112bf4bc653dde40e5
|
fad76d4b8bb3279616152cddf7594543b4598aeb
|
refs/heads/master
| 2021-06-13T21:16:27.979552
| 2020-11-17T08:43:21
| 2020-11-17T08:43:21
| 311,628,171
| 0
| 0
|
Apache-2.0
| 2020-11-10T10:49:47
| 2020-11-10T10:49:47
| null |
UTF-8
|
R
| false
| false
| 300
|
r
|
registryConnection.R
|
# Open a connection to the GBIF registry database with sensible defaults.
#
# Args:
#   pw: database password (required).
#   user, host, port, dbname: connection settings, defaulting to the
#     production registry.
#
# Returns a live RPostgres connection; the caller must disconnect it.
registryConnection <- function(
  pw,
  user="jwaller",
  host="pg1.gbif.org",
  port="5432",
  dbname="prod_b_registry") {
  dbConnect(
    RPostgres::Postgres(),
    host = host,
    port = port,
    dbname = dbname,
    user = user,
    password = pw)
}
|
67eeed612fc4fd506dbb41075cc5698d3dea599f
|
6febf32c916d5734ff34df50369a0bc73b9178f2
|
/R/wrapper.prepare.grid.R
|
d8432699239d15620ceda54924d33e9236e51ff4
|
[] |
no_license
|
asiono/rein
|
e190efbb0579685608bd48a171504016b2d0b7fe
|
ff837b696edc083a6ecf0c9435f1d8825a9f0c5d
|
refs/heads/master
| 2021-05-12T07:21:37.391867
| 2018-03-08T13:48:05
| 2018-03-08T13:48:05
| 117,106,249
| 0
| 0
| null | 2018-01-12T09:40:12
| 2018-01-11T13:47:56
|
R
|
UTF-8
|
R
| false
| false
| 4,073
|
r
|
wrapper.prepare.grid.R
|
################################################################################
#' @title wrapper.prepare.grid
#' @description runs all necessary functions for preparing and executing a load flow calculation from SimTOOL.
#' @param grid List containing information of grid to be optimized.
#' @param check logical, to check if the structure of the grid allows optimization
#' @param U_set Grid's lower voltage level
#' @param oltc.trigger indication for OLTC transformator usage.
#' @param verbose Verbosity level. value greater than zero to display step by step of reinforcement
#' @return grid after load flow calculation
#' @export
################################################################################
wrapper.prepare.grid <- function(grid, check = F, U_set = NULL, oltc.trigger = F, verbose = 0){
  # Discard stale results from a previous run before recomputing.
  grid$current <- NULL
  grid$transm_power <- NULL
  if (verbose > 0) print('################# processing function: check_reinforcement #################')
  if (check) check_reinforcement(lines = grid$lines)
  if (verbose > 0) print('################# processing function: replace_line_types #################')
  grid$lines <- replace_line_types(lines = grid$lines, verbose = verbose)
  if (verbose > 0) print('################# processing function: replace_trafo_types #################')
  grid$lines <- replace_trafo_types(lines = grid$lines, verbose = verbose)
  if (verbose > 0) print('################# processing function: convert.lines #################')
  grid <- convert.lines(grid = grid, verbose = verbose )
  if (verbose > 0) print('################# processing function: create.admittance #################')
  grid <- create.admittance(grid = grid, verbose = verbose )
  if (verbose > 0) print('################# processing function: create.power #################')
  if (is.null(grid$S_cal)) {
    # No previous solution available: start from zero power at every node of
    # the reduced admittance matrix.
    names_actual <- rownames(grid$Y_red)
    actual <- rep(0, length(names_actual))
    names(actual) <- names_actual
    warm = T
  } else {
    warm = F
    #change the power into kilo-Watt
    actual <- grid$S_cal*3/1000
  }
  # NOTE(review): `warm` is assigned above but the solve.LF() calls below are
  # hard-coded to warm = F -- confirm whether the flag was meant to be
  # passed through.
  grid <- create.power(grid, verbose = verbose, actual = actual)
  if (verbose > 0) print('################# processing function: solve.LF #################')
  #add the parallel lines into U_cal matrice for calculation
  if (any(grepl('_p', grid$cal_node))) {
    add_U_cal <- matrix(0:0, length(c(grid$cal_node[grepl('_p', grid$cal_node)])), 1,
                        dimnames = list(c(grid$cal_node[grepl('_p', grid$cal_node)])))
    grid$U_cal <- rbind(grid$U_cal, add_U_cal)
  }
  grid <- solve.LF(grid = grid, warm = F , save = F, fast = F, verbose = verbose)
  if (any(grepl('OLTC', grid$lines$model)) & oltc.trigger == T) {
    #need to add trafo in the element because solve.LF in SimTOOL requires checking it
    grid$lines$element[which(grid$lines$type == 'trafo')] <- as.character('trafo')
    #create controller list
    grid$ctr <- list()
    #define controller entry
    grid$ctr[[1]] <- list()
    grid$ctr[[1]]$mode <- "OLTC"
    #connection nodes
    grid$ctr[[1]]$hv_node <- grid$lines$begin[which(grid$lines$type == 'trafo')]
    grid$ctr[[1]]$lv_node <- grid$lines$end[which(grid$lines$type == 'trafo')]
    grid$ctr[[1]]$ctr_node <- grid$lines$end[which(grid$lines$type == 'trafo')]
    #tap settings
    grid$ctr[[1]]$pos_taps <- 6 #voltage up regulation
    grid$ctr[[1]]$neg_taps <- 6 #voltage down regulation
    # pos_taps+neg taps + 1(0-tap) < [5,7,9]
    grid$ctr[[1]]$curr_tap <- 0
    grid$ctr[[1]]$tap_size <- 1.5 #percentual of U_set usual [1.5%, 2% or 2.5%]
    #[0.8 ... 2.5%] according to: On-Load Tap-Changers for Power Transformers A Technical Digest, MR Publication
    #lead voltage
    grid$ctr[[1]]$U_set <- U_set
    grid$ctr[[1]]$deadband <- 0.6 #percentual of U_set 0.6
    grid$ctr[[1]]$U_min <- U_set*(1 - 0.08)
    grid$ctr[[1]]$U_max <- U_set*(1 + 0.08)
    grid$ctr[[1]]$verbose <- 2
    grid <- solve.LF(grid = grid, meth = "G", ctr = c("OLTC"), warm = F, verbose = 0)
  }
  return(grid)
}
|
dee7dbc2318a30ff013eca438309a774bfb96005
|
ee32e45955352d97186d9cd4e329be90f78c11c3
|
/scripts/plotly_stateaves.R
|
ec291de691b214af97de377af980bc15f6263e1d
|
[
"MIT"
] |
permissive
|
r-introtodatascience/sample_repo
|
a276aebc232f550929cc990777585bfad6bc7fd1
|
bbb5a2a01ff4a54fa1ef0f5c2d68afba0a47aa35
|
refs/heads/master
| 2023-06-23T13:04:34.445954
| 2021-07-15T15:20:11
| 2021-07-15T15:20:11
| 386,334,418
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,232
|
r
|
plotly_stateaves.R
|
#Interactive plots of state unemployment averages over time.
# clean console and environment
cat("\014")
rm(list=ls())
### Put in your own folder path here.
### BUG FIX: the original line was `workingdir<-#"Path_to_your_main_folder"`,
### leaving the assignment without a right-hand side; R then parsed the NEXT
### expression (the source() call below) as the value assigned to
### `workingdir`. Keep the placeholder on the line so the assignment is
### complete, and replace it with your real path.
workingdir <- "Path_to_your_main_folder"
# for example for me it was workingdir <- "C:/Users/Documents/GitHub/Rcourse/"
#load in folder paths
source(paste0(workingdir, "workingdir.R"))
#load required packages; library() errors on a missing package,
#whereas require() only returns FALSE and lets the script continue.
library(stringr)
library(dplyr)
library(ggplot2)
library(tidyr)
library(plotly)
##############################################################
#load in unemployment data
load(paste0(folder_processed_data, "/unemp_data.rda"))
#State abbreviation = last two characters of the county name.
county_data$state_abbr<-substr(county_data$County_name, str_length(county_data$County_name)-1, str_length(county_data$County_name))
#FIPS code 11 is Washington DC; label it explicitly.
county_data$state_abbr[county_data$State_fips=='11']<-"DC"
checkdup<-county_data %>% group_by(State_fips, state_abbr) %>% summarize(ave=mean(Unemp_rate))
#Labour-force-weighted average unemployment per state and year.
state_ave<-county_data %>% group_by(state_abbr, Year) %>% summarise(ave_unemp=sum(Unemployed)/sum(Labor_force))
pal<-rainbow(52)
#One interactive line per state; hover shows the state abbreviation.
state_ave %>%
  plot_ly(
    x = ~Year,
    y = ~ave_unemp,
    color = ~state_abbr,
    text = ~state_abbr,
    hoverinfo = "text",
    type = 'scatter',
    mode = 'lines',
    colors = ~pal,
    line =list(width=1)
  )
|
63481774cd5a9b980cb1922025e6cc1ba600beb9
|
1454621e71c58f41204543640b1a6a8069c0fec9
|
/code/phys_model/HO_parameters.R
|
83e5ac91811216c71a49ae5d7a21b71ab0236d37
|
[] |
no_license
|
SPATIAL-Lab/CombinedDynamicHOModel
|
a859f810b3b0a1ca6d0f4c8e80a4e51ed2993b35
|
20792b25e5296827846e0beb4e74cb138cf8e06e
|
refs/heads/master
| 2022-12-17T22:04:26.298227
| 2020-09-24T18:01:19
| 2020-09-24T18:01:19
| 238,896,060
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,706
|
r
|
HO_parameters.R
|
############################
####### HO PARAMETERS ######
############################
## used by phys/food model (HO) functions
# Functions to convert delta values into R values and vice versa # constants but needed here to convert d18Oo2 into ROo2
# Reference isotope ratios for H (2H/1H) and O (18O/16O)
# (presumably the VSMOW standard -- TODO confirm source).
RstandardH = 0.00015575
RstandardO = 0.0020052
# Convert a delta value (per mil, relative to the given standard) into an
# absolute isotope ratio R.
delta.to.R = function(delta, Rstandard) {
  Rstandard * (1 + delta / 1000)
}
# Convert an absolute isotope ratio R into a delta value
# (per mil, relative to the given standard).
R.to.delta = function(Rsample, Rstandard) {
  1000 * (Rsample - Rstandard) / Rstandard
}
# Waste type; can be "urea" or "uric acid"
waste_type = "uric acid"
# Proportions of macronutrients in the diet
Pcarb = 0.57
Pprot = 0.33
Pfat = 1 - Pcarb - Pprot
# Body mass [g]
M = 40
# Body temperature [degrees C]
bTc = 37
# Body temperature [K]
bTk = bTc + 273.15 # calc from 1&2 but needed here to calc BMR
# Proportion of food mass that is in liquid water form
Pw = 0.6
# Constants for the calculation of the basal metabolic rate [W = J s^-1]
# Normalization constant for endotherms [W (g^(3 / 4))^-1]
b0 = 2.94 * 10^8
# Elevation coefficient
elc = 0.71
# Activation energy [eV]
E = 0.63
# Boltzmann's constant [eV K^-1]
k = 8.62 * 10^-5 # constant but needed here to calc BMR
# Constants for the conversion of the basal metabolic rate into field metabolic rate [mol O2 d^-1]
# Mean ratio of field metabolic rate to basal metabolic rate
FMR_BMR = 2.91
# Constants for the calculation of the flux of vapor water out [mol H2O d^-1]
# Coefficients for the allometric relationship between body mass [g] and evaporative water loss [mol H2O d^-1] - fit to Altman & Dittmer (1968) and Crawford & Lasiewski (1986) data (datafile: EWL_mammals_birds_Altman_Crawford.csv)
aEWL = 0.009
bEWL = 0.80
BMR = b0 * M^elc * exp(-E/(k*bTk)) # Basal metabolic rate as a function of body size and temperature [W = J s^-1] # calc from 1&2 but needed here to calc O2 per prey
## Per-macronutrient constants needed to calc CF (presumably respiratory
## quotients Rq* and energy equivalents Ent* -- TODO confirm units) ##
Rqcarb = 6 / 6
Rqprot = if (waste_type == "urea") {5 / 6} else {14 / 21}
Rqfat = 16 / 23
Entcarb = 467.1
Entprot = 432.0
Entfat = 436.5
CFcarb = Rqcarb / Entcarb
CFprot = Rqprot / Entprot
CFfat = Rqfat / Entfat
CF = CFcarb*Pcarb + CFprot*Pprot + CFfat*Pfat # diet-weighted conversion factor # calc from 1&2 but needed here to calc BMR_O2
BMR_O2 = BMR * (60*60*24/1000) * CF # Basal metabolic rate expressed as [mol O2 d^-1] # calc from 1&2 but needed here to calc O2 per prey
FMR = (FMR_BMR * BMR_O2) # Field metabolic rate [mol O2 d^-1] # calc from 1&2 but needed here to calc O2 per prey
FMR = FMR *2 #multiply by 2 to rescale to data for songbirds
FMR = FMR / 24 #Scaled per hour
#FMR = FMR / 5 #scale for nighttime rate for simplicity
#(hunger growth = 1 at night)
# Hours of the day (1-24) classified as daylight vs night
Day = c(6:18)
Night = c(1,2,3,4,5,19,20,21,22,23,24)
# Proportion of body mass that is fat (reserves)
p_reserves = 0.15
# Amount of energy provided by each fat (triacylglycerol) molecule (reserve unit) [kJ g^-1]
energy_per_reserve_unit = 38
# Proportion of body mass that is water representing minimum preferred threshold
pTBW = 0.68
# H isotope fractionation associated with evaporative water loss
alphaHbw_vw = 0.937
# O isotope composition of atmospheric O2 [per mil]
d18Oo2 = 23.5
ROo2 = delta.to.R(d18Oo2, RstandardO)
# O isotope fractionation associated with the absorption of O2 in the lungs
alphaOatm_abs = 0.992
# O isotope fractionation associated with evaporative water loss
alphaObw_vw = 0.981 # value used in both Schoeller et al. and Kohn papers
# O isotope fractionation associated with the exhalation of CO2
alphaObw_CO2 = 1.038
# Proportions of carbohydrate and protein in a defatted prey sample in the lab
PLEpcarb = 0.50
PLEpprot = 0.50
# H isotopic offset between dietary (prey) carbohydrate and protein
offHpcarb_pprot = 40
# H isotopic offset between dietary (prey) protein and lipids
offHpprot_pfat = 53.42365
# O isotopic offset between dietary (prey) carbohydrate and protein
offOpcarb_pprot = 8.063528
# O isotopic offset between dietary (prey) protein and lipids
offOpprot_pfat = 6
# Proportion of keratin H routed from dietary protein
PfH = 0.60
# H isotope fractionation associated with the synthesis of keratin protein
alphaHprot = 1.002
# Proportion of keratin O routed from dietary protein
PfO = 0.19
# Proportion of follicle water derived from body water
Pbw = 0.81
# O isotope fractionation associated with carbonyl O-water interaction [per mil] - Tuned after accounting for O routing
epsOc_w = 10.8
alphaOc_w = (epsOc_w + 1000) / 1000
# Proportion of gut water derived from body water - Tuned after accounting for O routing
g1 = 0.56
# Proportion of gut water derived from drinking water - Tuned after accounting for O routing
g2 = 0.09
|
0a49a0c57435414f632ca353eaf99039afedb71d
|
77f862e8bc5d874799e5f775528b8d4d192fb127
|
/plot4.R
|
06ca926f8fbdb953b4a3bece086ac088ee66dd91
|
[] |
no_license
|
bolilla/ExData_Plotting1
|
38cf85629cee8f8620dd3ceff3a8881ee9febad4
|
24e204ad6f17b07d4c9ab3721a4026ecf1393d43
|
refs/heads/master
| 2020-02-26T16:47:02.847443
| 2015-11-08T22:08:24
| 2015-11-08T22:08:24
| 45,772,143
| 0
| 0
| null | 2015-11-08T08:05:05
| 2015-11-08T08:05:04
| null |
UTF-8
|
R
| false
| false
| 1,723
|
r
|
plot4.R
|
#Creates the FOURTH plot of the assignment (2x2 panel of power measurements)
plot4 <- function(){
  # BUG FIX: the original tested `is.null(myData)`, which throws
  # "object 'myData' not found" when the global has never been created;
  # guard with exists() first.
  if(!exists("myData") || is.null(myData)){
    myData <- getData4()
  }
  #Set 2x2 matrix for plotting; keep the old settings so they can be restored
  old.par <-par(mfrow=c(2,2))
  #Top left: global active power over time
  plot(myData$Global_active_power, type="l", ylab = "Global Active Power", xlab="", x=myData$theDate)
  #Top right: voltage over time
  plot(myData$Voltage,type="l", ylab = "Voltage", xlab="datetime", x=myData$theDate)
  #Bottom left: the three sub-metering series overlaid, scaled to the overall max
  plot(myData$Sub_metering_1,type="l", ylab = "Energy sub metering", xlab="", x=myData$theDate, ylim=c(0,max(myData$Sub_metering_1, myData$Sub_metering_2,myData$Sub_metering_3)))
  lines(myData$Sub_metering_2, x=myData$theDate,type = "l", col="red")
  lines(myData$Sub_metering_3, x=myData$theDate,type = "l", col="blue")
  legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, col = c("black", "red", "blue"),lwd=1, bty="n")
  #Bottom right: global reactive power over time
  plot(myData$Global_reactive_power,type="l", ylab = "Global_reactive_power", xlab="datetime", x=myData$theDate)
  #Restore the previous plotting parameters
  par(old.par)
}
#Downloads the household power consumption zip, reads the inner file and
#keeps only the rows whose Date matches the two requested days.
getData4 <- function(fileUrl = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", dataFile = "household_power_consumption.txt", begin = "1/2/2007", end = "2/2/2007"){
  zipPath <- tempfile()
  download.file(fileUrl, zipPath)
  powerData <- read.csv(unz(zipPath, dataFile), sep = ";", na.strings = "?")
  powerData <- powerData[powerData$Date %in% c(begin, end), ]
  #Creates column "theDate" combining the Date and Time text columns
  powerData$theDate <- strptime(paste(powerData$Date, powerData$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
  unlink(zipPath)
  powerData
}
# Render plot 4 to a PNG file with a transparent background.
png("plot4.png", bg=NA)
plot4()
dev.off()
|
d9fd1b8cf00f911cd3874b08db71cdda2a1ccf91
|
c1034056ae08c728f8318a24eb756292108ca727
|
/R/ipinfodb.R
|
79b667081d8b353710fe74d11c17254a42ecef7f
|
[] |
no_license
|
omegahat/Ripinfo
|
9776eef10b1db7fa281e94b07871448c6a4f3306
|
9e99414618a3d67097151b9a961a8fa6cfce4edd
|
refs/heads/master
| 2021-01-22T23:43:34.308508
| 2012-02-22T01:54:38
| 2012-02-22T01:54:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,737
|
r
|
ipinfodb.R
|
getIPCountry =
#
# Query the ipinfodb country endpoint for each IP address / host name.
#
# getIPCountry(c("www.omegahat.org", "128.32.135.25", "www.google.com"))
# getIPCountry(c("www.omegahat.org", "www.google.com"))
# getIPCountry(c("169.237.46.32", "128.32.135.26"))
#
function(ip, ..., curl = getCurlHandle(), stringsAsFactors = default.stringsAsFactors(),
          byHostName = grepl("[a-z]", ip))
{
   # CONSISTENCY FIX: pass multi.part and urls by name (as getIPLocation
   # does) rather than positionally, so a change to ifinfoQuery's signature
   # cannot silently rebind them.
   ifinfoQuery(ip, curl = curl, stringsAsFactors = stringsAsFactors, multi.part = FALSE, byHostName = byHostName,
               urls = c( "http://ipinfodb.com/ip_query2_country.php", "http://ipinfodb.com/ip_query_country.php"), ...)
}
getIPLocation =
#
# Query the ipinfodb location endpoint (latitude/longitude and other fields)
# for each IP address / host name.
#
# getIPLocation(c("www.omegahat.org", "128.32.135.25", "www.google.com"))
# getIPLocation(c("www.omegahat.org", "www.google.com"))
# getIPLocation(c("169.237.46.32", "128.32.135.26"))
#
function(ip, ..., curl = getCurlHandle(), stringsAsFactors = default.stringsAsFactors(),
          byHostName = grepl("[a-z]", ip))
{
  ans = ifinfoQuery(ip, curl = curl, stringsAsFactors = stringsAsFactors, FALSE, byHostName = byHostName,
                    urls = c( "http://ipinfodb.com/ip_query2.php", "http://ipinfodb.com/ip_query.php"), ...)
  # The service returns latitude/longitude as text; coerce them to numeric.
  for (fld in c("Latitude", "Longitude"))
     ans[[fld]] = as.numeric(as.character(ans[[fld]]))
  ans
}
ifinfoQuery =
#
# Common worker: splits the ips into host names and dotted-quad addresses
# and handles the two groups separately, breaks each group into batches of
# 25 or fewer (the service limit), issues the HTTP requests and merges the
# per-batch results back into one data frame.
function(ip, curl = getCurlHandle(), stringsAsFactors = default.stringsAsFactors(),
         multi.part = FALSE, byHostName = grepl("[a-z]", ip), urls, ...)
{
  # Heterogeneous input: process host names and raw addresses separately and
  # stack the two result sets.
  if(length(byHostName) > 1 && length(table(byHostName)) > 1) {
    h = ifinfoQuery(ip[byHostName], ..., curl = curl, stringsAsFactors = FALSE, multi.part = TRUE, byHostName = TRUE, urls = urls)
    # BUG FIX: the original delegated this branch to getIPCountry(), whose
    # hard-coded country urls plus the stray positional FALSE then fell into
    # `...` and leaked into the eventual getForm() request, and the
    # address subset was always looked up against the country endpoint even
    # when the caller (e.g. getIPLocation) requested other urls. Recurse
    # into ifinfoQuery directly instead.
    i = ifinfoQuery(ip[!byHostName], ..., curl = curl, stringsAsFactors = FALSE, multi.part = TRUE, byHostName = FALSE, urls = urls)
    ans = rbind(h, i)
    return(if(!stringsAsFactors)
             as.data.frame(lapply(ans, as.character), stringsAsFactors = FALSE)
           else
             ans)
  }
  multi = length(ip) > 1
  # limit of 25 per call so group the ip values into groups of 25 or less.
  if(multi && length(ip) > 25) {
    vals = lapply(makeGroups(ip), ifinfoQuery, curl = curl, stringsAsFactors = FALSE, multi.part = TRUE, urls = urls, byHostName = byHostName)
    ans = as.data.frame(do.call(rbind, vals),
                        row.names = 1:length(ip),
                        stringsAsFactors = stringsAsFactors)
    return(ans)
  }
  # The multi-value endpoint (urls[1]) differs from the single-address one.
  u = if(multi || byHostName)
        urls[1]
      else
        urls[2]
  txt = getForm(u, ip = paste(ip, collapse = ","), timezone = "false", curl = curl, ...)
  doc = xmlParse(txt, asText = TRUE)
  r = xmlRoot(doc)
  if(multi) {
    ans = xmlApply(r, function(x) xmlSApply(x, xmlValue))
    m = do.call(rbind, ans)
    if(multi.part)
      m
    else
      data.frame(m, stringsAsFactors = stringsAsFactors, row.names = 1:length(ip))
  } else {
    as.data.frame(xmlApply(r, xmlValue))
  }
}
makeGroups =
#
# Partition a vector of ips into consecutive batches of at most 25 elements,
# keeping only non-empty batches (used to respect the 25-per-request limit).
#
function(ip)
{
  batch = ceiling(seq_along(ip) / 25)
  groups = split(ip, batch)
  Filter(function(g) length(g) > 0, groups)
}
|
283414128cf5564e361616bbf925f5c2596d6130
|
15b0e0513e6206bb50c8fe6be4abbb16793eb560
|
/man/surgecapacity.Rd
|
e06b345237ff4afecd037e47104c8beef4d2bbb3
|
[
"MIT"
] |
permissive
|
terminological/arear
|
2566759ae77342a79a28b2a0d1cb389da337194b
|
98cece8079c4470e029c966c6b6d103797207eba
|
refs/heads/main
| 2023-06-07T16:02:46.919877
| 2023-05-26T14:49:17
| 2023-05-26T14:49:17
| 340,773,310
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,368
|
rd
|
surgecapacity.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{surgecapacity}
\alias{surgecapacity}
\title{Locations of UK general medical hospitals in mid march 2020 with estimates of beds available and maximal surge capacity HDU beds}
\format{
A sf geometry with:
\describe{
\item{nation}{England, Wales, etc...}
\item{hospitalId}{An id for the hospital}
\item{sector}{NHS or independent}
\item{hospitalName}{the hospital name}
\item{pcds}{the UK postcode of the hospital}
\item{trustId}{the NHS trust or local health board of the hospital}
\item{trustName}{the NHS trust or local health board name}
\item{tier1}{indicator of the role of the hospital as an acute provider}
\item{hduBeds}{the number of hdu beds the hospital could have provided at maximum surge in March 2020}
\item{acuteBeds}{the number of acute beds the hospital could have provided at maximum surge in March 2020}
}
}
\usage{
surgecapacity
}
\description{
This was manually assembled and curated from various sources in mid march 2020 as the NHS geared up to provide
additional capacity to cope with the surge in COVID cases. It is not an up to date picture of NHS capacity. It
does not include mental health or community hospitals. The surge capacity seems to have been calculated quite differently
in Scotland.
}
\keyword{datasets}
|
30f34bfe726f5cf257bf38fb064e508e52bf6da7
|
5ca8793fd39a818675306047c861e8c32965022a
|
/website/Old_source/Function/R/ff_harmonics.R
|
1a9390ed51da45392ce9c117fb84ead7a34fc063
|
[] |
no_license
|
SOCR/TCIU
|
a0dfac068670fa63703b8e9a48236883ec167e06
|
85076ae775a32d89676679cfa6050e683da44d1d
|
refs/heads/master
| 2023-03-09T01:28:28.366831
| 2023-02-27T20:27:26
| 2023-02-27T20:27:26
| 188,899,192
| 8
| 5
| null | 2022-11-12T01:56:25
| 2019-05-27T19:33:38
|
R
|
UTF-8
|
R
| false
| false
| 1,948
|
r
|
ff_harmonics.R
|
#' @title ff_harmonics
#' @description Compute the spectral decomposition of an array (harmonics)
#' @details This function computes the FT of the singlal and plots the first few harmonics
#'
#' @param x Original signal (1D, 2D, or 3D array).
#' @param n Number of first harmonics to report (integer).
#' @param up Upsamping rate (default=10).
#' @param plot Boolean indicating whether to print the harmonics plot(default==TRUE).
#' @param add whether to overplot the harmonics on an existing graph (default=FALSE),
#' @param main Title for the plot.
#' @return A plot and a dataframe with the sampled harmonics and their corresponding FT magnitudes/amplitudes.
#' @examples
#' ff_harmonics(x = y, n = 12L, up = 100L, col = 2L, lwd=3, cex=2)
#'
#' @author SOCR team <http://socr.umich.edu/people/>
#' @export
#'
ff_harmonics = function(x=NULL, n=NULL, up=10L, plot=TRUE, add=F, main=NULL, ...) {
  # Keep the first n harmonics of x and rebuild the signal on a time axis
  # upsampled by the factor `up`; returns a data frame of time vs magnitude.
  # The discrete Fourier transformation
  dff = fft(x)
  # time
  t = seq(from = 1, to = length(x))
  # Upsampled time
  nt = seq(from = 1, to = length(x)+1-1/up, by = 1/up)
  # New spectrum: zero everywhere except the DC term plus the first n
  # positive and n negative frequency components.
  ndff = array(data = 0, dim = c(length(nt), 1L))
  ndff[1] = dff[1] # mean, DC component
  if(n != 0){
    ndff[2:(n+1)] = dff[2:(n+1)] # positive frequencies come first
    ndff[length(ndff):(length(ndff) - n + 1)] = dff[length(x):(length(x) - n + 1)] # negative frequencies
  }
  # Invert the FT.
  # BUG FIX: the original normalised by length(y), silently depending on a
  # global `y` from the calling environment; the intended divisor is the
  # length of the input signal x.
  indff = fft(ndff/length(x), inverse = TRUE)
  idff = fft(dff/length(x), inverse = TRUE)
  if(plot){
    if(!add){
      plot(x = t, y = x, pch = 16L, xlab = "Time", ylab = "Measurement",
           col = rgb(red = 0.5, green = 0.5, blue = 0.5, alpha = 0.5),
           # scalar condition: plain if/else instead of ifelse()
           main = if (is.null(main)) paste(n, "harmonics") else main)
      lines(y = Mod(idff), x = t, col = adjustcolor(1L, alpha = 0.5))
    }
    lines(y = Mod(indff), x = nt, ...)
  }
  ret = data.frame(time = nt, y = Mod(indff))
  return(ret)
}
|
ab2a4550117ed0488ea6c52ca236f4c2cd56fed8
|
fb0b8c413ae95c961e0351eccb22263a2d0917dd
|
/man/is_sorted.Rd
|
62df5786abdfa51c53c0c4f30ae34effbda8931e
|
[] |
no_license
|
cran/hutilscpp
|
f3acfdf2af949df69c8887d937050aa3daf39a02
|
22994140414c52919756eb799ddd10ed4d666f74
|
refs/heads/master
| 2022-10-14T17:45:09.954044
| 2022-10-07T07:00:02
| 2022-10-07T07:00:02
| 168,961,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 769
|
rd
|
is_sorted.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is_sorted.R
\name{is_sorted}
\alias{is_sorted}
\alias{isntSorted}
\title{Is a vector sorted?}
\usage{
is_sorted(x, asc = NA)
isntSorted(x, asc = NA)
}
\arguments{
\item{x}{An atomic vector.}
\item{asc}{Single logical. If \code{NA}, the default, a vector is considered
sorted if it is either sorted ascending or sorted descending;
if \code{FALSE}, a vector is sorted only if sorted descending;
if \code{TRUE}, a vector is sorted only if sorted ascending.}
}
\value{
\code{is_sorted} returns \code{TRUE} or \code{FALSE}
\code{isntSorted} returns \code{0} if sorted or the first position
that proves the vector is not sorted
}
\description{
Is a vector sorted?
}
|
4255ac13e9ddd4257e53e0d74629f31c438c23b6
|
58ae28f35ed0796476c25a95b15f7464fe91c8e1
|
/R/nflcombine.R
|
f0069a6bdc0dd686262adb546dc55209236a5a5d
|
[] |
no_license
|
burrisk/midamix
|
761ef51c19cefe4cf000cfac1961610b006213fb
|
7f869de706edea2afbf57ff1897eefe92025d51f
|
refs/heads/master
| 2020-05-14T20:46:52.920481
| 2019-05-24T13:34:04
| 2019-05-24T13:34:04
| 181,950,634
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,094
|
r
|
nflcombine.R
|
#' National Football League Scouting Combine Data
#'
#' A compilation of drill results for all players who attended the NFL
#' scouting combine from 1987 to 2019. Some of the measurements come from the player's
#' pro day.
#'
#' @format A data frame with 10502 rows and 13 variables:
#' \describe{
#' \item{year}{year}
#' \item{name}{player name}
#' \item{college}{university the player attended}
#' \item{position}{primary position}
#' \item{height}{height of the player, in inches}
#' \item{weight}{weight of the player, in pounds}
#' \item{wonderlic}{score on the wonderlic intelligence test, ranging from 0-50}
#' \item{forty_yard_dash}{time taken to run the forty yard dash, in seconds}
#' \item{bench_press}{number of 225 lb repetitions player could bench press}
#' \item{vertical_jump}{height of maximum vertical jump, in inches}
#' \item{broad_jump}{length of maximum player broad jump, in inches}
#' \item{shuttle}{time taken to run the twenty yard shuttle, in seconds}
#' \item{three_cone}{time taken to run the three-cone drill, in seconds}
#' }
"nfl_combine"
|
69577894d3698b74389d5f9626f0a814740cd8ef
|
709c16710d7cae612de6c779cafb7199813e0f24
|
/Study 1 - Treatment Pathways/R Version/MainAnalysis.R
|
6643ae1527485ae9454fdb5b03e8102e034517a7
|
[] |
no_license
|
OHDSI/StudyProtocols
|
87a17fc3c00488b350f9416c584a1d0334d8dfcb
|
8de0454c6be4c120ba97d7376907d651327573a4
|
refs/heads/master
| 2023-04-27T18:59:35.785026
| 2020-02-16T00:32:52
| 2020-02-16T00:32:52
| 27,415,586
| 37
| 41
| null | 2023-04-25T19:55:45
| 2014-12-02T04:49:53
|
R
|
UTF-8
|
R
| false
| false
| 3,584
|
r
|
MainAnalysis.R
|
###########################################################
# R script for creating SQL files (and sending the SQL    #
# commands to the server) for the treatment pattern       #
# studies for these diseases:                             #
# - Hypertension (HTN)                                    #
# - Type 2 Diabetes (T2DM)                                #
# - Depression                                            #
#                                                         #
# Requires: R and Java 1.6 or higher                      #
###########################################################
# Install necessary packages if needed
install.packages("devtools")
library(devtools)
install_github("ohdsi/SqlRender")
install_github("ohdsi/DatabaseConnector")
# Load libraries
library(SqlRender)
library(DatabaseConnector)
###########################################################
# Parameters: Please change these to the correct values:  #
###########################################################
folder <- "F:/Documents/OHDSI/StudyProtocols/Study 1 - Treatment Pathways/R Version" # Folder containing the R and SQL files, use forward slashes
minCellCount <- 1 # the smallest allowable cell count, 1 means all counts are allowed
cdmSchema <- "cdm_schema"
# BUG FIX: the placeholder previously read "resuts_schema" (typo), which a
# user searching for "results_schema" would silently miss.
resultsSchema <- "results_schema"
sourceName <- "source_name"
dbms <- "sql server" # Should be "sql server", "oracle", "postgresql" or "redshift"
# If you want to use R to run the SQL and extract the results tables, please create a connectionDetails
# object. See ?createConnectionDetails for details on how to configure for your DBMS.
user <- NULL
pw <- NULL
server <- "server_name"
port <- NULL
connectionDetails <- createConnectionDetails(dbms=dbms,
                                             server=server,
                                             user=user,
                                             password=pw,
                                             schema=cdmSchema,
                                             port=port)
###########################################################
# End of parameters. Make no changes after this           #
###########################################################
setwd(folder)
source("HelperFunctions.R")
# Create the parameterized SQL files:
htnSqlFile <- renderStudySpecificSql("HTN",minCellCount,cdmSchema,resultsSchema,sourceName,dbms)
t2dmSqlFile <- renderStudySpecificSql("T2DM",minCellCount,cdmSchema,resultsSchema,sourceName,dbms)
depSqlFile <- renderStudySpecificSql("Depression",minCellCount,cdmSchema,resultsSchema,sourceName,dbms)
# Execute the SQL:
conn <- connect(connectionDetails)
executeSql(conn,readSql(htnSqlFile))
executeSql(conn,readSql(t2dmSqlFile))
executeSql(conn,readSql(depSqlFile))
# Extract tables to CSV files (one summary/person_cnt/seq_cnt set per study;
# the original repeated these nine calls verbatim):
for (study in c("HTN", "T2DM", "Depression")) {
  extractAndWriteToFile(conn, "summary", resultsSchema, sourceName, study, dbms)
  extractAndWriteToFile(conn, "person_cnt", resultsSchema, sourceName, study, dbms)
  extractAndWriteToFile(conn, "seq_cnt", resultsSchema, sourceName, study, dbms)
}
dbDisconnect(conn)
|
329a4073fad8e2b2eedcb840da467facadcbd407
|
9a1b28b43abec0215c24261d1d4412ea201e24fc
|
/man/kurtz2013e2bld.Rd
|
e306a927e82bbc77a03180d0a02ffb7128c23802
|
[] |
no_license
|
ajwills72/sixproblems
|
5c824666387f39b5a733f679a30db06f969baf4c
|
3b850a8d7989ef6805dcfaf62e15449a1dd4a3ea
|
refs/heads/master
| 2021-07-07T22:49:49.251549
| 2020-07-01T15:08:31
| 2020-07-01T15:08:31
| 131,574,504
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,321
|
rd
|
kurtz2013e2bld.Rd
|
\name{kurtz2013e2bld}
\alias{kurtz2013e2bld}
\docType{data}
\title{
Kurtz et al. (2013, Exp. 2) Type II versus Type IV data set
}
\description{
Block-level individual-participant data for the Kurtz et al. (2013) comparison of Type
II and Type IV problems.
}
\usage{data("kurtz2013e2bld")}
\format{
A data frame with 2584 observations on the following 4 variables.
\describe{
\item{\code{type}}{Problem type: 2 or 4.}
\item{\code{subj}}{Unique ID number for subject.}
\item{\code{block}}{Block number: 1-8; each block contains 8
trials.}
\item{\code{acc}}{The participant's probability of a correct
response in that block, range: 0-1.}
}
}
\details{
These are the block-level individual-participant data for the Kurtz et al. (2013,
Experiment 2) comparison of problem types II and IV. A total of 322
students from Binghamton University completed the experment (133 for
the Type II problem and 189 for the Type IV problem). For further details of this
experiment, see Kurtz et al. (2013).
FORMAT NOTES: Participants were trained to criterion, the problem
terminating when the participant had completed two consecutive
blocks without error. However, the analyses reported in Kurtz
et al. (1994) assume that participants who met the criterion would
have made no further errors had they continued for the full 8
blocks. In order to facilitate the reproduction of such analyses, this
dataset explicitly represents those assumed post-criterion
blocks.
SOURCE NOTES: These data were reported in Kurtz et
al. (2013). Schlegelmilch subsequently requested and received a
digital copy of the individual block-level data from Kurtz. Wills
verified that these data reproduced Figure 3 of Kurtz et
al. (2013). Wills then pre-processed the data into the long-file data
format presented here.
}
\source{
Kurtz, K.J., Levering, K.R., Stanton, R.D., Romero, J. and Morris,
S.N. (2013). Human learning of elemental category structures: Revising
the classic result of Shepard, Hovland, and Jenkins
(1961). \emph{Journal of Experimental Psychology: Learning, Memory,
and Cognition, 39}, 552-572.
}
\examples{
data(kurtz2013e2bld)
library(tidyverse)
kurtz2013e2bld \%>\% group_by(type, block) \%>\% summarise(mean(acc))
}
\keyword{datasets}
|
83d142faf4aca6c58cda90fc43fa38758083d977
|
51a57d4a937f3e6d7621d61c1caa7dbd741fb8f7
|
/Regression/code/feature_engineering_black.R
|
bb8be5bca863654a6ec85adba3cbc936e2bd7672
|
[] |
no_license
|
SiriusIE/2019_Advanced
|
163e1ad47bbb457fb43c291538d245703251979c
|
629e013b60768670400115c9c94a2d75a6a81159
|
refs/heads/master
| 2020-04-25T23:38:18.736181
| 2019-06-06T16:09:03
| 2019-06-06T16:09:03
| 173,152,309
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,836
|
r
|
feature_engineering_black.R
|
# Feature engineering for the Black Friday purchase-prediction dataset.
# Loads the raw data, drops ID columns, converts discrete columns to factors,
# lumps rare factor levels, and prepares the data for modelling.
source('Regression/code/load_libraries.R')
raw_data<-fread('Datasets/Regression/BlackFriday.csv', stringsAsFactors = T)
str(raw_data)
# Number of rows vs. number of distinct users (users appear multiple times).
raw_data[, length(User_ID)]
raw_data[, length(unique(User_ID))]
# Drop every column whose name contains "ID" (identifiers carry no signal).
raw_data[, which(sapply(names(raw_data),function(x) grep(pattern='ID',x))>0):= NULL]
str(raw_data)
raw_data[, Occupation:=as.factor(Occupation)]
# Objective: predict the purchase amount based on user features
str(raw_data)
data_proc<-copy(raw_data)
str(data_proc)
# we create a function that turns discrete variables into factors
# (columns with fewer than 200 distinct values are treated as categorical)
data_proc[,sapply(data_proc,function(x) length(unique(x)))]
data_proc[,which(sapply(data_proc,function(x) length(unique(x))<200)):=lapply(.SD, as.factor),
.SDcols=sapply(data_proc,function(x) length(unique(x))<200)]
str(data_proc)
# Remaining integer columns become numeric (doubles).
data_proc[ , which(sapply(data_proc, is.integer)):=lapply(.SD,as.numeric),
.SDcols = sapply(data_proc, is.integer)]
str(data_proc)
# Target distribution, raw and log-transformed.
data_proc[, ggplot(data_proc, aes(x=Purchase))+geom_histogram()]
data_proc[, ggplot(data_proc, aes(x=log(Purchase)))+geom_histogram()]
# NA to level
# Round-trip factors through character so that NA becomes an explicit "__" level.
data_proc[, which(sapply(data_proc, is.factor)):=lapply(.SD, as.character),
.SDcols=sapply(data_proc, is.factor)]
data_proc[, which(sapply(data_proc, is.character)):=lapply(.SD, function(x) ifelse(is.na(x),"__",x)),
.SDcols=sapply(data_proc, is.character)]
data_proc[, which(sapply(data_proc, is.character)):=lapply(.SD, as.factor),
.SDcols=sapply(data_proc, is.character)]
str(data_proc)
# We analyze the counting per Occupation:
sort(summary(data_proc$Occupation), dec=T)/nrow(data_proc)
p<-ggplot(data_proc, aes(x=Occupation))+geom_bar(stat='count')+
theme(axis.text.x = element_text(angle=45))
p
ggplotly(p)
# lets re-order the factor levels of Occupation in decreasing order
data_proc[, Occupation:=factor(Occupation, levels=names(sort(summary(data_proc$Occupation), dec=T)))]
levels(data_proc$Occupation)
ggplotly(p)
# We will create a label that aggregates into "others" those Occupations with less than 1% of share
niche_Occupation<-names(which(summary(data_proc$Occupation)/nrow(data_proc)<0.01))
niche_Occupation
data_proc[, Occupation_agg:=as.factor(ifelse(Occupation%in%niche_Occupation,'others',as.character(Occupation)))]
summary(data_proc$Occupation)/nrow(data_proc)
summary(data_proc$Occupation_agg)/nrow(data_proc)
sum(summary(data_proc$Occupation_agg)/nrow(data_proc))
data_proc[, length(levels(Occupation_agg))]
data_proc[, length(levels(Occupation))]
data_proc[, length(levels(Occupation_agg))/length(levels(Occupation))-1] # important reduction in factor categories
data_proc[, Occupation_agg:=factor(Occupation_agg, levels=names(sort(summary(data_proc$Occupation_agg), dec=T)))]
p<-ggplot(data_proc, aes(x=Occupation_agg))+geom_bar(stat='count')+
theme(axis.text.x = element_text(angle=45))
ggplotly(p)
# we drop off the former Occupation variable
data_proc[, Occupation:=NULL]
str(data_proc)
# same with Product Category 1
niche_Product_Category_1s<-names(which(summary(data_proc$Product_Category_1)/nrow(data_proc)<0.01))
niche_Product_Category_1s
data_proc[, Product_Category_1_agg:=as.factor(ifelse(Product_Category_1%in%niche_Product_Category_1s,'others',as.character(Product_Category_1)))]
summary(data_proc$Product_Category_1_agg)
data_proc[, Product_Category_1:=NULL]
# same with Product Category 2
niche_Product_Category_2s<-names(which(summary(data_proc$Product_Category_2)/nrow(data_proc)<0.01))
niche_Product_Category_2s
data_proc[, Product_Category_2_agg:=as.factor(ifelse(Product_Category_2%in%niche_Product_Category_2s,'others',as.character(Product_Category_2)))]
summary(data_proc$Product_Category_2_agg)
data_proc[, Product_Category_2:=NULL]
# same with Product Category 3
niche_Product_Category_3s<-names(which(summary(data_proc$Product_Category_3)/nrow(data_proc)<0.01))
niche_Product_Category_3s
data_proc[, Product_Category_3_agg:=as.factor(ifelse(Product_Category_3%in%niche_Product_Category_3s,'others',as.character(Product_Category_3)))]
summary(data_proc$Product_Category_3_agg)
data_proc[, Product_Category_3:=NULL]
str(data_proc)
#### summary
str(data_proc) # ...just numeric & factor variables
sum(sapply(data_proc, is.numeric))
sum(sapply(data_proc, is.factor))
#### NA treatment
sum(is.na(data_proc))
# we do nothing
#### We check if any numeric variable has null variance
numeric_variables<-names(data_proc)[sapply(data_proc, is.numeric)]
# calculating sd and CV for every numeric variable
sd_numeric_variables<-sapply(data_proc[,numeric_variables, with=F], sd)
sd_numeric_variables
cv_numeric_variables<-sd_numeric_variables/colMeans(data_proc[,numeric_variables, with=F])
cv_numeric_variables
# allright!!!
# Now lets check the number of categories per factor variable
factor_variables<-names(data_proc)[sapply(data_proc, is.factor)]
count_factor_variables<-sapply(data_proc[,factor_variables, with=F], summary)
count_factor_variables
# Lump rare factor levels together: any level whose relative frequency in
# `var` falls below the threshold `p` is recoded as "others"; all other
# observations keep their original label. Returns a factor.
f_other <- function(var, p) {
  freq <- summary(var) / length(var)
  rare <- names(freq)[freq < p]
  labels <- as.character(var)
  as.factor(ifelse(labels %in% rare, 'others', labels))
}
# and we apply the function to our factor variables
# (every factor level with < 1% share collapses into "others")
data_proc[, (factor_variables):=lapply(.SD, f_other,p=0.01), .SDcols=factor_variables]
sapply(data_proc[,factor_variables, with=F], summary)
str(data_proc)
# Binary encoding our factor variables (needed for most algos)
data_ready<-caret::dummyVars(formula= ~., data = data_proc, fullRank=T,sep = "_")
data_ready<-data.table(predict(data_ready, newdata = data_proc))
# Clean up column names produced by dummyVars so they are valid identifiers.
names(data_ready)<-gsub('-','_',names(data_ready))
setnames(data_ready,"Stay_In_Current_City_Years_4+","Stay_In_Current_City_Years_4")
setnames(data_ready,"Age_55+","Age_55")
str(data_proc)
str(data_ready)
sum(is.na(data_ready))
fwrite(data_ready, 'Datasets/Regression/data_black_ready.csv', row.names = F)
# data partition for individual project
# NOTE(review): this trailing section switches to the house-price dataset and
# uses absolute, machine-specific paths; it looks like scratch code.
data_ready<-fread('Datasets/Regression/data_house_ready.csv')
source('/Users/ssobrinou/IE/Advanced/2019_Advanced/Regression/code/f_partition.R')
whole_data<-f_partition(df=fread('/Users/ssobrinou/IE/Advanced/2019_Advanced/Datasets/Regression/kc_house_data.csv'),
test_proportion = 0.2,
seed = 872367823)
plot(whole_data$test$price)
lapply(whole_data, dim)
# NOTE(review): fwrite() is missing its output-file argument here -- as written
# this will not write the partition to disk; presumably a path was intended.
fwrite(whole_data)
# geo-analysis
library(leaflet)
m<-leaflet(data=raw_data)%>%addTiles()%>%addCircleMarkers(lat=~lat, lng=~long,
radius=0.5,
color = 'gray',
opacity = 0.25,label = ~price)
print(m)
|
9b430febc2ca9ab595ba45642fad54442c943bcf
|
98c40fe72bfe9caafc3db5ca0a2c0944cad33988
|
/man/after.envelope.Rd
|
e6b9c9d6f55cce75eac69e224636bb571255bde0
|
[] |
no_license
|
datawookie/emayili
|
c91d38dc5bf0fc38cff45260dc0ba99bce7e754f
|
cb0f2f7e6c8738a30ddd88834abd88b63585244c
|
refs/heads/master
| 2023-09-01T00:02:36.105432
| 2023-08-30T10:49:01
| 2023-08-30T10:49:01
| 187,310,940
| 158
| 41
| null | 2023-08-03T06:52:34
| 2019-05-18T03:45:55
|
R
|
UTF-8
|
R
| false
| true
| 334
|
rd
|
after.envelope.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/envelope.R
\name{after.envelope}
\alias{after.envelope}
\title{Append children to message}
\usage{
\method{after}{envelope}(x, child)
}
\arguments{
\item{x}{Message object}
\item{child}{A child to be appended}
}
\description{
Append children to message
}
|
1f545de8ac0f4165375b775cea3f9d534c8b1ee3
|
395daa1ec2a5f403dda8d65cfa88d5280afe2665
|
/R/varkernelslicerange.R
|
0a6273c320f2a6402d6e366521e0be782645c8d5
|
[] |
no_license
|
CWWhitney/uncertainty
|
4f0cdc86b453e5fbe48767d34d8db435c6979b63
|
0e6a25ba719a59f3104fd8c716d2aff3923f79be
|
refs/heads/main
| 2023-04-07T22:04:54.398851
| 2022-06-13T20:52:49
| 2022-06-13T20:52:49
| 374,667,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,767
|
r
|
varkernelslicerange.R
|
#' Estimated output variable values given the expected values of the influencing variable,
#' based on a slice of 'z' from the Kernel density plot of the influencing variable and output
#' variable data.
#'
#' Plot representing probabilities (shown along the y-axis) for the expected outcome variable (shown along the x-axis).
#' This is a broad slice through the density kernel from uncertainty::varkernel() function, which integrates to 1, the probability values are relative, not absolute measures.
#'
#' @param in_var is a vector of observations of a given influencing variable corresponding to another list with observed values of an outcome variable {out_var}.
#' @param out_var is a vector of observed values of an outcome variable corresponding to another list with observations of a given influencing variable {in_var}.
#' @param max_in_var is a value of the highest expected amount of a given influencing variable {in_var} for which the outcome variable {out_var} should be estimated (must be > {min_in_var}).
#' @param min_in_var is a value of the lowest expected amount of {in_var} for which the outcome variable {out_var} should be estimated (must be < {max_in_var}).
#' @param xlab_vars is the x axis title that describes the two variables being associated
#'
#' @importFrom MASS kde2d
#' @importFrom stats complete.cases
#' @importFrom graphics filled.contour
#' @importFrom graphics plot
#' @importFrom assertthat validate_that
#' @importFrom assertthat see_if
#'
#' @keywords kernel density influence
#'
#' @examples
#' variable <- sample(x = 1:50, size = 20, replace = TRUE)
#' outcome <- sample(x = 1000:5000, size = 20, replace = TRUE)
#' varkernelslicerange(variable, outcome, 10, 20,
#'                     xlab_vars = "Dist. of outcome given influence variable range")
#'
#' @export varkernelslicerange
varkernelslicerange <- function(in_var,
                                out_var,
                                min_in_var,
                                max_in_var,
                                xlab_vars = "Outcome variable dist. given influence variable") {
  # Fix: check the packages this function actually uses. The old code required
  # ggplot2, which is never called here, and never checked MASS (kde2d).
  if (!requireNamespace("MASS", quietly = TRUE)) {
    stop("Package \"MASS\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  if (!requireNamespace("stats", quietly = TRUE)) {
    stop("Package \"stats\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  # Setting the variables to NULL first, appeasing R CMD check
  in_outdata <- in_out <- NULL
  # Input validation (validate_that prints a message rather than aborting).
  assertthat::validate_that(length(in_var) == length(out_var), msg = "\"in_var\" and \"out_var\" are not equal lengths.")
  assertthat::validate_that(is.numeric(in_var), msg = "\"in_var\" is not numeric.")
  assertthat::validate_that(is.numeric(min_in_var), msg = "\"min_in_var\" is not numeric.")
  assertthat::validate_that(is.numeric(max_in_var), msg = "\"max_in_var\" is not numeric.")
  assertthat::validate_that(is.numeric(out_var), msg = "\"out_var\" is not numeric.")
  # Fix: enforce the documented constraint min_in_var < max_in_var.
  assertthat::validate_that(min_in_var < max_in_var, msg = "\"min_in_var\" must be smaller than \"max_in_var\".")
  # Pair the observations and keep only complete rows.
  in_out <- as.data.frame(cbind(in_var, out_var))
  in_outdata <- in_out[stats::complete.cases(in_out), ]
  # Fix: compare row counts. length() on a data.frame counts COLUMNS, so the
  # original check (length(in_out) == length(in_outdata)) could never detect
  # dropped rows.
  assertthat::see_if(nrow(in_out) == nrow(in_outdata), msg = "Rows with NA were removed.")
  #### kernel density estimation ####
  ## create a density surface with kde2d on a 100 x 100 grid
  in_outkernel <- MASS::kde2d(x = in_outdata$in_var,
                              y = in_outdata$out_var,
                              n = 100)
  ## Cut through the density kernel, averaging z over the grid columns whose
  ## x (in_var) values fall in (min_in_var, max_in_var].
  lbound <- which(in_outkernel$x == min(in_outkernel$x[which(in_outkernel$x > min_in_var)]))
  rbound <- which(in_outkernel$x == max(in_outkernel$x[which(in_outkernel$x <= max_in_var)]))
  graphics::plot(x = in_outkernel$y,
                 y = rowMeans(in_outkernel$z[, lbound : rbound]),
                 type = "l", col = "seagreen", lwd = 2,
                 xlab = paste(xlab_vars, as.character(min_in_var), "to",
                              as.character(max_in_var)),
                 ylab = "Relative probability")
  print("Relative probability (y) of the outcome variable for the given values of the influencing variable (x).")
}
|
34f21e06a4ca78a56da8b15d0cf425c65c4882e7
|
3e4b9bafef96b533a6def6c963a4e4bb2897ced9
|
/ukbiobank_prs/phenotype/organize_ukbb/af.R
|
bddc8a9c904de038c1f83de2dc2baccfc851ea72
|
[] |
no_license
|
EmadHassanin/combining_prs_gps
|
28e36b692449eae5ef3da01435f34c6fc157c14e
|
ff9041e829ffd01da38f26042975f3daaf72f457
|
refs/heads/main
| 2023-02-01T05:17:03.945028
| 2020-12-15T08:03:47
| 2020-12-15T08:03:47
| 321,477,204
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,440
|
r
|
af.R
|
# Build the atrial fibrillation (AF) phenotype from UK Biobank records.
# A participant counts as a case if they have a matching self-reported code
# (1471/1483), an ICD9 (4273*) or ICD10 (I48*) diagnosis, or an OPCS4
# procedure code (K571, K621-K624); the earliest event per participant wins.
af_all <- self_reported %>% filter(code_selfreported == 1471 |
code_selfreported == 1483) %>%
# NOTE(review): negative self-reported ages appear to encode missing values
# and are replaced by the mean -- confirm against the UKBB coding scheme.
mutate(age_selfreported = if_else(age_selfreported < 0 , mean(age_selfreported), age_selfreported )) %>%
full_join(icd9 %>% filter(str_detect(code_icd9 , "^(4273)")),
by=c("eid", "new")) %>%
full_join(icd10 %>% filter(str_detect(code_icd10 , "^(I48[0-9])")),
by=c("eid", "new")) %>%
full_join(opcs4 %>% filter(str_detect(code_opcs4 ,
"^(K571|K62[1-4])")),
by=c("eid", "new")) %>%
# Age of onset taken from whichever source is available, checked in priority
# order: ICD10, OPCS4, self-reported, ICD9. `type` records which source won.
mutate(age = case_when(
!is.na(code_icd10) ~ age_icd10,
!is.na(code_opcs4) ~ age_opcs4,
!is.na(code_selfreported) ~ age_selfreported,
!is.na(code_icd9) ~ age_icd9
)) %>%
mutate(type = case_when(
!is.na(code_icd10) ~ "icd10",
!is.na(code_opcs4) ~ "opcs4",
!is.na(code_selfreported) ~ "selfreported",
!is.na(code_icd9) ~ "icd9")) %>%
# Keep only the earliest recorded event per participant.
group_by(eid) %>%
slice(which.min(age)) %>%
ungroup() %>%
#mutate(age =age_selfreported) %>%
select(eid,code_icd10,code_icd9,code_opcs4,code_selfreported,age,type) %>%
mutate(pheno = 1)
# Attach genetic QC fields and restrict to a clean analysis cohort:
# British ancestry, no heterozygosity/missingness outliers, no sex-chromosome
# aneuploidy, and concordant reported vs. genetic sex.
af_final <-
ukbb_df %>% select(
eid,sex_f31_0_0, ethnic_background_f21000_0_0,
matches("f22009"), sex_chromosome_aneuploidy_f22019_0_0,
outliers_for_heterozygosity_or_missing_rate_f22027_0_0,
genetic_sex_f22001_0_0,
age_when_attended_assessment_centre_f21003_0_0) %>%
inner_join(af_all) %>%
filter(ethnic_background_f21000_0_0 == "British") %>%
filter(outliers_for_heterozygosity_or_missing_rate_f22027_0_0 != "Yes" |
is.na(outliers_for_heterozygosity_or_missing_rate_f22027_0_0)) %>%
filter(sex_chromosome_aneuploidy_f22019_0_0 != "Yes" |
is.na(sex_chromosome_aneuploidy_f22019_0_0)) %>%
mutate( sex_check= case_when(
sex_f31_0_0 == genetic_sex_f22001_0_0 ~ TRUE,
TRUE ~ FALSE
)) %>%
filter(sex_check == TRUE) %>%
select(-sex_check) %>%
# Classify each case relative to assessment date: event at/before assessment
# is "prevalent", after assessment is "incident".
mutate( prev_inc = case_when(
age_when_attended_assessment_centre_f21003_0_0 >= age ~ "prevalent",
age_when_attended_assessment_centre_f21003_0_0 < age ~ "incident",
TRUE ~ "unknown"
)) %>%
select(
eid, age, ethnic_background_f21000_0_0, type,age_when_attended_assessment_centre_f21003_0_0,
pheno,prev_inc,sex_f31_0_0, matches("f22009"))
|
0eae03597be86c66f35b1704caf007afa314785d
|
68058bd8c023c42962a86584c939f9c32b7a69a4
|
/simulation.R
|
5556e64aedcd7e068b4c2b933f7d6ef61684eaf4
|
[] |
no_license
|
vinayakpathak/gp
|
ea1f64490cfb5c4295876dd970bc49a428106411
|
da568d2d0806d97d467794c8d803d762132564e3
|
refs/heads/master
| 2020-03-10T03:20:09.292527
| 2018-04-22T03:19:26
| 2018-04-22T03:19:26
| 129,162,023
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
r
|
simulation.R
|
# Simulate draws from a latent factor model and fit it with Stan
# (model file: simulation.stan), then plot the posterior draws of y.
w_values <- c(1,1,1,1,1,1,1,1,1,
1,1,1,1,1,-1,-1,-1,-1,
1,1,1,-1,-1,-1,1,1,1)
D <- 1 # latent dimension
M <- 9 # number of outputs
mu <- rep(0, M)
N <- 308 # number of observations
sigma <- 0.1
#W <- matrix(w_values, M, D)
# Random loading matrix (the fixed w_values alternative is kept commented out).
W <- matrix(rnorm(M * D), M, D)
data_list <- list(D = D, M = M, N = N, sigma = sigma, W = W, mu = mu)
# NOTE(review): stan() comes from rstan, which is not loaded in this file --
# presumably library(rstan) happens elsewhere before this script runs.
fit <- stan(file = "simulation.stan", data = data_list, cores = 1, chains = 2, iter = 2000)
fit_data <- as.data.frame(fit)
ys <- c('y[1]', 'y[2]', 'y[3]', 'y[4]', 'y[5]', 'y[6]', 'y[7]', 'y[8]', 'y[9]')
xs <- c(0, 3, 6, 9, 12, 15, 18, 21, 24)
clr <- rgb(0,0,0,alpha = 0.03) # translucent black for overlaid draws
clr1 <- rgb(1,0,0) # opaque red for the first draw
plot(xs, fit_data[1, ys], "l", ylim = c(-10, 10), col=clr1)
# Overlay every posterior draw to visualise uncertainty.
for (i in seq(1, nrow(fit_data))) {lines(xs, fit_data[i, ys], col=clr)}
|
08fe39f0321aa37d7df63ccf812d584715bc17a1
|
c839af452b6ec822ef520e743a1a4bee6ed99e60
|
/R/get_nt_urban_aboriginal_pop.R
|
4ba2a2afd9d6a7a1dab5ef099a4593750e1c5f27
|
[
"MIT"
] |
permissive
|
goldingn/contact_matrices_australia
|
48c6b0945f5dc59b720325d706f91d6bf6ebe442
|
237bda4a083721801b91d63a197e86f71b98feb2
|
refs/heads/main
| 2023-09-05T01:39:48.329749
| 2021-11-05T08:14:43
| 2021-11-05T08:14:43
| 408,035,456
| 0
| 0
|
MIT
| 2021-11-08T09:02:14
| 2021-09-19T04:59:20
|
R
|
UTF-8
|
R
| false
| false
| 459
|
r
|
get_nt_urban_aboriginal_pop.R
|
#' Get the Aboriginal population of urban Alice Springs
#'
#' Subsets the NT Local Health District Aboriginal population estimates
#' (from \code{get_nt_lhd_aboriginal_pop()}) to the "Alice Springs Urban"
#' district and sums the population within each age band.
#'
#' @return A data frame with columns \code{lower.age.limit} and
#'   \code{population}.
#' @author Nick Golding
#' @export
get_nt_urban_aboriginal_pop <- function() {
  # aboriginal population in urban alice springs, aggregated by age band
  population <- get_nt_lhd_aboriginal_pop() %>%
    filter(
      district == "Alice Springs Urban"
    ) %>%
    group_by(
      lower.age.limit
    ) %>%
    summarise(
      population = sum(population)
    )
  # Fix: return the result explicitly -- the original ended on the assignment,
  # whose value is returned invisibly, so calling the function at top level
  # printed nothing. Also filled in the placeholder roxygen template above.
  population
}
|
3812729da10139c5ff154168cd6d18db4f5e8569
|
61daa4c3bb5bc77e89cc9ab3a5424647e732d538
|
/inst/virtual_patient_simulator/bs4/calc_change.R
|
77c711a6ecd2347c39d152972d16699fe0bc9214
|
[] |
no_license
|
DivadNojnarg/CaPO4Sim
|
7a3afafd188b9db1e9929f514ef6a7c46808d5d1
|
8bd3d12702e159c95817c7a724af6824a0cd1517
|
refs/heads/master
| 2022-11-10T10:52:03.637983
| 2022-11-02T13:01:22
| 2022-11-02T13:01:22
| 99,161,393
| 43
| 31
| null | 2020-07-16T05:30:18
| 2017-08-02T21:13:53
|
R
|
UTF-8
|
R
| false
| false
| 5,461
|
r
|
calc_change.R
|
#-------------------------------------------------------------------------
# This code contains the function that calculates the percentage of
# change of each flux, which is then needed to change the color of arrows
# depending on the resulting variation. (see global.R for color change)
#
# David Granjon, the Interface Group, Zurich
# July 10th, 2017
#-------------------------------------------------------------------------
calc_change <- function(out, t_target) {
# Compute, for each Ca/PO4/PTH flux in the simulation output `out`, its
# percentage change relative to a hard-coded base-case (steady-state) value,
# evaluated at time index `t_target`. The constants below are the base-case
# flux values (see the commented block after this function for how to
# regenerate them from a baseline run).
#
# Args:
#   out      : simulation output with named flux columns, indexed by time row
#   t_target : row index (time point) at which to compute the changes
#
# Returns a one-row data.frame of percentage changes. Column order matters to
# callers (see the trailing index comments, e.g. "# 5", "# 10").
# change for Ca and PO4 fluxes
# numbers represent the base-case value
# t target is the time at which to compute calc_change
Abs_int_change <- 0.5*((out[t_target,"Abs_int_Ca"] - 9.829864e-04)/9.829864e-04*100 +
(out[t_target,"Abs_int_PO4"] - 8.233724e-04)/8.233724e-04*100)
U_Ca_change <- (out[t_target,"U_Ca"] - 3.907788e-05)/3.907788e-05*100
U_PO4_change <- (out[t_target,"U_PO4"] - 3.969683e-04)/3.969683e-04*100
Res_change <- 0.5*((out[t_target,"Res_Ca"] - 3.921871e-04)/3.921871e-04*100 +
(out[t_target,"Res_PO4"] - 1.176561e-04)/1.176561e-04*100)
Ac_Ca_change <- (out[t_target,"Ac_Ca"] - 1.009965e-03)/1.009965e-03*100
Ac_PO4_change <- (out[t_target,"Ac_PO4"] - 2.178550e-04)/2.178550e-04*100
Reabs_Ca_change <- (out[t_target,"Reabs_Ca"] - 2.592522e-03)/2.592522e-03*100
Reabs_PO4_change <- (out[t_target,"Reabs_PO4"] - 4.606232e-03)/4.606232e-03*100
# Net plasma<->fast-pool fluxes are changes of a DIFFERENCE of two fluxes.
Net_Ca_pf_change <- ((out[t_target,"Ca_pf"] - out[t_target,"Ca_fp"]) -
(5.306840e-03 - 4.296942e-03))/(5.306840e-03 - 4.296942e-03)*100
Net_PO4_pf_change <- (round((out[t_target,"PO4_pf"] - out[t_target,"PO4_fp"]) -
(1.995840e-01 - 1.993571e-01),4))/(1.995840e-01 - 1.993571e-01)*100
# need to round since the order of magnitude of the difference is 1e-7
Net_PO4_pc_change <- (round((out[t_target,"PO4_pc"] - out[t_target,"PO4_cp"]) -
(2.772000e-03 - 2.771900e-03),6))/(2.772000e-03 - 2.771900e-03)*100
# change for PTH fluxes
PTHg_synth_change <- (out[t_target,"PTHg_synth"] - 54.02698)/54.02698*100
PTHg_synth_D3_change <- (out[t_target,"PTHg_synth_D3"] - 0.68025)/0.68025*100
PTHg_synth_PO4_change <- (out[t_target,"PTHg_synth_PO4"] - 0.18945)/0.18945*100
PTHg_exo_CaSR_change <- (out[t_target,"PTHg_exo_CaSR"] - 0.00693)/0.00693*100
PTHg_deg_change <- (out[t_target,"PTHg_deg"] - 45.086650)/45.086650*100
PTHg_exo_change <- (out[t_target,"PTHg_exo"] - 8.936505)/8.936505*100
PTHp_deg_change <- (out[t_target,"PTHp_deg"] - 8.931000)/8.931000*100
# Changes for PTH contribution in the proximal tubule
Reabs_PT_change <- (out[t_target, "Reabs_PT_PTH"] - 0.0098)/0.0098*100
# changes for PTH and CaSR contribution in TAL
Reabs_TAL_CaSR_change <- (out[t_target, "Reabs_TAL_CaSR"] - 0.0104)/0.0104*100
Reabs_TAL_PTH_change <- (out[t_target, "Reabs_TAL_PTH"] - 0.00465)/0.00465*100
# changes for PTH and D3 contributions in DCT
Reabs_DCT_PTH_change <- (out[t_target, "Reabs_DCT_PTH"] - 0.00417)/0.00417*100
Reabs_DCT_D3_change <- (out[t_target, "Reabs_DCT_D3"] - 0.00108)/0.00108*100
# change for intest Ca reabs due to D3
Abs_int_D3_change <- (out[t_target, "Abs_int_D3"] - 0.000433)/0.000433*100
# change for Ca resorption due to PTH and D3
Res_PTH_change <- (out[t_target, "Res_PTH"] - 0.0000669)/0.0000669*100
Res_D3_change <- (out[t_target, "Res_D3"] - 0.000225)/0.000225*100
# Change for PO4 reabsorption due to PTH and FGF23
Reabs_PT_PO4_PTH_change <- (out[t_target, "Reabs_PT_PO4_PTH"] - 0.09952)/0.09952*100
Reabs_PT_PO4_FGF_change <- (out[t_target, "Reabs_PT_PO4_FGF"] - 0.14124)/0.14124*100
# Collect everything into a single one-row data.frame (column order matters).
df <- data.frame(
Abs_int_change = Abs_int_change,
U_Ca_change = U_Ca_change,
U_PO4_change = U_PO4_change,
Res_change = Res_change,
Ac_Ca_change = Ac_Ca_change, # 5
Ac_PO4_change = Ac_PO4_change,
Reabs_Ca_change = Reabs_Ca_change,
Reabs_PO4_change = Reabs_PO4_change,
Net_Ca_pf_change = Net_Ca_pf_change,
Net_PO4_pf_change = Net_PO4_pf_change, # 10
Net_PO4_pc_change = Net_PO4_pc_change,
PTHg_synth_change = PTHg_synth_change,
PTHg_synth_D3_change = PTHg_synth_D3_change,
PTHg_synth_PO4_change = PTHg_synth_PO4_change,
PTHg_exo_CaSR_change = PTHg_exo_CaSR_change, # 15
PTHg_deg_change = PTHg_deg_change,
PTHg_exo_change = PTHg_exo_change,
PTHp_deg_change = PTHp_deg_change,
Reabs_PT_change = Reabs_PT_change,
Reabs_TAL_CaSR_change = Reabs_TAL_CaSR_change, # 20
Reabs_TAL_PTH_change = Reabs_TAL_PTH_change,
Reabs_DCT_PTH_change = Reabs_DCT_PTH_change,
Reabs_DCT_D3_change = Reabs_DCT_D3_change,
Abs_int_D3_change = Abs_int_D3_change,
Res_PTH_change = Res_PTH_change, # 25
Res_D3_change = Res_D3_change,
Reabs_PT_PO4_PTH_change = Reabs_PT_PO4_PTH_change,
Reabs_PT_PO4_FGF_change = Reabs_PT_PO4_FGF_change, # 28
stringsAsFactors = FALSE
)
# NOTE(review): the assignment above is the last evaluated expression, so the
# data.frame is returned invisibly; an explicit trailing `df` would make the
# return value visible and the intent clearer.
}
# Uncomment if need to set new base case values
# c(out()[1,"Abs_int_Ca"],
# out()[1,"Abs_int_PO4"],
# out()[1,"U_Ca"],
# out()[1,"U_PO4"],
# out()[1,"Res_Ca"],
# out()[1,"Res_PO4"],
# out()[1,"Ac_Ca"],
# out()[1,"Ac_PO4"],
# out()[1,"Reabs_Ca"],
# out()[1,"Reabs_PO4"],
# out()[1,"Ca_pf"],
# out()[1,"PO4_pf"],
# out()[1,"Ca_fp"],
# out()[1,"PO4_fp"],
# out()[1,"PO4_pc"],
# out()[1,"PO4_cp"],
# out()[1,"PTHg_synth"],
# out()[1,"PTHg_deg"],
# out()[1,"PTHg_exo"],
# out()[1,"PTHp_deg"])
|
7ed17dfbb69db13b5cd4d14dfa208d6889abe6c8
|
d514d43ef0958a662bb685d89a6c9ac541b44390
|
/man/importLRR_BAF.Rd
|
9e229a9d85e52d8a6c7a9ef3ee7d1fbabb48840a
|
[] |
no_license
|
inambioinfo/CNVRanger
|
e133a885213f7ecea57ad6e0437c016df1ff3f25
|
dbb80477bf2d681d604d6e7cfffe4389d55ddd5b
|
refs/heads/master
| 2020-04-16T07:36:13.024957
| 2019-01-11T01:46:33
| 2019-01-11T01:46:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,216
|
rd
|
importLRR_BAF.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pheno_assoc.R
\name{importLRR_BAF}
\alias{importLRR_BAF}
\title{Import LRR and BAF from text files used in the CNV analysis}
\usage{
importLRR_BAF(all.paths, path.files, list.of.files, verbose = TRUE)
}
\arguments{
\item{all.paths}{Object returned from \code{CreateFolderTree} function with
the working folder tree}
\item{path.files}{Folder containing the input CNV files used for the CNV
calling (i.e. one text file with 5 columns for each sample). Columns should
contain (i) probe name, (ii) Chromosome, (iii) Position, (iv) LRR, and (v) BAF.}
\item{list.of.files}{Data-frame with two columns where (i) is the file
name with signals and (ii) is the corresponding name of the sample in the gds file}
\item{verbose}{Print the samples while importing}
}
\description{
This function imports the LRR/BAF values and create a node for each one in
the GDS file at the working folder 'Inputs' created by the
\code{\link{setupCnvGWAS}} function. Once imported, the LRR values can be
used to perform a GWAS directly as an alternative to copy number dosage
}
\examples{
# Load phenotype-CNV information
data.dir <- system.file("extdata", package="CNVRanger")
phen.loc <- file.path(data.dir, "Pheno.txt")
cnv.out.loc <- file.path(data.dir, "CNVOut.txt")
map.loc <- file.path(data.dir, "MapPenn.txt")
phen.info <- setupCnvGWAS('Example', phen.loc, cnv.out.loc, map.loc)
# Extract path names
all.paths <- phen.info$all.paths
# List files to import LRR/BAF
list.of.files <- list.files(path=data.dir, pattern="cnv.txt.adjusted$")
list.of.files <- as.data.frame(list.of.files)
colnames(list.of.files)[1] <- "file.names"
list.of.files$sample.names <- sub(".cnv.txt.adjusted$", "", list.of.files$file.names)
# All missing samples will have LRR = '0' and BAF = '0.5' in all SNPs listed in the GDS file
importLRR_BAF(all.paths, data.dir, list.of.files)
# Read the GDS to check if the LRR/BAF nodes were added
cnv.gds <- file.path(all.paths[1], 'CNV.gds')
genofile <- SNPRelate::snpgdsOpen(cnv.gds, allow.fork=TRUE, readonly=FALSE)
SNPRelate::snpgdsClose(genofile)
}
\author{
Vinicius Henrique da Silva <vinicius.dasilva@wur.nl>
}
|
a2e24e8ca9e11b1e88564429d43d0d4716d2d36d
|
7835fe6cd53ce1781f9f146e1b6ba65dfdbf7f3c
|
/cachematrix.R
|
cd2f818aeef8a7cf93eb6bc95ccb0e2aba8aad82
|
[] |
no_license
|
JBrown0303/ProgrammingAssignment2
|
7cf2cba09e7f59b0619087dc13a58185a3ee1bb7
|
0ae363d4b328126551159603a5c6fdd363a58053
|
refs/heads/master
| 2020-04-01T15:49:43.075529
| 2018-10-16T21:54:10
| 2018-10-16T21:54:10
| 153,353,437
| 0
| 0
| null | 2018-10-16T20:59:24
| 2018-10-16T20:59:24
| null |
UTF-8
|
R
| false
| false
| 1,364
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Function that sets and returns a chaced matrix and its inverse
## Create a special "matrix" object that caches its inverse.
## Returns a list of accessor functions: set/get for the matrix itself,
## setInverse/getInverse for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL means "not computed yet".
  inv <- NULL
  set <- function(y) {
    # Replace the stored matrix and invalidate any cached inverse.
    x <<- y
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(inverse) {
    inv <<- inverse
  }
  getInverse <- function() {
    inv
  }
  # Expose the four accessors by name.
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Returns the inverse of the matrix. the inverse is cached after the
## first calculation function loads inverse from cach after it has
## been calculated
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## The inverse is computed at most once: subsequent calls return the cached
## value instead of recomputing it. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse from the stored matrix and memoise it.
    mat <- x$get()
    cached <- solve(mat, ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
b6b53b0572acbb04111b2538b4aa6df179844420
|
1b622a48cf7c8853d67605cb6501aea6ef4df678
|
/IntroductionToR/Day 5/ExercisesDay5DataAnalysis2019.r
|
01015d2edd9e429e425b3af4187ef9db247f6d11
|
[] |
no_license
|
ThibaultSchowing/unistuff
|
0ab6e1c3ec27dda631b0399a1cfca351e959b7fc
|
380506b30a69df6c82f639064b3d5700fe90de14
|
refs/heads/master
| 2020-09-08T19:43:24.176721
| 2020-05-14T21:59:24
| 2020-05-14T21:59:24
| 221,225,865
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,799
|
r
|
ExercisesDay5DataAnalysis2019.r
|
# -------------------------------------------------------------------
# R course 2019
# Exercises day 5
# -------------------------------------------------------------------
# Exercise 5.1 -------------------------------------------------------------------
#
# 1) Calculate summary statistics for the data set "StudentData2016.txt".
#    Calculate summary statistics for females and males separately.
# "?" marks missing values in the raw file, so map it to NA on import.
df = read.table("StudentData2016.txt", header = T, na.strings = "?")
summary(df)
summary(df[df$Sex=="M",])
summary(df[df$Sex=="W",])
# Alternative approach (kept for reference):
#df$Weight <- as.numeric(as.character(df$Weight))
#summary(df)
#summary(df[which(df$Sex=='W'),])
#summary(df[which(df$Sex=='M'),])
# Exercise 5.2 --------------------------------------------------------------------
# 1) Plot a histogram of student body weight and add a normal distribution with mean and variance
#    estimated from the observations
# remove NA rows first
dfna <- na.omit(df)
# frequency histogram with 1-kg bins
hist(dfna$Weight, breaks = seq(min(dfna$Weight), max(dfna$Weight), 1), xlab = "Weight", main = "Weight distribution")
# Instructor's solution uses a density histogram instead of frequencies:
# the density scale is needed so the normal curve can be overlaid directly.
hist(df$Weight, n=25, xlim = c(40,110), freq = F)
x=seq(40, 100, length.out = 100)
y=dnorm(x, mean=mean(df$Weight, na.rm=T), sd = sd(df$Weight, na.rm = T))
lines(x, y, col="blue", lwd=2)
# Alternative way of plotting:
#plot(function(x) dnorm(x, mean=mean(df$Weight, na.rm=T), sd = sd(df$Weight, na.rm = T)))
weights <- df$Weight
heights <- df$Height
# 2) Compare the distribution of weight to a normal distribution with a QQ-plot.
qqnorm(weights, main="QQPlot of students weight")
qqline(weights)
# Extra exploration (not part of the exercise): same check for heights.
qqnorm(heights, main="QQPlot of students heights")
qqline(heights)
hist(heights, freq = F)
xseq = seq(150, 200, 1)
lines(xseq, dnorm(xseq, mean=mean(heights), sd=sd(heights)))
abline(v=mean(heights), col = "blue", lwd = 3)
# Exercise 5.3. ----------------------------------------------------------------------
# Here we want to check that the CI contains the true parameter value in 95% of times,
# if we repeated the sampling procedure 100 times
# Imagine that the height of students follow a Normal distribution with mean 170 and standard deviation 10 (cm)
# 5.3.1. Simulate a random sample of 20 students using function rnorm() and save it into a variable
# NOTE(review): sd = 70 here, but the exercise statement says sd = 10 -- confirm.
spldst <- rnorm(20, mean = 170, sd = 70)
# 5.3.2. Compute the 95% CI of the simulated sample using the t.test() function and save it into a variable.
ttest <- t.test(spldst, df$Height, paired = FALSE)
# the output of the t.test() function is a list.
# use the str() function to check the structure of the variable where you saved the output of t.test().
str(ttest)
# you can get the confidence intervals with "var_name$conf.int"
# 5.3.3. save the 95% CI into a vector of size 2, using function as.numeric()
CI <- c(as.numeric(ttest$conf.int[1]), as.numeric(ttest$conf.int[2]))
# 5.3.4. make a function that does steps 2 to 3, i.e. a function that gets as input a sample and
#        that outputs the 95% CI
#        (it also generates a fresh normal sample internally and prints the CI)
asdf <- function(smpl){
  spldst <- rnorm(20, mean = 170, sd = 70)
  ttest <- t.test(spldst, smpl, paired = FALSE)
  print(c(as.numeric(ttest$conf.int[1]), as.numeric(ttest$conf.int[2])))
}
asdf(df$Height)
# 5.3.5. Perform a for loop, generating 1000 random samples of 20 students from a
#        normal distribution with mean 170 and standard deviation 10.
#        For each random sample compute the 95% CI and the mean.
#        Save the 95% CI of each sample into a matrix with 2 columns and 1000 rows.
#        Save the mean of each sample into a vector with 1000 elements.
#        How many times does the 95% CI include the true value of the mean?
#        What is the true mean? What is the distribution of the mean of the samples?
cim <- matrix(nrow = 1000, ncol = 2)
mc <- c()
# True data mean
dfmean <- mean(df$Height)
meanInCI <- 0
# NOTE(review): 0:1000 iterates 1001 times and the first pass indexes row 0
# (a no-op for matrix assignment); 1:1000 was probably intended -- confirm.
for(i in 0:1000){
  # Generate 20 random student heights
  students <- rnorm(20, mean = 170, sd = 10)
  mean <- mean(students)
  ttest <- t.test(students)
  #print(ttest)
  low <- as.numeric(ttest$conf.int[1])
  cim[i,1] <- low
  hi <- as.numeric(ttest$conf.int[2])
  cim[i,2] <- hi
  mc <- c(mc, mean)
  #cat("Low: ", low, "\tHi: ", hi, "\tMean", mean, "\n")
  # count how often the true mean falls inside the CI
  if(dfmean > low & dfmean < hi){
    meanInCI = meanInCI + 1
  }
}
# Number of times the true mean is inside the CI
print(meanInCI)
hist(mc, freq = F, main = "Random sample of Students Heights")
plot(density(mc))
x=seq(150, 180, length.out = 100)
# The sampling distribution of the mean of 20 observations has standard
# error sd/sqrt(20); hence the division by sqrt(20) below.
y=dnorm(x, mean=mean(df$Height, na.rm=T), sd = sd(df$Height, na.rm = T)/sqrt(20))
lines(x, y, col="blue", lwd=2)
ysim = dnorm(x, mean=170, sd=10/sqrt(20))
lines(x, ysim, col="red", lwd=2)
# Exercise 5.4 --------------------------------------------------------------------
#
# 5.4.1 Calculate the 95% CIs for the heights of the StudentData2016.txt females and males separately.
#       What does the result suggest?
# 5.4.2 Directly test that the heights of males and females are different by means of a t-test
df2 <- read.table("StudentData2016.txt", header = TRUE, na.strings = "?")
ttmale <- t.test(df2$Height[df2$Sex=="M"])
ttfemale <- t.test(df2$Height[df2$Sex=="W"])
print(ttmale$conf.int)
print(ttfemale$conf.int)
# To formally test
t.test(df2$Height[df2$Sex=="M"],df2$Height[df2$Sex=="W"])
# Conclusion: the male CI lies above the female CI -- men are taller on average.
# Exercise 5.5 -------------------------------------------------------------------------
#
# Create a contingency table showing the frequency distribution of the variables
# "Sex" and "Smoking". Use the chi square test to test if the two variables are independent.
# Hint 1: If you pass a contingency table to the function chisq.test, Pearson's chi-squared test is
#         performed with the null hypothesis that the joint distribution of the cell counts in a 2-dimensional
my.table <- table(df2$Sex, df2$Smoking);my.table
prop.table(my.table)
chisq.test(my.table)
#         contingency table is the product of the row and column marginals.
# Hint 2: Two random variables X and Y are independent if P (X = x and Y = y) = P(X = x) P(Y = y).
# p-value is < 0.05 -> reject independence: smoking habits differ between the sexes.
# Warning: some of the expected cell counts are smaller than 5 (small sample),
# so the chi-square approximation may be unreliable.
# Exercise 5.6 --------------------------------------------------------------------
#
# 5.6.1 Make a QQ-plot to compare the distributions of weights and heights in 2016. What does the plot tell you?
weights <- df2$Weight
heights <- df2$Height
qqp=qqplot(weights, heights)
relm=lm(qqp$y~qqp$x)
abline(relm, lwd=2, col="blue")
plot(weights, heights, main="Height vs Weight")
my.lm <- lm(heights~weights)
abline(my.lm$coefficients[1], my.lm$coefficients[2])
# The qqnorm calls below compare each variable to a *normal* distribution,
# which is not what 5.6.1 asks for (kept for reference).
qqnorm(weights, main="QQPlot of students weight")
qqline(weights)
qqnorm(heights, main="QQPlot of students height")
qqline(heights)
# 5.6.2 Now plot the line that goes through the qqplot points. What does it tell you?
#       Note: This cannot be done using qqline, which only applies to qqnorm.
# 5.6.3 Further check the relationship between the two variables by plotting
#       the data against each other and overlay a regression line obtained using the lm function
# Exercise 5.7 --------------------------------------------------------
#
# Follow the smoking habits in years 2003, 2014 and 2016. What do you see?
df2003 <- read.table("StatWiSo2003.txt", header = T, na.strings = "?")
df2014 <- read.csv("StudentData2014.txt", header = T, na.strings = "-")
df2016 <- read.csv("StudentData2016.txt", header = T, na.strings = "?")
# Smoking-category proportions for 2003 (the column name is German: "Rauchen").
smoke2003 <- table(df2003$Rauchen)/length(df2003$Rauchen)
|
3ac86e0a7014305ac6dac63c2e40fe69284bdc50
|
50284f0424b73a2fabe8705809c3fa018867ccd6
|
/R/is.evenodd.R
|
f5f9d9e6121ae76207e3700e2f7ce0347bbd7c6e
|
[] |
no_license
|
InfProbSciX/PeRLib
|
2756830a8b897f3fad94dc417b2eb8eeafb64b8c
|
9f68dc8c903950de2b2025fbaa79de6ae3a21b90
|
refs/heads/master
| 2020-03-22T08:21:55.349877
| 2018-07-04T20:11:32
| 2018-07-04T20:11:32
| 139,762,021
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 853
|
r
|
is.evenodd.R
|
#' Check if a vector of numbers is Even
#'
#' \code{is.even} returns a logical vector obtained by testing
#' \code{x \%\% 2 == 0} elementwise, or (when \code{n = TRUE}) the even
#' elements of \code{x} themselves.
#'
#' @param x A numeric vector.
#' @param n Logical; if \code{TRUE}, return the even elements of \code{x}
#'   instead of a logical vector. Default \code{FALSE}.
#' @return A logical vector the same length as \code{x}, or the even
#'   elements of \code{x} when \code{n = TRUE}.
#' @examples
#' is.even(1:5)
#'
#' ## FALSE TRUE FALSE TRUE FALSE
#' @export
is.even <- function(x, n = FALSE){
  # Compute the mask once. The previous body re-entered itself through a
  # hard-coded `perlib::` namespace, which breaks when the installed
  # package name differs (the repo is "PeRLib") or the function is sourced
  # standalone.
  even <- x %% 2 == 0
  if(!n){
    even
  }else{
    x[even]
  }
}
#' Check if a vector of numbers is Odd
#'
#' \code{is.odd} returns a logical vector obtained by testing
#' \code{x \%\% 2 != 0} elementwise, or (when \code{n = TRUE}) the odd
#' elements of \code{x} themselves.
#'
#' @param x A numeric vector.
#' @param n Logical; if \code{TRUE}, return the odd elements of \code{x}
#'   instead of a logical vector. Default \code{FALSE}.
#' @return A logical vector the same length as \code{x}, or the odd
#'   elements of \code{x} when \code{n = TRUE}.
#' @examples
#' is.odd(1:5)
#'
#' ## TRUE FALSE TRUE FALSE TRUE
#' @export
is.odd <- function(x, n = FALSE){
  # Compute the mask once. The previous body re-entered itself through a
  # hard-coded `perlib::` namespace, which breaks when the installed
  # package name differs (the repo is "PeRLib") or the function is sourced
  # standalone.
  odd <- x %% 2 != 0
  if(!n){
    odd
  }else{
    x[odd]
  }
}
|
2145b8cf7f0dd99884b599f502fafecc8e7d9b84
|
1e36964d5de4f8e472be681bad39fa0475d91491
|
/man/SDMXDimension.Rd
|
9087a8882d4dab0425e39d9cad346b8a4f6d54c0
|
[] |
no_license
|
cran/rsdmx
|
ea299980a1e9e72c547b2cca9496b613dcf0d37f
|
d6ee966a0a94c5cfa242a58137676a512dce8762
|
refs/heads/master
| 2023-09-01T03:53:25.208357
| 2023-08-28T13:00:02
| 2023-08-28T13:30:55
| 23,386,192
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,884
|
rd
|
SDMXDimension.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class-SDMXDimension.R,
% R/SDMXDimension-methods.R
\docType{class}
\name{SDMXDimension}
\alias{SDMXDimension}
\alias{SDMXDimension-class}
\alias{SDMXDimension,SDMXDimension-method}
\title{Class "SDMXDimension"}
\usage{
SDMXDimension(xmlObj, namespaces)
}
\arguments{
\item{xmlObj}{object of class "XMLInternalDocument derived from XML package}
\item{namespaces}{object of class "data.frame" given the list of namespace URIs}
}
\value{
an object of class "SDMXDimension"
}
\description{
A basic class to handle a SDMX Dimension
}
\section{Slots}{
\describe{
\item{\code{conceptRef}}{Object of class "character" giving the dimension conceptRef (required)}
\item{\code{conceptVersion}}{Object of class "character" giving the dimension concept version}
\item{\code{conceptAgency}}{Object of class "character" giving the dimension concept agency}
\item{\code{conceptSchemeRef}}{Object of class "character" giving the dimension conceptScheme ref}
\item{\code{conceptSchemeAgency}}{Object of class "character" giving the dimension conceptScheme agency}
\item{\code{codelist}}{Object of class "character" giving the codelist ref name}
\item{\code{codelistVersion}}{Object of class "character" giving the codelist ref version}
\item{\code{codelistAgency}}{Object of class "character" giving the codelist ref agency}
\item{\code{isMeasureDimension}}{Object of class "logical" indicating if the dimension is a measure dimension. Default value is FALSE}
\item{\code{isFrequencyDimension}}{Object of class "logical" indicating if the dimension is a frequency dimension. Default value is FALSE}
\item{\code{isEntityDimension}}{Object of class "logical" indicating if the dimension is an entity dimension. Default value is FALSE}
\item{\code{isCountDimension}}{Object of class "logical" indicating if the dimension is a count dimension. Default value is FALSE}
\item{\code{isNonObservationTimeDimension}}{Object of class "logical" indicating if the dimension is a non-observation dimension. Default value is FALSE}
\item{\code{isIdentityDimension}}{Object of class "logical" indicating if the dimension is an identity dimension. Default value is FALSE}
\item{\code{crossSectionalAttachDataset}}{Object of class "logical"}
\item{\code{crossSectionalAttachGroup}}{Object of class "logical"}
\item{\code{crossSectionalAttachSection}}{Object of class "logical"}
\item{\code{crossSectionalAttachObservation}}{Object of class "logical"}
}}
\section{Warning}{
This class is not useful in itself, but all SDMX non-abstract classes will
encapsulate it as slot, when parsing an SDMX-ML document (Concepts, or
DataStructureDefinition)
}
\seealso{
\link{readSDMX}
}
\author{
Emmanuel Blondel, \email{emmanuel.blondel1@gmail.com}
}
|
ec0990a01dde572cbbd0781ae8d1858aa2537086
|
a069edc2eeb426ca950c15d1a8d2e0afa8b98239
|
/inst/examples/interactive/src/count/script.R
|
32ead43f34dfd3b8f4465b00cdd8e8d3193c623e
|
[] |
no_license
|
kmaheshkulkarni/orderly
|
62913a83e7f0a1e46072fe69e62ea97b499334af
|
cd8c6bfffb9e9851e80ae57fdf738a3ca0c8f247
|
refs/heads/master
| 2020-07-08T08:20:29.418822
| 2019-08-13T16:19:38
| 2019-08-13T16:19:38
| 203,616,293
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 271
|
r
|
script.R
|
# orderly example report: busy-wait for a configured duration, logging
# progress, then render a bar chart to mygraph.png.
# Read runtime parameters: p$time = total seconds to wait, p$poll = sleep
# between log lines (names taken from their use below).
p <- jsonlite::read_json("parameters.json")
end <- Sys.time() + p$time
while (Sys.time() < end) {
  cat(sprintf("%s waiting...\n", runif(1)))
  Sys.sleep(p$poll)
}
png("mygraph.png")
par(mar = c(15, 4, .5, .5))  # wide bottom margin for rotated labels (las = 2)
# NOTE(review): `dat` is not defined anywhere in this script -- presumably
# orderly injects it from the report's data configuration; confirm before reuse.
barplot(setNames(dat$number, dat$name), las = 2)
dev.off()
|
0af130219998b2c6d532da2e75a205d6ef9f238e
|
8f3c2fe27572f66ee3387f00e8c12bf774055f96
|
/8-9-jiance-duochong-gongxianxing.r
|
6820d791f09aa4cd96e1bc676c23a8f5aa3c33c4
|
[] |
no_license
|
papadalin/R-book-code
|
a26f18b667c26b6a0355720751457e773bcc9e42
|
6c81959762a14916bb056ca3a8db953e54ed7246
|
refs/heads/master
| 2020-04-13T13:25:39.584386
| 2015-09-01T14:00:10
| 2015-09-01T14:00:10
| 41,002,710
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 453
|
r
|
8-9-jiance-duochong-gongxianxing.r
|
# Use the VIF statistic (Variance Inflation Factor, from the car package) to
# detect multicollinearity. As a rule of thumb, sqrt(vif) > 2 indicates a
# multicollinearity problem, which inflates confidence intervals and
# distorts hypothesis-test results.
library(car)
fit <- lm(formula = Murder ~ Population + Illiteracy + Income + Frost,
data = states)
vif(fit)
sqrt(vif(fit)) > 2
# All results are FALSE, indicating no multicollinearity problem here.
|
36f6d78cc51afd1f32279df58b059edcaff6c169
|
bc6980ed8352efdce2cdd1b7965e7c1fd697fdf4
|
/man/data.frame.cgOpsum.list.Rd
|
a931fde9bb6bafe2b8dd78874b62922e305a8093
|
[] |
no_license
|
jrh05/opsumToolbox
|
12d4d2a086939a2fa64d0b1c087a605f5e4cfb6a
|
60c9a61a5a076a12fe5fe95df6cd94cf0ddd561e
|
refs/heads/master
| 2021-09-05T14:36:29.747919
| 2018-01-07T06:44:28
| 2018-01-07T06:44:28
| 115,367,468
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 357
|
rd
|
data.frame.cgOpsum.list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cgOpsumExtractorFunctions.R
\name{data.frame.cgOpsum.list}
\alias{data.frame.cgOpsum.list}
\title{data frame Method for cgOpsum list}
\usage{
data.frame.cgOpsum.list(object)
}
\arguments{
\item{object}{A cgOpsum.list object.}
}
\description{
data frame Method for cgOpsum list
}
|
7b6e7629907d2701f90c71b279ba7b3f7b3fd819
|
48c61dee0351116997721984723165617e1d3111
|
/tests/testthat/test-SHOWF_UFun.R
|
c265d43ec2cbb46fb4d1d687043976b01540df82
|
[] |
no_license
|
mlysy/realPSD
|
4747062fc78dec25750f4f4143e610980b9a0522
|
f10afcc83b08f2fe7e0823c6a6334ede38ae8fa8
|
refs/heads/master
| 2023-02-28T13:12:09.200872
| 2021-01-13T00:00:14
| 2021-01-13T00:00:14
| 189,429,001
| 0
| 1
| null | 2021-02-02T10:39:11
| 2019-05-30T14:32:24
|
R
|
UTF-8
|
R
| false
| false
| 2,325
|
r
|
test-SHOWF_UFun.R
|
source("realPSD-testfunctions.R")
context("SHOWF UFun Tests")
# Verify that the R implementation of the SHOWF UFun (from get_ufun) and its
# TMB/C++ counterpart (via TMB::MakeADFun with method = "UFun") agree over
# random parameter draws and random frequency grids.
test_that("The SHOWF UFun returned is the same in R and TMB", {
  ntest <- 50
  nphi <- sample(2:5,1)
  for(ii in 1:ntest) {
    # pick model (log or natural parameterization)
    model <- sample(c("SHOWF_log", "SHOWF_nat"), size = 1)
    ufun_r <- get_ufun(model)
    # simulate data
    N <- sample(10:20,1)
    f <- sim_f(N)
    ## Y <- sim_Y(N)
    ## fs <- sim_fs()
    phi0 <- sim_showf_phi(model) # phi = c(f0, Q, Rw, Rf, alpha)
    # create TMB model and functions
    tmod <- TMB::MakeADFun(data = list(model = model,
                                       method = "UFun",
                                       f = matrix(f)),
                           parameters = list(phi = matrix(phi0)),
                           # map = map,
                           ADreport = TRUE,
                           silent = TRUE, DLL = "realPSD_TMBExports")
    # strip names so expect_equal compares values only
    ufun_tmb <- function(phi) setNames(tmod$fn(phi), NULL)
    # check they are equal, column by column of the random parameter draws
    Phi <- replicate(nphi, sim_showf_phi(model = model))
    U_r <- apply(Phi, 2, ufun_r, f = f)
    U_tmb <- apply(Phi, 2, ufun_tmb)
    expect_equal(U_r, U_tmb)
  }
})
# Same comparison, but exercising TMB's `map` mechanism: the third parameter
# (Rw) is fixed at 0 on the TMB side, and the R side mirrors this by zeroing
# the "Rw" row of the random parameter draws.
test_that("The SHOWF UFun (with map) returned is the same in R and TMB", {
  ntest <- 20
  nphi <- sample(2:5,1)
  for(ii in 1:ntest) {
    # pick model
    model <- sample(c("SHOWF_log", "SHOWF_nat"), size = 1)
    ufun_r <- get_ufun(model)
    # simulate data
    N <- sample(10:20,1)
    f <- sim_f(N)
    ## Y <- sim_Y(N)
    ## fs <- sim_fs()
    phi0 <- sim_showf_phi(model) # phi = c(f0, Q, Rw, Rf, alpha)
    # create TMB model and functions
    # NA in the factor map tells TMB to hold that entry fixed at its
    # starting value, which is set to 0 on the next line.
    map <- list(as.factor(c(1,2,NA,4,5)))
    phi0[3] <- 0
    tmod <- TMB::MakeADFun(data = list(model = model,
                                       method = "UFun",
                                       f = matrix(f)),
                           parameters = list(phi = matrix(phi0)),
                           map = map,
                           ADreport = TRUE,
                           silent = TRUE, DLL = "realPSD_TMBExports")
    ufun_tmb <- function(phi) setNames(tmod$fn(phi), NULL)
    # check they are equal
    Phi <- replicate(nphi, sim_showf_phi(model = model))
    Phi["Rw", ] <- rep(0, nphi)
    U_r <- apply(Phi, 2, ufun_r, f = f)
    U_tmb <- apply(Phi, 2, ufun_tmb)
    expect_equal(U_r, U_tmb)
  }
})
|
a63f5befb5024738eddc05082c9c189643396a36
|
15341939deb8974a78a98ec48c6840d69b148580
|
/R/tree-utilities.R
|
23cb7ae7a41d7eccc2d43420287346adacebe121
|
[] |
no_license
|
mwpennell/arbutus
|
44d2544d4d5d669fffd5c726bdd4b05da5272164
|
781fa94901a356fd22770647d28c4223ffc4ad66
|
refs/heads/master
| 2022-10-26T08:46:34.963778
| 2022-10-05T16:31:54
| 2022-10-05T16:31:54
| 17,573,460
| 9
| 2
| null | 2022-10-05T16:31:55
| 2014-03-09T20:51:53
|
R
|
UTF-8
|
R
| false
| false
| 3,021
|
r
|
tree-utilities.R
|
## these are utility fxns for extracting information from trees
## get node heights (this is equivalent to the internal geiger fxn heights.phylo() )
##
## Returns a data.frame with one row per tip/node (rownames are node ids)
## and columns "start"/"end": the heights at which the branch subtending
## each node begins and ends, measured from the root (root at `depth`,
## tips at 0 for an ultrametric tree).
edge.height <- function(phy){
  phy <- reorder(phy, "postorder")
  n <- length(phy$tip.label)
  n.node <- phy$Nnode
  # Node depths from the root: postorder guarantees a parent's depth is
  # already set when its child edges are visited in reverse order.
  xx <- numeric(n + n.node)
  for (i in nrow(phy$edge):1) xx[phy$edge[i, 2]] <- xx[phy$edge[i, 1]] + phy$edge.length[i]
  # Root edge length, 0 when absent (scalar `if` instead of ifelse()).
  root <- if (is.null(phy$root.edge)) 0 else phy$root.edge
  depth <- max(xx)
  tt <- depth - xx  # height of each node above the deepest tip
  idx <- seq_along(tt)
  # Length of the branch subtending each node; the root node (Ntip + 1)
  # is not a child of any edge, so it gets the root edge length.
  mm <- match(idx, c(phy$edge[, 2], Ntip(phy) + 1))
  dd <- c(phy$edge.length, root)[mm]
  ss <- tt + dd
  res <- cbind(ss, tt)
  rownames(res) <- idx
  colnames(res) <- c("start", "end")
  data.frame(res)
}
## Validate that `data` is a plain (non-dimensioned) named vector whose
## names exactly match, in order, the tip labels of `phy`; stops with an
## informative error otherwise. `sort` and `warnings` are accepted for
## interface compatibility but are not used in this body.
check.tree.data <- function(phy, data, sort=FALSE, warnings=TRUE) {
  if (missing(data)) {
    stop("If a 'phylo' or 'multiPhylo' object is supplied, 'data' must be included as well")
  }
  if (!is.null(dim(data))) {
    stop("Multidimensional data")
  }
  if (!identical(names(data), phy$tip.label)) {
    stop("Trait data and species do not align")
  }
  invisible(NULL)
}
## geiger and diversitree's drop.tip function
## Drop the tips in `tip` (numeric indices or character labels) from `phy`,
## renumber the surviving tips/nodes, and collapse singleton nodes.
## NOTE(review): trim.internal, subtree, root.edge and rooted are accepted
## but not referenced in this body -- presumably kept for drop.tip interface
## compatibility; confirm before relying on them.
prune.phylo <- function(phy, tip, trim.internal = TRUE, subtree = FALSE, root.edge = 0, rooted = is.rooted(phy)){
  # Nothing to drop: return the tree unchanged.
  if(missing(tip)) return(phy)
  if (is.character(tip)) tip <- which(phy$tip.label %in% tip)
  if(!length(tip)) return(phy)
  phy=as.phylo(phy)
  Ntip <- length(phy$tip.label)
  # Keep only valid tip indices.
  tip=tip[tip%in%c(1:Ntip)]
  if(!length(tip)) return(phy)
  phy <- reorder(phy)
  NEWROOT <- ROOT <- Ntip + 1
  Nnode <- phy$Nnode
  Nedge <- nrow(phy$edge)
  wbl <- !is.null(phy$edge.length)  # whether branch lengths are present
  edge1 <- phy$edge[, 1]  # parent node of each edge
  edge2 <- phy$edge[, 2]  # child node of each edge
  # Drop the edges leading directly to removed tips ...
  keep <- !(edge2 %in% tip)
  ints <- edge2 > Ntip  # edges whose child is an internal node
  # ... then iteratively drop internal edges whose entire subtree is gone.
  repeat {
    sel <- !(edge2 %in% edge1[keep]) & ints & keep
    if (!sum(sel))
      break
    keep[sel] <- FALSE
  }
  phy2 <- phy
  phy2$edge <- phy2$edge[keep, ]
  if (wbl)
    phy2$edge.length <- phy2$edge.length[keep]
  # Terminal edges of the pruned tree: children that are no longer parents.
  TERMS <- !(phy2$edge[, 2] %in% phy2$edge[, 1])
  oldNo.ofNewTips <- phy2$edge[TERMS, 2]
  n <- length(oldNo.ofNewTips)
  idx.old <- phy2$edge[TERMS, 2]
  # Renumber surviving tips 1..n, preserving their relative order.
  phy2$edge[TERMS, 2] <- rank(phy2$edge[TERMS, 2])
  phy2$tip.label <- phy2$tip.label[-tip]
  # Keep node labels only for internal nodes that still appear as parents.
  if (!is.null(phy2$node.label))
    phy2$node.label <-
      phy2$node.label[sort(unique(phy2$edge[, 1])) - Ntip]
  phy2$Nnode <- nrow(phy2$edge) - n + 1L
  # Compact the internal node numbers to the contiguous range n+1..n+Nnode.
  i <- phy2$edge > n
  phy2$edge[i] <- match(phy2$edge[i], sort(unique(phy2$edge[i]))) + n
  storage.mode(phy2$edge) <- "integer"
  # Remove any degree-2 (singleton) nodes left by the pruning (ape helper).
  collapse.singles(phy2)
}
}
## Compare the tip labels of `phy` with the names of a data vector (or the
## rownames of a data matrix/data.frame). Returns "OK" when every tip has
## data and every datum has a tip; otherwise returns a list with the sorted
## mismatching labels on each side ($tree_not_data, $data_not_tree).
check.names.phylo <- function(phy, data, data.names = NULL) {
  if (is.null(data.names)) {
    # Derive names from the data itself: vectors carry names(),
    # matrices/data.frames carry rownames().
    data.names <- if (is.vector(data)) names(data) else rownames(data)
  }
  tips <- phy$tip.label
  tree.only <- sort(tips[!(tips %in% data.names)])
  data.only <- sort(data.names[!(data.names %in% tips)])
  if (length(tree.only) == 0 && length(data.only) == 0) {
    return("OK")
  }
  list(tree_not_data = tree.only, data_not_tree = data.only)
}
|
a5ad351a1bca3cd401cf4bd32495ada38f766a0d
|
d69e83586753456a6bb387a0122479ceb2bdc6e0
|
/R/catSpatInterp.R
|
2e5ac9ef6fb75f41082d8c8cd4e1c14cc8c67773
|
[] |
no_license
|
cran/swfscMisc
|
6f1e85dba988db1e235f3e2f58ef0b831567736d
|
3644ab0ba65c3e24f11627865d00910e4d97c82f
|
refs/heads/master
| 2022-05-13T09:41:35.823940
| 2022-04-21T12:30:02
| 2022-04-21T12:30:02
| 22,867,576
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,786
|
r
|
catSpatInterp.R
|
#' @title Categorical Spatial Interpolation
#' @description Create a raster of probability of categorical values
#' interpolated across a 2-dimensional space given a set of points where
#' each is assigned to one of several classes.
#'
#' @param data matrix or data.frame containing points and grouping designation.
#' @param x.col,y.col,group.col numbers or characters identifying which columns
#' in \code{data} are the x and y values and grouping designation.
#' @param num.grid number of grid cells for k-nearest neighbor interpolation.
#' @param knn number of nearest neighbors to consider for interpolation.
#' @param hull.buffer percent increase of convex hull to use as spatial area to
#' interpolate over.
#' @param num.cores number of cores to distribute interpolations over.
#' @param num.batches number of batches to divide grid cell interpolations into.
#'
#' @return A list containing a raster and points of buffered convex hull.
#'
#' @author Eric Archer \email{eric.archer@@noaa.gov}
#'
#' @references Adapted from code originally presented in a blog post on
#' Categorical Spatial Interpolation by Timo Grossenbacher
#' \url{https://timogrossenbacher.ch/2018/03/categorical-spatial-interpolation-with-r/}
#'
#' @examples
#' \dontrun{
#' iris.mds <- stats::cmdscale(dist(iris[, 1:4]), k = 2)
#' mds.df <- setNames(
#' cbind(iris.mds, data.frame(iris$Species)),
#' c("dim1", "dim2", "Species")
#' )
#'
#' result <- catSpatInterp(
#' mds.df, x.col = "dim1", y.col = "dim2", group.col = "Species",
#' num.grid = 300, knn = 20, hull.buffer = 0.05,
#' num.cores = 5, num.batches = NULL
#' )
#'
#' library(ggplot2)
#' ggplot(mapping = aes(dim1, dim2)) +
#' geom_raster(
#' aes(fill = Species, alpha = prob),
#' data = result$raster
#' ) +
#' geom_polygon(data = result$hull.poly, fill = NA, col = "black") +
#' geom_hline(yintercept = 0, col = "white") +
#' geom_vline(xintercept = 0, col = "white") +
#' geom_point(
#' aes(fill = Species),
#' data = mds.df,
#' col = "black",
#' shape = 21,
#' size = 4
#' ) +
#' theme(
#' axis.ticks = element_blank(),
#' axis.text = element_blank(),
#' axis.title = element_blank(),
#' legend.position = "top",
#' panel.grid = element_blank(),
#' panel.background = element_blank()
#' )
#' }
#'
#' @export
#'
catSpatInterp <- function(data, x.col = "x", y.col = "y", group.col = "group",
                          num.grid = 100, knn = 10, hull.buffer = 0.1,
                          num.cores = 1, num.batches = NULL) {
  # Resolve numeric column indices to names, then validate that all three
  # columns exist in `data`.
  if(is.numeric(x.col)) x.col <- colnames(data)[x.col]
  if(is.numeric(y.col)) y.col <- colnames(data)[y.col]
  if(is.numeric(group.col)) group.col <- colnames(data)[group.col]
  if(!all(c(x.col, y.col, group.col) %in% colnames(data))) {
    stop("'x.col', 'y.col', and 'group.col' must be column names in 'data'")
  }
  # create data frame of points: complete cases only, group as character
  # in a column literally named "group"
  df <- as.data.frame(data[, c(x.col, y.col, group.col)])
  df <- df[stats::complete.cases(df), ]
  df$group <- as.character(df[[group.col]])
  if(group.col != "group") df[[group.col]] <- NULL
  # find convex hull around points and create buffer around that;
  # the buffer distance is hull.buffer times the larger coordinate range
  pt.hull <- df[grDevices::chull(df[, c(x.col, y.col)]), c(x.col, y.col)]
  pt.hull <- rbind(pt.hull, pt.hull[1, ])  # close the polygon ring
  hull.poly <- sf::st_buffer(
    sf::st_polygon(list(as.matrix(pt.hull))),
    dist = max(
      abs(diff(range(df[[x.col]]))),
      abs(diff(range(df[[y.col]])))
    ) * hull.buffer
  )
  # create grid that covers buffered hull
  hull.grid <- sf::st_make_grid(hull.poly, n = num.grid)
  # transform points to data.frame containing sf coordinates
  pts <- sf::st_as_sf(df, coords = c(x.col, y.col))
  train.df <- stats::setNames(
    cbind(
      pts$group,
      as.data.frame(sf::st_coordinates(pts))[, 1:2]
    ),
    c("group", x.col, y.col)
  )
  # function to compute k-nearest neighbors at given grid points;
  # returns coordinates plus fitted group and probability of the most
  # likely group at each point
  .computeGrid <- function(grid, train.df, knn) {
    result <- stats::setNames(
      as.data.frame(sf::st_coordinates(grid))[, 1:2],
      c(x.col, y.col)
    )
    group.kknn <- kknn::kknn(
      group ~ ., train = train.df, test = result,
      kernel = "gaussian", k = knn
    )
    result$group <- stats::fitted(group.kknn)
    result$prob = apply(group.kknn$prob, 1, max)
    result
  }
  # distribute k-nearest neighbor probability calculation among batches and cores;
  # setupClusters() is a package helper -- per the is.null(cl) branch below it
  # returns NULL when no cluster is needed
  cl <- setupClusters(num.cores)
  raster <- tryCatch({
    if(is.null(cl)) { # Don't parallelize if num.cores == 1
      .computeGrid(hull.grid, train.df, knn)
    } else {
      parallel::clusterEvalQ(cl, require(swfscMisc))
      parallel::clusterExport(cl, c("hull.grid", "train.df", "knn"), environment())
      n <- length(hull.grid)
      if(is.null(num.batches)) num.batches <- ceiling(sqrt(n) / num.cores)
      # split grid cells into contiguous start/end index ranges, one per batch
      start.i <- seq(1, n, ceiling(n / num.batches))
      raster.list <- parallel::parApply(
        cl, cbind(start = start.i, end = c(start.i[-1] - 1, n)), 1,
        function(i, full.grid, train.df, knn) {
          .computeGrid(full.grid[i["start"]:i["end"]], train.df, knn)
        },
        full.grid = hull.grid, train.df = train.df, knn = knn
      )
      do.call(rbind, raster.list)
    }
  }, finally = if(!is.null(cl)) parallel::stopCluster(cl) else NULL)
  # restore the caller's group column name
  raster[[group.col]] <- as.character(raster$group)
  if(group.col != "group") raster$group <- NULL
  raster <- sf::st_as_sf(raster, coords = c(x.col, y.col), remove = F)
  # return raster clipped by hull polygon and points of hull polygon
  list(
    raster = raster[hull.poly, ],
    hull.poly = stats::setNames(
      as.data.frame(as.matrix(hull.poly)),
      c(x.col, y.col)
    )
  )
}
|
ee5fb82626a8a8c021ac23f4dff80908f2053b1b
|
a8c39687b8088d171f376d4d9b23561e08a56ce3
|
/R code for analysis_Yuhueng/scatter3d.R
|
9c365b43ba71cadb2877484676201b4f6d280b40
|
[] |
no_license
|
Zongmin-Liu/MSI2_HyperTRIBE_codes
|
ce5bbfd4e71b299fcad500a3e3e6e28d7091fd0c
|
735986cef8e6b32eb8f02efb3e46029aee36273b
|
refs/heads/master
| 2022-04-10T01:11:58.776631
| 2020-03-11T03:27:35
| 2020-03-11T03:27:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,011
|
r
|
scatter3d.R
|
library(openxlsx)
library(scatterplot3d)
# 3D scatter of editing rates for three Hyper-dADAR replicates; each axis is
# the alternate-allele fraction (alt / (ref + alt)) of one replicate.
df <- read.xlsx( "mouse_lsk_snp_counts_dedupped.xlsx" )
# Keep only sites with at least 25 reads (ref + alt) in all three replicates.
rowmask <- ( df$`Sample_Hyper-dADAR_12617_IGO_08269_2.ref.count` + df$`Sample_Hyper-dADAR_12617_IGO_08269_2.alt.count` >= 25 ) &
( df$`Sample_Hyper-dADAR_121417_IGO_08269_5.ref.count` + df$`Sample_Hyper-dADAR_121417_IGO_08269_5.alt.count` >= 25 ) &
( df$`Sample_Hyper-dADAR_121817_IGO_08269_8.ref.count` + df$`Sample_Hyper-dADAR_121817_IGO_08269_8.alt.count` >= 25 )
num.sites <- sum( rowmask )
filler <- rep( 0, num.sites )  # zeros used to project points onto the box walls
# Alt-allele fractions per replicate (computed on all rows, masked when plotted).
x <- df$`Sample_Hyper-dADAR_12617_IGO_08269_2.alt.count` / ( df$`Sample_Hyper-dADAR_12617_IGO_08269_2.ref.count` + df$`Sample_Hyper-dADAR_12617_IGO_08269_2.alt.count` )
y <- df$`Sample_Hyper-dADAR_121417_IGO_08269_5.alt.count` / ( df$`Sample_Hyper-dADAR_121417_IGO_08269_5.ref.count` + df$`Sample_Hyper-dADAR_121417_IGO_08269_5.alt.count` )
z <- df$`Sample_Hyper-dADAR_121817_IGO_08269_8.alt.count` / ( df$`Sample_Hyper-dADAR_121817_IGO_08269_8.ref.count` + df$`Sample_Hyper-dADAR_121817_IGO_08269_8.alt.count` )
# Pairwise correlations between replicates at the masked sites.
cor.xy <- cor( x[ rowmask ], y[ rowmask ] )
cor.xz <- cor( x[ rowmask ], z[ rowmask ] )
cor.yz <- cor( y[ rowmask ], z[ rowmask ] )
sp3d <- scatterplot3d( x[ rowmask ], z[ rowmask ], y[ rowmask ],
xlab = "dADAR-1", ylab = "dADAR-3", zlab = "dADAR-2",
grid = F, angle = 45, pch = 3, color = 'lightgrey' )
# Wall projections: fix one coordinate at 0 so each pairwise scatter shows
# on a face of the plotting box.
sp3d$points3d( filler, z[ rowmask ], y[ rowmask ], pch = 19, col = 'grey60')
sp3d$points3d( x[ rowmask ], z[ rowmask ], filler, pch = 19, col = 'grey45' )
sp3d$points3d( x[ rowmask ], filler, y[ rowmask ], pch = 19, col = 'grey30')
# Annotate each wall with its correlation coefficient (r12, r13, r23).
text( sp3d$xyz.convert( .8, 1, .075 ), labels = substitute( paste( r[13], " = ", cor ), list( cor = sprintf( "%.3f", cor.xz ) ) ) )
text( sp3d$xyz.convert( .55, 0, .875 ), labels = substitute( paste( r[12], " = ", cor ), list( cor = sprintf( "%.3f", cor.xy ) ) ) )
text( sp3d$xyz.convert( .15, 1, .8 ), labels = substitute( paste( r[23], " = ", cor ), list( cor = sprintf( "%.3f", cor.yz ) ) ) )
|
3da7b90fa6d7ed03ad5a271471aac2880e274bfc
|
72afe86e08b02ee3726ac303b4b81801f1ea5538
|
/App/global.r
|
7b2104c896df43a2aaee6ad1cf0e9d27a5a01a25
|
[] |
no_license
|
trevorbix/AutoDinwiddie
|
9a33168b8d6ff3ec21d40ceda8f290ff043159a0
|
feabea9195c80e1271081d95228ca335d1bf8e7e
|
refs/heads/master
| 2020-03-28T13:23:44.712410
| 2018-09-11T23:03:08
| 2018-09-11T23:03:08
| 148,391,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 588
|
r
|
global.r
|
suppressWarnings(library(stringr))
suppressWarnings(library(DMwR))
suppressWarnings(library(shiny))
suppressWarnings(library(shinythemes))
suppressWarnings(library(knitr))
suppressWarnings(library(kableExtra))
suppressWarnings(library(Matrix))
suppressWarnings(library(matrixStats))
suppressWarnings(library(DT))
suppressWarnings(library(tidyverse))
require(ggplot2)
require(lpSolve)
require(stringr)
## Render `num` inputs by calling `FUN` with ids "<id>1".."<id>num"
## (label = NULL, value = TRUE), forwarding any extra arguments, and
## return the rendered inputs as a character vector.
shinyInput <- function(FUN,id,num,...) {
  rendered <- character(num)
  k <- 1L
  while (k <= num) {
    rendered[k] <- as.character(FUN(paste0(id, k), label = NULL, value = TRUE, ...))
    k <- k + 1L
  }
  rendered
}
|
4d2ae97abef1dc9b575732445c510e4c525f93d0
|
eca03552d6cacef5ebdf689ffb76549bdcfe24e2
|
/euler22.R
|
ab981074d45e39be15b8bc8943999eaa83eed01a
|
[] |
no_license
|
liuminzhao/eulerproject
|
792cd4a4df255b774c40a7b6b8a4e4f3b4335d0e
|
844e5bc1909c35d5ad398fbbe8df16d7366caf47
|
refs/heads/master
| 2021-01-01T06:26:50.206519
| 2019-03-21T04:40:16
| 2019-03-21T04:40:16
| 5,541,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 386
|
r
|
euler22.R
|
dat <- read.table('names.txt', header=FALSE, sep=',')
y <- as.vector(dat)
r <- rank(y)
x <- c('COLIN')
# Position of a single capital letter in the alphabet (A = 1, ..., Z = 26).
# Retained for backward compatibility; score() no longer needs it.
subscore <- function(x){
  which(LETTERS==x)
}

# Alphabetical score of one name: the sum of its letters' positions,
# e.g. score("COLIN") == 3 + 15 + 12 + 9 + 14 == 53.
score <- function(x){
  # match() against LETTERS scores every character in one vectorized call.
  sum(match(unlist(strsplit(x, "")), LETTERS))
}

# Project Euler 22: sum of score(name) * alphabetical rank(name) over all
# names in `y`.
e22 <- function(y){
  y <- as.character(y)
  # BUG FIX: the original used apply(as.matrix(y), 2, score), which passed
  # the entire column of names to score() in one call, yielding a single
  # grand total that was then recycled across rank(y). Each name must be
  # scored individually before weighting by its rank.
  y.score <- vapply(y, score, numeric(1), USE.NAMES = FALSE)
  sum(y.score * rank(y))
}
e22(y)
|
52300f9f77ed3dc42ab98b58a54d8dbeb6305b22
|
5f71d135dff92755cdb1fed882f7a5450b834abc
|
/loaddata.R
|
dfb4c321e40564b9514aaf6f7f51a68efcde511d
|
[] |
no_license
|
schweitzerb/RepData_PeerAssessment1
|
0b80d333219d0967c3e7afa4c23ffd6ed560e8c4
|
c3094edf9d22029e128304520d2401684895236f
|
refs/heads/master
| 2021-01-21T18:38:49.125798
| 2014-11-10T22:29:24
| 2014-11-10T22:29:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,488
|
r
|
loaddata.R
|
### Libraries
library(plyr)
library(ggplot2)
### Read Data (activity.csv must be in the working directory)
stepsData <- read.csv("activity.csv")
### format variables
stepsData$date <- as.POSIXct(as.character(stepsData$date),format = "%Y-%m-%d")
# Zero-pad intervals to 4 digits ("5" -> "0005") so they sort correctly.
stepsData$interval <- sprintf("%04d",stepsData$interval)
##stepsData$interval <- format(strptime(stepsData$interval, format="%H%M"), format = "%H:%M")
stepsData$interval <- as.ordered(stepsData$interval)
### Basic stats: per-day total/mean/median steps
stepsByDay <- ddply(stepsData,"date",summarise,total=sum(steps,na.rm=T),
                    mean=mean(steps,na.rm=T),median=median(steps,na.rm=T))
meansteps <- mean(stepsByDay$total)
medianSteps <-median(stepsByDay$total)
# Per-5-minute-interval stats across all days.
stepsByInterval <- ddply(stepsData,"interval",summarise,total=sum(steps,na.rm=T),
                         mean=mean(steps,na.rm=T),median=median(steps,na.rm=T))
# Interval with the highest average step count.
maxSteps <- stepsByInterval$interval[which.max(stepsByInterval$mean)]
### Basic Plots
hist(stepsByDay$total,breaks=10,main="Histogram of the Total Number of Steps per Day",
     ylab="Number of Days",xlab="Total Steps")
# The x rescaling converts the ordered-interval index (1..288) to hours of day.
plot(as.numeric(stepsByInterval$interval)/288*100/4.16667,stepsByInterval$mean,type="l",
     main="Mean Steps Per Interval", ylab="Number of Steps",xlab="Interval (by Hour of Day)")
# Missing values: impute each NA step count with the mean of its interval
# (column 3 of stepsByInterval, matched by interval id).
totalMissing <- sum(is.na(stepsData$steps))
missingIndex <- which(is.na(stepsData$steps))
stepsImputed <- stepsData
for (i in 1:length(missingIndex)){
  matchInterval <- stepsImputed[missingIndex[i],3]
  stepsImputed[missingIndex[i],1] <- stepsByInterval[stepsByInterval$interval
                                                     ==matchInterval,3]
}
### stats with imputed data
stepsByDay_imputed <- ddply(stepsImputed,"date",summarise,total=sum(steps,na.rm=T),
                            mean=mean(steps,na.rm=T),median=median(steps,na.rm=T))
meanstepsI <- mean(stepsByDay_imputed$total)
medianStepsI <-median(stepsByDay_imputed$total)
### weekday/weekend: collapse day names into a two-level factor
stepsImputed$day <- weekdays(stepsImputed$date)
stepsImputed$day <- with(stepsImputed, replace(day,day%in%c("Saturday","Sunday"),"Weekend"))
stepsImputed$day <- with(stepsImputed, replace(day,!(day%in%"Weekend"),"Weekday"))
stepsImputed$day <- as.factor(stepsImputed$day)
# Interval stats split by weekday/weekend, plotted as faceted panels.
ByWeekday <- ddply(stepsImputed,c("interval","day"),summarise,total=sum(steps,na.rm=T),
                   mean=mean(steps,na.rm=T),median=median(steps,na.rm=T))
panels <- qplot(as.numeric(interval)/288*100/4.16667,mean,data=ByWeekday,facets=day~.,color=day)+
  geom_line(aes(group=day))
print(panels)
|
e02a1a4f2a67b311393cff12fda0d1f60c7a22a8
|
89b872fd80ca1953ff3239f60234fbb95d5e2ca6
|
/server.R
|
7092aa8b875db2f245fb68959d5814dda2170ad9
|
[] |
no_license
|
eefermat/Zoek_Data_Analysis
|
45ef00f9ba2cd2917270d0b93c1d0d7e63cb5969
|
a2ec0ef879b314b6d3a1d333b35f14a76455ccf5
|
refs/heads/master
| 2020-12-29T02:19:10.823276
| 2017-04-23T15:39:13
| 2017-04-23T15:39:13
| 53,297,042
| 0
| 0
| null | 2017-04-27T02:30:19
| 2016-03-07T04:59:48
|
R
|
UTF-8
|
R
| false
| false
| 72,147
|
r
|
server.R
|
library(shiny)
library(ggplot2)
library(dplyr)
library(scales)
# library() (not require()) so a missing package fails loudly at startup
# instead of returning FALSE and crashing later with an obscure error.
library(ggmap)
library(XLConnect)
library(magrittr)
library(jsonlite)

# NOTE(review): setwd() inside a deployed Shiny app is fragile — data paths
# should be relative to the app directory. Kept to avoid breaking the current
# local workflow.
setwd("~/Dropbox/App")

### Pre-computed data sets ----------------------------------------------
member <- readRDS("member_data")
#member_birth<-readRDS("member_birth")
login <- readRDS("login")
orders_member <- readRDS("orders_member")
orders <- readRDS("orders")
branch <- readRDS("branch")
sales <- readRDS("sales")
user_cat <- readRDS("user_cat")
user_pt <- readRDS("user_pt")
user_search <- readRDS("user_search")
# first_shopping<-readRDS("first_shopping")
userlog_member <- readRDS("userlog_member")
MAU <- readRDS("MAU_all")
MAU_IOS <- readRDS("MAU_IOS")
MAU_ANDROID <- readRDS("MAU_ANDROID")
conversion_stats <- readRDS("conversion_stats")

# Argument spelled out in full ("stringsAsFactors", FALSE) — the original
# "stringsAsFactor=F" relied on partial argument matching and the
# reassignable F shortcut, both error-prone.
hotspot <- read.csv("hotspot.csv", stringsAsFactors = FALSE, header = TRUE,
                    fileEncoding = "big5")

# Attach human-readable branch names to the BD ownership table.
branch_bd <- fromJSON("branch_bd.json")
branch_bd <- merge(branch_bd, select(branch, bid, branchname), by = "bid")
branch_bd %<>% select(bd_name, branchname)

sales_daily_setting <- fromJSON("sales_daily_setting.json")
sales_daily_setting$cd <- as.Date(sales_daily_setting$lastmodifiedtime)
sales_daily_setting <- merge(sales_daily_setting, select(branch, bid, branchname), by = "bid")

# Shiny Main Program
shinyServer(function(input, output) {
#===============Data================
output$merchant_selector <- renderUI({
selectInput("merchant_com", "商家名稱:", as.list(dataset_branch_com()))
})
output$hr_selector <- renderUI({
selectInput("hr_com", "Duration(hr):", as.list(c('All',dataset_hr_com())))
})
dataset_branch_com <- reactive({
branch%>%filter(area_detail==input$area_com&category=="休憩")%$%branchname
})
dataset_hr_com <- reactive({
orders%>%filter(branchname==input$merchant_com)%$%duration
})
# Member Data
dataset_member <- reactive({
{ week_start<-(floor(input$dates[1]-as.Date("2015-11-02"))/7)+1
week_end<-(floor(input$dates[2]-as.Date("2015-11-02"))/7)+1
if(input$y=="Gender"){
member%>%filter((week_create>=week_start)&(week_create<=week_end)&(Register_Type!="GUEST"))%>%group_by(week_create,Gender)%>%dplyr::summarise(n=n())%>%group_by(Gender)%>%mutate(Cumul=cumsum(n))
}
else if(input$y=="Operating_System"){
member%>%filter((week_create>=week_start)&(week_create<=week_end)&(Register_Type!="GUEST"))%>%group_by(week_create,Operating_System)%>%dplyr::summarise(n=n())%>%group_by(Operating_System)%>%mutate(Cumul = cumsum(n))
}
else if(input$y=="Register_Type"){
member%>%filter((week_create>=week_start)&(week_create<=week_end)&(Register_Type!="GUEST"))%>%group_by(week_create,Register_Type)%>%dplyr::summarise(n=n())%>%group_by(Register_Type)%>%mutate(Cumul = cumsum(n))
}
else if(input$y=="Sign_Up"){
member%>%filter((week_create>=week_start)&(week_create<=week_end))%>%group_by(week_create,Sign_Up)%>%dplyr::summarise(n=n())%>%group_by(Sign_Up)%>%mutate(Cumul = cumsum(n))
}
else if(input$y=="Total_Member"){
member%>%filter((week_create>=week_start)&(week_create<=week_end)&(Register_Type!="GUEST"))%>%group_by(week_create)%>%dplyr::summarise(n=n())%>%mutate(Cumul = cumsum(n))
}
}
})
# Login Data
dataset_login <- reactive({
{ week_start<-(floor(input$dates_L[1]-as.Date("2015-11-02"))/7)+1
week_end<-(floor(input$dates_L[2]-as.Date("2015-11-02"))/7)+1
if(input$y_L=="Day_Night"){
login%>%filter((Create_Time>=week_start)&(Create_Time<=week_end)&eventname=="product/home")%>%group_by(Create_Time,Day_Night)%>%dplyr::summarise(n=n())%>%group_by(Day_Night)%>%mutate(Cumul=cumsum(n))
}
else if(input$y_L=="Mon_to_Sun"){
login%>%filter((Create_Time>=week_start)&(Create_Time<=week_end)&eventname=="product/home")%>%group_by(Create_Time,Mon_to_Sun)%>%dplyr::summarise(n=n())%>%group_by(Mon_to_Sun)%>%mutate(Cumul = cumsum(n))
}
else if(input$y_L=="Weekday_Weekend"){
login%>%filter((Create_Time>=week_start)&(Create_Time<=week_end)&eventname=="product/home")%>%group_by(Create_Time,Weekday_Weekend)%>%dplyr::summarise(n=n())%>%group_by(Weekday_Weekend)%>%mutate(Cumul = cumsum(n))
}
}
})
# Orders Data
dataset_orders_member <- reactive({
{
week_start<-(floor(input$dates_O[1]-as.Date("2015-11-02"))/7)+1
week_end<-(floor(input$dates_O[2]-as.Date("2015-11-02"))/7)+1
if(input$y_O=="Total_Order"){
temp<-conversion_stats%>%filter((create_week>=week_start)&(create_week<=week_end))
temp<-aggregate(temp$Counts,by=list(temp$create_week),FUN=sum)
colnames(temp)<-c("create_week","Orders")
temp
}
else{
conversion_stats%>%filter((create_week>=week_start)&(create_week<=week_end))
}
}
})
# Category data
dataset_category <- reactive({
if (input$Category_sel=="all"){
orders
} else {
orders%>%filter(category==input$Category_sel)
}
})
# Merchant Data
dataset_Merchant_Top <- reactive({
temp<-orders%>%filter((cd>=input$dates_M[1])&(cd<=input$dates_M[2])&(status_name=="Paid"))%>%group_by(branchname)%>%dplyr::summarise(n=n())
temp<-merge(temp,select(branch,branchname,area_detail),by="branchname")
temp<-temp[order(-temp$n),]
colnames(temp)<-c("商家","購買次數","地區")
temp
})
# Merchant stats Data
dataset_Merchant_stats <- reactive({
if (input$Category_sel_MS=="all"){
if(input$Weekday_MS=="all"){
if(input$Day_MS=="all"){
orders%>%filter(cd>=input$dates_MS[1]&cd<=input$dates_MS[2]&status_name=="Paid")
}else{
orders%>%filter(cd>=input$dates_MS[1]&cd<=input$dates_MS[2]&DN==input$Day_MS&status_name=="Paid")
}
}else{
if(input$Day_MS=="all"){
orders%>%filter(cd>=input$dates_MS[1]&cd<=input$dates_MS[2]&Weekday==input$Weekday_MS&status_name=="Paid")
}else{
orders%>%filter(cd>=input$dates_MS[1]&cd<=input$dates_MS[2]&Weekday==input$Weekday_MS&DN==input$Day_MS&status_name=="Paid")
}
}
}else{
if(input$Weekday_MS=="all"){
if(input$Day_MS=="all"){
orders%>%filter(cd>=input$dates_MS[1]&cd<=input$dates_MS[2]&category==input$Category_sel_MS&status_name=="Paid")
}else{
orders%>%filter(cd>=input$dates_MS[1]&cd<=input$dates_MS[2]&DN==input$Day_MS&category==input$Category_sel_MS&status_name=="Paid")
}
}else{
if(input$Day_MS=="all"){
orders%>%filter(cd>=input$dates_MS[1]&cd<=input$dates_MS[2]&Weekday==input$Weekday_MS&category==input$Category_sel_MS&status_name=="Paid")
}else{
orders%>%filter(cd>=input$dates_MS[1]&cd<=input$dates_MS[2]&Weekday==input$Weekday_MS&DN==input$Day_MS&category==input$Category_sel_MS&status_name=="Paid")
}
}
}
})
# Average order size (amount + bonus) for branches inside hotspot #2.
# NOTE(review): the planar lat/lng Euclidean distance is divided by
# ~9.009e-6 (degrees per metre at the equator) and compared against
# hotspot$diameter, i.e. the "diameter" column is effectively used as a
# radius and lat/lng degrees are treated as equal length — confirm both
# are intended.
output$local_average<-renderText({
  temp_orders<-dataset_Merchant_stats()
  # Branches whose distance from the hotspot centre is within the threshold.
  temp<-branch%>%filter(((((lat-hotspot$lat[2])^2+(lng-hotspot$lng[2])^2)^0.5)/0.00000900900901)<hotspot$diameter[2])
  # Keep only the filtered orders placed at those branches.
  temp<-temp_orders[temp_orders$branchname%in%temp$branchname,]
  paste("Average",mean(temp$amount+temp$bonus),sep=": ")
})
dataset_supply_demand_weekday<-reactive({
if (input$Type_M=="all"){
supply_temp<-sales%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekday")
demand_temp<-orders%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekday"&status_name=="Paid")
} else if(input$Type_M=="休憩"){
supply_temp<-sales%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekday"&(type=="摩鐵"|type=="商旅"))
demand_temp<-orders%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekday"&status_name=="Paid"&(type=="摩鐵"|type=="商旅"))
} else{
supply_temp<-sales%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekday"&type==input$Type_M)
demand_temp<-orders%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekday"&status_name=="Paid"&type==input$Type_M)
}
supply_demand_table<-data.frame(matrix(data = 0,nrow = nrow(hotspot),ncol = 4))
colnames(supply_demand_table)<-c("Location","Sales","Orders","%")
for (i in 1:nrow(hotspot)){
supply_demand_table[i,1]<-hotspot$location[i]
temp<-supply_temp%>%filter(((((lat-hotspot$lat[i])^2+(lng-hotspot$lng[i])^2)^0.5)/0.00000900900901)<hotspot$diameter[i])
supply_demand_table[i,2]<-nrow(temp)
supply_demand_table[i,3]<-nrow(demand_temp[demand_temp$branchname%in%unique(temp$branchname),])
supply_demand_table[i,4]<-supply_demand_table[i,3]/supply_demand_table[i,2]*100
}
supply_demand_table
})
dataset_supply_demand_weekend<-reactive({
if (input$Type_M=="all"){
supply_temp<-sales%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekend")
demand_temp<-orders%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekend"&status_name=="Paid")
} else if(input$Type_M=="休憩"){
supply_temp<-sales%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekend"&(type=="摩鐵"|type=="商旅"))
demand_temp<-orders%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekend"&status_name=="Paid"&(type=="摩鐵"|type=="商旅"))
} else{
supply_temp<-sales%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekend"&type==input$Type_M)
demand_temp<-orders%>%filter(cd>=input$dates_M[1]&cd<=input$dates_M[2]&Weekday=="Weekend"&status_name=="Paid"&type==input$Type_M)
}
supply_demand_table<-data.frame(matrix(data = 0,nrow = nrow(hotspot),ncol = 4))
colnames(supply_demand_table)<-c("Location","Sales","Orders","%")
for (i in 1:nrow(hotspot)){
supply_demand_table[i,1]<-hotspot$location[i]
temp<-supply_temp%>%filter(((((lat-hotspot$lat[i])^2+(lng-hotspot$lng[i])^2)^0.5)/0.00000900900901)<hotspot$diameter[i])
supply_demand_table[i,2]<-nrow(temp)
supply_demand_table[i,3]<-nrow(demand_temp[demand_temp$branchname%in%unique(temp$branchname),])
supply_demand_table[i,4]<-supply_demand_table[i,3]/supply_demand_table[i,2]*100
}
supply_demand_table
})
dataset_Merchant_Line <- reactive({
branch%>%group_by(week)%>%dplyr::summarise(n=n())%>%mutate(Cumul = cumsum(n))
})
# Sales Data
dataset_Sales <- reactive({
temp<-sales%>%filter((cd>=input$dates_M[1])&(cd<=input$dates_M[2]))%$%as.data.frame.matrix(table(Weekday,DN))
temp$Day%<>%+temp$Day_Night
temp$Night%<>%+temp$Day_Night
select(temp,Day,Night)
})
dataset_Orders_sub <- reactive({
orders%>%filter((cd>=input$dates_M[1])&(cd<=input$dates_M[2])&status_name=="Paid")%$%as.data.frame.matrix(table(Weekday,DN))
})
# Behavior Data
dataset_cat<-reactive({
user_cat%>%filter((createtime>=input$dates_B[1])&(createtime<=input$dates_B[2]))
})
dataset_view<-reactive({
temp<-user_cat%>%filter((createtime>=input$dates_B[1])&(createtime<=input$dates_B[2]))
mean(as.integer(select(temp,start)[,1]))/10+1
})
dataset_pt<-reactive({
user_pt%>%filter((createtime>=input$dates_B[1])&(createtime<=input$dates_B[2]))
})
# dataset_pt_QK<-reactive({
# temp<-dataset_pt()%>%filter(category=="休憩"&area=="台北")%>%group_by(branchname)%>%dplyr::summarise(n=n())
# names(temp)<-c("branchname","Views")
# temp<-merge(temp,select(branch,branchname),by="branchname")
# temp$intention_count<-0
# temp$order_count<-0
#
# for (i in 1:length(temp$branchname)){
# temp$order_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Paid")))
# temp$intention_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Intention")))
# }
# temp$intention_percentage<-temp$intention_count/temp$Views
# temp$order_percentage<-temp$order_count/temp$Views
#
# temp<-merge(temp,branch_bd,by="branchname")
# names(temp)<-c("Merchants","Views","有意圖","銷售","有意圖比例","銷售比例","負責BD")
# temp[order(-temp$Views),]
# })
# Per-branch product-page view counts over the selected browsing window,
# joined with intention / paid order counts and conversion ratios; the
# result is sorted by views, descending.
dataset_pt_mix<-reactive({
  temp<-dataset_pt()%>%group_by(branchname)%>%dplyr::summarise(n=n())
  names(temp)<-c("branchname","Views")
  temp<-merge(temp,select(branch,bid,branchname,area,category),by="branchname")
  temp$intention_count<-0
  temp$order_count<-0

  # NOTE(review): O(branches x orders) — every iteration rescans the full
  # order_browsing() table twice; a grouped count + merge would be far
  # cheaper. Also prefer seq_along() over 1:length() for empty inputs.
  for (i in 1:length(temp$branchname)){
    temp$order_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Paid")))
    temp$intention_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Intention")))
  }
  # Conversion ratios relative to view counts.
  temp$intention_percentage<-temp$intention_count/temp$Views
  temp$order_percentage<-temp$order_count/temp$Views

  temp<-merge(temp,branch_bd,by="branchname")
  # NOTE(review): "cateogry" below is misspelled, but downstream code may
  # depend on that exact column name, so it is left unchanged here.
  names(temp)<-c("Merchants","Views","bid","area","cateogry","有意圖","銷售","有意圖比例","銷售比例","負責BD")
  temp[order(-temp$Views),]
})
# dataset_pt_massage<-reactive({
# temp<-dataset_pt()%>%filter(category=="按摩"&area=="台北")%>%group_by(branchname)%>%dplyr::summarise(n=n())
# names(temp)<-c("branchname","Views")
# temp<-merge(temp,select(branch,branchname),by="branchname")
# temp$intention_count<-0
# temp$order_count<-0
#
# for (i in 1:length(temp$branchname)){
# temp$order_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Paid")))
# temp$intention_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Intention")))
# }
# temp$intention_percentage<-temp$intention_count/temp$Views
# temp$order_percentage<-temp$order_count/temp$Views
#
# temp<-merge(temp,branch_bd,by="branchname")
# names(temp)<-c("Merchants","Views","有意圖","銷售","有意圖比例","銷售比例","負責BD")
# temp[order(-temp$Views),]
#
# })
# dataset_pt_late<-reactive({
# temp<-dataset_pt()%>%filter(category=="晚鳥過夜"&area=="台北")%>%group_by(branchname)%>%dplyr::summarise(n=n())
# names(temp)<-c("branchname","Views")
# temp<-merge(temp,select(branch,branchname),by="branchname")
# temp$intention_count<-0
# temp$order_count<-0
#
# for (i in 1:length(temp$branchname)){
# temp$order_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Paid")))
# temp$intention_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Intention")))
# }
# temp$intention_percentage<-temp$intention_count/temp$Views
# temp$order_percentage<-temp$order_count/temp$Views
#
# temp<-merge(temp,branch_bd,by="branchname")
# names(temp)<-c("Merchants","Views","有意圖","銷售","有意圖比例","銷售比例","負責BD")
# temp[order(-temp$Views),]
#
# })
# dataset_pt_manicure<-reactive({
# temp<-dataset_pt()%>%filter(category=="美甲美睫"&area=="台北")%>%group_by(branchname)%>%dplyr::summarise(n=n())
# names(temp)<-c("branchname","Views")
# temp<-merge(temp,select(branch,branchname),by="branchname")
# temp$intention_count<-0
# temp$order_count<-0
#
# for (i in 1:length(temp$branchname)){
# temp$order_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Paid")))
# temp$intention_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Intention")))
# }
# temp$intention_percentage<-temp$intention_count/temp$Views
# temp$order_percentage<-temp$order_count/temp$Views
#
# temp<-merge(temp,branch_bd,by="branchname")
# names(temp)<-c("Merchants","Views","有意圖","銷售","有意圖比例","銷售比例","負責BD")
# temp[order(-temp$Views),]
# })
# dataset_pt_bar<-reactive({
# temp<-dataset_pt()%>%filter(category=="主題酒吧"&area=="台北")%>%group_by(branchname)%>%dplyr::summarise(n=n())
# names(temp)<-c("branchname","Views")
# temp<-merge(temp,select(branch,branchname),by="branchname")
# temp$intention_count<-0
# temp$order_count<-0
#
# for (i in 1:length(temp$branchname)){
# temp$order_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Paid")))
# temp$intention_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Intention")))
# }
# temp$intention_percentage<-temp$intention_count/temp$Views
# temp$order_percentage<-temp$order_count/temp$Views
#
# temp<-merge(temp,branch_bd,by="branchname")
# names(temp)<-c("Merchants","Views","有意圖","銷售","有意圖比例","銷售比例","負責BD")
# temp[order(-temp$Views),]
# })
#
# dataset_pt_QK_Taichung<-reactive({
# temp<-dataset_pt()%>%filter(category=="休憩"&area=="台中")%>%group_by(branchname)%>%dplyr::summarise(n=n())
# names(temp)<-c("branchname","Views")
# temp<-merge(temp,select(branch,branchname),by="branchname")
# temp$intention_count<-0
# temp$order_count<-0
#
# for (i in 1:length(temp$branchname)){
# temp$order_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Paid")))
# temp$intention_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Intention")))
# }
# temp$intention_percentage<-temp$intention_count/temp$Views
# temp$order_percentage<-temp$order_count/temp$Views
#
# temp<-merge(temp,branch_bd,by="branchname")
# names(temp)<-c("Merchants","Views","有意圖","銷售","有意圖比例","銷售比例","負責BD")
# temp[order(-temp$Views),]
# })
#
# dataset_pt_late_Taichung<-reactive({
# temp<-dataset_pt()%>%filter(category=="晚鳥過夜"&area=="台中")%>%group_by(branchname)%>%dplyr::summarise(n=n())
# names(temp)<-c("branchname","Views")
# temp<-merge(temp,select(branch,branchname),by="branchname")
# temp$intention_count<-0
# temp$order_count<-0
#
# for (i in 1:length(temp$branchname)){
# temp$order_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Paid")))
# temp$intention_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Intention")))
# }
# temp$intention_percentage<-temp$intention_count/temp$Views
# temp$order_percentage<-temp$order_count/temp$Views
# temp<-merge(temp,branch_bd,by="branchname")
# names(temp)<-c("Merchants","Views","有意圖","銷售","有意圖比例","銷售比例","負責BD")
# temp[order(-temp$Views),]
#
# })
# Search events within the behaviour-analysis date range selected in the UI.
dataset_search<-reactive({
  user_search%>%filter((createtime>=input$dates_B[1])&(createtime<=input$dates_B[2]))
})
# Orders in the same date range (no status filter — all statuses included);
# used by the browsing tables to compute view-to-order conversion.
order_browsing<-reactive({
  orders%>%filter((cd>=input$dates_B[1])&(cd<=input$dates_B[2]))
})
#orders time
dataset_orders_time <- reactive({
temp_order_time<-filter(orders,(cd>=input$dates_order_time[1])&(cd<=input$dates_order_time[2])&status_name=="Paid")
if(input$order_weekday=="ALL"){
if(input$order_cat=="ALL"){
temp_order_time
}else{
temp_order_time%>%filter(type==input$order_cat)
}
} else{
temp_order_time%<>%filter(Weekday==input$order_weekday)
if(input$order_cat=="ALL"){
temp_order_time
}else{
temp_order_time%>%filter(type==input$order_cat)
}
}
})
#map data
dataset_GPS_supply <- reactive({
if(input$Weekday_DS=="all"){
if (input$Type_DS=="all"){
sales%>%filter((cd>=input$dates_DS[1])&(cd<=input$dates_DS[2]))
} else{
sales%>%filter((cd>=input$dates_DS[1])&(cd<=input$dates_DS[2])&type==input$Type_DS)
}
} else{
if (input$Type_DS=="all"){
sales%>%filter((cd>=input$dates_DS[1])&(cd<=input$dates_DS[2])&Weekday==input$Weekday_DS)
} else {
sales%>%filter((cd>=input$dates_DS[1])&(cd<=input$dates_DS[2])&type==input$Type_DS&Weekday==input$Weekday_DS)
}
}
})
dataset_GPS_view <- reactive({
if(input$Weekday_DS=="all"){
if (input$Type_DS=="all"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2]))
} else if(input$Type_DS=="商旅"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2])&rpgid=="泡湯x休憩")
} else if(input$Type_DS=="摩鐵"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2])&rpgid=="泡湯x休憩")
} else if(input$Type_DS=="美甲美睫"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2])&rpgid=="美甲x美睫")
} else if(input$Type_DS=="按摩"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2])&rpgid=="按摩紓壓")
} else if(input$Type_DS=="主題酒吧"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2])&rpgid=="主題酒吧")
}
} else{
if (input$Type_DS=="all"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2]))
} else if(input$Type_DS=="商旅"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2])&rpgid=="泡湯x休憩"&Weekday==input$Weekday_DS)
} else if(input$Type_DS=="摩鐵"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2])&rpgid=="泡湯x休憩"&Weekday==input$Weekday_DS)
} else if(input$Type_DS=="美甲美睫"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2])&rpgid=="美甲x美睫"&Weekday==input$Weekday_DS)
} else if(input$Type_DS=="按摩"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2])&rpgid=="按摩紓壓"&Weekday==input$Weekday_DS)
} else if(input$Type_DS=="主題酒吧"){
user_cat%>%filter((createtime>=input$dates_DS[1])&(createtime<=input$dates_DS[2])&rpgid=="主題酒吧"&Weekday==input$Weekday_DS)
}
}
})
#===============Table==============
output$Order_demography<-renderTable({
orders_member%>%filter(status_name=="Paid")%>%group_by(Gender,Age)%>%dplyr::summarise(n=n())
})
output$Top_summary<-renderTable({
dataset_Merchant_Top()
})
output$Sales_summary<-renderTable({
dataset_Sales()
})
output$Orders_sub_summary<-renderTable({
dataset_Orders_sub()
})
output$local_stats<-renderTable({
temp_orders<-dataset_Merchant_stats()
temp<-branch%>%filter(((((lat-hotspot$lat[2])^2+(lng-hotspot$lng[2])^2)^0.5)/0.00000900900901)<hotspot$diameter[2])
temp<-temp_orders[temp_orders$branchname%in%temp$branchname,]
temp$total<-temp$amount+temp$bonus
temp%>%group_by(branchname,total)%>%summarise(sales=n())
})
output$weekday_supply_demand_summary<-renderTable({
dataset_supply_demand_weekday()
})
output$weekend_supply_demand_summary<-renderTable({
dataset_supply_demand_weekend()
})
#Cohort
output$Cohort_plot<-renderTable({
cohort_date<-data.frame()
cohort<-data.frame()
row<-1
for (i in min(user_cat$week):max(user_cat$week)){
col<-1
for (j in i:max(user_cat$week)){
#temp<-filter(userlog,Create_Time==i)
cohort[row,col]<-sum(member$uid[member$week_create==i&member$Sign_Up=="Sign-up"]%in%unique(user_cat[user_cat$week==j,]%$%uid))
col%<>%+1
}
row%<>%+1
}
cohort<-(cohort/cohort[,1])*100
row<-1
for(i in min(user_cat$week):max(user_cat$week)){
cohort_date[row,1]<-paste(as.Date("2015-11-02")+7*(i-1))
rownames(cohort_date)[row]<-paste("Week",i,sep=" ")
colnames(cohort)[row]<-paste("Week",row,sep=" ")
row%<>%+1
}
colnames(cohort_date)<-"Date"
cbind(cohort_date,cohort)
})
output$Cohort_Spent<-renderTable({
cohort_date_spent<-data.frame()
cohort_spent<-data.frame()
temp_month<-unique(member$create_month)
temp_month<-temp_month[order(temp_month)]
for (i in 1:length(unique(member$create_month))){
col<-2
cohort_spent[i,1]<-nrow(filter(member,create_month==temp_month[i]&Sign_Up=="Sign-up"))
for (j in i:length(unique(member$create_month))){
temp<-orders[orders$uid%in%(filter(member,create_month==temp_month[i]&Sign_Up=="Sign-up")%$%uid),]
if(input$fun_pi){
cohort_spent[i,col]<-(sum(temp%>%filter(create_month<=temp_month[j]&status_name=="Paid")%$%amount)*0.15-sum(temp%>%filter(create_month<=temp_month[j]&status_name=="Paid")%$%bonus))
}else{
cohort_spent[i,col]<-(sum(temp%>%filter(create_month<=temp_month[j]&status_name=="Paid")%$%amount))*0.15
}
colnames(cohort_spent)[1]<-paste("base")
colnames(cohort_spent)[j+1]<-paste("month",j,sep=" ")
col%<>%+1
}
}
cohort_spent_percentage<-cohort_spent
cohort_spent_percentage[,2:ncol(cohort_spent_percentage)]<-(cohort_spent[,2:ncol(cohort_spent)]/cohort_spent[,1])
for (i in 1:length(unique(member$create_month))){
cohort_date_spent[i,1]<-paste("month",i,sep=" ")
}
colnames(cohort_date_spent)<-"Date"
if(input$LTV){
cbind(cohort_date_spent,cohort_spent_percentage)
}else{
cbind(cohort_date_spent,cohort_spent)
}
})
output$Cohort_conversion_plot<-renderTable({
cohort_date_c<-data.frame()
cohort_c<-data.frame()
row<-1
for (i in min(user_cat$week):max(user_cat$week)){
col<-2
cohort_c[row,1]<-nrow(filter(member,week_create==i&Sign_Up=="Sign-up"&area==input$cohort_area))
for (j in i:max(user_cat$week)){
temp<-filter(orders,Create_Time<=j&Create_Time>j-1&status_name=="Paid"&area==input$cohort_area)
cohort_c[row,col]<-sum(member$uid[member$week_create==i&member$Sign_Up=="Sign-up"&member$area==input$cohort_area]%in%temp$uid)
col%<>%+1
}
row%<>%+1
}
cohort_c[,2:ncol(cohort_c)]<-(cohort_c[,2:ncol(cohort_c)]/cohort_c[,1])*100
row<-1
colnames(cohort_c)[1]<-paste("base")
for(i in min(user_cat$week):max(user_cat$week)){
cohort_date_c[row,1]<-paste(as.Date("2015-11-02")+7*(i-1))
rownames(cohort_date_c)[row]<-paste("Week",i,sep=" ")
colnames(cohort_c)[row+1]<-paste("Week",row,sep=" ")
row%<>%+1
}
colnames(cohort_date_c)<-"Date"
cbind(cohort_date_c,cohort_c)
},digit=5)
output$Cohort_conversion_month<-renderTable({
cohort_date_cm<-data.frame()
cohort_cm<-data.frame()
temp_month<-unique(member$create_month)
temp_month<-temp_month[order(temp_month)]
for (i in 1:length(unique(member$create_month))){
col<-2
cohort_cm[i,1]<-nrow(filter(member,create_month==temp_month[i]&Sign_Up=="Sign-up"))
for (j in i:length(unique(member$create_month))){
temp<-orders[orders$uid%in%(filter(member,create_month==temp_month[i]&Sign_Up=="Sign-up")%$%uid),]
if (input$Category_sel_cohort!="all"){
temp%<>%filter(category==input$Category_sel_cohort)
}
cohort_cm[i,col]<-length(unique(temp%>%filter(create_month==temp_month[j]&status_name=="Paid")%$%uid))
colnames(cohort_cm)[1]<-paste("base")
colnames(cohort_cm)[j+1]<-paste("month",j,sep=" ")
col%<>%+1
}
}
if(input$cm_percentage){
cohort_cm[,2:ncol(cohort_cm)]<-(cohort_cm[,2:ncol(cohort_cm)]/cohort_cm[,1])*100
}
for (i in 1:length(unique(member$create_month))){
cohort_date_cm[i,1]<-paste(temp_month[i])
}
colnames(cohort_date_cm)<-"Date"
cbind(cohort_date_cm,cohort_cm)
},digit=3)
output$Conversion_trend<-renderTable({
member_temp<-member
member_temp%<>%select(uid,create_month)
colnames(member_temp)<-c("uid","member_create_month")
member_orders<-merge(orders%>%filter(status_name=="Paid")%>%select(uid,create_month,category),member_temp,by="uid",all.x=TRUE)
if (input$Category_sel_cohort!="all"){
member_orders%<>%filter(category==input$Category_sel_cohort)
}
member_orders<-member_orders[order(member_orders$create_month),]
member_orders$rep<-"Rep"
member_orders$purchase<-1
rep_uid<-unique(member_orders[duplicated(member_orders$uid),]%$%uid)
for (i in 1:length(rep_uid)){
member_orders$purchase[member_orders$uid==rep_uid[i]]<-2
member_orders$purchase[min(which(member_orders$uid==rep_uid[i]))]<-1
}
member_orders_old<-member_orders[member_orders$member_create_month!=member_orders$create_month,]
member_orders_old%<>%filter(purchase==1)%>%select(uid,create_month)
member_orders_old$month<- member_orders_old$create_month
member_orders_old%<>%select(uid,month)
member_orders<-merge(member_orders,member_orders_old,by="uid",all.x=TRUE)
member_orders$rep[member_orders$member_create_month!=member_orders$create_month&member_orders$create_month==member_orders$month]<-"First not same month"
member_orders$rep[member_orders$member_create_month==member_orders$create_month]<-"First"
head_stats<-data.frame(matrix(data = 0,
nrow = length(unique(member_orders$create_month)),
ncol = 4))
member_orders<-member_orders[order(member_orders$create_month),]
if (input$head_count){
row<-1
for (i in unique(member_orders$create_month)){
head_stats[row,1]<-i
head_stats[row,2]<-length(unique(member_orders%>%filter(create_month==i&rep==unique(member_orders$rep)[1])%$%uid))
head_stats[row,3]<-length(unique(member_orders%>%filter(create_month==i&rep==unique(member_orders$rep)[3])%$%uid))
head_stats[row,4]<-length(unique(member_orders%>%filter(create_month==i&rep==unique(member_orders$rep)[2])%$%uid))
row<-row+1
}
colnames(head_stats)<-c('month',unique(member_orders$rep)[1],unique(member_orders$rep)[3],unique(member_orders$rep)[2])
head_stats
} else {
if (input$cm_percentage){
temp<-table(member_orders$create_month,member_orders$rep)
prop.table(temp,1)
} else {
table(member_orders$create_month,member_orders$rep)
}
}
},digit=3)
output$Cohort_buy_size<-renderTable({
cohort_date_bs<-data.frame()
cohort_bs<-data.frame()
temp_month<-unique(member$create_month)
temp_month<-temp_month[order(temp_month)]
for (i in 1:length(unique(member$create_month))){
col<-2
cohort_bs[i,1]<-nrow(filter(member,create_month==temp_month[i]&Sign_Up=="Sign-up"))
for (j in i:length(unique(member$create_month))){
temp<-orders[orders$uid%in%(filter(member,create_month==temp_month[i]&Sign_Up=="Sign-up")%$%uid),]
if (input$Category_sel_cohort!="all"){
temp%<>%filter(category==input$Category_sel_cohort)
}
cohort_bs[i,col]<-(sum(temp%>%filter(create_month==temp_month[j]&status_name=="Paid")%$%amount))/nrow(temp%>%filter(create_month==temp_month[j]&status_name=="Paid"))
colnames(cohort_bs)[1]<-paste("base")
colnames(cohort_bs)[j+1]<-paste("month",j,sep=" ")
col%<>%+1
}
}
for (i in 1:length(unique(member$create_month))){
cohort_date_bs[i,1]<-paste(temp_month[i])
}
colnames(cohort_date_bs)<-"Date"
cbind(cohort_date_bs,cohort_bs)
})
output$buy_size<-renderTable({
cohort_date_bs<-data.frame()
cohort_bs<-data.frame()
temp_month<-unique(member$create_month)
temp_month<-temp_month[order(temp_month)]
for (i in 1:length(unique(member$create_month))){
temp<-orders
if (input$Category_sel_cohort!="all"){
temp%<>%filter(category==input$Category_sel_cohort)
}
cohort_bs[i,1]<-(sum(temp%>%filter(create_month==temp_month[i]&status_name=="Paid")%$%amount))/nrow(temp%>%filter(create_month==temp_month[i]&status_name=="Paid"))
colnames(cohort_bs)<-"Amount"
}
for (i in 1:length(unique(member$create_month))){
cohort_date_bs[i,1]<-paste(temp_month[i])
}
colnames(cohort_date_bs)<-"Date"
cbind(cohort_date_bs,cohort_bs)
})
output$Repeat_ratio_trend<-renderTable({
cohort_date_rtt<-data.frame()
cohort_rtt<-data.frame()
temp_month<-unique(member$create_month)
temp_month<-temp_month[order(temp_month)]
for (i in 1:length(unique(member$create_month))){
col<-2
if (i==1){
cohort_rtt[i,1]<-0
}else{
if (input$Category_sel_cohort!="all"){
order_cat<-orders%>%filter(category==input$Category_sel_cohort)
} else{
order_cat<-orders
}
temp<-order_cat%>%filter(create_month==temp_month[i-1]&status_name=="Paid")
temp_2<-order_cat%>%filter(create_month==temp_month[i]&status_name=="Paid")
temp_2<-temp_2[temp_2$uid%in%temp$uid,]
cohort_rtt[i,1]<-length(unique(temp_2$uid))/length(unique(temp$uid))
}
}
colnames(cohort_rtt)<-c("Rep_Ratio_over_month")
for (i in 1:length(unique(member$create_month))){
cohort_date_rtt[i,1]<-paste(temp_month[i])
}
colnames(cohort_date_rtt)<-"Date"
cbind(cohort_date_rtt,cohort_rtt)
})
output$Orders_Sales_summary<-renderTable({
(dataset_Orders_sub()/dataset_Sales()*100)
})
output$Cat_Top<-renderTable({
temp<-dataset_cat()%>%filter(start==0)%>%group_by(rpgid)%>%dplyr::summarise(n=n())
names(temp)<-c("Category","Tot_views")
temp[order(-temp$Tot_views),]
})
output$Product_Top_mix<-renderTable({
temp<-dataset_pt_mix()
temp[,-3]
})
# output$Product_Top_rest<-renderTable({
# temp<-dataset_pt_QK()
# temp[1:20,]
# })
# output$Product_Top_late<-renderTable({
# temp<-dataset_pt_late()
# temp[1:20,]
# })
# output$Product_Top_massage<-renderTable({
# temp<-dataset_pt_massage()
# temp[1:20,]
# })
# output$Product_Top_manicure<-renderTable({
# temp<-dataset_pt_manicure()
# temp[1:20,]
# })
#
# output$Product_Top_rest_Taichung<-renderTable({
# temp<-dataset_pt_QK_Taichung()
# temp[1:20,]
# })
# output$Product_Top_late_Taichung<-renderTable({
# temp<-dataset_pt_late_Taichung()
# temp[1:20,]
# })
# output$Product_Top_escape<-renderTable({
# temp<-dataset_pt()%>%filter(category=="密室")%>%group_by(branchname)%>%dplyr::summarise(n=n())
# names(temp)<-c("branchname","Views")
# temp<-merge(temp,select(branch,branchname,area_detail),by="branchname")
# temp<-temp[order(-temp$Views),]
# temp$intention_count<-0
# temp$order_count<-0
#
# for (i in 1:length(temp$branchname)){
# temp$order_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Paid")))
# temp$intention_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Intention")))
# }
# temp$intention_percentage<-temp$intention_count/temp$Views
# temp$order_percentage<-temp$order_count/temp$Views
#
# names(temp)<-c("Merchants","Views","地區","有意圖","銷售","有意圖比例","銷售比例")
# temp[1:10,]
# })
# output$Product_Top_board<-renderTable({
# temp<-dataset_pt()%>%filter(category=="桌遊")%>%group_by(branchname)%>%dplyr::summarise(n=n())
# names(temp)<-c("branchname","Views")
# temp<-merge(temp,select(branch,branchname,area_detail),by="branchname")
# temp<-temp[order(-temp$Views),]
# temp$intention_count<-0
# temp$order_count<-0
#
# for (i in 1:length(temp$branchname)){
# temp$order_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Paid")))
# temp$intention_count[i]<-as.numeric(sum((order_browsing()$branchname==temp$branchname[i])&(order_browsing()$status_name=="Intention")))
# }
# temp$intention_percentage<-temp$intention_count/temp$Views
# temp$order_percentage<-temp$order_count/temp$Views
#
# names(temp)<-c("Merchants","Views","地區","有意圖","銷售","有意圖比例","銷售比例")
# temp[1:10,]
# })
output$Product_Top_bar<-renderTable({
temp<-dataset_pt_bar()
temp[1:20,]
})
output$Search_Top<-renderTable({
temp<-dataset_search()%>%group_by(search)%>%dplyr::summarise(n=n())
names(temp)<-c("Search","Counts")
temp<-temp[order(-temp$Counts),]
temp<-na.omit(temp)
temp$user_order<-0
for (i in 1:length(temp$Search)){
temp$user_order[i]<-sum((dataset_search()%>%filter(search==temp$Search[i])%$%uid)%in%(orders%>%filter(status_name=="Paid")%$%uid))
}
temp[1:30,]
})
output$Ave_items<-renderTable({
temp<-data.frame()
temp[1,1]<-(dataset_view())
names(temp)<-c("Ave pages view")
temp
})
  # Monthly split of paying customers into repeat vs. new buyers, plus the
  # repeat ratio per month, for the selected category dataset.
  output$Category_table<-renderTable({
    Cat_matrix<-data.frame(matrix(data = 0,nrow = length(unique(dataset_category()%$%create_month)),ncol = 3))
    month<-unique(dataset_category()%$%create_month)
    for (i in 1:length(month)){
      Cat_matrix[i,1]<-month[i]
      # uids that already paid in any earlier month.
      old_uid<-dataset_category()%>%filter(status_name=="Paid"&create_month<month[i])%$%uid
      temp<-dataset_category()%>%filter(status_name=="Paid"&create_month==month[i])
      # Split this month's paid orders into returning vs. first-time buyers.
      temp_old<-temp[temp$uid%in%old_uid,]
      temp<-temp[!(temp$uid%in%old_uid),]
      Cat_matrix[i,2]<-nrow(temp_old)
      Cat_matrix[i,3]<-nrow(temp)
    }
    # rep ratio = repeat / (repeat + new).
    Cat_matrix[,4]<-Cat_matrix[,2]/(Cat_matrix[,3]+Cat_matrix[,2])
    colnames(Cat_matrix)<-c("month","rep","New","rep ratio")
    Cat_matrix[order(Cat_matrix$month),]
  })
  # Category cross table: cell (i, j) is the number of distinct uids that
  # bought (Paid) in BOTH category i and category j inside the date range.
  output$Category_cross_table<-renderTable({
    temp<-orders%>%filter(cd>=input$dates_cat[1]&cd<=input$dates_cat[2]&status_name=="Paid")
    cat_type<-na.omit(unique(temp$category))
    Cat_cross_matrix<-data.frame(matrix(data = 0,nrow = length(cat_type),ncol = length(cat_type)))
    for (i in 1:length(cat_type)){
      temp_cat<-temp%>%filter(category==cat_type[i])
      for (j in 1:length(cat_type)){
        temp_cat_2<-temp%>%filter(category==cat_type[j])
        # buyers of category j restricted to buyers of category i.
        Cat_cross_matrix[i,j]<-length(unique(temp_cat_2[temp_cat_2$uid%in%temp_cat$uid,]%$%uid))
      }
    }
    colnames(Cat_cross_matrix)<-cat_type
    rownames(Cat_cross_matrix)<-cat_type
    Cat_cross_matrix
  })
#===============Plot================
  # Weekly member counts. input$plot_type chooses line vs. 100%-stacked area;
  # input$y chooses a grouping column ("Total_Member" = no grouping);
  # input$cum toggles per-week counts (column n) vs. running total (Cumul).
  output$Member_plot <- renderPlot({
    if(input$plot_type=="Line"){
      if(input$y!="Total_Member"){
        # One line per level of the grouping variable.
        if(input$cum==F){
          p <- ggplot(dataset_member(), aes_string(x=input$x, y="n",color=input$y))+
            geom_line()+
            labs(y="Freq",x="week")+
            theme(panel.grid.minor.x=element_blank())+
            geom_text(data=dataset_member(),aes(label=n))+scale_x_continuous(breaks=seq(0, 52, 1))
        }
        else if(input$cum==T){
          p <- ggplot(dataset_member(), aes_string(x=input$x, y="Cumul",color=input$y))+
            geom_line()+
            labs(y="Freq",x="week")+
            theme(panel.grid.minor.x=element_blank())+
            geom_text(data=dataset_member(),aes(label=Cumul))+scale_x_continuous(breaks=seq(0, 52, 1))
        }
      }
      else{
        # Ungrouped total-member line.
        if(input$cum==F){
          p <- ggplot(dataset_member(), aes_string(x=input$x, y="n")) + geom_line()+
            theme(panel.grid.minor.x=element_blank())+
            geom_text(data=dataset_member(),aes(label=n))+scale_x_continuous(breaks=seq(0, 52, 1))
        }
        else if(input$cum==T){
          p <- ggplot(dataset_member(), aes_string(x=input$x, y="Cumul"))+
            geom_line()+
            theme(panel.grid.minor.x=element_blank())+
            geom_text(data=dataset_member(),aes(label=Cumul))+scale_x_continuous(breaks=seq(0, 52, 1))
        }
      }
      p
    } else if (input$plot_type=="Stack_Area"){
      if(input$y!="Total_Member"){
        # position="fill" normalises each week to 100%.
        if(input$cum==F){
          p <- ggplot(dataset_member(), aes_string(x=input$x,y="n",group=input$y,fill=input$y)) +
            geom_area(position="fill")+
            labs(y="Percentage",x="week")+scale_x_continuous(breaks=seq(0, 52, 1))
        }
        else if(input$cum==T){
          p <- ggplot(dataset_member(), aes_string(x=input$x,y="Cumul",group=input$y,fill=input$y)) +
            geom_area(position="fill")+
            labs(y="Percentage",x="week")+scale_x_continuous(breaks=seq(0, 52, 1))
        }
      }
      else{
        # No grouping variable: fall back to the plain line plot.
        if(input$cum==F){
          p <- ggplot(dataset_member(), aes_string(x=input$x, y="n")) + geom_line()+
            theme(panel.grid.minor.x=element_blank())+
            geom_text(data=dataset_member(),aes(label=n))+scale_x_continuous(breaks=seq(0, 52, 1))
        }
        else if(input$cum==T){
          p <- ggplot(dataset_member(), aes_string(x=input$x, y="Cumul"))+
            geom_line()+
            theme(panel.grid.minor.x=element_blank())+
            geom_text(data=dataset_member(),aes(label=Cumul))+scale_x_continuous(breaks=seq(0, 52, 1))
        }
      }
      p
    }
  })
  # Weekly login counts grouped by input$y_L, as lines or a 100%-stacked
  # area; input$cum_L toggles per-week counts (n) vs. running total (Cumul).
  # STKaiti font family is set so CJK labels render correctly.
  output$Login_plot <- renderPlot({
    if(input$plot_type_L=="Line"){
      if(input$cum_L==F){
        p <- ggplot(dataset_login(), aes_string(x=input$x_L, y="n",color=input$y_L))+
          geom_line()+
          labs(y="Freq",x="week")+
          theme(panel.grid.minor.x=element_blank())+
          geom_text(data=dataset_login(),aes(label=n))+
          theme_grey(base_family = "STKaiti")+scale_x_continuous(breaks=seq(0, 52, 1))
      }
      else if(input$cum_L==T){
        p <- ggplot(dataset_login(), aes_string(x=input$x_L, y="Cumul",color=input$y_L))+
          geom_line()+
          labs(y="Freq",x="week")+
          theme(panel.grid.minor.x=element_blank())+
          geom_text(data=dataset_login(),aes(label=Cumul))+
          theme_grey(base_family = "STKaiti")+scale_x_continuous(breaks=seq(0, 52, 1))
      }
      p
    } else if (input$plot_type_L=="Stack_Area"){
      # position="fill" normalises each week to 100%.
      if(input$cum_L==F){
        p <- ggplot(dataset_login(), aes_string(x=input$x_L,y="n",group=input$y_L,fill=input$y_L)) +
          geom_area(position="fill")+
          labs(y="Percentage",x="Week")+
          theme_grey(base_family = "STKaiti")+scale_x_continuous(breaks=seq(0, 52, 1))
      }
      else if(input$cum_L==T){
        p <- ggplot(dataset_login(), aes_string(x=input$x_L,y="Cumul",group=input$y_L,fill=input$y_L)) +
          geom_area(position="fill")+
          labs(y="Percentage",x="Week")+
          theme_grey(base_family = "STKaiti")+scale_x_continuous(breaks=seq(0, 52, 1))
      }
      p
    }
  })
#Orders Plot
  # Weekly order metrics; input$y_O selects which series to draw:
  #   Total_Order -> total order count, Heads -> distinct purchasing members,
  #   Orders      -> order counts per Type, conversion -> conversion rate.
  output$Orders_plot <- renderPlot({
    if(input$y_O=="Total_Order"){
      p <- ggplot(dataset_orders_member(), aes_string(x=input$x_O, y="Orders"))+
        geom_line()+
        labs(y="Orders",x="week")+
        theme(panel.grid.minor.x=element_blank())+
        geom_text(data=dataset_orders_member(),aes(label=Orders))+scale_x_continuous(breaks=seq(0, 52, 1))
    }
    else if(input$y_O=="Heads"){
      p <- ggplot(dataset_orders_member(), aes_string(x=input$x_O, y="Head_Counts",color="Type"))+
        geom_line()+
        labs(y="purchase members",x="week")+
        theme(panel.grid.minor.x=element_blank())+
        geom_text(data=dataset_orders_member(),aes(label=Head_Counts))+scale_x_continuous(breaks=seq(0, 52, 1))
    }
    else if(input$y_O=="Orders"){
      p <- ggplot(dataset_orders_member(), aes_string(x=input$x_O, y="Counts",color="Type"))+
        geom_line()+
        labs(y="Orders",x="week")+
        theme(panel.grid.minor.x=element_blank())+
        geom_text(data=dataset_orders_member(),aes(label=Counts))+scale_x_continuous(breaks=seq(0, 52, 1))
    }
    else if(input$y_O=="conversion"){
      # Labels formatted as percentages with two decimals.
      p <- ggplot(dataset_orders_member(), aes_string(x=input$x_O, y="conversion",color="Type"))+
        geom_line()+
        labs(y="conversion",x="week")+
        theme(panel.grid.minor.x=element_blank())+
        geom_text(data=dataset_orders_member(),aes(label=sprintf("%.2f%%",conversion*100)))+scale_x_continuous(breaks=seq(0, 52, 1))
    }
    p
  })
#Category Plot
output$Category_plot <- renderPlot({
temp<-orders%>%filter(status_name=="Paid")%>%group_by(create_month,category)%>%summarise(orders=n())
p <- ggplot(temp, aes_string(x="create_month", y="orders",color="category"))+
geom_line()+
labs(y="Orders",x="month")+
theme(panel.grid.minor.x=element_blank())+
geom_text(data=temp,aes(label=orders))+theme_grey(base_family = "STKaiti")
p
})
#Merchant
output$Merchant_plot <- renderPlot({
temp<-branch%>%group_by(area,type)%>%dplyr::summarise(n=n())
p<- ggplot(data = temp) +
geom_bar(aes(x = "", y = n, fill = type), stat = "identity") +
facet_wrap(~area) +theme_grey(base_family = "STKaiti")
p
})
output$Merchant_line_plot <- renderPlot({
if(input$cum_M==F){
p <- ggplot(dataset_Merchant_Line(), aes(x=week, y=n))+
geom_line()+
labs(y="Merchant on board",x="week")+
theme(panel.grid.minor.x=element_blank())+
geom_text(data=dataset_Merchant_Line(),aes(label=n))+scale_x_continuous(breaks=seq(-10, 52, 1))
}
else{
p <- ggplot(dataset_Merchant_Line(), aes(x=week, y=Cumul))+
geom_line()+
labs(y="Merchant on board",x="week")+
theme(panel.grid.minor.x=element_blank())+
geom_text(data=dataset_Merchant_Line(),aes(label=Cumul))+scale_x_continuous(breaks=seq(-10, 52, 1))
}
p
})
output$Orders_time_plot <- renderPlot({
temp<-orders%>%filter(status_name=="Paid")%>%group_by(time_diff)%>%dplyr::summarise(n=n())
p<- ggplot(data = temp) +
geom_bar(aes(x = "", y = n, fill = time_diff), stat = "identity") +
coord_polar(theta="y")+theme_grey(base_family = "STKaiti")
p
})
#Frequency
output$frequency_plot <- renderPlot({
temp<-orders
if (input$frequency_category!="all"){
temp%<>%filter(type==input$frequency_category)
}
orders_stats<-temp%>%filter(status_name=="Paid"&cd>=as.Date("2016-02-01"))%>%group_by(uid)%>%summarise(n=n())%>%filter(n>=2)
interval<-""
for (i in 1:nrow(orders_stats)){
orders_uid<-temp%>%filter(uid==orders_stats$uid[i])
if (length(unique(orders_uid))!=1){
new_interval<-as.integer(max(orders_uid$cd)-min(orders_uid$cd))/(length(unique(orders_uid$cd))-1)
interval<-c(interval,new_interval)
}
}
interval<-as.integer(interval)
hist(interval,breaks=200,col = 'red',xlim=range(0,100))
})
#MAU
output$MAU_plot <- renderPlot({
if(input$MAU_OS=="ALL"){
temp<-filter(MAU,funnel=="retention")
}else if (input$MAU_OS=="IOS"){
temp<-filter(MAU_IOS,funnel=="retention")
}else if(input$MAU_OS=="ANDROID"){
temp<-filter(MAU_ANDROID,funnel=="retention")
}
if(input$MAU_var=="MAU"){
p <- ggplot(data=temp)+geom_line(aes(x=Date,y=MAU_count))
}else if(input$MAU_var=="DAU"){
p <- ggplot(data=temp)+geom_line(aes(x=Date,y=DAU_count))
}else if(input$MAU_var=="DAU_MAU_ratio"){
p <- ggplot(data=temp)+geom_line(aes(x=Date,y=DAU_MAU_percentage))
}
p
})
output$MAU_stacked <- renderPlot({
if(input$MAU_OS=="ALL"){
p <- ggplot(MAU, aes(x=Date,y=MAU_percentage,group=funnel,fill=funnel)) +
geom_area()+
labs(y="Percentage",x="Date")
}else if (input$MAU_OS=="IOS"){
p <- ggplot(MAU_IOS, aes(x=Date,y=MAU_percentage,group=funnel,fill=funnel)) +
geom_area()+
labs(y="Percentage",x="Date")
}else if(input$MAU_OS=="ANDROID"){
p <- ggplot(MAU_ANDROID, aes(x=Date,y=MAU_percentage,group=funnel,fill=funnel)) +
geom_area()+
labs(y="Percentage",x="Date")
}
p
})
output$MAU_table<- renderTable({
if(input$MAU_OS=="ALL"){
MAU
}else if (input$MAU_OS=="IOS"){
MAU_IOS
}else if(input$MAU_OS=="ANDROID"){
MAU_ANDROID
}
},digit=3)
#User Behavior
# output$Category_browsing <- renderPlot({
# temp<-dataset_cat()%>%filter(start==0)%>%group_by(rpgid,forMap)%>%dplyr::summarise(n=n())
# p<- ggplot(data = temp) +
# geom_bar(aes(x = "", y = n, fill = forMap), stat = "identity") +
# facet_wrap(~rpgid) +theme_grey(base_family = "STKaiti")
# p
# })
# output$Map_Browsing <- renderPlot({
# temp<-dataset_cat()%>%filter(start==0)%>%group_by(rpgid,forMap)%>%dplyr::summarise(n=n())
# p<- ggplot(data = temp) +
# geom_bar(aes(x = "", y = n, fill = rpgid), stat = "identity") +
# facet_wrap(~forMap) +theme_grey(base_family = "STKaiti")
# p
# })
  # Category browsing volume over time, one line per category (rpgid),
  # aggregated per week or per day (input$Cat_Browsing_variable). When
  # input$remove_first is set, rows whose browse time equals the member's
  # account-creation time are dropped (excludes sign-up-day browsing).
  output$Cat_Browsing <- renderPlot({
    if(input$Cat_Browsing_variable=="Week"){
      if(input$remove_first==F){
        temp<-dataset_cat()%>%group_by(week,rpgid)%>%dplyr::summarise(n=n())
      }else{
        temp<-merge(dataset_cat(),select(member,uid,Create_Time),by="uid")
        temp%<>%filter(createtime!=Create_Time)%>%group_by(week,rpgid)%>%dplyr::summarise(n=n())
      }
      p <- ggplot(temp, aes(x=week, y=n,color=rpgid))+
        geom_line()+
        labs(y="Freq",x="week")+
        theme(panel.grid.minor.x=element_blank())+
        geom_text(data=temp,aes(label=n))+scale_x_continuous(breaks=seq(0, 52, 1)) +theme_grey(base_family = "STKaiti")
    } else if(input$Cat_Browsing_variable=="Day"){
      if(input$remove_first==F){
        temp<-dataset_cat()%>%group_by(createtime,rpgid)%>%dplyr::summarise(n=n())
      }else{
        temp<-merge(dataset_cat(),select(member,uid,Create_Time),by="uid")
        temp%<>%filter(createtime!=Create_Time)%>%group_by(createtime,rpgid)%>%dplyr::summarise(n=n())
      }
      # Daily resolution: no per-point labels, continuous date axis.
      p <- ggplot(temp, aes(x=createtime, y=n,color=rpgid))+
        geom_line()+
        labs(y="Freq",x="week")+
        theme(panel.grid.minor.x=element_blank())+theme_grey(base_family = "STKaiti")
    }
    p
  })
# output$first_shopping_dist <- renderPlot({
# hist(as.numeric(first_shopping$time_diff),col="red",breaks=50,xlab="Days between account created time and first shopping time")
# })
# output$login_vs_first_time_dist <- renderPlot({
# plot(first_shopping$time_diff,first_shopping$browse_count)
# })
#order time
output$order_time_plot<-renderPlot({
hours_matrix<-data.frame(matrix(data = 0,nrow = 1,ncol = 2))
colnames(hours_matrix)<-c("hours","n")
order_time_plot_temp<-dataset_orders_time()
order_time_plot_temp%<>%group_by(hours)%>%summarise(n=n())
if(!("1-3"%in%order_time_plot_temp$hours)){
hours_matrix[1,1]<-"1-3"
order_time_plot_temp<-rbind(order_time_plot_temp,hours_matrix)
}
if(!("4-6"%in%order_time_plot_temp$hours)){
hours_matrix[1,1]<-"4-6"
order_time_plot_temp<-rbind(order_time_plot_temp,hours_matrix)
}
if(!("7-9"%in%order_time_plot_temp$hours)){
hours_matrix[1,1]<-"7-9"
order_time_plot_temp<-rbind(order_time_plot_temp,hours_matrix)
}
if(!("10-12"%in%order_time_plot_temp$hours)){
hours_matrix[1,1]<-"10-12"
order_time_plot_temp<-rbind(order_time_plot_temp,hours_matrix)
}
if(!("13-15"%in%order_time_plot_temp$hours)){
hours_matrix[1,1]<-"13-15"
order_time_plot_temp<-rbind(order_time_plot_temp,hours_matrix)
}
if(!("16-18"%in%order_time_plot_temp$hours)){
hours_matrix[1,1]<-"16-18"
order_time_plot_temp<-rbind(order_time_plot_temp,hours_matrix)
}
if(!("19-21"%in%order_time_plot_temp$hours)){
hours_matrix[1,1]<-"19-21"
order_time_plot_temp<-rbind(order_time_plot_temp,hours_matrix)
}
if(!("22-0"%in%order_time_plot_temp$hours)){
hours_matrix[1,1]<-"22-0"
order_time_plot_temp<-rbind(order_time_plot_temp,hours_matrix)
}
order_time_plot_temp$hours<- factor(order_time_plot_temp$hours, levels= c("1-3", "4-6", "7-9", "10-12", "13-15", "16-18","19-21","22-0"))
ggplot(order_time_plot_temp,aes(x=hours,y=n))+geom_point()
})
#map
output$GPS_supply_plot<-renderPlot({
if (input$area_DS=="台北"){
gps_lon<-121.5219634
gps_lat<-25.0389007
} else if((input$area_DS=="台中")){
gps_lon<-120.630577
gps_lat<- 24.1406094
}
zoom_par<-12
mapgilbert <- get_map(location = c(lon=gps_lon,lat= gps_lat), zoom = zoom_par,
maptype = "roadmap", scale = 2)
p<-ggmap(mapgilbert) +
stat_density2d(dataset_GPS_supply(), mapping=aes(x=lng, y=lat, fill=..level..), geom="polygon", alpha=0.5)+
scale_fill_gradient(low = "green", high = "red")
p
})
output$GPS_views_plot<-renderPlot({
if (input$area_DS=="台北"){
gps_lon<-121.5219634
gps_lat<-25.0389007
} else if((input$area_DS=="台中")){
gps_lon<-120.630577
gps_lat<- 24.1406094
}
zoom_par<-12
mapgilbert <- get_map(location = c(lon=gps_lon,lat= gps_lat), zoom = zoom_par,
maptype = "roadmap", scale = 2)
p<-ggmap(mapgilbert) +
stat_density2d(dataset_GPS_view(), mapping=aes(x=lng, y=lat, fill=..level..), geom="polygon", alpha=0.5)+
scale_fill_gradient(low = "green", high = "red")
p
})
  # Order-count comparison by time slot: the selected merchant ("本店") vs.
  # the per-merchant average of its area peers ("比較商家"), optionally
  # restricted to one duration (input$hr_com) and including/excluding the
  # merchant itself in the comparison set (input$include_com).
  output$merchant_com_plot_sales<-renderPlot({
    if (input$hr_com=='All'){
      main<-orders%>%filter(branchname==input$merchant_com&cd>=input$date_com[1]&cd<=input$date_com[2])
      if (input$include_com){
        comp<-orders%>%filter(area_detail==input$area_com&cd>=input$date_com[1]&cd<=input$date_com[2])
      }else{
        comp<-orders%>%filter(area_detail==input$area_com&branchname!=input$merchant_com&cd>=input$date_com[1]&cd<=input$date_com[2])
      }
    }
    else{
      main<-orders%>%filter(branchname==input$merchant_com&duration==input$hr_com&cd>=input$date_com[1]&cd<=input$date_com[2])
      if (input$include_com){
        comp<-orders%>%filter(area_detail==input$area_com&duration==input$hr_com&cd>=input$date_com[1]&cd<=input$date_com[2])
      }else{
        comp<-orders%>%filter(area_detail==input$area_com&branchname!=input$merchant_com&duration==input$hr_com&cd>=input$date_com[1]&cd<=input$date_com[2])
      }
    }
    main_stats<-main%>%group_by(hours)%>%summarise(sales=n())
    comp_stats<-comp%>%group_by(hours)%>%summarise(sales=n())
    # Normalise the comparison group to a per-merchant average.
    comp_stats$sales<-comp_stats$sales/(length(unique(comp$branchname)))
    main_stats$name<-'本店'
    comp_stats$name<-'比較商家'
    total<-rbind(main_stats,comp_stats)
    total$hours<-factor(total$hours,c("1-3","4-6","7-9","10-12","13-15","16-18","19-21","22-0"))
    p <- ggplot(total, aes_string(x="hours", y="sales",color="name"))+
      geom_point()+
      labs(y="Orders",x="hours")+
      theme(panel.grid.minor.x=element_blank())+
      theme_grey(base_family = "STKaiti")
    p
  })
  # Same comparison but for MEAN order amount, additionally restricted to one
  # category (input$category_com).
  output$merchant_com_plot_sales_amount<-renderPlot({
    if (input$hr_com=='All'){
      main<-orders%>%filter(branchname==input$merchant_com&cd>=input$date_com[1]&cd<=input$date_com[2]&category==input$category_com)
      if (input$include_com){
        comp<-orders%>%filter(area_detail==input$area_com&cd>=input$date_com[1]&cd<=input$date_com[2]&category==input$category_com)
      }else{
        comp<-orders%>%filter(area_detail==input$area_com&branchname!=input$merchant_com&cd>=input$date_com[1]&cd<=input$date_com[2]&category==input$category_com)
      }
    }
    else{
      main<-orders%>%filter(branchname==input$merchant_com&duration==input$hr_com&cd>=input$date_com[1]&cd<=input$date_com[2]&category==input$category_com)
      if (input$include_com){
        comp<-orders%>%filter(area_detail==input$area_com&duration==input$hr_com&cd>=input$date_com[1]&cd<=input$date_com[2]&category==input$category_com)
      }else{
        comp<-orders%>%filter(area_detail==input$area_com&branchname!=input$merchant_com&duration==input$hr_com&cd>=input$date_com[1]&cd<=input$date_com[2]&category==input$category_com)
      }
    }
    # Mean amount per time slot for each group.
    main_stats<-aggregate(main$amount,by=list(main$hours),FUN=mean)
    comp_stats<-aggregate(comp$amount,by=list(comp$hours),FUN=mean)
    colnames(main_stats)<-c("hours","amount")
    colnames(comp_stats)<-c("hours","amount")
    main_stats$name<-'本店'
    comp_stats$name<-'比較商家'
    total<-rbind(main_stats,comp_stats)
    total$hours<-factor(total$hours,c("1-3","4-6","7-9","10-12","13-15","16-18","19-21","22-0"))
    p <- ggplot(total, aes_string(x="hours", y="amount",color="name"))+
      geom_point()+
      labs(y="amount",x="hours")+
      theme(panel.grid.minor.x=element_blank())+
      theme_grey(base_family = "STKaiti")
    p
  })
#Download files
output$downloadData_mix <- downloadHandler(
filename = function() {
paste("mix", '.csv', sep='')
},
content = function(file) {
temp<-dataset_pt_mix()
temp<-temp[,-1]
write.csv( temp,file,fileEncoding = "big5")
}
)
# output$downloadData_QK_Taipei <- downloadHandler(
# filename = function() {
# paste("Taipei_QK", '.csv', sep='')
# },
# content = function(file) {
# write.csv(dataset_pt_QK(), file,fileEncoding = "big5")
# }
# )
# output$downloadData_late_Taipei <- downloadHandler(
# filename = function() {
# paste("Taipei_Late", '.csv', sep='')
# },
# content = function(file) {
# write.csv(dataset_pt_late(), file,fileEncoding = "big5")
# }
# )
# output$downloadData_massage_Taipei <- downloadHandler(
# filename = function() {
# paste("Taipei_massage", '.csv', sep='')
# },
# content = function(file) {
# write.csv(dataset_pt_massage(), file,fileEncoding = "big5")
# }
# )
# output$downloadData_manicure_Taipei <- downloadHandler(
# filename = function() {
# paste("Taipei_manicure", '.csv', sep='')
# },
# content = function(file) {
# write.csv(dataset_pt_manicure(), file,fileEncoding = "big5")
# }
# )
# output$downloadData_bar_Taipei <- downloadHandler(
# filename = function() {
# paste("Taipei_bar", '.csv', sep='')
# },
# content = function(file) {
# write.csv(dataset_pt_bar(), file,fileEncoding = "big5")
# }
# )
# output$downloadData_late_Taipei <- downloadHandler(
# filename = function() {
# paste("Taichung_late", '.csv', sep='')
# },
# content = function(file) {
# write.csv(dataset_pt_late_Taichung(), file,fileEncoding = "big5")
# }
# )
# output$downloadData_QK_Taipei <- downloadHandler(
# filename = function() {
# paste("Taichung_QK", '.csv', sep='')
# },
# content = function(file) {
# write.csv(dataset_pt_QK_Taichung(), file,fileEncoding = "big5")
# }
# )
##### push
dataset_push <- reactive({
temp<-user_cat%>%filter(createtime>=input$dates_push[1]&createtime<=input$dates_push[2]&rpgid==input$dataset_push_category)
cat_stats<-temp%>%group_by(uid)%>%summarise(n=n())%>%filter(n>=input$view_click)
paste(unique(cat_stats$uid),collapse=",")
})
output$downloadData_push <- downloadHandler(
filename = function() {
paste("push","_",input$dates_push[1],"_",input$dates_push[2])
},
content = function(file) {
write(dataset_push(), file)
}
)
##### Sales item edited
dataset_sie<-reactive({
temp<-sales%>%filter(cd>=input$dates_sie[1]&cd<=input$dates_sie[2]&createtime!=lastmodifiedtime)
upload<-sales%>%filter(cd>=input$dates_sie[1]&cd<=input$dates_sie[2])
sales_upload<-upload%>%group_by(branchname)%>%summarise(upload=n())
sales_edit<-temp%>%group_by(branchname)%>%summarise(edit=n())
sales_delete<-temp%>%filter(deleted==1)%>%group_by(branchname)%>%summarise(delete=n())
temp<-merge(sales_upload,sales_edit,by='branchname',all.x=TRUE)
temp<-merge(temp,sales_delete,by='branchname',all.x=TRUE)
temp2<-sales_daily_setting%>%filter(cd>=input$dates_sie[1]&cd<=input$dates_sie[2]&createtime!=lastmodifiedtime)
sales_edit<-temp2%>%group_by(branchname)%>%summarise(auto_edit=n())
sales_delete<-temp2%>%filter(enabled==0)%>%group_by(branchname)%>%summarise(auto_disable=n())
temp<-merge(temp,sales_edit,by='branchname',all.x=TRUE)
temp<-merge(temp,sales_delete,by='branchname',all.x=TRUE)
temp[order(-temp$edit),]
})
output$sie_table<-renderTable({
temp<-dataset_sie()
temp[1:20,]
})
output$downloadData_sie <- downloadHandler(
filename = function() {
paste("Sales_item_edited_Stats",input$dates_sie[1],"_to_",input$dates_sie[2],".csv")
},
content = function(file) {
write.csv(dataset_sie(), file,fileEncoding = "big5")
}
)
})
|
9a6f2c000fcd8469152ee64aa250501bb36245dd
|
6591be39877bf07b7f901864a87bd80d00cdbc23
|
/plot3.R
|
b2d2f044341f04cb169b6701cd2b1d98f1a8fcc7
|
[] |
no_license
|
winnchow/ExData_Plotting2
|
acea75dc59ad7f720031e21f5735ecc608c80d39
|
d8a114d8be1938b4b10ca297a5aa365189504215
|
refs/heads/master
| 2020-04-22T06:34:32.710717
| 2014-07-21T12:26:26
| 2014-07-21T12:26:26
| null | 0
| 0
| null | null | null | null |
IBM852
|
R
| false
| false
| 949
|
r
|
plot3.R
|
# Plot 3
# 3. Of the four types of sources indicated by the type (point, nonpoint, onroad,
# nonroad) variable, which of these four sources have seen decreases in emissions
# from 1999-2008 for Baltimore City? Which have seen increases in emissions from
# 1999-2008? Use the ggplot2 plotting system to make a plot answer this question.
# (The original comment contained mis-encoded dash characters; repaired here.)
library(ggplot2)
# This first line will likely take a few seconds. Be patient!
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Calculate the sum of emissions per year and type
# (fips "24510" selects Baltimore City, Maryland).
data <- aggregate(Emissions ~ year + type, FUN=sum, na.rm=TRUE, data=NEI[NEI$fips == "24510", ])
# Generate the plot: points per year with a linear trend, faceted by source type.
png(file = "plot3.png", width=600)
qplot(year, Emissions, data = data, geom=c("point", "smooth"), method="lm", facets = . ~ type, main=expression("Total Emissions from PM"[2.5]*" in the Baltimore City, Maryland"), xlab="Year", ylab="Total Emissions (tons)")
dev.off()
|
077df18b9a11628cef6242da7e2a2246bdc433a6
|
2d47450c41c23f6d008bfca5bf08d3161bb13491
|
/man/ba_locations_details.Rd
|
77222e910ebad65d8f1404d1beefc9e695ceaf89
|
[] |
no_license
|
khaled-alshamaa/brapi
|
2c14727d65fc82a77d243bdc40c10b67955a04d5
|
5f2a5caa48d72e2412ead128b9143cc1882a060c
|
refs/heads/master
| 2022-03-21T20:19:07.470329
| 2019-10-16T15:51:00
| 2019-10-16T15:51:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,407
|
rd
|
ba_locations_details.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ba_locations_details.R
\name{ba_locations_details}
\alias{ba_locations_details}
\title{ba_locations_details}
\usage{
ba_locations_details(con = NULL, locationDbId = "",
rclass = c("tibble", "data.frame", "list", "json"))
}
\arguments{
\item{con}{list, brapi connection object}
\item{locationDbId}{character, the internal database identifier for a
location of which the details are to be retrieved;
\strong{REQUIRED ARGUMENT} with default: ""}
\item{rclass}{character, class of the object to be returned; default: "tibble"
, possible other values: "json"/"list"/"data.frame"}
}
\value{
An object of class as defined by rclass containing the location
details.
}
\description{
Gets details for a location given by a required database identifier.
}
\details{
All standard attributes are always listed. However, attributes in the additionalInfo
only when at least one record has data.
}
\note{
Tested against: test-server
BrAPI Version: 1.0, 1.1, 1.2
BrAPI Status: active
}
\examples{
if (interactive()) {
library(brapi)
# Need to connect to a database with genetic data
con <- ba_db()$testserver
loc <- ba_locations_details(con = con, "1")
}
}
\references{
\href{https://github.com/plantbreeding/API/blob/V1.2/Specification/Locations/LocationDetails.md}{github}
}
\author{
Reinhard Simon, Maikel Verouden
}
|
c52192cf12bb6ef3e018c753dde290db827d9ed4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/crqanlp/examples/clean_text.Rd.R
|
4914fc7e3ee8181e25eb38c98b1097b70c3e14e0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 507
|
r
|
clean_text.Rd.R
|
library(crqanlp)
### Name: clean_text
### Title: Clean text
### Aliases: clean_text
### Keywords: misc
### ** Examples
library(gutenbergr)
## let's get Alice's Adventures in Wonderland by Carroll
# gutenberg_works(author == "Carroll, Lewis")
rawText = gutenberg_download(11) ## take the text (Project Gutenberg book id 11)
rawText = as.vector(rawText$text) ## vectorize the text
rawText = paste(rawText, collapse = " ") ## collapse the text into one string
cleanText = clean_text(rawText, removeStopwords = TRUE)
text = cleanText$content  # cleaned text is returned in the $content element
|
f217909fce2884dd46ceecbe205be86265423c3c
|
e2fa06c4f96f07a872b6f5c8952bb42d00e4f8f0
|
/man/plot.ml_g_fit.Rd
|
37d4b658d58ba8bf8d7f57629bf7515a5dd9bf98
|
[] |
no_license
|
cran/msme
|
8db2ef035cb84e72c56ad668e833ead148af0887
|
e9f7649fc57ece8fcc178289196dc3e4707fbd35
|
refs/heads/master
| 2020-07-01T01:54:53.055045
| 2018-03-18T21:31:41
| 2018-03-18T21:31:41
| 17,697,696
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 895
|
rd
|
plot.ml_g_fit.Rd
|
\name{plot.ml_g_fit}
\alias{plot.ml_g_fit}
\title{A plot method for objects of class ml_g_fit.}
\description{
This function provides a four-way plot for fitted models.
}
\usage{
\method{plot}{ml_g_fit}(x, ...)
}
\arguments{
\item{x}{
the fitted model.
}
\item{\dots}{
other arguments, retained for compatibility with generic method.
}
}
\details{
The function plots a summary. The output is structured to broadly
match the default options of the plot.lm function.
}
\value{
Run for its side effect of producing a plot object.
}
\references{
Hilbe, J.M., and Robinson, A.P. 2013. Methods of Statistical Model
Estimation. Chapman & Hall / CRC.
}
\author{
Andrew Robinson and Joe Hilbe.
}
\seealso{
\code{\link{ml_g}}
}
\examples{
data(ufc)
ufc <- na.omit(ufc)
ufc.g.reg <- ml_g(height.m ~ dbh.cm, data = ufc)
plot(ufc.g.reg)
}
\keyword{ models }
\keyword{ htest }
|
f729067e724ebbb481622814ccc02ab07dc30c00
|
aee0ef401ae2be8b3bf3edfe6c71b4bbb4abd018
|
/man/Jac.Rd
|
a3bf6ee9b17874b450a5cfc46a63f8e674d96e3c
|
[
"MIT"
] |
permissive
|
yl2883/yixiaopackage
|
625aa43ec3031a8c18d4a03d715acd670fff2ba9
|
f71143bb8a873b59b5c2c1682561e62047359a7d
|
refs/heads/master
| 2023-05-05T06:06:59.105255
| 2021-05-23T19:39:25
| 2021-05-23T19:39:25
| 369,407,970
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 304
|
rd
|
Jac.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/solve_ols.R
\name{Jac}
\alias{Jac}
\title{Jacobi Method}
\usage{
Jac(A, b, x)
}
\arguments{
\item{A}{Linear System of interest}
\item{b}{Target Vector}
\item{x}{Initial guess of the solution}
}
\description{
Applies the Jacobi iterative method to the linear system \code{A x = b},
starting from the initial guess \code{x}.
}
|
787b8226b17204eed0c42de99e8b889d58466a56
|
8f7fc1147fe90b857d6d7b17c7cfaaa4986f613f
|
/man/asypow.noncent.Rd
|
55fd76377bf8ee5646a0a84070ba6aa524634cff
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
cran/asypow
|
29ff0945ff524649f26c86bd3ada827488b47de5
|
c8840aad7c1bc9af790051a7c45c330f3a56a967
|
refs/heads/master
| 2021-01-01T15:50:58.487649
| 2015-06-25T00:00:00
| 2015-06-25T00:00:00
| 17,694,518
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,851
|
rd
|
asypow.noncent.Rd
|
\name{asypow.noncent}
\alias{asypow.noncent}
\title{
Asymptotic Noncentrality Parameter
}
\description{
Given an information matrix, alternative hypothesis parameter values, and
constraints that create the null hypothesis from the alternative,
calculates noncentrality parameter, degrees of freedom and parameter
value estimates under the null hypothesis.
}
\usage{
asypow.noncent(theta.ha, info.mat, constraints,
nobs.ell=1, get.ho=TRUE)
}
\arguments{
\item{theta.ha}{
Array of parameter values under the alternative hypothesis.
}
\item{info.mat}{
The information matrix, the second derivate matrix of the
expected log likelihood under the alternative hypothesis.
The negative of the hessian matrix.
}
\item{constraints}{
The constraints which set the null hypothesis from the
alternative hypothesis. They are in matrix form.
CONSTRAINT[,1] is 1 for setting parameter to a value
2 for equality of two parameters
CONSTRAINT[,2] is case on CONSTRAINT[,1]
(1) Index of parameter to set to value
(2) Index of one of two parameters to
be set equal
CONSTRAINT[,3] is case on CONSTRAINT[,1]
(1) Value to which parameter is set
(2) Index of other of two parameters
to be set equal
}
\item{nobs.ell}{
The number of observations used in computing the information
matrix. That is, info.mat is that for nobs.ell observations.
Default is 1, which is the correct value for all of the 'info.'
routines supplied here.
}
\item{get.ho}{
If TRUE, estimates of the parameter values under the null
hypothesis are calculated and returned, otherwise not.
Default is TRUE.
}}%end \arguments
\value{
Returns a list including
\item{w}{
The noncentrality parameter for 1 observation.
}
\item{df}{
The degrees of freedom of the test
}
\item{theta.ho}{
Estimates of the parameter values under the null hypothesis.
}}
\references{
Cox, D.R. and Hinkley, D.V. (1974).
\emph{Theoretical Statistics}
Chapman and Hall, London.
}
\seealso{
\code{\link{asypow.n}},
\code{\link{asypow.sig}},
\code{\link{asypow.power}}
}
\examples{
# Three Sample Poisson Example :
# Three independent Poisson processes produce events at
# mean rates of 1, 2 and 3 per day.
# Find the information matrix
pois.mean <- c(1,2,3)
info.pois <- info.poisson.kgroup(pois.mean,group.size=3)
# Create the constraints matrix
constraints <- matrix(c(2,1,2,2,2,3),ncol=3,byrow=TRUE)
# Calculate noncentrality parameter, degrees of freedom and parameter
# value estimates under the null hypothesis for the test.
poisson.object <- asypow.noncent(pois.mean,info.pois,constraints)
}
\keyword{htest}
\concept{noncentrality}
% Converted by Sd2Rd version 1.21.
|
9f4d446b1541d895e505689ac6f25dff27dec5db
|
516b43cbce39caf6640c3fef8a47ff7d46d1dd6e
|
/man/Rcpp_Lcm.Rd
|
c8eb2cede1dd144596e8352df3f86e946cde3c38
|
[] |
no_license
|
QuanliWang/NPBayesImpute
|
d302b6e75dca461682dcd8d4d50d9e687cd221ad
|
e151964ee26bb21184e0867e0668b0f159bffa75
|
refs/heads/master
| 2021-06-04T05:24:45.981912
| 2018-09-13T17:40:40
| 2018-09-13T17:40:40
| 20,529,100
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 123
|
rd
|
Rcpp_Lcm.Rd
|
\name{Rcpp_Lcm}
\alias{Rcpp_Lcm}
\title{
RCPP implemenation of the library
}
\description{
\link{Rcpp_Lcm-class}
}
|
b1610e96847940b7401e5ef596611aeb1c3a37f4
|
d4409010fa25433c80c2dd98cda5ad547f36f8de
|
/Simulation/dhmm_model_selection.R
|
540fa50b2fba78e92645c18c1d87582dfd9cfba4
|
[] |
no_license
|
lruijin/pHMM
|
014bc8326b9bcff127f6e90bf1534e46975865e0
|
093c9e703ff213ac6fd67ebca4d50944c44c5ca5
|
refs/heads/master
| 2023-09-01T17:23:20.492224
| 2021-10-27T17:49:48
| 2021-10-27T17:49:48
| 267,655,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,888
|
r
|
dhmm_model_selection.R
|
# Quietly load the parallel back-end packages (MPI-based foreach execution).
suppressMessages({
    library(doMPI)
    library(foreach)
})
#library(doMC)
# Number of simulation replicates to run.
nrun=1000
sim_each <- function(no.run, seed = 2256,burnin = 3000, nsim. = 5000){
source("DHMM_v1.R",local=T)
#source('utils.R',local=T)
set.seed(seed + no.run);
daf <- genSim(N = 390, rx. = rep(c(0,1),each = 390/2),fitRx = c(FALSE,TRUE,FALSE));
pars_hs2 <- c(0.932,
0.426,0.024,
0.953,0.024,
0.977,0.048,
34.082,-3.257,3.366,-0.238,
38.430,-5.149,3.318,-0.160,
13.425,0.051,0.684,-0.027,0.051,0.025,0.033,0.010,0.684,0.033,13.190,0.086,-0.027,0.010,0.086,0.028,
0.2, 0.443, 20, 145.159,
0.2,0.252, 30, 62.988,
0.512,0.042)
pars_hs4 <- c(0.495,0.218,0.218,
0.190,0.190,0.047,0.029,0.233,0.506,0.029,0.233,0.506,0.010,0.007,0.007,
0.531,0.051,0.051,0.025,0.216,0.372,0.372,0.126,0.216,0.372,0.372,0.126,
0.740,0.021,0.021,0.015,0.119,0.471,0.471,0.012,0.119,0.471,0.471,0.012,
34.082,0.206,0.206,-3.669,3.366,-0.072,-0.072,0.095,
38.430,-0.961,-0.961,-3.227,3.318,-0.049,-0.049,-0.063,
13.425,0.051,0.684,-0.027,0.051,0.025,0.033,0.010,0.684,0.033,13.190,0.086,-0.027,0.010,0.086,0.028,
0.048, 0.436, 0.436, 0.443, 8.911, 75.614, 75.614, 145.159,
0.139, 0.333, 0.333, 0.252, 19.069, 135.208, 135.208, 62.988,
0.233,0.233,0.048,0.026,0.325,0.323,0.026,0.325,0.323,0.020,0.011,0.011)
pars_hs5 <- c(0.248,0.247,0.218,0.219,
0.287,0.190,0.190,0.047,0.287,0.190,0.190,0.047,0.015,0.014,0.233,0.506,0.015,0.014,0.233,0.506,0.005,0.005,0.007,0.007,
0.267,0.266,0.025,0.025,0.025,0.267,0.266,0.025,0.025,0.025,0.216,0.216,0.372,0.372,0.126,0.216,0.216,0.372,0.372,0.126,
0.370,0.370,0.021,0.021,0.015,0.370,0.370,0.021,0.021,0.015,0.059,0.060,0.471,0.471,0.012,0.059,0.060,0.471,0.471,0.012,
33.776,0.206,0.206,0.206,-3.669,3.438,-0.072,-0.072,-0.072,0.095,
39.391,-0.961,-0.961,-0.961,-3.227,3.367,-0.049,-0.049,-0.049,-0.063,
13.425,0.051,0.684,-0.027,0.051,0.025,0.033,0.010,0.684,0.033,13.190,0.086,-0.027,0.010,0.086,0.028,
0.048, 0.048, 0.436, 0.436, 0.443, 8.911, 8.911, 75.614, 75.614, 145.159,
0.139, 0.139, 0.333, 0.333, 0.252, 19.069, 19.069, 135.208, 135.208, 62.988,
0.265,0.233,0.233,0.048,0.265,0.233,0.233,0.048,0.013,0.013,0.325,0.323,0.013,0.013,0.325,0.323,0.010,0.010,0.011,0.011)
x_hs2 <- simulated(y = daf$y, inits.= pars_hs2, nsim.= nsim., burnin = burnin, ksamp.= 1, Km = c(2,2), hs = c(2,2,2),
N.=daf$N, ni.=daf$ni, rx. = daf$rx, fitRx = daf$fitRx, report1.=1000,id=rep(1:daf$N,5),run. = no.run)
x <- simulated(y = daf$y, inits. = c(daf$pars), nsim.= nsim., burnin = burnin, ksamp.= 1, Km = c(2,2), hs = c(3,3,3),
N.=daf$N, ni.=daf$ni, rx. = daf$rx, fitRx = daf$fitRx, report1.=1000,id=rep(1:daf$N,5),run. = no.run)
x_hs4 <- simulated(y = daf$y, inits. = pars_hs4, nsim.= nsim., burnin = burnin, ksamp.= 1, Km = c(2,2), hs = c(4,4,4),
N.=daf$N, ni.=daf$ni, rx. = daf$rx, fitRx = daf$fitRx, report1.=1000,id=rep(1:daf$N,5),run. = no.run)
x_hs5 <- simulated(y = daf$y, inits. = pars_hs5, nsim.= nsim., burnin = burnin, ksamp.= 1, Km = c(2,2), hs = c(5,5,5),
N.=daf$N, ni.=daf$ni, rx. = daf$rx, fitRx = daf$fitRx, report1.=1000,id=rep(1:daf$N,5),run. = no.run)
WAIC = c(x_hs2$WAIC, x$WAIC, x_hs4$WAIC, x_hs5$WAIC)
return(WAIC)
}
cl <- doMPI::startMPIcluster()
registerDoMPI(cl)
out <- foreach(i = 1: nrun,.combine="rbind",.multicombine=T,.errorhandling="pass")%dopar%{
sim_each(i)
}
save('out',file=paste0("dhmm_hs_sel",nrun,".Rdata"))
stopCluster(cl)
mpi.exit()
|
3feac763587107d801fac1fed09aed1391b79344
|
b2bd46d3bea50ada3aff0ce47a98fb7e8bfd969c
|
/FrequencyApp/app.R
|
27b5ff4830629ec8a3a148945ef12d201a740191
|
[] |
no_license
|
Ighina/FrequencyApp
|
105c1e6dfa870996668f4c868b6ff7847a5c5820
|
3dce9d55ff986ced1ea46223d863de61ef10e09e
|
refs/heads/master
| 2020-04-09T15:13:17.983384
| 2019-01-19T12:53:36
| 2019-01-19T12:53:36
| 160,419,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 40,632
|
r
|
app.R
|
list.of.packages <- c("shinythemes", "shiny", "igraph", "tm", "tokenizers","stringr","readr","DT")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
library(shiny)
library(shinythemes)
ui_prova<-navbarPage('Sections',theme = shinytheme("journal"),
tabPanel('Plot',
titlePanel(h1("Occurrences of concepts with R",h6("by",em("Iacopo Ghinassi")))),
sidebarLayout(
sidebarPanel(h2("Text Input",align='center'),width=3,
fluidRow(
fileInput("file1",h6("File input")),
fileInput("file2",h6("File input")),
fileInput("file3",h6("File input")),
fileInput("file4",h6("File input")),
fileInput("file5",h6("File input"))),
h4('Press Submit to include dictionaries and parameters'),
submitButton("Submit")),
mainPanel(column(12,h2("Occurrences Plot"),align="center"),
column(12,plotOutput(outputId = "graphfreq"),align="center",style="background-color:#ccccff"),
column(12,br()),
column(4, selectInput("type_of_frequency","Type of Frequency:",c(absolute="absolute",relative="relative"))),
column(4,selectInput('language','Language:',c('english','spanish', 'german','italian'))),
column(4,selectInput('stem','Apply stemming?',c('yes','no'))),
br(),
column(12,h3('Dictionaries (Concepts)'),align='center'),
fluidRow(column(3,textInput("dictionary1",label = "Insert 1st dictionary's words",
value = "write something"),
textInput("dictionary2",label = "Insert 2nd dictionary's words",
value = "write something")),
column(3,
textInput("dictionary3",label = "Insert 3rd dictionary's words",
value = "write something"),
textInput("dictionary4",label = "Insert 4th dictionary's words",
value = "write something")),
column(3,textInput("dictionary5",label = "Insert 5th dictionary's words",
value = "write something"),
textInput("dictionary6",label = "Insert 6th dictionary's words",
value = "write something")),
column(3,textInput("dictionary7",label = "Insert 7th dictionary's words",
value = "write something"),textInput("dictionary8",label = "Insert 8th dictionary's words",
value = "write something")),
column(6,
textInput("dictionary9",label = "Insert 9th dictionary's words",
value = "write something"),align='center'),
column(6,
textInput("dictionary10",label = "Insert 10th dictionary's words",
value = "write something"),align='center'))))),
tabPanel("Data",
mainPanel(DT::dataTableOutput('data'))))
server_prova<-function(input,output,session){
output$graphfreq <- renderPlot({
library(tm)
library(stringr)
library(readr)
library(ggplot2)
if(is.null(input$file1)){
return(title("No text"))
}
else{
num_texts<-1
if(length(input$file2))(num_texts<-num_texts+1)
if(length(input$file3))(num_texts<-num_texts+1)
if(length(input$file4))(num_texts<-num_texts+1)
if(length(input$file5))(num_texts<-num_texts+1)
print(num_texts)
if(num_texts==1){
text<-readLines(input$file1$datapath)
text<-paste(text,collapse = " ")
preproc_text<-gsub("((?:\b| )?([.,:;!?]+)(?: |\b)?)", " \\1 ", text, perl=T)
if(input$stem=='yes'){
preproc_text<-stemDocument(preproc_text)}
preproc_text<-paste(preproc_text, collapse = " ")
print(head(preproc_text))
dict_list<-list()
dict_list[[1]]<-vector()
dict_list[[1]]<-input$dictionary1
dict_list[[1]]<-strsplit(dict_list[[1]], ",")
dict_list[[1]]<-unlist(dict_list[[1]])
dict_list[[1]]<-str_trim(dict_list[[1]])
dict_list[[2]]<-vector()
dict_list[[2]]<-input$dictionary2
dict_list[[2]]<-strsplit(dict_list[[2]], ",")
dict_list[[2]]<-unlist(dict_list[[2]])
dict_list[[2]]<-str_trim(dict_list[[2]])
dict_list[[3]]<-vector()
dict_list[[3]]<-input$dictionary3
dict_list[[3]]<-strsplit(dict_list[[3]], ",")
dict_list[[3]]<-unlist(dict_list[[3]])
dict_list[[3]]<-str_trim(dict_list[[3]])
dict_list[[4]]<-vector()
dict_list[[4]]<-input$dictionary4
dict_list[[4]]<-strsplit(dict_list[[4]], ",")
dict_list[[4]]<-unlist(dict_list[[4]])
dict_list[[4]]<-str_trim(dict_list[[4]])
dict_list[[5]]<-vector()
dict_list[[5]]<-input$dictionary5
dict_list[[5]]<-strsplit(dict_list[[5]], ",")
dict_list[[5]]<-unlist(dict_list[[5]])
dict_list[[5]]<-str_trim(dict_list[[5]])
dict_list[[6]]<-vector()
dict_list[[6]]<-input$dictionary6
dict_list[[6]]<-strsplit(dict_list[[6]], ",")
dict_list[[6]]<-unlist(dict_list[[6]])
dict_list[[6]]<-str_trim(dict_list[[6]])
dict_list[[7]]<-vector()
dict_list[[7]]<-input$dictionary7
dict_list[[7]]<-strsplit(dict_list[[7]], ",")
dict_list[[7]]<-unlist(dict_list[[7]])
dict_list[[7]]<-str_trim(dict_list[[7]])
dict_list[[8]]<-vector()
dict_list[[8]]<-input$dictionary8
dict_list[[8]]<-strsplit(dict_list[[8]], ",")
dict_list[[8]]<-unlist(dict_list[[8]])
dict_list[[8]]<-str_trim(dict_list[[8]])
dict_list[[9]]<-vector()
dict_list[[9]]<-input$dictionary9
dict_list[[9]]<-strsplit(dict_list[[9]], ",")
dict_list[[9]]<-unlist(dict_list[[9]])
dict_list[[9]]<-str_trim(dict_list[[9]])
dict_list[[10]]<-vector()
dict_list[[10]]<-input$dictionary10
dict_list[[10]]<-strsplit(dict_list[[10]], ",")
dict_list[[10]]<-unlist(dict_list[[10]])
dict_list[[10]]<-str_trim(dict_list[[10]])
for (i in 1:10) {
if(length(dict_list[[i]])==0){
dict_list[[i]][1]<-"write something"
}
}
if (dict_list[[1]][1]=="write something") {
title(warning("WRITE AT LEAST ONE DICTIONARY!"))
}
else{
for (i in length(dict_list):1) {
if (dict_list[[i]][1]=="write something") {
dict_list[[i]]<-NULL
}
}
if(input$stem=='yes'){
for (i in 1:length(dict_list)) {
dict_list[[i]]<-append(dict_list[[i]],stemDocument(dict_list[[i]],language = "english"))# Select the language that apply
}
}
All_dict_words_to1term <- function(starting_doc,dict,sub_term){
for (i in 1:length(dict)) {
if (i==1){
new_doc<-str_replace_all(starting_doc,dict[i],sub_term)
}
else{
new_doc<-str_replace_all(new_doc,dict[i],sub_term)
}
}
return(new_doc)
}
# Function to iterate the previous function over several dictionaries and create a list of the texts thus processed (the final element of the list is the one processed after all of the dictionaries)
All_dict_words_to1term_fin <- function(starting_doc,dictionaries){
result<-list()
for (i in 1:length(dictionaries)) {
if (i==1) {
result[[i]]<-All_dict_words_to1term(starting_doc,dictionaries[[i]],dictionaries[[i]][1])
}
else{
result[[i]]<-All_dict_words_to1term(result[[i-1]],dictionaries[[i]],dictionaries[[i]][1])
}
}
return(result)
}
processed_text<-All_dict_words_to1term_fin(preproc_text,dict_list)
processed_text<-processed_text[[length(dict_list)]]
dict_new<-vector()
for (i in 1:length(dict_list)) {
dict_new[i] <- dict_list[[i]][1]
}
dict_new<-tolower(dict_new)
print(dict_new)
node_reference<- data.frame(dict_new, c(1:length(dict_new)))
node_reference<-node_reference[,2]
node_reference<-`names<-`(node_reference,dict_new)
strsplit_space_tokenizer <- function(x)
unlist(strsplit(as.character(x), "[[:space:]]+"))
if(input$language=='english'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="en", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='spanish'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="spanish", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='german'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="german", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='italian'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="italian", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
termFreq2<-function(x){
termFreq(x,
control = ctrl)
}
processed_text<-iconv(processed_text, "UTF-8",'latin1', sub = "") #change the parameters of conversion as needed, esepcially if errors are thrown at this stage
Text_freq<-termFreq2(processed_text)
Text_freq<-Text_freq[dict_new]
nodes_df<-data.frame(dict_new,node_reference,Text_freq)
Occurrences<-vector()
texts<-vector()
dictionaries<-vector()
texts<-append(texts,replicate(length(dict_new),paste('text',1)))
dictionaries<-append(dictionaries,dict_new)
Occurrences<- append(Occurrences,nodes_df[,3])
newer_df<-data.frame(as.factor(texts),
as.factor(dictionaries),Occurrences)
ggplot(data = newer_df,aes
(x=newer_df$as.factor.texts.,y=newer_df$Occurrences,
fill= newer_df$as.factor.dictionaries.))+geom_col()+
theme_classic()+xlab('Texts')+
ylab('Absolute Frequency')+labs(fill='Dictionaries')}}
else{
list_text<-list()
list_text[[1]]<-readLines(input$file1$datapath)
list_text[[2]]<-readLines(input$file2$datapath)
if(num_texts>2){list_text[[3]]<-readLines(input$file3$datapath)}
if(num_texts>3){list_text[[4]]<-readLines(input$file4$datapath)}
if(num_texts>4){list_text[[5]]<-readLines(input$file5$datapath)}
for(i in 1:num_texts) {
preproc_text_list[[i]]<-paste(list_text[[i]], collapse = ' ')
preproc_text_list[[i]]<-gsub("((?:\b| )?([.,:;!?]+)(?: |\b)?)", " \\1 ", preproc_text_list[[i]], perl=T)#Add space between words and punctuation
if(input$stem=='yes'){
preproc_text_list[[i]]<-stemDocument(preproc_text_list[[i]])}
preproc_text_list[[i]]<-paste(preproc_text_list[[i]], collapse = " ")
preproc_text_list[[i]]<-`names<-`(preproc_text_list[[i]],paste("text",i,"_c",sep = ""))
}
dict_list<-list()
dict_list[[1]]<-vector()
dict_list[[1]]<-input$dictionary1
dict_list[[1]]<-strsplit(dict_list[[1]], ",")
dict_list[[1]]<-unlist(dict_list[[1]])
dict_list[[1]]<-str_trim(dict_list[[1]])
dict_list[[2]]<-vector()
dict_list[[2]]<-input$dictionary2
dict_list[[2]]<-strsplit(dict_list[[2]], ",")
dict_list[[2]]<-unlist(dict_list[[2]])
dict_list[[2]]<-str_trim(dict_list[[2]])
dict_list[[3]]<-vector()
dict_list[[3]]<-input$dictionary3
dict_list[[3]]<-strsplit(dict_list[[3]], ",")
dict_list[[3]]<-unlist(dict_list[[3]])
dict_list[[3]]<-str_trim(dict_list[[3]])
dict_list[[4]]<-vector()
dict_list[[4]]<-input$dictionary4
dict_list[[4]]<-strsplit(dict_list[[4]], ",")
dict_list[[4]]<-unlist(dict_list[[4]])
dict_list[[4]]<-str_trim(dict_list[[4]])
dict_list[[5]]<-vector()
dict_list[[5]]<-input$dictionary5
dict_list[[5]]<-strsplit(dict_list[[5]], ",")
dict_list[[5]]<-unlist(dict_list[[5]])
dict_list[[5]]<-str_trim(dict_list[[5]])
dict_list[[6]]<-vector()
dict_list[[6]]<-input$dictionary6
dict_list[[6]]<-strsplit(dict_list[[6]], ",")
dict_list[[6]]<-unlist(dict_list[[6]])
dict_list[[6]]<-str_trim(dict_list[[6]])
dict_list[[7]]<-vector()
dict_list[[7]]<-input$dictionary7
dict_list[[7]]<-strsplit(dict_list[[7]], ",")
dict_list[[7]]<-unlist(dict_list[[7]])
dict_list[[7]]<-str_trim(dict_list[[7]])
dict_list[[8]]<-vector()
dict_list[[8]]<-input$dictionary8
dict_list[[8]]<-strsplit(dict_list[[8]], ",")
dict_list[[8]]<-unlist(dict_list[[8]])
dict_list[[8]]<-str_trim(dict_list[[8]])
dict_list[[9]]<-vector()
dict_list[[9]]<-input$dictionary9
dict_list[[9]]<-strsplit(dict_list[[9]], ",")
dict_list[[9]]<-unlist(dict_list[[9]])
dict_list[[9]]<-str_trim(dict_list[[9]])
dict_list[[10]]<-vector()
dict_list[[10]]<-input$dictionary10
dict_list[[10]]<-strsplit(dict_list[[10]], ",")
dict_list[[10]]<-unlist(dict_list[[10]])
dict_list[[10]]<-str_trim(dict_list[[10]])
for (i in 1:10) {
if(length(dict_list[[i]])==0){
dict_list[[i]][1]<-"write something"
}
}
if (dict_list[[1]][1]=="write something") {
title(warning("WRITE AT LEAST ONE DICTIONARY!"))
}
else{
for (i in length(dict_list):1) {
if (dict_list[[i]][1]=="write something") {
dict_list[[i]]<-NULL
}
}
if(input$stem=='yes'){
for (i in 1:length(dict_list)) {
dict_list[[i]]<-append(dict_list[[i]],stemDocument(dict_list[[i]],language = "english"))# Select the language that apply
}
}
All_dict_words_to1term <- function(starting_doc,dict,sub_term){
for (i in 1:length(dict)) {
if (i==1){
new_doc<-str_replace_all(starting_doc,dict[i],sub_term)
}
else{
new_doc<-str_replace_all(new_doc,dict[i],sub_term)
}
}
return(new_doc)
}
# Function to iterate the previous function over several dictionaries and create a list of the texts thus processed (the final element of the list is the one processed after all of the dictionaries)
All_dict_words_to1term_fin <- function(starting_doc,dictionaries){
result<-list()
for (i in 1:length(dictionaries)) {
if (i==1) {
result[[i]]<-All_dict_words_to1term(starting_doc,dictionaries[[i]],dictionaries[[i]][1])
}
else{
result[[i]]<-All_dict_words_to1term(result[[i-1]],dictionaries[[i]],dictionaries[[i]][1])
}
}
return(result)
}
processed_text_list<-list()
for (i in 1:num_texts) {
processed_text_list[[i]]<-All_dict_words_to1term_fin(preproc_text_list[[i]], dict_list)
processed_text_list[[i]]<-processed_text_list[[i]][[length(dict_list)]]}
dict_new<-vector()
for (i in 1:length(dict_list)) {
dict_new[i] <- dict_list[[i]][1]
}
dict_new<-tolower(dict_new)
print(dict_new)
node_reference<- data.frame(dict_new, c(1:length(dict_new)))
node_reference<-node_reference[,2]
node_reference<-`names<-`(node_reference,dict_new)
strsplit_space_tokenizer <- function(x)
unlist(strsplit(as.character(x), "[[:space:]]+"))
if(input$language=='english'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="en", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='spanish'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="spanish", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='german'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="german", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='italian'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="italian", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
termFreq2<-function(x){
termFreq(x,
control = ctrl)
}
# Creating texts' frequencies (i.e. absolute occurrences) vector for the concepts of interest
Text_freq<-list()
for (i in 1:num_texts) {
processed_text_list[[i]]<-iconv(processed_text_list[[i]], "UTF-8",'latin1', sub = "") #change the parameters of conversion as needed, esepcially if errors are thrown at this stage
Text_freq[[i]]<-termFreq2(processed_text_list[[i]])
Text_freq[[i]]<-Text_freq[[i]][dict_new]
}
Text_freq_rel<-list()
for (i in 1:num_texts) {
processed_text_list[[i]]<-iconv(processed_text_list[[i]], "UTF-8",'latin1', sub = "") #change the parameters of conversion as needed, esepcially if errors are thrown at this stage
Text_freq_rel[[i]]<- Text_freq[[i]]/sum(termFreq2(processed_text_list[[i]]))*100
}
Nodes_df_list<-list()
for (i in 1:num_texts) {
Nodes_df_list[[i]]<- data.frame(dict_new,node_reference,Text_freq[[i]],Text_freq_rel[[i]])
Nodes_df_list[[i]]<-`names<-`(Nodes_df_list[[i]], c('Label','ID','Absolute Occurence', 'Relative Occurrence'))
}
Occurrences<-vector()
texts<-vector()
dictionaries<-vector()
texts<-append(texts,replicate(length(dict_new),substr(as.character(input$file1[1]),1,nchar(as.character(input$file1[1]))-4)))
texts<-append(texts,replicate(length(dict_new),substr(as.character(input$file2[1]),1,nchar(as.character(input$file2[1]))-4)))
if(length(input$file3))texts<-append(texts,replicate(length(dict_new),substr(as.character(input$file3[1]),1,nchar(as.character(input$file3[1]))-4)))
if(length(input$file4))texts<-append(texts,replicate(length(dict_new),substr(as.character(input$file4[1]),1,nchar(as.character(input$file4[1]))-4)))
if(length(input$file5))texts<-append(texts,replicate(length(dict_new),substr(as.character(input$file5[1]),1,nchar(as.character(input$file5[1]))-4)))
for (i in 1:num_texts) {
#texts<-append(texts,replicate(length(dict_new),paste('text',i)))
dictionaries<-append(dictionaries,dict_new)
Occurrences<- append(Occurrences,Nodes_df_list[[i]]$`Absolute Occurence`)
}
newer_df<-data.frame(as.factor(texts),
as.factor(dictionaries),Occurrences)
# Same of before but with relative occurrences
Rel_occurrences<-vector()
for (i in 1:num_texts) {
Rel_occurrences<-append(Rel_occurrences,Nodes_df_list[[i]]$`Relative Occurrence`)
}
newer_rel_df<-data.frame(as.factor(texts),
as.factor(dictionaries),Rel_occurrences)
#Create Plots with ggplot2
#Absolute Occurrences
Abs_plot<-ggplot(data = newer_df,aes
(x=newer_df$as.factor.texts.,y=newer_df$Occurrences,
fill= newer_df$as.factor.dictionaries.))+geom_col()+
theme_classic()+xlab('Texts')+
ylab('Absolute Frequency')+labs(fill='Dictionaries')
#Relative Occurrences
Rel_plot<-ggplot(data = newer_rel_df,aes
(x=newer_rel_df$as.factor.texts.,y=newer_rel_df$Rel_occurrences,
fill= newer_rel_df$as.factor.dictionaries.))+geom_col()+
theme_classic()+xlab('Texts')+
ylab('Relative Frequency')+labs(fill='Dictionaries')
if(input$type_of_frequency=='absolute'){
Abs_plot
}
else{
Rel_plot
}
}}}})
output$data<-DT::renderDataTable({
library(tm)
library(stringr)
library(readr)
library(ggplot2)
if(is.null(input$file1)){
return(title("No text"))
}
else{
num_texts<-1
if(length(input$file2))(num_texts<-num_texts+1)
if(length(input$file3))(num_texts<-num_texts+1)
if(length(input$file4))(num_texts<-num_texts+1)
if(length(input$file5))(num_texts<-num_texts+1)
print(num_texts)
if(num_texts==1){
text<-readLines(input$file1$datapath)
text<-paste(text,collapse = " ")
preproc_text<-gsub("((?:\b| )?([.,:;!?]+)(?: |\b)?)", " \\1 ", text, perl=T)
if(input$stem=='yes'){
preproc_text<-stemDocument(preproc_text)}
preproc_text<-paste(preproc_text, collapse = " ")
dict_list<-list()
dict_list[[1]]<-vector()
dict_list[[1]]<-input$dictionary1
dict_list[[1]]<-strsplit(dict_list[[1]], ",")
dict_list[[1]]<-unlist(dict_list[[1]])
dict_list[[1]]<-str_trim(dict_list[[1]])
dict_list[[2]]<-vector()
dict_list[[2]]<-input$dictionary2
dict_list[[2]]<-strsplit(dict_list[[2]], ",")
dict_list[[2]]<-unlist(dict_list[[2]])
dict_list[[2]]<-str_trim(dict_list[[2]])
dict_list[[3]]<-vector()
dict_list[[3]]<-input$dictionary3
dict_list[[3]]<-strsplit(dict_list[[3]], ",")
dict_list[[3]]<-unlist(dict_list[[3]])
dict_list[[3]]<-str_trim(dict_list[[3]])
dict_list[[4]]<-vector()
dict_list[[4]]<-input$dictionary4
dict_list[[4]]<-strsplit(dict_list[[4]], ",")
dict_list[[4]]<-unlist(dict_list[[4]])
dict_list[[4]]<-str_trim(dict_list[[4]])
dict_list[[5]]<-vector()
dict_list[[5]]<-input$dictionary5
dict_list[[5]]<-strsplit(dict_list[[5]], ",")
dict_list[[5]]<-unlist(dict_list[[5]])
dict_list[[5]]<-str_trim(dict_list[[5]])
dict_list[[6]]<-vector()
dict_list[[6]]<-input$dictionary6
dict_list[[6]]<-strsplit(dict_list[[6]], ",")
dict_list[[6]]<-unlist(dict_list[[6]])
dict_list[[6]]<-str_trim(dict_list[[6]])
dict_list[[7]]<-vector()
dict_list[[7]]<-input$dictionary7
dict_list[[7]]<-strsplit(dict_list[[7]], ",")
dict_list[[7]]<-unlist(dict_list[[7]])
dict_list[[7]]<-str_trim(dict_list[[7]])
dict_list[[8]]<-vector()
dict_list[[8]]<-input$dictionary8
dict_list[[8]]<-strsplit(dict_list[[8]], ",")
dict_list[[8]]<-unlist(dict_list[[8]])
dict_list[[8]]<-str_trim(dict_list[[8]])
dict_list[[9]]<-vector()
dict_list[[9]]<-input$dictionary9
dict_list[[9]]<-strsplit(dict_list[[9]], ",")
dict_list[[9]]<-unlist(dict_list[[9]])
dict_list[[9]]<-str_trim(dict_list[[9]])
dict_list[[10]]<-vector()
dict_list[[10]]<-input$dictionary10
dict_list[[10]]<-strsplit(dict_list[[10]], ",")
dict_list[[10]]<-unlist(dict_list[[10]])
dict_list[[10]]<-str_trim(dict_list[[10]])
for (i in 1:10) {
if(length(dict_list[[i]])==0){
dict_list[[i]][1]<-"write something"
}
}
if (dict_list[[1]][1]=="write something") {
title(warning("WRITE AT LEAST ONE DICTIONARY!"))
}
else{
for (i in length(dict_list):1) {
if (dict_list[[i]][1]=="write something") {
dict_list[[i]]<-NULL
}
}
if(input$stem=='yes'){
for (i in 1:length(dict_list)) {
dict_list[[i]]<-append(dict_list[[i]],stemDocument(dict_list[[i]],language = "english"))# Select the language that apply
}
}
All_dict_words_to1term <- function(starting_doc,dict,sub_term){
for (i in 1:length(dict)) {
if (i==1){
new_doc<-str_replace_all(starting_doc,dict[i],sub_term)
}
else{
new_doc<-str_replace_all(new_doc,dict[i],sub_term)
}
}
return(new_doc)
}
# Function to iterate the previous function over several dictionaries and create a list of the texts thus processed (the final element of the list is the one processed after all of the dictionaries)
All_dict_words_to1term_fin <- function(starting_doc,dictionaries){
result<-list()
for (i in 1:length(dictionaries)) {
if (i==1) {
result[[i]]<-All_dict_words_to1term(starting_doc,dictionaries[[i]],dictionaries[[i]][1])
}
else{
result[[i]]<-All_dict_words_to1term(result[[i-1]],dictionaries[[i]],dictionaries[[i]][1])
}
}
return(result)
}
processed_text<-All_dict_words_to1term_fin(preproc_text,dict_list)
processed_text<-processed_text[[length(dict_list)]]
dict_new<-vector()
for (i in 1:length(dict_list)) {
dict_new[i] <- dict_list[[i]][1]
}
dict_new<-tolower(dict_new)
print(dict_new)
node_reference<- data.frame(dict_new, c(1:length(dict_new)))
node_reference<-node_reference[,2]
node_reference<-`names<-`(node_reference,dict_new)
strsplit_space_tokenizer <- function(x)
unlist(strsplit(as.character(x), "[[:space:]]+"))
if(input$language=='english'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="en", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='spanish'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="spanish", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='german'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="german", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='italian'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="italian", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
termFreq2<-function(x){
termFreq(x,
control = ctrl)
}
processed_text<-iconv(processed_text, "UTF-8",'latin1', sub = "") #change the parameters of conversion as needed, esepcially if errors are thrown at this stage
Text_freq<-termFreq2(processed_text)
Text_freq<-Text_freq[dict_new]
nodes_df<-data.frame(dict_new,node_reference,Text_freq)
Occurrences<-vector()
texts<-vector()
dictionaries<-vector()
texts<-append(texts,replicate(length(dict_new),paste('text',1)))
dictionaries<-append(dictionaries,dict_new)
Occurrences<- append(Occurrences,nodes_df[,3])
newer_df<-data.frame(as.factor(texts),
as.factor(dictionaries),Occurrences)
DT::datatable(newer_df)}}
else{
list_text<-list()
list_text[[1]]<-readLines(input$file1$datapath)
list_text[[2]]<-readLines(input$file2$datapath)
if(num_texts>2){list_text[[3]]<-readLines(input$file3$datapath)}
if(num_texts>3){list_text[[4]]<-readLines(input$file4$datapath)}
if(num_texts>4){list_text[[5]]<-readLines(input$file5$datapath)}
for(i in 1:num_texts) {
preproc_text_list[[i]]<-paste(list_text[[i]], collapse = ' ')
preproc_text_list[[i]]<-gsub("((?:\b| )?([.,:;!?]+)(?: |\b)?)", " \\1 ", preproc_text_list[[i]], perl=T)#Add space between words and punctuation
if(input$stem=='yes'){
preproc_text_list[[i]]<-stemDocument(preproc_text_list[[i]])}
preproc_text_list[[i]]<-paste(preproc_text_list[[i]], collapse = " ")
preproc_text_list[[i]]<-`names<-`(preproc_text_list[[i]],paste("text",i,"_c",sep = ""))
}
dict_list<-list()
dict_list[[1]]<-vector()
dict_list[[1]]<-input$dictionary1
dict_list[[1]]<-strsplit(dict_list[[1]], ",")
dict_list[[1]]<-unlist(dict_list[[1]])
dict_list[[1]]<-str_trim(dict_list[[1]])
dict_list[[2]]<-vector()
dict_list[[2]]<-input$dictionary2
dict_list[[2]]<-strsplit(dict_list[[2]], ",")
dict_list[[2]]<-unlist(dict_list[[2]])
dict_list[[2]]<-str_trim(dict_list[[2]])
dict_list[[3]]<-vector()
dict_list[[3]]<-input$dictionary3
dict_list[[3]]<-strsplit(dict_list[[3]], ",")
dict_list[[3]]<-unlist(dict_list[[3]])
dict_list[[3]]<-str_trim(dict_list[[3]])
dict_list[[4]]<-vector()
dict_list[[4]]<-input$dictionary4
dict_list[[4]]<-strsplit(dict_list[[4]], ",")
dict_list[[4]]<-unlist(dict_list[[4]])
dict_list[[4]]<-str_trim(dict_list[[4]])
dict_list[[5]]<-vector()
dict_list[[5]]<-input$dictionary5
dict_list[[5]]<-strsplit(dict_list[[5]], ",")
dict_list[[5]]<-unlist(dict_list[[5]])
dict_list[[5]]<-str_trim(dict_list[[5]])
dict_list[[6]]<-vector()
dict_list[[6]]<-input$dictionary6
dict_list[[6]]<-strsplit(dict_list[[6]], ",")
dict_list[[6]]<-unlist(dict_list[[6]])
dict_list[[6]]<-str_trim(dict_list[[6]])
dict_list[[7]]<-vector()
dict_list[[7]]<-input$dictionary7
dict_list[[7]]<-strsplit(dict_list[[7]], ",")
dict_list[[7]]<-unlist(dict_list[[7]])
dict_list[[7]]<-str_trim(dict_list[[7]])
dict_list[[8]]<-vector()
dict_list[[8]]<-input$dictionary8
dict_list[[8]]<-strsplit(dict_list[[8]], ",")
dict_list[[8]]<-unlist(dict_list[[8]])
dict_list[[8]]<-str_trim(dict_list[[8]])
dict_list[[9]]<-vector()
dict_list[[9]]<-input$dictionary9
dict_list[[9]]<-strsplit(dict_list[[9]], ",")
dict_list[[9]]<-unlist(dict_list[[9]])
dict_list[[9]]<-str_trim(dict_list[[9]])
dict_list[[10]]<-vector()
dict_list[[10]]<-input$dictionary10
dict_list[[10]]<-strsplit(dict_list[[10]], ",")
dict_list[[10]]<-unlist(dict_list[[10]])
dict_list[[10]]<-str_trim(dict_list[[10]])
for (i in 1:10) {
if(length(dict_list[[i]])==0){
dict_list[[i]][1]<-"write something"
}
}
if (dict_list[[1]][1]=="write something") {
title(warning("WRITE AT LEAST ONE DICTIONARY!"))
}
else{
for (i in length(dict_list):1) {
if (dict_list[[i]][1]=="write something") {
dict_list[[i]]<-NULL
}
}
if(input$stem=='yes'){
for (i in 1:length(dict_list)) {
dict_list[[i]]<-append(dict_list[[i]],stemDocument(dict_list[[i]],language = "english"))# Select the language that apply
}
}
All_dict_words_to1term <- function(starting_doc,dict,sub_term){
for (i in 1:length(dict)) {
if (i==1){
new_doc<-str_replace_all(starting_doc,dict[i],sub_term)
}
else{
new_doc<-str_replace_all(new_doc,dict[i],sub_term)
}
}
return(new_doc)
}
# Function to iterate the previous function over several dictionaries and create a list of the texts thus processed (the final element of the list is the one processed after all of the dictionaries)
All_dict_words_to1term_fin <- function(starting_doc,dictionaries){
result<-list()
for (i in 1:length(dictionaries)) {
if (i==1) {
result[[i]]<-All_dict_words_to1term(starting_doc,dictionaries[[i]],dictionaries[[i]][1])
}
else{
result[[i]]<-All_dict_words_to1term(result[[i-1]],dictionaries[[i]],dictionaries[[i]][1])
}
}
return(result)
}
processed_text_list<-list()
for (i in 1:num_texts) {
processed_text_list[[i]]<-All_dict_words_to1term_fin(preproc_text_list[[i]], dict_list)
processed_text_list[[i]]<-processed_text_list[[i]][[length(dict_list)]]}
dict_new<-vector()
for (i in 1:length(dict_list)) {
dict_new[i] <- dict_list[[i]][1]
}
dict_new<-tolower(dict_new)
print(dict_new)
node_reference<- data.frame(dict_new, c(1:length(dict_new)))
node_reference<-node_reference[,2]
node_reference<-`names<-`(node_reference,dict_new)
strsplit_space_tokenizer <- function(x)
unlist(strsplit(as.character(x), "[[:space:]]+"))
if(input$language=='english'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="en", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='spanish'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="spanish", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='german'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="german", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
if(input$language=='italian'){
ctrl<- list(
tokenize = strsplit_space_tokenizer,
language="italian", # Change the language if needed
removePunctuation = TRUE,
removeNumbers =TRUE,
stopwords=TRUE)}
termFreq2<-function(x){
termFreq(x,
control = ctrl)
}
# Creating texts' frequencies (i.e. absolute occurrences) vector for the concepts of interest
Text_freq<-list()
for (i in 1:num_texts) {
processed_text_list[[i]]<-iconv(processed_text_list[[i]], "UTF-8",'latin1', sub = "") #change the parameters of conversion as needed, esepcially if errors are thrown at this stage
Text_freq[[i]]<-termFreq2(processed_text_list[[i]])
Text_freq[[i]]<-Text_freq[[i]][dict_new]
}
Text_freq_rel<-list()
for (i in 1:num_texts) {
processed_text_list[[i]]<-iconv(processed_text_list[[i]], "UTF-8",'latin1', sub = "") #change the parameters of conversion as needed, esepcially if errors are thrown at this stage
Text_freq_rel[[i]]<- Text_freq[[i]]/sum(termFreq2(processed_text_list[[i]]))*100
}
Nodes_df_list<-list()
for (i in 1:num_texts) {
Nodes_df_list[[i]]<- data.frame(dict_new,node_reference,Text_freq[[i]],Text_freq_rel[[i]])
Nodes_df_list[[i]]<-`names<-`(Nodes_df_list[[i]], c('Label','ID','Absolute Occurence', 'Relative Occurrence'))
}
Abs_Occurrences<-vector()
Rel_Occurrences<-vector()
texts<-vector()
dictionaries<-vector()
texts<-append(texts,replicate(length(dict_new),substr(as.character(input$file1[1]),1,nchar(as.character(input$file1[1]))-4)))
texts<-append(texts,replicate(length(dict_new),substr(as.character(input$file2[1]),1,nchar(as.character(input$file2[1]))-4)))
if(length(input$file3))texts<-append(texts,replicate(length(dict_new),substr(as.character(input$file3[1]),1,nchar(as.character(input$file3[1]))-4)))
if(length(input$file4))texts<-append(texts,replicate(length(dict_new),substr(as.character(input$file4[1]),1,nchar(as.character(input$file4[1]))-4)))
if(length(input$file5))texts<-append(texts,replicate(length(dict_new),substr(as.character(input$file5[1]),1,nchar(as.character(input$file5[1]))-4)))
for (i in 1:num_texts) {
#texts<-append(texts,replicate(length(dict_new),paste('text',i)))
dictionaries<-append(dictionaries,dict_new)
Abs_Occurrences<- append(Abs_Occurrences,Nodes_df_list[[i]]$`Absolute Occurence`)
}
for (i in 1:num_texts) {
#texts<-append(texts,replicate(length(dict_new),paste('text',i)))
dictionaries<-append(dictionaries,dict_new)
Rel_Occurrences<- append(Rel_Occurrences,paste(round(Nodes_df_list[[i]]$`Relative Occurrence`,digits = 3),'%',sep = " "))
}
newer_df<-data.frame(texts,
dictionaries,Abs_Occurrences,Rel_Occurrences)
DT::datatable(newer_df[1:(nrow(newer_df)/2),])}}}
})}
shinyApp(ui=ui_prova,server=server_prova)
|
dcf0117b6309a205aff17605bcb7361637e94863
|
c115602521d415d90914c31c3a7b160964e7d0fd
|
/man/makeCustomResampledMeasure.Rd
|
f3f74bdb8592e3e5e6aa63b0fc7a005c2299ccf2
|
[] |
no_license
|
praneesh12/mlr
|
295b5aefa72b8c56b070ac768de828d044ce0087
|
6069b98e8edb79148c005af08037ef89ea0d7cd0
|
refs/heads/master
| 2021-04-29T23:04:30.865508
| 2018-02-13T22:05:00
| 2018-02-13T22:05:00
| 121,545,775
| 1
| 0
| null | 2018-02-14T18:28:00
| 2018-02-14T18:28:00
| null |
UTF-8
|
R
| false
| true
| 2,716
|
rd
|
makeCustomResampledMeasure.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Measure_custom_resampled.R
\name{makeCustomResampledMeasure}
\alias{makeCustomResampledMeasure}
\title{Construct your own resampled performance measure.}
\usage{
makeCustomResampledMeasure(measure.id, aggregation.id, minimize = TRUE,
properties = character(0L), fun, extra.args = list(), best = NULL,
worst = NULL, measure.name = measure.id,
aggregation.name = aggregation.id, note = "")
}
\arguments{
\item{measure.id}{(`character(1)`)\cr
Short name of measure.}
\item{aggregation.id}{(`character(1)`)\cr
Short name of aggregation.}
\item{minimize}{(`logical(1)`)\cr
Should the measure be minimized?
Default is `TRUE`.}
\item{properties}{([character])\cr
Set of measure properties. For a list of values see [Measure].
Default is `character(0)`.}
\item{fun}{(`function(task, group, pred, extra.args)`)\cr
Calculates performance value from [ResamplePrediction] object.
For rare cases you can also use the task, the grouping or the extra arguments `extra.args`.
\describe{
\item{`task` ([Task])}{
The task.}
\item{`group` ([factor])}{
Grouping of resampling iterations. This encodes whether specific iterations
'belong together' (e.g. repeated CV).}
\item{`pred` ([Prediction])}{
Prediction object.}
\item{`extra.args` ([list])}{
See below.}
}}
\item{extra.args}{([list])\cr
List of extra arguments which will always be passed to `fun`.
Default is empty list.}
\item{best}{(`numeric(1)`)\cr
Best obtainable value for measure.
Default is -`Inf` or `Inf`, depending on `minimize`.}
\item{worst}{(`numeric(1)`)\cr
Worst obtainable value for measure.
Default is `Inf` or -`Inf`, depending on `minimize`.}
\item{measure.name}{(`character(1)`)\cr
Long name of measure.
Default is `measure.id`.}
\item{aggregation.name}{(`character(1)`)\cr
Long name of the aggregation.
Default is `aggregation.id`.}
\item{note}{([character]) \cr
Description and additional notes for the measure. Default is \dQuote{}.}
}
\value{
[Measure].
}
\description{
Construct your own performance measure, used after resampling.
Note that individual training / test set performance values will be set to `NA`, you
only calculate an aggregated value. If you can define a function that makes sense
for every single training / test set, implement your own [Measure].
}
\seealso{
Other performance: \code{\link{ConfusionMatrix}},
\code{\link{calculateConfusionMatrix}},
\code{\link{calculateROCMeasures}},
\code{\link{estimateRelativeOverfitting}},
\code{\link{makeCostMeasure}}, \code{\link{makeMeasure}},
\code{\link{measures}}, \code{\link{performance}},
\code{\link{setAggregation}},
\code{\link{setMeasurePars}}
}
|
c86cdad0d0cb428f3606a381a692a3742002e092
|
9fe15d7b960415851bba647bd4b9ad31dfd76dd1
|
/Code/CDP_marques.R
|
96339804864c43e2efa3133b36b856a617648d18
|
[
"MIT"
] |
permissive
|
micnngo/CDP
|
3a432fb077d6061245c3d08a973eed1d5bc9e457
|
6a7a1372cb601880c5d2f00e062a60ac54a68b6c
|
refs/heads/master
| 2020-12-29T20:00:49.463794
| 2020-03-10T16:31:05
| 2020-03-10T16:31:05
| 238,714,918
| 1
| 1
|
MIT
| 2020-03-10T16:31:06
| 2020-02-06T15:03:39
|
R
|
UTF-8
|
R
| false
| false
| 6,865
|
r
|
CDP_marques.R
|
### CDP: Marques data set
### Load libraries and functions
source('postDPMManalysis_functions.R')
Rcpp::sourceCpp('reassign_obs_parallel.cpp')
library(SingleCellExperiment)
### Load data
marques <- readRDS('../Data/marques.rds')
# condense genes
# Genes whose names contain "loc" carry a suffix after "_"; strip everything
# after the first "_" so duplicated loci share one row name and can be summed.
g_idx = grep("loc", rownames(marques))
g_names = rownames(marques)[g_idx]
g_renames = unlist(lapply(sapply(g_names, function(x) strsplit(x, "_")), `[[`, 1) )
rownames(marques)[g_idx] = g_renames
# Split counts into untouched genes vs. the renamed "loc" genes, sum counts of
# rows that now share a name, then stack the two parts back into one matrix.
marques_rest = counts(marques)[!(rownames(marques) %in% rownames(marques)[g_idx]),]
marques_collapsed = aggregate(x = counts(marques)[g_idx,], by = list(rownames(marques)[g_idx]), FUN = "sum")
rownames(marques_collapsed) = marques_collapsed$Group.1
marques_collapsed = marques_collapsed[, -1]
marques_new = rbind(marques_rest, marques_collapsed)
# remove genes/cells with no expression
which(colSums(marques_new) == 0) # all cells have expression
zero_rows = which(rowSums(marques_new) == 0)
# Keep only genes with at least one count; `dat` is the working count matrix.
dat = marques_new[!(rownames(marques_new) %in% rownames(marques_new)[zero_rows]),]
### Run DPMM in Julia
# Hyperparameters for the two Dirichlet-process mixture runs (Julia package
# DPMMSubClusters, driven through JuliaCall's julia_assign/julia_eval).
row_hyper = 0.1
row_alpha = 10
row_iter = 500
col_hyper = 1
col_alpha = 10
col_iter = 200
# Run on rows (assigns columns)
# The transposed matrix is clustered, so each *column* of `dat` (a cell)
# receives a cluster label.
julia_assign("tdat", t(dat))
julia_assign("row_hyper", row_hyper)
julia_assign("row_alpha", row_alpha)
julia_assign("row_iter", row_iter)
row_dp = julia_eval("DPMMSubClusters.fit(Matrix(tdat),
DPMMSubClusters.multinomial_hyper(fill(row_hyper, size(tdat)[1])),
row_alpha, iters = row_iter, seed = 1234)")
# save log likelihood and total number of clusters
row_LL <- get_LL_and_NClust(row_dp)
# save per iteration cluster assignments
# NOTE(review): element 8 of the fit result is assumed to hold the per-iteration
# label vectors — confirm against the DPMMSubClusters return structure.
row_z <- do.call(rbind, row_dp[[8]])
# Run on columns (assigns rows)
julia_assign("dat", dat)
julia_assign("col_hyper", col_hyper)
julia_assign("col_alpha", col_alpha)
julia_assign("col_iter", col_iter)
col_dp = julia_eval("DPMMSubClusters.fit(Matrix(dat),
DPMMSubClusters.multinomial_hyper(fill(col_hyper, size(dat)[1])),
col_alpha, iters = col_iter, seed = 1234)")
col_LL <- get_LL_and_NClust(col_dp)
col_z <- do.call(rbind, col_dp[[8]])
# Prepare file to save
# Persist raw DPMM traces (log-likelihoods and per-iteration labels) to HDF5,
# under separate "row" and "col" groups.
fheader = paste0("Results/marques_DPMM_rH_", row_hyper, "_rA_", row_alpha, "_cH_", col_hyper, "_cA_", col_alpha)
h5fname = paste0(fheader, ".h5")
h5createFile(h5fname)
h5createGroup(h5fname, "row")
h5createGroup(h5fname, "col")
h5ls(h5fname) # check file structure
h5write(as.matrix(row_LL), h5fname, "row/LL")
h5write(row_z, h5fname, "row/iter_z")
h5write(as.matrix(col_LL), h5fname, "col/LL")
h5write(col_z, h5fname, "col/iter_z")
### Calculate MAP and obtain initial cluster assignments z^r and z^c
# calculate most probable total number of clusters
top_n = 5
top_r = 5 # choose a number from 1 to top_n
top_c = 1 # choose a number from 1 to top_n
# Pick the iteration matching the chosen cluster-count mode for each side.
z_r_unsorted = data.frame("row_assignment" = row_z[get_z_idx(row_LL$total_num_clust, top_n = top_n, top = top_r), ])
z_c_unsorted = data.frame("col_assignment" = col_z[get_z_idx(col_LL$total_num_clust, top_n = top_n, top = top_c), ])
rtop = get_top_clusters(row_LL$total_num_clust, top_n = top_n)
ctop = get_top_clusters(col_LL$total_num_clust, top_n = top_n)
# rename z to be in order of ascending labels by convention and then save
z_r = z_name_convention(z_r_unsorted)
z_c = z_name_convention(z_c_unsorted)
starttime = proc.time()
### Calculate theta, phi_r and phi_c
# update assignments using data as weights
# Each count in `dat` expands its row/column label that many times, so labels
# are weighted by expression before reassignment (parallelised over cores).
nCores <- detectCores() - 2
cl <- makeCluster(nCores)
clusterExport(cl, varlist=c("z_r", "z_c", "dat"), envir=environment())
z_r_list = parApply(cl=cl, dat, 2, function(x) rep(z_r$row_assignment, x))
z_c_list = parApply(cl=cl, dat, 1, function(x) rep(z_c$col_assignment, x))
stopCluster(cl)
# Reassign expanded labels (C++ helper from reassign_obs_parallel.cpp).
z_r_updated = z_list_reassign(z_r_list, max(unique(z_r$row_assignment)), 100)
z_c_updated = z_list_reassign(z_c_list, max(unique(z_c$col_assignment)), 100)
# need to split into correct dimensions
z_r_split <- splitvec(unlist(z_r_updated), rowSums(dat))
z_c_split <- splitvec(unlist(z_c_updated), colSums(dat))
# tabulate updated assignments
r_table_count = lapply(z_r_split, function(x) tabulate(x, nbins = max(unique(z_r$row_assignment))) )
c_table_count = lapply(z_c_split, function(x) tabulate(x, nbins = max(unique(z_c$col_assignment))) )
# Calculate phi and theta
# assume that alpha = 0 for both phi_r and phi_c
# phi_* rows are per-observation cluster-membership proportions.
phi_r = as.data.frame(do.call(rbind, lapply(r_table_count, function(x) x/sum(x))))
phi_c = as.data.frame(do.call(rbind, lapply(c_table_count, function(x) x/sum(x))))
z_r_expand = splitz(z_r_updated, dat, 'col')
z_c_expand = splitz(z_c_updated, dat, 'row')
# get frequencies for each pairing and transform into theta
theta_df_listoflists = NULL # (no-op removed) -- see below
freq_listoflists = calc_frequency_list(z_r_expand, z_c_expand, dat)
freq_df = get_freq_list(freq_listoflists)
theta_df = get_freq_table(freq_df)
theta_table = theta_into_table(theta_df, "Prob")
endtime = proc.time()
print(endtime - starttime)
### Save intermediary steps
# FIX: the original indexed file names with the undefined variable `top`
# (only the first two saveRDS calls used `top_r`/`top_c`). Row-side objects
# are named with the row MAP choice `top_r`, column-side objects with `top_c`,
# matching the MAP selections made above. All file contents are unchanged.
saveRDS(z_r, paste0(fheader, "_row_z_MAP_", rtop$NumClusters[top_r], ".rds"))
saveRDS(z_c, paste0(fheader, "_col_z_MAP_", ctop$NumClusters[top_c], ".rds"))
saveRDS(z_r_list, paste0(fheader, "_row_z_list_MAP_", rtop$NumClusters[top_r], ".rds"))
saveRDS(z_c_list, paste0(fheader, "_col_z_list_MAP_", ctop$NumClusters[top_c], ".rds"))
saveRDS(z_r_updated, paste0(fheader, "_row_z_updated_MAP_", rtop$NumClusters[top_r], ".rds"))
saveRDS(z_c_updated, paste0(fheader, "_col_z_updated_MAP_", ctop$NumClusters[top_c], ".rds"))
saveRDS(r_table_count, paste0(fheader, "_row_tabulated_MAP_", rtop$NumClusters[top_r], ".rds"))
saveRDS(c_table_count, paste0(fheader, "_col_tabulated_MAP_", ctop$NumClusters[top_c], ".rds"))
saveRDS(phi_r, paste0(fheader, "_row_phi_MAP_", rtop$NumClusters[top_r], ".rds"))
saveRDS(phi_c, paste0(fheader, "_col_phi_MAP_", ctop$NumClusters[top_c], ".rds"))
saveRDS(z_r_expand, paste0(fheader, "_row_expanded_MAP_", rtop$NumClusters[top_r], ".rds"))
saveRDS(z_c_expand, paste0(fheader, "_col_expanded_MAP_", ctop$NumClusters[top_c], ".rds"))
saveRDS(theta_df, paste0(fheader, "_theta_Kr_", rtop$NumClusters[top_r], "_Kc_", ctop$NumClusters[top_c], ".rds"))
### Visualizations
pdir = "Plots/marques/"
pheader = paste0("marques_DPMM_rH_", row_hyper, "_rA_", row_alpha, "_cH_", col_hyper, "_cA_", col_alpha, "_")
# plots histogram of total num of clusters and log likelihood
plot_HGLL(row_LL, pdir, paste0(pheader, "row_"))
plot_HGLL(col_LL, pdir, paste0(pheader, "col_"))
# plots heatmap of theta
theta_fname = paste0(pdir, pheader, "theta_Kr_", rtop$NumClusters[top_r], "_Kc_", ctop$NumClusters[top_c], "_heatmap.pdf")
plot_theta(theta_df, "Prob", theta_fname)
### Post-Analysis
# top biclusters
# Sort bicluster probabilities in descending order (printed, not assigned).
theta_df %>% arrange(desc(Prob))
rgroup = most_probable_z(phi_r)
cgroup = most_probable_z(phi_c)
# example of one bicluster
# Sub-matrix of counts for row-cluster 5 x column-cluster 5.
dat[get_group_idx(rgroup, 5), get_group_idx(cgroup, 5)]
|
e6e17b1e9f8ead16d91e6d7acd413ec7268db430
|
beda4301d4b6b6dcd1db053e3f20cf24bd223ff6
|
/R/pub_hlth_status_by_cnty_shp.R
|
94e49147e2ec8f7576e09103a761bfa984e26725
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Fredo-XVII/R.COVID.19
|
b587b582c8fa6a30dcb04fa46e35643bdbfe528b
|
b43412625a2469055085db0055f3519f281ef681
|
refs/heads/master
| 2022-08-23T15:34:31.518645
| 2022-08-04T04:37:47
| 2022-08-04T04:37:47
| 250,618,422
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,582
|
r
|
pub_hlth_status_by_cnty_shp.R
|
# Functions Roxygen format
#' @title COVID-19 Public Health Status by County shapefiles from ESRI - DEFUNCT
#'
#' @description
#' Formerly pulled the current public-health emergency status for every US
#' county (one row per county), downloading the matching county shapefiles
#' into the working directory, from the ESRI open-data site.
#'
#' @details
#' The upstream ESRI dataset link no longer works, so this function is now
#' defunct and always signals an error when called.
#' Original source:
#' https://coronavirus-disasterresponse.hub.arcgis.com/datasets/97792521be744711a291d10ecef33a61/data?geometry=76.921%2C-16.701%2C-109.056%2C72.161
#'
#' @return Never returns; always raises a defunct error.
#'
#' @export
pub_hlth_status_by_cnty_shp <- function() {
  # Signal the defunct error; nothing below this call would ever execute.
  .Defunct(new = "None", package="None",
           msg = "This link is no longer working, now is removed")
}

# Previous implementation, kept for reference:
# .get_shp <- function(){
#   u_shp <- "https://opendata.arcgis.com/datasets/97792521be744711a291d10ecef33a61_0.zip"
#   utils::download.file(u_shp, "pub_hlth_status_by_cnty.zip")
#   utils::unzip("pub_hlth_status_by_cnty.zip")
# }
# get_shp <- sf::st_read("COVID19_Public_Health_Emergency_Status_by_County.shp") # old name
# get_shp <- sf::st_read("8acac48c-57be-4cc9-92a2-b932b279b46c2020329-1-gva0h9.tiu36.shp")
# return(invisible(get_shp))
|
59c945ebd5db96b581e77fac180a63b96f2bec32
|
e9a5a9e952a9ccac535efe64b96cc730b844677b
|
/man/setHyperlink-methods.Rd
|
e3fc4785fb2859080616c8649321f1d1ef6a80f6
|
[] |
no_license
|
miraisolutions/xlconnect
|
323c22258439616a4d4e0d66ddc62204094196c9
|
ae73bfd5a368484abc36638e302b167bce79049e
|
refs/heads/master
| 2023-09-04T05:27:42.744196
| 2023-08-30T07:10:44
| 2023-08-30T07:10:44
| 8,108,907
| 114
| 35
| null | 2023-08-30T07:10:46
| 2013-02-09T11:17:42
|
R
|
UTF-8
|
R
| false
| false
| 2,782
|
rd
|
setHyperlink-methods.Rd
|
\name{setHyperlink-methods}
\docType{methods}
\alias{setHyperlink}
\alias{setHyperlink-methods}
\alias{setHyperlink,workbook,missing,character-method}
\alias{setHyperlink,workbook,missing,numeric-method}
\alias{setHyperlink,workbook,character,missing-method}
\title{Setting hyperlinks}
\description{
Sets hyperlinks for specific cells in a \code{\linkS4class{workbook}}.
}
\usage{
\S4method{setHyperlink}{workbook,missing,character}(object,formula,sheet,row,col,type,address)
\S4method{setHyperlink}{workbook,missing,numeric}(object,formula,sheet,row,col,type,address)
\S4method{setHyperlink}{workbook,character,missing}(object,formula,sheet,row,col,type,address)
}
\arguments{
\item{object}{The \code{\linkS4class{workbook}} to use}
\item{formula}{A formula specification in the form Sheet!B8:C17. Use either the argument \code{formula} or the combination of \code{sheet}, \code{row} and \code{col}.}
\item{sheet}{Name or index of the sheet the cell is on. Use either the argument \code{formula} or the combination of \code{sheet}, \code{row} and \code{col}.}
\item{row}{Row index of the cell to apply the cellstyle to.}
\item{col}{Column index of the cell to apply the cellstyle to.}
\item{type}{Hyperlink type. See the corresponding "HYPERLINK.*" constants from the \code{\link{XLC}} object.}
\item{address}{Hyperlink address. This needs to be a valid URI including scheme. E.g. for email \code{mailto:myself@me.org}, for a URL \code{https://www.somewhere.net} or for a file \code{file:///a/b/c.dat}}
}
\details{
Sets a hyperlink for the specified cells. Note that \code{\linkS4class{cellstyle}}s for hyperlinks can be defined independently using
\code{\link[=setCellStyle-methods]{setCellStyle}}. The arguments are vectorized such that multiple hyperlinks can be set in one
method call. Use either the argument \code{formula} or the combination of \code{sheet}, \code{row} and \code{col}.
}
\author{
Martin Studer\cr
Mirai Solutions GmbH \url{https://mirai-solutions.ch}
}
\seealso{
\code{\linkS4class{workbook}}, \code{\link[=setCellStyle-methods]{setCellStyle}}
}
\examples{\dontrun{
# Load workbook (create if not existing)
wb <- loadWorkbook("setHyperlink.xlsx", create = TRUE)
# Create a sheet named 'mtcars'
createSheet(wb, name = "mtcars")
# Write built-in data set 'mtcars' to the above defined worksheet
writeWorksheet(wb, mtcars, sheet = "mtcars", rownames = "Car")
# Set hyperlinks
links <- paste0("https://www.google.com?q=", gsub(" ", "+", rownames(mtcars)))
setHyperlink(wb, sheet = "mtcars", row = seq_len(nrow(mtcars)) + 1, col = 1,
type = XLC$HYPERLINK.URL, address = links)
# Save workbook (this actually writes the file to disk)
saveWorkbook(wb)
# clean up
file.remove("setHyperlink.xlsx")
}
}
\keyword{methods}
\keyword{utilities}
|
198a60dd7398a98f8e5a53b26b0bcf649bf264c6
|
59f01adeda5026f870ae374abfafa3edbffa5c6e
|
/tests/testthat/test-predictions.R
|
37c634f00dd6ddb36b08956336f44a5436b26de7
|
[
"MIT"
] |
permissive
|
dcossyleon/tune
|
00a90451f150c85f61a5b6743367c424888426ec
|
ad58dff5a27a82b3e9aa47a966a65a81355b2eaf
|
refs/heads/master
| 2022-10-11T04:14:12.340091
| 2020-06-05T23:17:17
| 2020-06-05T23:17:17
| 271,087,691
| 1
| 0
| null | 2020-06-09T19:07:13
| 2020-06-09T19:07:12
| null |
UTF-8
|
R
| false
| false
| 3,223
|
r
|
test-predictions.R
|
context("extracting predictions")

source(test_path("../helper-objects.R"))

# Verify that a `.predictions` tibble is consistent with its resampling split:
# correct type, one row per (assessment row x tuning candidate), the expected
# column names, and row indices matching the assessment set.
check_predictions <- function(split, pred, tune_df) {
  holdout <- rsample::assessment(split)
  holdout_rows <- as.integer(split, data = "assessment")

  expect_true(tibble::is_tibble(pred))
  expect_equal(nrow(pred), nrow(holdout) * nrow(tune_df))

  expected_cols <- c(".pred", ".row", names(tune_df), "mpg")
  expect_equal(names(pred), expected_cols)

  expect_equal(sort(unique(holdout_rows)), sort(unique(pred$.row)))
  TRUE
}

load(test_path("test_objects.RData"))
# ------------------------------------------------------------------------------
test_that("recipe only", {
  # Unique tuning candidates evaluated by the grid search.
  grid <- collect_metrics(mt_spln_lm_grid) %>%
    dplyr::select(deg_free) %>%
    dplyr::distinct()
  # Each resample's predictions must be consistent with its split + grid.
  purrr::map2(
    mt_spln_lm_grid$splits,
    mt_spln_lm_grid$.predictions,
    check_predictions,
    grid
  )
  # initial values for Bayes opt
  init <- mt_spln_lm_bo %>% dplyr::filter(.iter == 0)
  init_grid <-
    collect_metrics(mt_spln_lm_bo) %>%
    dplyr::filter(.iter == 0) %>%
    dplyr::select(deg_free) %>%
    dplyr::distinct()
  purrr::map2(
    init$splits,
    init$.predictions,
    check_predictions,
    init_grid
  )
  # Now search iterations with a dummy grid
  # Each search iteration proposes exactly one candidate, so any 1-row grid
  # gives the expected row counts.
  bo <- mt_spln_lm_bo %>% dplyr::filter(.iter > 0)
  bo_grid <- init_grid %>% dplyr::slice(1)
  purrr::map2(
    bo$splits,
    bo$.predictions,
    check_predictions,
    bo_grid
  )
})
# ------------------------------------------------------------------------------
test_that("model only", {
  # Unique tuning candidates evaluated by the grid search.
  grid <-
    collect_metrics(mt_knn_grid) %>%
    dplyr::select(neighbors) %>%
    dplyr::distinct()
  purrr::map2(
    mt_knn_grid$splits,
    mt_knn_grid$.predictions,
    check_predictions,
    grid
  )
  # initial values for Bayes opt
  init <- mt_knn_bo %>% dplyr::filter(.iter == 0)
  init_grid <-
    collect_metrics(mt_knn_bo) %>%
    dplyr::filter(.iter == 0) %>%
    dplyr::select(neighbors) %>%
    # FIX: was a bare `distinct()`; fully qualified to match every other call
    # in this file (the bare call relied on dplyr being attached elsewhere).
    dplyr::distinct()
  purrr::map2(
    init$splits,
    init$.predictions,
    check_predictions,
    init_grid
  )
  # Now search iterations with a dummy grid (one candidate per iteration).
  bo <- mt_knn_bo %>% dplyr::filter(.iter > 0)
  bo_grid <- init_grid %>% dplyr::slice(1)
  purrr::map2(
    bo$splits,
    bo$.predictions,
    check_predictions,
    bo_grid
  )
})
# ------------------------------------------------------------------------------
test_that("model and recipe", {
  # Two tuning parameters here: recipe (deg_free) and model (neighbors).
  grid <-
    collect_metrics(mt_spln_knn_grid) %>%
    dplyr::select(deg_free, neighbors) %>%
    dplyr::distinct()
  purrr::map2(
    mt_spln_knn_grid$splits,
    mt_spln_knn_grid$.predictions,
    check_predictions,
    grid
  )
  # initial values for Bayes opt
  init <- mt_spln_knn_bo %>% dplyr::filter(.iter == 0)
  init_grid <-
    collect_metrics(mt_spln_knn_bo) %>%
    dplyr::filter(.iter == 0) %>%
    dplyr::select(deg_free, neighbors) %>%
    dplyr::distinct()
  purrr::map2(
    init$splits,
    init$.predictions,
    check_predictions,
    init_grid
  )
  # Now search iterations with a dummy grid (one candidate per iteration).
  bo <- mt_spln_knn_bo %>% dplyr::filter(.iter > 0)
  bo_grid <- init_grid %>% dplyr::slice(1)
  purrr::map2(
    bo$splits,
    bo$.predictions,
    check_predictions,
    bo_grid
  )
})
|
366662b1e66a81e435252f94be48f569440f6722
|
637cb9135b7e092e4cb20c5096064330b1902a16
|
/scripts/src/test_0_install.R
|
cb0dd52e35e532da6dd5d45e9a6dd571b7f33305
|
[
"MIT"
] |
permissive
|
erictleung/dada2HPCPipe
|
a59897ea6568eb1b95dedfd44c90acd1bae2591c
|
c6b26273e6b8e8098d47b4f01acc1a37f0e4deed
|
refs/heads/master
| 2021-07-04T04:20:42.520253
| 2018-06-29T23:09:58
| 2018-06-29T23:09:58
| 96,589,393
| 3
| 1
| null | 2017-09-25T08:37:23
| 2017-07-08T01:56:44
|
R
|
UTF-8
|
R
| false
| false
| 306
|
r
|
test_0_install.R
|
# Title:
# Test Package Installation
# Description:
# This script simply tests the loading of the package into R to make sure it
# does so without any errors
# Load library
cat("Loading dada2HPCPipe package...\n")
# library() errors (unlike require()) if the package is missing, which is the
# desired fail-fast behaviour for an installation smoke test.
library(dada2HPCPipe); packageVersion("dada2HPCPipe"); cat("\n")
cat("No errors!\n")
|
53c01765bf3035b6d1c1ad063775b941261f69c1
|
48bec1ab1d9b96416ac4ce2462c8718ad3271b2e
|
/R/tests/testy.R
|
7962b4da147608e7cad174b13b79384ea76c4a2b
|
[] |
no_license
|
kaletap/spectral-clustering
|
8a0c9550363f03ae5b036247a9166a20500e0ead
|
e013ad0822f9b6558fc0fbcc7aae66a5aeca2de1
|
refs/heads/master
| 2020-04-18T18:06:51.553670
| 2019-02-13T18:49:21
| 2019-02-13T18:49:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,487
|
r
|
testy.R
|
library(mclust)
library(dendextend)
library(genie)
source("spectral.R")
# Read one benchmark dataset: the observation matrix X and reference labels Y
# stored as ../benchmarks/<benchmark>/<dataset>.data.gz / .labels0.gz.
read_data <- function(benchmark, dataset){
  base_dir <- file.path("..", "benchmarks", benchmark)
  data_path <- file.path(base_dir, paste0(dataset, ".data.gz"))
  labels_path <- file.path(base_dir, paste0(dataset, ".labels0.gz"))
  list(
    X = as.matrix(read.table(data_path)),
    Y = as.matrix(read.table(labels_path))
  )
}
# Scatter the first two columns of X, colouring points by their labels Y.
plot_data <- function(X, Y, title=""){
  point_colors <- unlist(Y)
  plot(X[, 1], X[, 2], col = point_colors, pch = 20)
  title(title)
}
# Run spectral clustering on one benchmark dataset, optionally plot the result,
# and print Fowlkes-Mallows / adjusted Rand agreement with reference labels.
# M is forwarded to spectral_clustering() from spectral.R (presumably a
# neighbourhood/eigenvector parameter -- confirm in spectral.R);
# k defaults to the number of distinct reference labels.
test_spectral_single <- function(benchmark, dataset, M=20, k=NULL, scale=FALSE, plot=TRUE){
  data <- read_data(benchmark, dataset)
  X <- data$X
  if(scale){
    X <- scale(X)
  }
  Y <- data$Y
  if(is.null(k)){
    k = length(unique(unlist(Y)))
  }
  set.seed(42) # because kmeans in spectral clustering randomly initializes centers
  Y_pred <- spectral_clustering(X, k, M)
  if(plot){
    plot_data(X, Y_pred, paste(paste(benchmark, dataset, sep="/"), ": spectral ", sep=""))
  }
  print(paste("FM:", FM_index(Y, Y_pred), " AR:", adjustedRandIndex(Y, Y_pred), sep=" "))
  #return(Y_pred)
}
# Agglomerative clustering on one benchmark dataset: plot the cut tree and
# print FM / adjusted Rand agreement with the reference labels.
test_hclust <- function(benchmark, dataset, method="complete", k=NULL, scale=FALSE){
  ds <- read_data(benchmark, dataset)
  X <- ds$X
  if (scale) {
    X <- scale(X)
  }
  Y <- ds$Y
  if (is.null(k)) {
    k <- length(unique(unlist(Y)))
  }
  Y_pred <- cutree(hclust(dist(X), method), k = k)
  plot_data(X, Y_pred, paste(paste(benchmark, dataset, sep="/"), ": hclust ", method, sep=""))
  print(paste("FM:", FM_index(Y, Y_pred), " AR:", adjustedRandIndex(Y, Y_pred), sep=" "))
}
# Same evaluation as test_hclust(), but clustering with the genie package's
# hclust2() instead of base hclust().
test_genie <- function(benchmark, dataset, k=NULL, scale=FALSE){
  data <- read_data(benchmark, dataset)
  X <- data$X
  if(scale){
    X <- scale(X)
  }
  Y <- data$Y
  if(is.null(k)){
    # Default: one cluster per distinct reference label.
    k = length(unique(unlist(Y)))
  }
  hc <- hclust2(dist(X))
  Y_pred <- cutree(hc, k=k)
  plot_data(X, Y_pred, paste(paste(benchmark, dataset, sep="/"), ": genie", sep=""))
  print(paste("FM:", FM_index(Y, Y_pred), " AR:", adjustedRandIndex(Y, Y_pred), sep=" "))
  #return(Y_pred)
}
# Testing on sample datasets
test_spectral_single("graves", "dense", M=20)
test_spectral_single("graves", "dense", M=20, scale=TRUE)
test_spectral_single("sipu", "flame", M=20)
test_spectral_single("sipu", "flame", M=20, scale=TRUE)
test_spectral_single("fcps", "tetra", M=5)
test_spectral_single("fcps", "tetra", M=5, scale=TRUE)
# hclust
test_hclust("graves", "dense")
test_hclust("graves", "dense", scale=TRUE)
# genie
test_genie("graves", "dense")
test_genie("graves", "dense", scale=TRUE)
# (Translated from Polish:) Scaling seems to affect the spectral clustering
# algorithm quite inconsistently: sometimes it changes nothing, sometimes it
# improves the results, but it can also make them worse.
# (Translated from Polish:) Unlike in Python, I do not trust kmeans in R at
# all here; it gave me fairly random results.
# My own datasets
# Two Gaussian blobs: n points centred at mi1 (label 1) and n points centred
# at mi2 (label 2), both with isotropic standard deviation `sig`.
# Returns list(X = 2n x 2 matrix, Y = label vector).
random_dataset <- function(mi1, mi2, sig, n){
  draw_blob <- function(center) {
    # x first, then y, so the RNG stream matches draws made column-by-column.
    cbind(x = rnorm(n, center[1], sig), y = rnorm(n, center[2], sig))
  }
  X <- rbind(draw_blob(mi1), draw_blob(mi2))
  Y <- rep(c(1, 2), each = n)
  list(X = X, Y = Y)
}
# Demo: two overlapping Gaussian blobs, then cluster with spectral clustering
# and compare the predicted partition against the true labels.
data <- random_dataset(c(0, 0), c(2, 2), 0.5, 50)
X <- data$X
Y <- data$Y
plot_data(X, Y)
Y_pred <- spectral_clustering(X, 2)
plot_data(X, Y_pred)
# Agreement indices (printed interactively).
FM_index(Y, Y_pred)
adjustedRandIndex(Y, Y_pred)
# Parametric heart curve sampled at n points, optionally translated by
# (x_change, y_change). Note: 6.29 is used as an approximation of 2*pi,
# kept as-is to preserve the original point positions.
get_heart <- function(n, x_change=0, y_change=0){
  theta <- seq(0, 6.29, length.out = n)
  heart_x <- 16 * sin(theta)^3
  heart_y <- 13 * cos(theta) - 5 * cos(2*theta) - 2 * cos(3*theta) - cos(4*theta)
  cbind(heart_x + x_change, heart_y + y_change)
}
# Build n_hearts translated heart curves (n points each), write the points and
# labels to datasets/hearts_<n_hearts>.{data,labels0}, and return them.
save_hearts <- function(n, n_hearts){
  X <- matrix(, nrow=0, ncol = 2)
  Y <- c()
  for(i in 1:n_hearts){
    # Offset each heart by (20i, 20i) so the shapes do not overlap.
    X1 <- get_heart(n, 20*i, 20*i)
    Y1 <- rep(i, nrow(X1))
    X <- rbind(X, X1)
    Y <- c(Y, Y1)
  }
  file_name <- paste("hearts", n_hearts, sep="_")
  write.table(X, file=file.path("datasets", paste(file_name, "data", sep=".")), row.names=FALSE, col.names=FALSE)
  write.table(Y, file=file.path("datasets", paste(file_name, "labels0", sep=".")), row.names=FALSE, col.names=FALSE)
  return(list(X=X, Y=Y))
}
# Demo: two translated hearts, clustered with spectral clustering.
data <- save_hearts(50, 2)
X <- data$X
Y <- data$Y
plot_data(X, Y)
Y_pred <- spectral_clustering(X, 2)
plot_data(X, Y_pred) # Quite good (translated from Polish)
FM_index(Y, Y_pred)
adjustedRandIndex(Y, Y_pred)
# Generate heart datasets with 2..10 hearts (40 points each) for later use.
for(i in 2:10){
  save_hearts(40, i)
}
|
220db8d1aaed96677d132c63eb28061de825a2a1
|
4a65f8536366b0dea196bb6a2f1384089fe425f7
|
/man/moto.Rd
|
bc759880bab876fe72c0e7bf56c7515df5749d41
|
[] |
no_license
|
RobinHankin/hyper2
|
f577043872f55150619a7f04e2c945ce038c85e2
|
f19fcc4eee6882f6ec5f6eef9c553be42a50bee1
|
refs/heads/master
| 2023-08-31T02:53:40.288486
| 2023-08-23T01:22:35
| 2023-08-23T01:22:35
| 105,470,077
| 5
| 3
| null | 2023-06-01T21:57:44
| 2017-10-01T19:55:58
|
TeX
|
UTF-8
|
R
| false
| false
| 1,206
|
rd
|
moto.Rd
|
\name{moto}
\alias{moto}
\alias{motoGP}
\alias{motoGP_2019}
\alias{moto_table}
\alias{moto}
\alias{moto_maxp}
\docType{data}
\title{MotoGP dataset}
\description{Race results from the 2019 Grand Prix motorcycling season}
\usage{data(moto)}
\details{
Object \code{moto_table} is a dataframe of results showing ranks of 28
riders in the 2019 FIM MotoGP World Championship. The format
is standard, that is, can be interpreted by function
\code{ordertable2supp()} if the final points column is removed. The
corresponding support function is \code{motoGP_2019}.
These objects can be generated by running script \code{inst/moto.Rmd},
which includes some further discussion and technical documentation and
creates file \code{moto.rda} which resides in the \code{data/}
directory.
}
\references{
Wikipedia contributors. (2020, February 8). 2019 MotoGP season. In
\emph{Wikipedia, The Free Encyclopedia.} Retrieved 08:16, February 20,
2020, from \url{https://en.wikipedia.org/w/index.php?title=2019_MotoGP_season&oldid=939711064}
}
\note{
Many drivers have names with diacritics, which have been removed from
the dataframe.
}
\seealso{\code{\link{ordertable2supp}}}
\examples{
pie(moto_maxp)
}
|
528571a7006b929f9d50c93ecdb7c6fb25377a56
|
6ac4b51b3bbf24b1f1265a5c58193dda48f21b5d
|
/R Programming for Statistics and Data Science/1. Sections 1-6 - The Basics/1. Vectors,Matrices,Lists.R
|
07d6e09e29e4e18d00ce51cdf4e5a4b33449af4d
|
[] |
no_license
|
DanInacio/R
|
114b08660e2d8d5dc38fc679e6e04c4dc4f6a57e
|
ebcb99d3b704ce71b976c013ddfe9001191339bb
|
refs/heads/main
| 2023-03-29T09:08:05.047602
| 2021-04-07T23:22:50
| 2021-04-07T23:22:50
| 355,334,712
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,791
|
r
|
1. Vectors,Matrices,Lists.R
|
# ----------------------------------------------------------------------------
# Exercise 1: Creating Objects in R
x <- 10:20
y <- x +2
z <- y*3
answer <- (z-6)/3
# One line of code
x <- (((10:20) + 2)*3 - 6)/3
# ----------------------------------------------------------------------------
# Exercise 2: Data Types in R
cards <- c("Blue-Eyes White Dragon","Exodius",
"The Winged Dragon of Ra","Raigeki",
"Slifer the Sky Dragon","Obelisk the Tormentor",
"Black Luster Soldier","5-Headed Dragon",
"Exodia the Forbidden One","Dragon Master Knight")
typeof(cards)
atk <- c(3000,NA,NA,NA,NA,4000,3000,5000,1000,5000)
typeof(atk)
yugioh <- c(cards,atk)
yugioh
# ----------------------------------------------------------------------------
# Exercise 3: Coercion Rules in R
monster <- c(T,T,T,F,T,T,T,T,T,T)
yugioh <- c(yugioh,monster)
yugioh # monster is of logical type
coerce.check <- c(atk,monster)
coerce.check # monster is of integer type
# String > Integer > Logical
# ----------------------------------------------------------------------------
# Exercise 4: Using Functions in R
lvl <- c(8,10,10,1,10,10,8,12,1,12)
sum(lvl)
mean(lvl)
median(lvl)
length(lvl)
sd(lvl)
round(sd(lvl),2)
print(round(sd(lvl),2))
# ----------------------------------------------------------------------------
# Exercise 5: Arguments of a Function
?sample()
?median()
median(lvl,na.rm=TRUE)
median(atk)
median(atk,na.rm=TRUE)
# ----------------------------------------------------------------------------
# Exercise 6: Building a function in R
# Cheating Coin Flip
draw <- function(){
coin <- c("Heads","Tails")
prob.vector <- c(0.7,0.3)
sample(coin,size=100,replace=TRUE,prob=prob.vector)
}
draw()
# ----------------------------------------------------------------------------
# Exercise 7: Vector Recycling
weight <- c(71,67,83,67)
height <- c(1.75,1.81,1.78,1.82,1.97,2.12,2.75)
BMI <- weight/(height^2)
BMI
# ----------------------------------------------------------------------------
# Exercise 8: Naming Vectors in R
names(atk) <- c("Blue-Eyes White Dragon","Exodius",
"The Winged Dragon of Ra","Raigeki",
"Slifer the Sky Dragon","Obelisk the Tormentor",
"Black Luster Soldier","5-Headed Dragon",
"Exodia the Forbidden One","Dragon Master Knight")
atk
attributes(atk)
# names(atk) <- NULL
# atk
# You can also name when creating the vector!
# ----------------------------------------------------------------------------
# Exercise 9: Indexing and Slicing a Vector
atk[6]
atk[-2]
atk[c(1,3,5,7,9)]
atk[-(4:6)]
atk[atk>2000]
# ----------------------------------------------------------------------------
# Exercise 10: Vector Attributes - Dimensions
s <- seq(from=2,to=30,by=2)
attributes(s) # None
dim(s) <- c(3,5)
s
attributes(s)
# ----------------------------------------------------------------------------
# Exercise 11: Creating a matrix in R
player <- c(rep("dark",5), rep("light",5))
piece <- c("king","queen","pawn","pawn","knight",
"bishop","king","rook","pawn","pawn")
chess <- c(player,piece)
chess
# Method 1: dim()
?dim()
dim(chess) <- c(10,2)
colnames(chess) <- c("Player","Piece")
chess
# Method 2: matrix()
?matrix()
matrix(data=chess,nrow=10,ncol=2,byrow=FALSE,
dimnames=list(NULL,c("Player","Piece")))
# Method 3: cbind()
?cbind()
cbind("Player"=player,"Piece"=piece)
# ----------------------------------------------------------------------------
# Exercise 12: Indexing and Slicing a Matrix
chess <- rbind(t(chess),"Turn"=c(3,5,2,2,7,4,6,5,2,1))
chess
chess <- t(chess)
chess
chess[6,2] # Element
chess[,"Player"] # Column
chess[,"Piece"] # Column
chess[1:5,] # All info on "dark"
chess[,"Piece",drop=FALSE] # Extract and keep as matrix
chess[,-2] # All but "Piece"
chess[2,c(1,3)] # 1st and 3rd elements in 2nd row
chess[7,3] <- 3 # Replace an element
chess
# ----------------------------------------------------------------------------
# Exercise 13: Matrix Arithmetic
?runif()
?matrix()
randomMatrix <- matrix(data=runif(12),nrow=3,ncol=4,byrow=TRUE)
colnames(randomMatrix) <- c("Uno","Dos","Tres","Cuatro")
rownames(randomMatrix) <- c("x","y","z")
randomMatrix
randomMatrix <- randomMatrix*10
randomMatrix
subMatrix <- randomMatrix[1:2,1:4]
subMatrix
randomMatrix-subMatrix # Fails. Inadequate sizes
subMatrix <- randomMatrix[1:3,1:3]
randomMatrix-subMatrix # Fails. Inadequate sizes
uno <- randomMatrix[,"Uno"]
uno
randomMatrix-uno # Works!
# Recycling applies when operations are done with a matrix and a vector!
# NOT with two matrices!!!
# Now with rnorm()
?rnorm()
rMatrix <- matrix(rnorm(12),nrow=3,ncol=4)
rMatrix
randomMatrix*rMatrix # 3x4 * 3x4
# Inner Matrix Multiplication (algebraic)
randomMatrix%*%t(rMatrix)
# ----------------------------------------------------------------------------
# Exercise 14: Matrix Operations
n <- matrix(rnorm(15),nrow=5,ncol=5,byrow=TRUE)
u <- matrix(runif(15),nrow=5,ncol=5,byrow=TRUE)
totalCol_n <- colSums(n)
avgCol_n <- colMeans(n)
totalCol_u <- colSums(u)
avgCol_u <- colMeans(u)
n <- rbind(n,totalCol_n,avgCol_n)
u <- rbind(u,totalCol_u,avgCol_u)
totalRow_n <- rowSums(n)
avgRow_n <- rowMeans(n)
totalRow_u <- rowSums(u)
avgRow_u <- rowMeans(u)
n <- cbind(n,totalRow_n,avgRow_n)
u <- cbind(u,totalRow_u,avgRow_u)
min(n)
min(u)
max(n)
max(u)
min(n[,3])
min(u[,3])
max(n[,3])
max(u[,3])
mean(n)
mean(u)
sd(n)
sd(u)
# Data generated with rnorm() will always have an sd close to 1,
# because this is how the function is defined to work
# rnorm() generates data with default settings mean=0, standard deviation = 1
# runif() generates data within the [0, 1] range
# ----------------------------------------------------------------------------
# Exercise 15: Creating a factor in R
player <- c(rep("dark",5), rep("light",5))
piece <- c("king","queen","pawn","pawn","knight",
"bishop","king","rook","pawn","pawn")
chess <- c(player,piece)
chess
chess.mat <- matrix(data=chess,nrow=10,ncol=2,byrow=FALSE,
dimnames=list(NULL,c("Player","Piece")))
piece_vec <- chess.mat[,"Piece"]
piece_factor <- factor(piece_vec)
# Labelling without ordering
piece_factor <- factor(piece_vec,
levels = c("king","queen",
"rook","bishop",
"knight","pawn"),
labels = c("King","Queen",
"Rook","Bishop",
"Knight","Pawn"))
# Labelling with ordering
levels(piece_factor) <- c("Ki","Q","R","B","Kn","P")
piece_ordered <- factor(piece_factor,ordered=TRUE,
levels = c("Ki","Q","R","B","Kn","P"),
labels = c("King","Queen",
"Rook","Bishop",
"Knight","Pawn"))
# ----------------------------------------------------------------------------
# Exercise 16: Lists in R
l <- list(Numbers=c(1,3,5,7,9,11),Phrases=list("Happy Birthday","Archery"))
l
l[[1]]
l[[2]][1]
l[[2]][2]
l[2]
l[1]
l[[1]]+2
l$Brands <- c("Kellogs","Nike","IPhone")
l
l$Brands <- NULL
# ----------------------------------------------------------------------------
# Exercise 17: Logical Operators
# Explain the difference between | , || , & and &&
# Answer:
# Single operators (|, &) can return a vector
((-2:2) >= 0) & ((-2:2) <= 0)
# FALSE FALSE TRUE FALSE FALSE
# Double operators (||, &&) return a single value
((-2:2) >= 0) && ((-2:2) <= 0)
# FALSE
# ----------------------------------------------------------------------------
# Exercise 18: If, Else, Else If
# Classify a review score into freshness buckets.
number <- -1
# FIX: the last bound was written `number <+ 100`, which parses as
# `number < +100` and wrongly excluded a score of exactly 100 from
# "Certified Fresh!"; it is now `<=`. Scalar conditions also use `&&`.
if((number >= 1) && (number < 60)){
  print("Rotten!")
} else if((number >= 60) && (number < 75)){
  print("Fresh!")
} else if((number >= 75) && (number <= 100)){
  print("Certified Fresh!")
} else
  print("Please input a number between 1 and 100")
# Another One
# Lottery check: the ticket wins only when all 6 picks appear in the draw.
lottery <- round(runif(6,min=1,max=50),0)
myTry <- c(7,39,20,24,35,32)
if(length(myTry) != 6){
  print("Invalid ticket. Choose 6 values")
} else{
  if(length(setdiff(myTry,lottery)) == 0){
    print("Congrats!")
  } else
    print("Lost...")
}
# Exercise 19: For/While/Repeat Loops in R
# Count to n with a for loop (result ends up equal to n, here 10)
n <- 10
result <- 0
for(i in 1:n){
  result <- result + 1
}
# ----
# The same count expressed as a while loop (n doubles as the counter)
n <- 0
result <- 0
while(n < 10){
  result <- result + 1
  n <- n+1
}
# ----
# repeat-loop version: left as an unfinished skeleton in the original
#n <- 0
#result <- 0
#repeat{
#  if(){
#    break
#  }
#}
# ----------------------------------------------------------------------------
# Exercise 20: Functions 2.0
# Simulate 100 flips of a biased coin: "Heads" with probability 0.7,
# "Tails" with probability 0.3. Returns a character vector of length 100.
draw <- function() {
  outcomes <- c("Heads", "Tails")
  weights <- c(0.7, 0.3)
  sample(outcomes, size = 100, replace = TRUE, prob = weights)
}
|
9e14b72bb504d697f925f9eb9b1f317d5f90c983
|
3ffa653e649a203832994147cb4f73aa9709b16a
|
/bin/ibd-plot-genomefile.r
|
d19f0291dcb14439036d8f39240d077f89046f61
|
[
"MIT"
] |
permissive
|
jkaessens/gwas-assoc
|
fa0ed3e36b2607263a66810b5a83fd266e22c9d4
|
1053c94222701f108362e33c99155cfc148f4ca2
|
refs/heads/master
| 2023-02-07T13:07:05.624146
| 2020-12-20T15:15:28
| 2020-12-20T15:15:28
| 323,265,733
| 0
| 0
|
MIT
| 2020-12-21T07:47:59
| 2020-12-21T07:44:58
| null |
UTF-8
|
R
| false
| false
| 821
|
r
|
ibd-plot-genomefile.r
|
#######################################
### 2010, David Ellinghaus ###
### 2019, Florian Uellendahl-Werth ###
#######################################
# Stream a (possibly very large) PLINK .genome IBD file chunk-wise and
# scatter-plot columns 7 and 8 (Z0 vs Z1) for every sample pair, writing
# the figure next to the input as "<file>.IBD-plot.png".
# The input file name is taken from commandArgs()[4].
rm(list=ls())
file.genome = commandArgs()[4]
inc<-10000                       # rows fetched from the stream per iteration
library("stream")
# take=c(7,8): the Z0 and Z1 columns of a PLINK .genome file (header skipped)
genome<-DSD_ReadCSV(file.genome,sep="",skip=1,take=c(7,8))
wng<-TRUE                        # loop flag (was T; TRUE is the safe spelling)
lines_n<-0
png(file=paste(file.genome, ".IBD-plot.png", sep=""), width=960, height=960)
# The first streamed point sets up the coordinate system; the rest are
# added incrementally below. Fix: xlab was "ZO" (letter O) for "Z0".
plot(get_points(genome,n=1,outofpoints = "warn"), xlim=c(0,1.0), ylim=c(0,1.0), xlab="Z0", ylab="Z1", pch=20, axes=FALSE)
while (wng) {
  points_tmp<-get_points(genome,n=inc,outofpoints = "ignore")
  points(points_tmp, xlim=c(0,1.0), ylim=c(0,1.0), pch=20)
  if(dim(points_tmp)[1]==0){wng<-FALSE}  # stream exhausted -> stop looping
  lines_n<-lines_n+inc
  print(lines_n)                 # progress: rows requested so far (approximate)
  flush.console()
}
axis(1, at=seq(0,1.0,0.2), tick=TRUE)
axis(2, at=seq(0,1.0,0.2), tick=TRUE)
dev.off()
|
17bbecd3a104265c5754ae8d7144ef732cc30cc9
|
9113e3269573abcd007a25d3413a3b0ee0ed8e4e
|
/1-layout/example-html.R
|
5f6ada8c67487001eb89765e2a422688e13577de
|
[] |
no_license
|
datasketch/rladies-stl-personalized-shiny-apps
|
ddab860b53cce453783add025d5cb44a14a356f3
|
aa72ed4f0ea3e610a53b86a2453e9a230b41cf59
|
refs/heads/main
| 2023-02-20T04:05:19.813308
| 2021-01-20T01:51:39
| 2021-01-20T01:51:39
| 330,156,512
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 435
|
r
|
example-html.R
|
library(shiny)
# Minimal Shiny app demonstrating raw HTML helpers: a heading, a
# blockquote, an inline-styled div, and a 3/9-column fluidRow layout
# holding two placeholder images.
ui <- fluidPage(
  h1("Hello"),
  tags$blockquote("Lorem teat tesatas tast at at eat"),
  # The style argument is emitted as inline CSS on the <div>
  div(style = "background-color: red;",
      p("Hello R Ladies"),
      h2("Subtitle")
  ),
  # Bootstrap grid: columns within a row sum to 12 units (3 + 9 here)
  fluidRow(
    column(3,
           img(src="https://via.placeholder.com/150"),
    ),
    column(9,
           img(src="https://via.placeholder.com/450"),
    )
  )
)
# Empty server function: the app is purely static markup
server <- function(input, output, session){
}
shinyApp(ui, server)
|
a21e1f30816e9e1b7989267a20bdc03bdf731d54
|
dfed572e96372e1a46c367516f24e931145325f0
|
/faza1/kody/analiza_zdarzenia.R
|
5f92c84342312a17ca08901a256bbcd51e65ca8e
|
[] |
no_license
|
flaminka/RandBigData-PROJEKT2
|
db49bab57d5e6e4bc138b6553923fe8891a7c768
|
b0e247a531a922250878820229d73b23d4ac5fea
|
refs/heads/master
| 2021-01-21T14:48:43.211396
| 2016-06-13T21:27:01
| 2016-06-13T21:27:01
| 58,480,439
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 996
|
r
|
analiza_zdarzenia.R
|
#' Group posts mentioning an event into episodes of activity
#'
#' Occurrence dates of `zdarzenie` (looked up in `mostf`) are split into
#' episodes: a new episode starts whenever two consecutive occurrence
#' dates are at least `k` days apart. For each episode, the matching post
#' bodies from `dane` are collected per day.
#'
#' @param zdarzenie single noun (event) to analyse; matched against
#'   `mostf$rzecz` and `dane$rzeczownik`.
#' @param dane data frame of posts; must contain `created_a` (date-like),
#'   `body` and `rzeczownik` columns.
#'   NOTE(review): reads `dane$created_a` but writes `dane$created_at` —
#'   confirm the source column name is really `created_a`.
#' @param mostf data frame with `rzecz` and `date` columns (dates on which
#'   each noun was prominent).
#' @param k integer; window length in days, also the minimum gap that
#'   starts a new episode.
#' @return list with one element per episode; each element is a list of
#'   character vectors of post bodies, named by date.
analiza_zdarzenia<- function(zdarzenie, dane, mostf, k){
  require(dplyr)
  date=as.Date(dane$created_a)
  dane$created_at=date
  date=date%>%unique()%>%sort()
  # All dates on which the event occurs, sorted ascending
  dats=filter(mostf, rzecz==zdarzenie)%>%select(date)%>%arrange(date)%>%t()%>%as.Date
  d=as.Date(dats[1]:(dats[1]+k-1), origin = "1970-01-01")
  # take marks episode starts: the first date, plus any date >= k days
  # after its predecessor; converted to positional indices into dats
  take=c(TRUE,diff(dats)>=k)
  take=(1:length(dats))[take]
  lapply(1:length(take), function(i, t, d, k, dane, z){
    # dd: the day range covered by this episode (start date of the
    # episode up to k-1 days past its last occurrence date)
    if(i==length(t)){
      dd=as.Date(dats[t[i]]:(dats[length(dats)]+k-1), origin="1970-01-01")
    }else{
      dd=as.Date(dats[t[i]]:(dats[t[i+1]-1]+k-1), origin="1970-01-01")
    }
    # Posts about the event that fall inside the episode's day range
    posty=dane%>%select(created_at, body, rzeczownik)%>%filter(rzeczownik==z, created_at %in% dd)
    dat=sort(unique(posty$created_at))
    # One character vector of post bodies per day, named by that day
    zd=lapply(dat, function(d, posty){
      posty%>%filter(created_at==d)%>%select(body)%>%t()%>%as.vector()
    }, posty=posty)
    names(zd)=dat
    zd
  }, t=take, d=dats, k=k, dane=dane, z=zdarzenie)
}
|
51101f0c0b9db00143c770cf1c387bf34e749cdf
|
b68ab237085809c789b14cbba81e95d6ac03e2ae
|
/chap2_ex2.R
|
40e95257151a948e6e18d370576edd6207e4a259
|
[] |
no_license
|
kgaythorpe/Rtest
|
37f31f9070de48fe6d394c530c407f4fd21cd1ec
|
f17e3411498bd22f6b22733395caeadf4cd21562
|
refs/heads/master
| 2021-06-29T05:15:55.424547
| 2017-09-27T14:29:52
| 2017-09-27T14:29:52
| 104,993,680
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,983
|
r
|
chap2_ex2.R
|
vignette("chapter2",package="jrShiny")
install.packages("d3heatmap")
library(d3heatmap)
d3heatmap(mtcars)
d3heatmap(x, k_row = 4, k_col = 2, scale = "column")
install.packages("DT")
library(DT)
data(iris, package="datasets")
datatable(iris)
install.packages("edgebundleR")
library(edgebundleR)
cor_mt = cor(mtcars)
cor_mt[cor_mt < 0.5 & cor_mt > -0.5] = 0
edgebundle(cor_mt)
install.packages("formattable")
devtools::install_github("renkun-ken/formattable")
library("formattable")
percent(c(0.1, 0.02, 0.03, 0.12))
#can also do nice tables
install.packages("ggiraph")
install.packages("networkD3")
library(networkD3)
networkData = data.frame(src=c("A", "A", "A", "A", "B", "B", "C", "C", "D"),
target=c("B", "C", "D", "J", "E", "F", "G", "H", "I"))
simpleNetwork(networkData)
data(MisLinks)
data(MisNodes)
# Plot
forceNetwork(Links = MisLinks, Nodes = MisNodes,
Source = "source", Target = "target",
Value = "value", NodeID = "name",
Group = "group", opacity = 0.8)
# Load energy projection data
# Load energy projection data
URL <- paste0(
"https://cdn.rawgit.com/christophergandrud/networkD3/",
"master/JSONdata/energy.json")
Energy <- jsonlite::fromJSON(URL)
# Plot
sankeyNetwork(Links = Energy$links, Nodes = Energy$nodes, Source = "source",
Target = "target", Value = "value", NodeID = "name",
units = "TWh", fontSize = 12, nodeWidth = 30)
install.packages("svgPanZoom")
install.packages("svglite")
devtools::install_github("timelyportfolio/svgPanZoom")
library("svgPanZoom")
library("svglite")## For base
library("ggplot2")
data(mtcars, package="datasets")
#install.packages("gridSVG")
d = ggplot(mtcars, aes(mpg, disp, colour=disp)) + geom_jitter() +
theme_bw()
svgPanZoom(d, controlIconsEnabled = TRUE)
devtools::install_github("htmlwidgets/sparkline")
library(htmlwidgets)
library(sparkline)
set.seed(1234)
x = rnorm(10)
y = rnorm(10)
sparkline(x)
|
9ef353e95fd9dc6156c1c250821e0231641e03a1
|
7d31f360f1ece69b09a4b51e3986ac44025efc7c
|
/package/clinUtils/man/getPaletteCDISC.Rd
|
6f9b5b1239090a740de72ccdd9f6a18ed22d8afa
|
[] |
no_license
|
Lion666/clinUtils
|
a0500a773225ffafc29b7d7f7bcc722dd416743c
|
dc6118f32d311657d410bdeba02f3720f01d62df
|
refs/heads/master
| 2023-08-12T08:48:42.923950
| 2021-09-21T14:56:18
| 2021-09-21T16:14:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,031
|
rd
|
getPaletteCDISC.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots-palettesCDISC.R
\name{getPaletteCDISC}
\alias{getPaletteCDISC}
\title{Get standard palette for typical CDISC variables.}
\usage{
getPaletteCDISC(x, var, type, palette = NULL)
}
\arguments{
\item{x}{Character vector of factor with
variable to consider.
The palette is built based on the unique elements
of this vector, or levels if \code{x} is a factor.}
\item{var}{String with type of variable, among:
\itemize{
\item{'NRIND': }{Normal Reference Range Indicator}
}}
\item{type}{String with type of palette:
\itemize{
\item{'shape': }{shape/symbol palette}
}}
\item{palette}{(optional) Named vector
with extra palette, e.g. to specify elements
for non-standard categories.
This palette is combined with the standard palette.}
}
\value{
Named vector with palette.
}
\description{
The extraction of the palette elements is case-insensitive.
}
\details{
The order of the palette depends on the type of the input
variable (\code{x}):
\itemize{
\item{if a factor is specified, the palette is ordered based
on its levels}
\item{if a character vector is specified,
the elements from the internal standard palette are used first,
the remaining elements are then sorted alphabetically.
}
}
}
\examples{
## palette for reference range indicator variables
xRIND <- c("LOW", "HIGH", "NORMAL", "NORMAL", "NORMAL", "ABNORMAL")
# get standard palette
getPaletteCDISC(x = xRIND, var = "NRIND", type = "shape")
getPaletteCDISC(x = xRIND, var = "NRIND", type = "color")
# in case extra categories are specified:
xRIND <- c(xRIND, "High Panic")
# the symbols are set to numeric symbols
getPaletteCDISC(xRIND, var = "NRIND", type = "shape")
# use shapePalette to specify symbols for extra categories
getPaletteCDISC(xRIND, var = "NRIND", type = "shape", palette = c("High Panic" = "\u2666"))
# palette is case-insensitive
xRIND <- c("Low", "High", "Normal", "Normal", "Normal")
getPaletteCDISC(xRIND, var = "NRIND", type = "shape")
}
\author{
Laure Cougnaud
}
|
ca5dc01b36a933144869e9f6a2e13e43d2df21f9
|
49bd976847a38b926b0ecab8da3f191cd60c786d
|
/bin/findInterfaces.R
|
aa120b284b24608a555a4affd2ff87dc2660c2ba
|
[] |
no_license
|
jweile/dmsPipeline
|
50f68c59efb85f57661dbd167c625a522d8d0a06
|
512c310fe78822d006ab55f61d9b759e0bf71edc
|
refs/heads/master
| 2023-08-12T08:23:33.701095
| 2017-06-30T17:58:24
| 2017-06-30T17:58:24
| 417,578,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,471
|
r
|
findInterfaces.R
|
#####################################
# Compare Y2H to compl data to find #
# interface positions #
#####################################
source("lib/resultfile.R")
source("lib/liblogging.R")
source("lib/libyogitools.R")
source("lib/cliargs.R")
source("lib/topoScatter.R")
source("lib/pymolCol.R")
library("hash")
options(stringsAsFactors=FALSE)
#get output directory
outdir <- getArg("outdir",default="workspace/test/")
#Initialize logger
logger <- new.logger(paste0(outdir,"findInterfaces.log"))
#Set resultfile
html <- new.resultfile(paste0(outdir,"results.html"))
html$section("Finding interaction interfaces")
logger$info("Reading input")
y2h <- read.csv(paste0(outdir,"y2h_scores_perMut.csv"))
compl <- read.csv(paste0(outdir,"imputed_regularized_UBE2I_scores.csv"))
rownames(compl) <- compl$mut
#remove multi-mutants
data <- y2h[regexpr(",",y2h$mut) < 0,]
#remove controls
data <- data[-which(data$mut %in% c("null","WT","longdel","longdup")),]
data$compl.score <- compl[data$mut,"joint.score"]
data$compl.sd <- compl[data$mut,"joint.sd"]
interactors <- c("SATB1","SUMO1","ZBED1","RNF40")
candidates <- NULL
logger$info("Comparing Y2H to Complementation scores")
html$subsection("Complementation vs Interaction Scores")
html$figure(function() {
op <- par(mfrow=c(2,2))
candidates <<- lapply(interactors,function(ia) {
iasd <- paste0(ia,".sd")
iav <- paste0(ia,".score")
is <- which(!is.na(data[,iasd]) &
data[,iasd] > 0 &
data[,iasd] < 0.5 &
data$compl.sd < 0.3
)
plot(NA,type="n",
main=paste("Complementation vs",ia),
xlab="Complementation score",
ylab=paste(ia,"interaction score"),
xlim=c(-0.5,2),
ylim=c(-1,2)
)
x <- data[is,c("compl.score")]
y <- data[is,c(iav)]
xsd <- data[is,c("compl.sd")]
ysd <- data[is,c(iasd)]
arrows(x-xsd/2,y,x+xsd/2,y,length=0.01,angle=90,code=3)
arrows(x,y-ysd/2,x,y+ysd/2,length=0.01,angle=90,code=3)
abline(h=0:1,v=0:1,col=c("firebrick3","chartreuse3"))
m <- data[is,"mut"]
js <- which(x > 0.5 & y < 0.5 & y < x-0.5)
cand.j <- data.frame(mut=m[js],compl=x[js],ia=y[js],type="interfacial")
ks <- which(x < 0.5 & y > 0.5 & y > x+0.5)
cand.k <- data.frame(mut=m[ks],compl=x[ks],ia=y[ks],type="deadfold")
rbind(cand.j,cand.k)
})
par(op)
},paste0(outdir,"complVinteraction"),10,10)
names(candidates) <- interactors
outfile <- paste0(outdir,"interface_candidates.txt")
con <- file(outfile,open="w")
invisible(lapply(interactors,function(ia) {
writeLines(c(ia,"======="),con)
write.table(format(candidates[[ia]],digits=2),con,quote=FALSE,sep="\t",row.names=FALSE)
writeLines("\n\n",con)
}))
close(con)
html$subsection("Interface candidates")
html$link.data(outfile)
logger$info("Colorizing structures")
#Make colored structure
data$pos <- as.integer(with(data,substr(mut,2,nchar(mut)-1)))
outfiles <- sapply(interactors,function(ia) {
outfile <- paste0(outdir,"y2h_pymol_colors_",ia,".txt")
pycol <- new.pymol.colorizer(outfile)
pycol$define.colors()
iasc <- paste0(ia,".score")
iasd <- paste0(ia,".sd")
is <- which(!is.na(data[,iasc]) & !is.na(data[,iasd]) &
data[,iasd] > 0 & data[,iasd] < 0.2
)
iadata <- data[is,c("mut",iasc,"pos")]
posmed <- tapply(iadata[,2],iadata[,3],median)
pycol$colorize(cbind(index=1:159,fitness=posmed[as.character(1:159)]))
pycol$close()
outfile
})
html$subsection("Structure Colorizations")
invisible(lapply(outfiles, html$link.data))
html$shutdown()
logger$info("Done!")
|
cf7477fac59e77516eb089dd2ff22b12d20f5ffe
|
f86d3a504220a17a39a7250ad8c623c03e78fcf1
|
/man/amnfis.Rd
|
ce437d991ab09531e64f4d191255ed2b7806048c
|
[] |
no_license
|
deybvagm/amnfis
|
0879c19635d9171fd0aba5496ac834a7779fefdf
|
3ae2c825061834e12399490d921031aef6c09316
|
refs/heads/master
| 2020-04-07T06:25:33.469324
| 2016-03-06T16:21:21
| 2016-03-06T16:21:21
| 47,901,230
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 267
|
rd
|
amnfis.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/hello.R
\name{amnfis}
\alias{amnfis}
\title{AMNFIS}
\usage{
amnfis(X, d, k)
}
\arguments{
\item{k}{number of clusters}
}
\value{
data
}
\description{
AMNFIS program
}
|
30a8b7ba20d09a5413e7c1e6604d4f042944796f
|
aa5057cc56659f28ecbdd2522fcf6c509d7d671b
|
/man/assignSeason.Rd
|
46033b567669060385ddc5ede9f4f87e738c6523
|
[] |
no_license
|
ksauby/dataproc
|
8fd0640707d4f6ba80e9868bc3f3c98f5d3c74b8
|
24f69d9160b0de4be79487bb11bd7982dd4d3ad6
|
refs/heads/master
| 2021-04-24T18:00:13.990345
| 2019-01-30T00:42:36
| 2019-01-30T00:42:36
| 42,401,878
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 551
|
rd
|
assignSeason.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assignSeason.R
\name{assignSeason}
\alias{assignSeason}
\title{Assign Season based on Date}
\usage{
assignSeason(dat, SeasonStarts)
}
\arguments{
\item{dat}{dataframe including "Date" in \code{POSIXct} format.}
\item{SeasonStarts}{SeasonStarts}
}
\description{
Assign seasons based on date.
}
\examples{
dat = data.frame(
Date = as.POSIXct(strptime(as.Date("2011-12-01", format = "\%Y-\%m-\%d") +
(0:10)*30, format="\%Y-\%m-\%d"))
)
dat \%>\% assignSeason
}
|
4cb260589f91c1470edc762f5e0abbce26b5099f
|
02edc103803e9cdceb22a2c0b8c1c70c5bb7f1c0
|
/data/data_wrangling_human.R
|
c37ae0494bd0837389a7990915df38904d5c75bc
|
[] |
no_license
|
suprajohde/IODS-project
|
5fa15e93cd84e5287c1ae9e5199f98d05ad90c45
|
36396ea351c7f8dd76b0eace8ef46c6a1b713d19
|
refs/heads/master
| 2021-05-01T13:45:38.470093
| 2017-02-24T23:46:34
| 2017-02-24T23:46:34
| 79,580,550
| 0
| 0
| null | 2017-01-20T17:24:23
| 2017-01-20T17:24:23
| null |
UTF-8
|
R
| false
| false
| 1,095
|
r
|
data_wrangling_human.R
|
## Tiina Autio
## W5 Data Wrangling
## Data source: http://hdr.undp.org/en/content/human-development-index-hdi

# Read the Human Development dataset (comma-separated, with header)
human <- read.table("http://s3.amazonaws.com/assets.datacamp.com/production/course_2218/datasets/human1.txt", header = TRUE, sep = ",")
names(human)
str(human)
summary(human)
library(stringr)
library(dplyr)
## mutating the data
# GNI carries thousands separators (e.g. "123,456"); strip them and coerce
# to numeric. Fix: spell out 'replacement =' — the original used 'replace ='
# and relied on partial argument matching.
human$GNI <- str_replace(human$GNI, pattern = ",", replacement = "") %>% as.numeric
str(human$GNI)
## excluding unneeded variables
keep <- c( "Country", "Edu2.FM", "Labo.FM", "Edu.Exp", "Life.Exp", "GNI", "Mat.Mor", "Ado.Birth", "Parli.F")
human_ <- dplyr::select(human, one_of(keep))
## removing all rows with missing values
# complete.cases() already returns a logical vector; '== TRUE' was redundant
human_ <- filter(human_, complete.cases(human_))
## removing regions (drop the last 7 rows, which are not countries)
# seq_len() is safe if the frame ever had <= 7 rows, unlike 1:last
last <- nrow(human_) - 7
human_ <- human_[seq_len(last), ]
## defining row names and removing country column
rownames(human_) <- human_$Country
human_final <- select(human_, -Country)
dim(human_final)
str(human_final)
glimpse(human_final)
## saving human data
# NOTE(review): setwd() inside a script is fragile; kept for behaviour parity.
setwd("~/Documents/IODS-project/data")
write.csv(human_final, "human_data.csv")
|
f6492633defad3cec2f583c995c0d2926c96a972
|
88767dec5d03291c4fe6acb377d21ea46608fb6a
|
/03d_Pres_uncty.R
|
5fd0bde5001aab8900bd0ba30be104f938c8ff11
|
[] |
no_license
|
themisbo/Deconvolution-in-well-test-analysis
|
14ba7653214d579a13d071e5f1c2406b09591b1a
|
4590659d9fadbfa597d720b5d341fc046d4b1f1b
|
refs/heads/master
| 2023-08-14T20:50:11.753559
| 2021-09-26T12:47:39
| 2021-09-26T12:47:39
| 226,081,092
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,564
|
r
|
03d_Pres_uncty.R
|
obsPress <- real_TLS$swPress
obsRate <- real_TLS$swRate
trueResp <- real_TLS$TlsResp
truePress <- real_TLS$TlsPress
which_dd <- which(obsRate$Rate != 0)
N_Nodes = 30
t0 = 1e-3
tN = ceiling( obsPress[nrow(obsPress)]$Time )
tau = seq(log(t0), log(tN), length.out = N_Nodes)
t = exp(tau)
source("functions.R")
p0q_samples_plot <- function(parm) {
sigmap <- parm[3*Region + 4]
# Early time parameters
TM <- 10^parm[1]
PM <- 10^parm[2]
CDe2Skin <- 10^parm[3]
# Late time parameters
RD <- cumsum( 10^( parm[(1:Region) + 3]))
eta <- 10^( parm[Region + 1:Region + 3])
M <- 10^( parm[2*Region + 1:Region + 3])
# Dimensionless time
tD = TM*t
# Dimensionless pressure derivative in Laplace space
dpwD = function(s){
# Wellbore region solution parameters
sqrt_z <- sqrt(s/CDe2Skin)
a11 = s*besselI(sqrt_z, 0, expon.scaled = TRUE) - sqrt_z*besselI(sqrt_z, 1, expon.scaled = TRUE)
a12 = s*besselK(sqrt_z, 0, expon.scaled = TRUE) + sqrt_z*besselK(sqrt_z, 1, expon.scaled = TRUE)
# a11 element sub-determinant
bb = array(dim = c(2*Region, 2*Region, dim(s)) )
RD1_s <- RD[1]*sqrt(s)
RDN_etaN_s <- ( RD[Region]*sqrt(eta[Region]) )*sqrt(s)
# Solution parameters
sqeta <- sqrt(eta[1:(Region-1)])
Msqeta <- M[2:Region]*sqeta
bb[1,1,,] <- besselK(RD1_s, 0, expon.scaled = TRUE) # A - 21
bb[2,1,,] <- -M[1]*besselK(RD1_s, 1, expon.scaled = TRUE) # A - 23
if(Region != 1){
RD1i_etai_s <- outer(sqrt(s), RD[1:(Region-1)]*sqeta, "*")
RD2i_etai_s <- outer(sqrt(s), RD[2:Region]*sqeta, "*")
bb[ind_total[[1]]] <- -besselI(RD1i_etai_s, 0, expon.scaled = TRUE) # A - 26
bb[ind_total[[2]]] <- -besselK(RD1i_etai_s, 0, expon.scaled = TRUE) # A - 27
bb[ind_total[[3]]] <- sweep(besselI(RD1i_etai_s, 1, expon.scaled = TRUE), MARGIN = 3, -sqeta, "*") # A - 30
bb[ind_total[[4]]] <- sweep(besselK(RD1i_etai_s, 1, expon.scaled = TRUE), MARGIN = 3, sqeta, "*") # A - 31
bb[ind_total[[5]]] <- besselI(RD2i_etai_s, 0, expon.scaled = TRUE) # A - 24
bb[ind_total[[6]]] <- besselK(RD2i_etai_s, 0, expon.scaled = TRUE) # A - 25
bb[ind_total[[7]]] <- sweep(besselI(RD2i_etai_s, 1, expon.scaled = TRUE), MARGIN = 3, Msqeta, "*") # A - 28
bb[ind_total[[8]]] <- sweep(besselK(RD2i_etai_s, 1, expon.scaled = TRUE), MARGIN = 3,-Msqeta, "*") # A - 29
}
bb[2*Region-1, 2*Region,,] <- -besselK(RDN_etaN_s, 0, expon.scaled = TRUE) # A - 27
bb[2*Region, 2*Region,,] <- sqrt(eta[Region])*besselK(RDN_etaN_s, 1, expon.scaled = TRUE) # A - 31
# a12 element sub-determinant
cc = bb
cc[1,1,,] <- besselI(RD1_s, 0, expon.scaled = TRUE) # A - 20
cc[2,1,,] <- M[1]*besselI(RD1_s, 1, expon.scaled = TRUE) # A - 22
scale_exp = exp(2*sqrt_z - 2*RD1_s)*( detBTM(bb,s)/detBTM(cc,s) )
Bscale = scale_exp*a11 - a12
Ascale = Bscale/scale_exp
# Solution of the Dimensionless pressure derivative in Laplace space
return( besselI(sqrt_z, 0, expon.scaled = TRUE)/Ascale
-besselK(sqrt_z, 0, expon.scaled = TRUE)/Bscale )
}
# Dimensionless pressure derivative in time space
dpwD_time <- stehfest(dpwD, tD)
# Proposed Response
log_tg <- log(tD*dpwD_time/PM)
if(anyNA(log_tg)) log_tg = rep(1, length(log_tg))
# Response object
resp <- Response(log_tg, tau, 1, 0, tN)
# Convolution
C <- Themis.calc.CMatrix(resp, I, c(m, N) )
A1 = m/(sigmap^2) + 1/(sigmap0^2)
A_1 = 1/A1
B1 = - as.matrix(rep(1/sigmap^2, m)%*%C)
D1 = as.matrix(diag(1/sigmaq^2, N) + t(C)%*%C/sigmap^2)
A_matrix = rbind(cbind(A1, B1), cbind(t(B1), D1))
b_sq1 <- sum(p)/sigmap^2 + p0/(sigmap0^2)
b_sq2 <- t(q/sigmaq^2) - t(p/sigmap^2) %*% C
b_sq = cbind(b_sq1, b_sq2)
Sig_22 = chol2inv(chol(D1-t(B1)%*%A_1%*%B1))
Sig_12 = -A_1%*%B1%*%Sig_22
Sig_21 = t(Sig_12)
Sig_11 = A_1 - Sig_12 %*% t(B1) %*% A_1
Sigma_cond <- rbind(cbind(Sig_11, Sig_12), cbind(Sig_21, Sig_22))
mu_cond = Sigma_cond %*% t(b_sq)
p0q = as.numeric( mvrnorm(1, mu_cond, Sigma_cond) )
return(list("p0q" = p0q, "mu_cond" = as.numeric(mu_cond)))
}
df_Resp <- dat_full %>%
dplyr::select(1:(3*(Region + 1))) %>%
slice(seq(1, nrow(.), by = 20))
df_Rate <- dat_full %>%
dplyr::select(starts_with("q")) %>%
slice(seq(1, nrow(.), by = 20))
df_p0 <- dat_full %>%
dplyr::select("p0") %>%
slice(seq(1, nrow(.), by = 20))
#p0 <- max(obsPress$Press)
get_z <- function(parm){
TM <- 10^parm[1]
PM <- 10^parm[2]
CDe2Skin <- 10^parm[3]
RD <- cumsum( 10^( parm[(1:Region) + 3] ) )
eta <- 10^( parm[Region + 1:Region + 3] )
M <- 10^( parm[2*Region + 1:Region + 3] )
# Dimensionless time
tD = TM*t
# Dimensionless pressure derivative in Laplace space
dpwD = function(s){
# Wellbore region solution parameters
sqrt_z <- sqrt(s/CDe2Skin)
a11 = s*besselI(sqrt_z, 0, expon.scaled = TRUE) - sqrt_z*besselI(sqrt_z, 1, expon.scaled = TRUE)
a12 = s*besselK(sqrt_z, 0, expon.scaled = TRUE) + sqrt_z*besselK(sqrt_z, 1, expon.scaled = TRUE)
# a11 element sub-determinant
bb = array(dim = c(2*Region, 2*Region, dim(s)) )
RD1_s <- RD[1]*sqrt(s)
RDN_etaN_s <- ( RD[Region]*sqrt(eta[Region]) )*sqrt(s)
# Solution parameters
sqeta <- sqrt(eta[1:(Region-1)])
Msqeta <- M[2:Region]*sqeta
bb[1,1,,] <- besselK(RD1_s, 0, expon.scaled = TRUE) # A - 21
bb[2,1,,] <- -M[1]*besselK(RD1_s, 1, expon.scaled = TRUE) # A - 23
if(Region != 1){
RD1i_etai_s <- outer(sqrt(s), RD[1:(Region-1)]*sqeta, "*")
RD2i_etai_s <- outer(sqrt(s), RD[2:Region]*sqeta, "*")
bb[ind_total[[1]]] <- -besselI(RD1i_etai_s, 0, expon.scaled = TRUE) # A - 26
bb[ind_total[[2]]] <- -besselK(RD1i_etai_s, 0, expon.scaled = TRUE) # A - 27
bb[ind_total[[3]]] <- sweep(besselI(RD1i_etai_s, 1, expon.scaled = TRUE), MARGIN = 3, -sqeta, "*") # A - 30
bb[ind_total[[4]]] <- sweep(besselK(RD1i_etai_s, 1, expon.scaled = TRUE), MARGIN = 3, sqeta, "*") # A - 31
bb[ind_total[[5]]] <- besselI(RD2i_etai_s, 0, expon.scaled = TRUE) # A - 24
bb[ind_total[[6]]] <- besselK(RD2i_etai_s, 0, expon.scaled = TRUE) # A - 25
bb[ind_total[[7]]] <- sweep(besselI(RD2i_etai_s, 1, expon.scaled = TRUE), MARGIN = 3, Msqeta, "*") # A - 28
bb[ind_total[[8]]] <- sweep(besselK(RD2i_etai_s, 1, expon.scaled = TRUE), MARGIN = 3,-Msqeta, "*") # A - 29
}
bb[2*Region-1, 2*Region,,] <- -besselK(RDN_etaN_s, 0, expon.scaled = TRUE) # A - 27
bb[2*Region, 2*Region,,] <- sqrt(eta[Region])*besselK(RDN_etaN_s, 1, expon.scaled = TRUE) # A - 31
# a12 element sub-determinant
cc = bb
cc[1,1,,] <- besselI(RD1_s, 0, expon.scaled = TRUE) # A - 20
cc[2,1,,] <- M[1]*besselI(RD1_s, 1, expon.scaled = TRUE) # A - 22
scale_exp = exp(2*sqrt_z - 2*RD1_s)*( detBTM(bb,s)/detBTM(cc,s) )
Bscale = scale_exp*a11 - a12
Ascale = Bscale/scale_exp
# Solution of the Dimensionless pressure derivative in Laplace space
return( besselI(sqrt_z, 0, expon.scaled = TRUE)/Ascale
-besselK(sqrt_z, 0, expon.scaled = TRUE)/Bscale )
}
# Dimensionless pressure derivative in time space
dpwD_time <- stehfest(dpwD,tD)
# Proposed Response
return(log(tD*dpwD_time/PM))}
z <- apply(df_Resp, 1, get_z)
MAP_z = get_z(MAP_point)
# Response object
Rate_MAP <- MAP_point[(3*Region + 6):( 3*Region + 5 + N )]
resp_MAP <- Response(MAP_z, tau, 1, 0, tN)
# Convolution
C_MAP <- Themis.calc.CMatrix(resp_MAP, I, c(m, N) )
dp_MAP <- C_MAP %*% Rate_MAP
# True Pressures
p0_MAP <- MAP_point[3*Region + 5]
true_pres_MAP <- as.numeric(p0_MAP-dp_MAP)
C0 = list()
for (i in (1:ncol(z))){
C0[[i]] <- Themis.calc.CMatrix(Response(z[,i], tau, 1, 0, tN), I, c(m, N) )
}
Rate_mat = as.matrix(df_Rate)
dp1 = matrix(0, nrow = m, ncol = ncol(z))
for (i in (1:ncol(z))){
dp1[,i] <- as.numeric(df_p0$p0[i] - C0[[i]] %*% Rate_mat[i,])
}
df_mpres <- as_tibble(t(dp1))
names(df_mpres) <- p_t <- obsPress$Time
df_mpres2 <- df_mpres %>%
gather(p_t, value, convert = TRUE)
df_mpres2$id <- 1:ncol(z)
df_pres <- tibble(t = obsPress$Time, obsPress = obsPress$Press, MapPress = true_pres_MAP)#, truePress = truePress$Press)
ggplot(df_mpres2, aes(x = p_t, y = value)) +
geom_line(aes(group = id, col = "Uncty")) +
geom_line(data = df_pres, aes(x = t, y=obsPress, col = "obsPress")) +
geom_line(data = df_pres, aes(x = t, y=MapPress, col = "MapPress")) +
scale_color_manual(values = c(Uncty = uncty_col, obsPress = 'red', MapPress = MAP_col)) +
labs(x = "Time", y = "Pressure")
df_mresid <- as_tibble(t(obsPress$Press - dp1))
names(df_mresid) <- p_t <- obsPress$Time
df_mresid2 <- df_mresid %>%
gather(p_t, value, convert = TRUE)
df_mresid2$id <- 1:ncol(z)
df_pres <- df_pres %>%
mutate(MapResids = obsPress - MapPress, trueResids = obsPress - true_pres_MAP)
df_pres <- df_pres %>%
mutate(MapRsq = MapResids^2, trueRsq = trueResids^2)
df_sum <-df_pres %>% summarise_all(list(sum))
ggplot(df_mresid2, aes(x = p_t, y = value)) +
geom_line(aes(group = id, col = "Samples"), alpha = 0.1) +
geom_line(data = df_pres, aes(x = t, y=trueResids, col = "TLS_Residuals")) +
geom_line(data = df_pres, aes(x = t, y=MapResids, col = "MapResids")) +
geom_hline(yintercept = 0, linetype = 2) +
scale_color_manual(values = c(Samples = uncty_col, TLS_Residuals = 'red', MapResids = MAP_col),
labels=c("MAP", "TLS", "Samples"))+
labs(x = "Time", y = "Pressure residuals")
ggplot(df_mresid2, aes(x = p_t, y = value)) +
geom_line(aes(group = id), col = uncty_col, alpha = 0.1) +
geom_line(data = df_pres, aes(x = t, y=trueResids), col = 'red') +
geom_line(data = df_pres, aes(x = t, y=MapResids), col = MAP_col) +
geom_hline(yintercept = 0, linetype = 2) +
theme_bw()+ theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
labs(x = "Time", y = "Pressure residuals")
|
48ca1501b0e6f026c3fe0da0afc8b4fa21f8da8f
|
60b03bddbdbb611a850842d5a3fc983311804b7a
|
/00 - Recording NYC Cases.R
|
27914b29771d058e59dd34a2b5b023b8641e3499
|
[] |
no_license
|
timkiely/covid-19-model
|
8facdc4479215e0007426cf5a6e52f1e4e125efd
|
b31301ba05dd43d1744a7a6212ad4e6802265f6a
|
refs/heads/master
| 2021-02-16T11:29:26.956184
| 2020-11-05T20:15:44
| 2020-11-05T20:15:44
| 245,000,665
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,226
|
r
|
00 - Recording NYC Cases.R
|
# COLLECTIONG NYC COVID DATA ----
# Tim Kiely, March 2020
# Daily stat source:
# NOTE: as of 4/6 this page no longer displays data
# https://www1.nyc.gov/assets/doh/downloads/pdf/imm/covid-19-daily-data-summary.pdf
# browseURL("https://www1.nyc.gov/assets/doh/downloads/pdf/imm/covid-19-daily-data-summary.pdf")
# NEW DATA 4/5:
# https://github.com/nychealth/coronavirus-data
# browseURL("https://github.com/nychealth/coronavirus-data")
suppressPackageStartupMessages({
library(tabulizer)
library(tidyverse)
library(pdftools)
})
# Compound annual growth rate: the constant per-period rate that grows
# PV (present value) into FV (future value) over n periods.
CAGR_formula <- function(FV, PV, n = 1) {
  growth_ratio <- FV / PV
  growth_ratio^(1 / n) - 1
}
# 1.0 MANUAL RECORDING ----
NYC_reports <-
dplyr::mutate(
tibble::tribble(~days_since_reported, ~Confirmed, ~Deaths
, 1, 0, NA
, 4, 43, NA # Wednesday 3/11
, 5, 100, NA # Thursday 3/12
, 6, 170, NA # Friday 3/13
, 7, 213, NA # Saturday 3/14
, 8, 329, NA # Sunday 3/15
, 9, 463, NA # Monday 3/16
, 10, 814, NA # Tuesday 3/17
, 11, 1339, 10 # Wednesday 3/18 at 3:00 pm
, 12, 3615, 22 # Thursday 3/19 at 4:00 pm
, 13, 5151, 29 # Friday 3/20
, 17, 12339, 99 # 3/24
, 18, 14776, 131 # 3/25
, 19, 15597, 192 # 3/26
, 20, 21873, 281 # 3/27
, 22, 33474, 776 # 3/29
, 24, 40900, 932 # 3/31
, 25, 56624, 1139 # 4/1
)
, area = "NYC", Country = "US")
identity <- function(x) x
cumulative <- function(x) cumsum(as.numeric(x))
nyc_daily_data <-
read_csv("https://raw.githubusercontent.com/nychealth/coronavirus-data/master/case-hosp-death.csv") %>%
mutate(DEATH_COUNT= ifelse(is.na(DEATH_COUNT), 0, DEATH_COUNT)) %>%
mutate_at(vars(NEW_COVID_CASE_COUNT:DEATH_COUNT), lst(identity, cumulative)) %>%
select(-contains("identity"))
NYC_reports <-
nyc_daily_data %>%
mutate(`Mortality Rate` = as.numeric(DEATH_COUNT_cumulative)/as.numeric(NEW_COVID_CASE_COUNT_cumulative)) %>%
mutate(`Death Percent Increase` = DEATH_COUNT_cumulative/lag(DEATH_COUNT_cumulative,1)-1) %>%
mutate(`Case Percent Increase` = NEW_COVID_CASE_COUNT_cumulative/lag(NEW_COVID_CASE_COUNT_cumulative,1)-1) %>%
mutate(DATE_OF_INTEREST = mdy(DATE_OF_INTEREST))
# 3.0 SCRAPE NYC DAILY SHEET ----
daily_stat_sheet <- "https://www1.nyc.gov/assets/doh/downloads/pdf/imm/covid-19-daily-data-summary.pdf"
total_nyc_cases <- "https://www1.nyc.gov/assets/doh/downloads/pdf/imm/covid-19-daily-data-summary-04052020-1.pdf"
total_nyc_deaths <- "https://www1.nyc.gov/assets/doh/downloads/pdf/imm/covid-19-daily-data-summary-deaths-04052020-1.pdf"
total_nyc_hospitalizations <- "https://www1.nyc.gov/assets/doh/downloads/pdf/imm/covid-19-daily-data-summary-hospitalizations-04042020-1.pdf"
# browseURL(daily_stat_sheet)
# Extract the table
# out <- extract_tables(daily_stat_sheet)
# parse table
# parsed_table <-
# out[[1]] %>%
# as_tibble() %>%
# select(-V2) %>%
# slice(-1) %>%
# setNames(c("Var","Value")) %>%
# mutate(Value = readr::parse_character(Value)) %>%
# separate(Value, into = c("value","percent"), sep = " ") %>%
# mutate(percent = readr::parse_number(percent)) %>%
# print(n=Inf)
# extracted_data <-
# parsed_table %>%
# filter(!is.na(value), Var!="- Unknown") %>%
# select(-percent) %>%
# mutate(Var = readr::parse_character(str_remove_all(Var, "-"))) %>%
# spread(Var, value) %>%
# mutate(Date = report_date) %>%
# select(
# `Date`
# , `Total`
# , Deaths
# , `Median Age (Range)`
# , `0 to 17`
# , `18 to 44`
# , `45 to 64`
# , `65 to 74`
# , `75 and over`
# , `Bronx`
# , `Brooklyn`
# , `Manhattan`
# , `Male`
# , `Female`
# , `Queens`
# , `Staten Island`
# ) %>%
# mutate_all(as.character)
# write_file_path <- paste0('data/nyc-daily-stat-sheets/nyc-daily-covid-stats-extracted-', format(Sys.Date(),"%Y-%m-%d"),".csv")
# CAGR_formula <- function(FV, PV, n = 1) {
# values <- ((FV/PV)^(1/n)-1)
# return(values)
# }
#
# final_data <-
# bind_rows(latest_file, extracted_data) %>%
# arrange(Date) %>%
# distinct(Date, .keep_all = T) %>%
# mutate(Date = as.Date(Date)) %>%
# mutate(days_elapsed = replace_na(as.numeric(Date - lag(Date, 1)),1)) %>%
# mutate(`Mortality Rate` = scales::percent(as.numeric(Deaths)/as.numeric(Total))) %>%
# mutate(`Death CAGR` = scales::percent(CAGR_formula(as.numeric(Deaths), lag(as.numeric(Deaths),1), n = days_elapsed))) %>%
# mutate(`Case CAGR` = scales::percent(CAGR_formula(as.numeric(Total), lag(as.numeric(Total),1), n = days_elapsed))) %>%
# select(Date, days_elapsed, Total, `Case CAGR`, Deaths, `Death CAGR`, `Mortality Rate`, everything())
#
#
# select(final_data, Date, days_elapsed, Total, `Case CAGR`, Deaths, `Death CAGR`, `Mortality Rate`)
#
# if(!file.exists(write_file_path)){
# message("Writing latest file to: ",write_file_path)
# write_csv(final_data, write_file_path)
# }
|
2ef285684ee2bdd2e8397d8b68179236613981a9
|
73fbcce9fc9b2eaa15098609a012dd40fc594198
|
/notes/arules.R
|
134a164610bfae7b6c441200b3b643626f2e26a4
|
[] |
no_license
|
jacolind/ship-sales
|
536385012b4c5833caa127950541100a45cc2f5c
|
91abc84a3cf18d65c7e11e72c8ea30ad4669e36f
|
refs/heads/master
| 2021-04-15T11:51:47.956925
| 2018-04-06T11:57:06
| 2018-04-06T11:57:06
| 126,536,875
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 298
|
r
|
arules.R
|
# Market-basket exploration of the Epub data set with the 'arules' package.
# Fix: install.packages() takes the package name as a string; the bare
# symbol 'arules' would fail with "object 'arules' not found".
install.packages("arules")
library('arules')
data('Epub')
Epub
summary(Epub)
# Extract the transaction year from the timestamp metadata
year <- strftime(as.POSIXlt(transactionInfo(Epub)[["TimeStamp"]]), "%Y")
table(year)
Epub2003 <- Epub[year == "2003"]
length(Epub2003)
image(Epub2003)
# Metadata of the large 2003 transactions (more than 20 items)
transactionInfo(Epub2003[size(Epub2003) > 20])
# Fix: 'arinspect' is not a function; arules provides inspect()
inspect(Epub2003[1:5])
|
e178073b8b6de40d8a50ec22c4be65a7924c0e69
|
26aba2dfa619c6c6a10b77e7d38e1c313c5bf2f7
|
/man/Output_df-class.Rd
|
b15aad3804cbf68625f837e000643318d457cda4
|
[] |
no_license
|
Pablo-Lopez-Sfi/dependenciesMap
|
4655ea1a732412500dcccda7b7e433d4d666ec68
|
38370adb260bddfa724bc9d8ebf2e87fdc4f9cce
|
refs/heads/master
| 2022-11-16T22:19:49.544103
| 2020-06-26T12:18:22
| 2020-06-26T12:18:22
| 259,566,743
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,772
|
rd
|
Output_df-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dependencies_code.R
\docType{class}
\name{Output_df-class}
\alias{Output_df-class}
\alias{Output_df}
\title{Outputs in the data preparation program from data frames previously generated (can be used to modified a global variable)}
\description{
Outputs in the data preparation program from data frames previously generated (can be used to modified a global variable)
}
\section{Fields}{
\describe{
\item{\code{name}}{: name of the data frame (to be added by user)}
\item{\code{data}}{: data generated (NULL if omitted)}
\item{\code{process}}{: process where output is generated in the data preparation program (to be added by user)}
}}
\section{Methods}{
\describe{
\item{\code{f_add()}}{does nothing, just allows to keep track of the new data frame created as output
to be used when in a process, a new data frame is created and there is no need to return it
\subsection{Example}{\code{Output_df( name = 'MasterData', data = new_data, process = 'CoV' )$f_add()}}
}
\item{\code{f_return()}}{simply returns the data of the object :
By default: \code{return(data)}
\subsection{Example}{\code{MasterData <- left_join( MasterData, Adj_CoV_Result, by = c('DMDUNIT','LOC','DMDGROUP','STARTDATE') ) } is now replaced by
( \code{new_data <- left_join( MasterData, Adj_CoV_Result, by = c('DMDUNIT','LOC','DMDGROUP','STARTDATE'))} )
\code{MasterData <- Output_df( name = 'MasterData', data = new_data, process = 'CoV' )$f_return()}}}
}}
|
c4b38a3950101b58580f886f620f6e952b179489
|
7acfddc8f6e83c086aff89440719ec4f21ebdfa4
|
/man/query_acs.Rd
|
1837c0cc87cddd83760cd5ac449103ff7c4685d1
|
[] |
no_license
|
crazybilly/fundRaising
|
f16cafc02d23fd1fd140ba586cb6940516d19292
|
42f4f4ae725f62e31a7e16c10f172fff801737ea
|
refs/heads/master
| 2021-07-29T20:03:49.287552
| 2021-07-26T14:08:32
| 2021-07-26T14:08:32
| 145,170,311
| 4
| 3
| null | 2019-07-11T13:57:23
| 2018-08-17T22:03:05
|
R
|
UTF-8
|
R
| false
| true
| 1,971
|
rd
|
query_acs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acs_screening.R
\name{query_acs}
\alias{query_acs}
\title{Obtain 5-Year American Community Survey Estimates}
\usage{
query_acs(
var = c("B19013_001E", "B25077_001E"),
year = NULL,
state,
county = NULL,
tract = NULL,
blkgrp = NULL,
key = NULL
)
}
\arguments{
\item{var}{Variables to query from the ACS. For a list of the available
variables and codes (for the 2017 ACS), see the
\href{https://api.census.gov/data/2017/acs/acs5/variables.html}{Official
Documentation}. Defaults to Median Household Income (B19013_00E) and Median
Home Value (Owner-Occupied Units) (B25077_001E). Supports groups.}
\item{year}{Four-digit year. Defaults to the most recent data, for 2017.}
\item{state}{Two-digit state FIPS code. Alternatively, \code{"us"} for
national-level statistics. Supports wildcard string (\code{"*"}).}
\item{county}{Three-digit county FIPS code. Supports wildcard string
(\code{"*"}).}
\item{tract}{Five-digit census tract FIPS code. Supports wildcard string
(\code{"*"}).}
\item{blkgrp}{One-digit block group FIPS code.}
\item{key}{(optional) Developer key.}
}
\value{
Tibble of data points and FIPS codes, one line per valid input geography.
}
\description{
The U.S. Census Bureau has published 5-year estimates of
demographic data since 2009. The data is aggregated at the national, state,
county, census tract, and census block group levels.
This function queries the
\href{https://www.census.gov/data/developers/data-sets/acs-5year.html}{Census
Bureau API} based on FIPS codes for various geographies. Substituting a
wildcard character \code{"*"} instead of a FIPS code returns all values
within the parent geography (i.e. \code{tract = "*"} will return data for
all tracts within a county).
The API limits the number of queries for users who lack an API key. A key
can be obtained \href{https://api.census.gov/data/key_signup.html}{here}.
}
|
927dbadfdb6318875b9f05dea1189663a69c73c6
|
3b9563287bd7129a0d97d55a94d979799c959499
|
/man/setup_dap.Rd
|
c218ead2be0ef49e5d0b2e28e010844f56b06359
|
[] |
no_license
|
JoFAM/daprojects
|
008241b104eb1267992e24e73917b56490ae75d7
|
21c6972202bf7b83e14383767fd5a290b54cfcce
|
refs/heads/master
| 2021-05-09T01:46:00.807147
| 2018-01-29T10:57:28
| 2018-01-29T10:57:28
| 119,185,747
| 0
| 1
| null | 2018-01-28T15:36:17
| 2018-01-27T17:24:56
|
R
|
UTF-8
|
R
| false
| true
| 1,102
|
rd
|
setup_dap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setup_dap.R
\name{setup_dap}
\alias{setup_dap}
\title{Set up a project for data analysis.}
\usage{
setup_dap(path, desc = character(0), readme = TRUE, addexamples = FALSE,
...)
}
\arguments{
\item{path}{a character vector with the path where the project should be created.}
\item{desc}{a character vector with a short description of the project.
This will be used as title.}
\item{readme}{a logical value indicating whether or not the file README.md
should be created}
\item{addexamples}{a logical value indicating whether or not example
files should be copied into the new project.}
\item{...}{extra arguments captured from the project wizard.
currently ignored.}
}
\value{
NULL invisibly. This function is only called for its side effects.
}
\description{
This function sets up the project template for a simple data analysis.
It creates the directory structure, adds a file ProjectInfo.md and
sets up the main script.
}
\details{
This function is the binding in the dcf file that contains the project
definition.
}
|
c02230d841f6b4b9fc5a8c70e8d178c58ab84e9e
|
f020db9cb67886a11c1b8721b04af1ba22136ed5
|
/tests/testthat/test-purge.R
|
5c9a54d69b2766b51c4016a6af65de84a8fa7c13
|
[] |
no_license
|
slopp/renv
|
2b5ac42ae89682f6cdb022221b53530679484982
|
fd88fbe2d95a0f417a7fae4f188acdeb99691798
|
refs/heads/master
| 2020-04-20T22:14:33.890121
| 2019-07-04T06:53:31
| 2019-07-04T06:53:31
| 169,133,285
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 275
|
r
|
test-purge.R
|
context("Purge")

test_that("we can purge packages from the cache", {
  renv_tests_scope("breakfast")
  renv::init()

  # Helper: package names currently present in the renv cache.
  cached_pkgs <- function() basename(renv_cache_list())

  # After init, the package must be cached; after purge, it must be gone.
  expect_true("breakfast" %in% cached_pkgs())
  renv::purge("breakfast")
  expect_false("breakfast" %in% cached_pkgs())
})
|
356e7c7c508b7dec5cbdb71f7dea5f08504d4ed9
|
a62777fae97b178f476b16941cb91af41c0ca44f
|
/R/get_network.R
|
af82a34b903067b2866ff926b748ef464ecfbed4
|
[] |
no_license
|
dmzonana/asnipe
|
6bbabb3017dea170d62fc06baad876665b54b293
|
76965eca05d4128c9c557ddea069495e96e83141
|
refs/heads/master
| 2020-05-25T14:56:25.650472
| 2016-09-13T14:45:37
| 2016-09-13T14:45:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,448
|
r
|
get_network.R
|
# Build a weighted association network (pairwise association-index matrix)
# from association data.
#
# association_data: a 2-D matrix when data_format == "GBI", or a 3-D array
#   when data_format == "SP" (per the dimension checks below; presumably
#   group-by-individual vs. sampling-period format -- confirm with package docs).
# association_index: "SRI" or "HWI" (the HWI branch weights "seen apart"
#   counts by 0.5, consistent with the half-weight index).
# The remaining arguments optionally subset the data by identity, time
# window, location, or class before the network is computed.
# Returns an ncol x ncol matrix of association indices with a zero diagonal,
# with row/column names taken from identities or from column names if present.
get_network <- function(association_data, data_format = "GBI", association_index = "SRI", identities = NULL, which_identities = NULL, times = NULL, locations = NULL, which_locations = NULL, start_time = NULL, end_time = NULL, classes = NULL, which_classes = NULL) {

  #### CHECK INPUTS
  # Validate dimensions and that any metadata vectors line up with the data,
  # and that subsetting arguments are only given together with their metadata.
  if (is.null(association_data)) { stop("No association_data data!") }
  if (length(dim(association_data)) != 2 & data_format=="GBI") { stop("Invalid dimensions for association_data") }
  if (length(dim(association_data)) != 3 & data_format=="SP") { stop("Invalid dimensions for association_data") }
  if ((length(identities) != ncol(association_data) & !is.null(identities)) == TRUE) { stop("Length of identities does not match number of individuals") }
  if ((length(times) != nrow(association_data) & !is.null(times)) == TRUE) { stop("Length of times does not match number of groups") }
  if ((length(locations) != nrow(association_data) & !is.null(locations)) == TRUE) { stop("Length of locations does not match number of groups") }
  if ((length(classes) != ncol(association_data) & !is.null(classes)) == TRUE) { stop("Length of classes does not match number of individuals") }
  if ((!is.null(which_identities) & is.null(identities)) == TRUE) { stop("Cannot apply which_identities without identities data") }
  if ((!is.null(which_locations) & is.null(locations)) == TRUE) { stop("Cannot apply which_locations without locations data") }
  if ((!is.null(start_time) & is.null(times)) == TRUE) { stop("Cannot apply start_time without times data") }
  if ((!is.null(end_time) & is.null(times)) == TRUE) { stop("Cannot apply end_time without times data") }
  if ((!is.null(which_classes) & is.null(classes)) == TRUE) { stop("Cannot apply which_class without classes data") }
  if (!any(association_index %in% c("SRI","HWI"))) { stop("Unknown association_index") }

  #### SUBSET THE DATA
  # By identity (columns for GBI; both individual dimensions for SP)
  if (!is.null(which_identities)) {
    if (data_format=="GBI") association_data <- association_data[,which(identities %in% which_identities)]
    if (data_format=="SP") association_data <- association_data[,which(identities %in% which_identities),which(identities %in% which_identities)]
    identities <- identities[which(identities %in% which_identities)]
  }

  # By time: an open-ended bound defaults to the data's min/max time.
  if (!is.null(start_time) & is.null(end_time)) { end_time <- max(times) }
  if (!is.null(end_time) & is.null(start_time)) { start_time <- min(times) }
  if (!is.null(start_time) & !is.null(end_time)) {
    subs <- which(times >= start_time & times <= end_time)
    if (data_format=="GBI") association_data <- association_data[subs,]
    if (data_format=="SP") association_data <- association_data[subs,,]
    locations <- locations[subs]
    times <- times[subs]
  }

  # By location (rows / sampling periods)
  if (!is.null(which_locations)) {
    subs <- which(locations %in% which_locations)
    if (data_format=="GBI") association_data <- association_data[subs,]
    if (data_format=="SP") association_data <- association_data[subs,,]
    locations <- locations[subs]
    times <- times[subs]
  }

  # By class (columns / individuals); note identities are re-subset here too.
  if (!is.null(which_classes)) {
    if (data_format=="GBI") association_data <- association_data[,which(classes %in% which_classes)]
    if (data_format=="SP") association_data <- association_data[,which(classes %in% which_classes),which(classes %in% which_classes)]
    identities <- identities[which(classes %in% which_classes)]
  }

  #### GENERATE NETWORK
  ### Calculate Network
  # GBI version: one row of indices for the focal individual GroupBy.
  # Columns are processed in chunks of 50 to limit memory use.  In each
  # chunk, tmp == 2 means "both seen in the same group" (x) and tmp == 1
  # means "exactly one of the pair seen" (yab).
  do.SR <- function(GroupBy,input,association_index){
    jumps <- c(seq(0,ncol(input),50))
    if (max(jumps) < ncol(input)) {
      jumps <- c(jumps,ncol(input))
    }
    out <- matrix(nrow=0,ncol=1)
    for (i in 1:(length(jumps)-1)) {
      tmp <- input[ ,GroupBy] + input[,(jumps[i]+1):jumps[i+1]]
      # When the chunk has a single column, tmp is a plain vector, so the
      # length test selects sum() instead of colSums().
      if (length(tmp) > nrow(input)) {
        x <- colSums(tmp==2)
      } else {
        x <- sum(tmp==2)
      }
      if (length(tmp) > nrow(input)) {
        yab <- colSums(tmp==1)
      } else {
        yab <- sum(tmp==1)
      }
      if (association_index == "SRI") {
        out <- c(out, x / (x + yab))
      } else if (association_index == "HWI") {
        out <- c(out, x / (x + 0.5*yab))
      }
    }
    out
  }

  # SP version: one row of indices for focal individual i, computed from the
  # 3-D sampling-period array a.
  do.SR2 <- function (i, a,association_index) {
    # how many times 1 seen together with all others
    x <- apply(a[,i,],2,sum)
    # how many times 1 but not others in a sampling period and vice versa
    n <- apply(a,1,rowSums)
    n[n>0] <- 1
    seen <- t(apply(n,1,function(x) x-n[i,]))
    ya <- rowSums(seen<0)
    yb <- rowSums(seen>0)
    # how many times 1 and others seen but not together
    seen <- t(apply(n,1,function(x) x+n[i,]))
    yab <- rowSums(seen>1) - x
    if (association_index == "SRI") {
      out <- x / (x + ya + yb + yab)
    } else if (association_index == "HWI") {
      out <- x / (x + ya + yb + 0.5*yab)
    }
    return(out)
  }

  cat(paste("Generating ", ncol(association_data), " x ", ncol(association_data), " matrix\n"))
  # One call per individual; each returns that individual's row of indices.
  if (data_format=="GBI") fradj_sorted <- do.call("rbind",lapply(seq(1,ncol(association_data),1),FUN=do.SR,input=association_data, association_index))
  if (data_format=="SP") fradj_sorted <- do.call("rbind",lapply(seq(1,ncol(association_data),1),FUN=do.SR2,a=association_data, association_index))

  # 0/0 pairs (never observed together or apart) yield NaN; zero them,
  # and zero the self-association diagonal.
  fradj_sorted[is.nan(fradj_sorted)] <- 0
  diag(fradj_sorted) <- 0

  if (!is.null(identities)) {
    colnames(fradj_sorted) <- identities
    rownames(fradj_sorted) <- identities
  } else if (!is.null(colnames(association_data))) {
    colnames(fradj_sorted) <- colnames(association_data)
    rownames(fradj_sorted) <- colnames(association_data)
  }

  return(fradj_sorted)
}
|
2e3a23945d8ad20a88cd68ce9b1ed4a54167f8e6
|
cb93cf0799e3eedca6f9e720e09bb60e0f77ff10
|
/R/GenericTabularFileSet.R
|
f0e53d0fe3904c6fc658ffc2f4e214ed2f68d38b
|
[] |
no_license
|
HenrikBengtsson/R.filesets
|
254c37b4546e8280b9972d06840b918e12e0b4e9
|
17181ae1c84dbf7bad1214d37e6f133ed2deeba4
|
refs/heads/master
| 2023-01-08T23:58:09.708417
| 2022-07-21T09:52:18
| 2022-07-21T09:52:18
| 20,844,863
| 3
| 1
| null | 2018-04-03T22:12:45
| 2014-06-15T00:25:31
|
R
|
UTF-8
|
R
| false
| false
| 1,319
|
r
|
GenericTabularFileSet.R
|
###########################################################################/**
# @RdocClass GenericTabularFileSet
#
# @title "The GenericTabularFileSet class"
#
# \description{
# @classhierarchy
#
# An GenericTabularFileSet object represents a set of
# @see "GenericTabularFile"s.
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Arguments passed to @see "GenericDataFileSet".}
# }
#
# \section{Fields and Methods}{
# @allmethods "public"
# }
#
# @author
#*/###########################################################################
# Constructor: a GenericTabularFileSet is simply a GenericDataFileSet with an
# extra class attribute; per the Rd comment above, its files are expected to
# be GenericTabularFile objects.  All arguments are forwarded unchanged.
setConstructorS3("GenericTabularFileSet", function(...) {
  extend(GenericDataFileSet(...), "GenericTabularFileSet")
})
setMethodS3("extractMatrix", "GenericTabularFileSet", function(this, ..., drop=FALSE) {
  # Calls extractMatrix() on each file in the set and binds the per-file
  # results as the columns of a single matrix (one column per file).
  extraArgs <- list(...)

  nbrOfFiles <- length(this)

  res <- NULL
  for (ii in seq_len(nbrOfFiles)) {
    file <- this[[ii]]
    values <- do.call(extractMatrix, args = c(list(file), extraArgs))

    # Allocate the result lazily, once the storage mode and number of rows
    # of the first per-file result are known.
    if (is.null(res)) {
      naValue <- vector(storage.mode(values), length = 1)
      res <- matrix(naValue, nrow = nrow(values), ncol = nbrOfFiles)
      colnames(res) <- getNames(this)
    }

    res[, ii] <- values

    # Release the per-file result early
    values <- NULL
  }

  # Drop singleton dimensions?
  if (drop) {
    res <- drop(res)
  }

  res
})
|
d470cf2c009e033a553fbdc90e8f48c6adefdd9b
|
6bb82b6ec315060653cc675155321b8b53e85ed9
|
/03 R Syntax 1 (Data Typs and Strings)/03_4_Strings.R
|
7ffa7fce3391809b6363a6c0b5358c6ad28af4c9
|
[] |
no_license
|
inkyscope/R-for-Data-Analytics
|
d9d66a58fd67a5c4731c186010de281657bd82d6
|
fd9e04e4184e7edee6ee08ca238ac4c24a83a5c7
|
refs/heads/master
| 2023-01-31T12:43:00.186966
| 2020-12-11T07:11:54
| 2020-12-11T07:11:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,267
|
r
|
03_4_Strings.R
|
# 03-4 Handling Strings ----------------------------------------------
# Tutorial script demonstrating base R string handling; each expression is
# meant to be run interactively so its result prints to the console.

# length() counts vector elements (1 here); nchar() counts characters.
S <- "Welcome to Data Science!"
length(S)
nchar(S)

# paste() concatenates with a separator (default " "); sep="" glues directly.
S1 <- "My name is"
S2 <- "Pilsung Kang"
paste(S1, S2)
paste(S1, S2, sep="-")
paste(S1, S2, sep="")
# Non-character arguments are coerced; note log() is the natural logarithm.
paste("The value of log10 is", log(10))

# paste() is vectorized: shorter vectors are recycled elementwise.
S1 <- c("My name is", "Your name is")
S2 <- c("Pilsung")
S3 <- c("Pilsung", "Younho", "Hakyeon")
paste(S1,S2)
paste(S1,S3)

# collapse = joins the resulting vector into one single string.
stooges <- c("Dongmin", "Sangkyum", "Junhong")
paste(stooges, "loves", "R.")
paste(stooges, "loves", "R", collapse = ", and ")

# substr() extracts by character positions (inclusive); also vectorized.
substr("Data Science", 1, 4)
substr("Data Science", 6, 10)
stooges <- c("Dongmin", "Sangkyum", "Junhong")
substr(stooges, 1,3)
# Last two characters of each element, e.g. the state abbreviations.
cities <- c("New York, NY", "Los Angeles, CA", "Peoria, IL")
substr(cities, nchar(cities)-1, nchar(cities))

# strsplit() splits on a regular expression and returns a list.
path <- "C:/home/mike/data/trials.csv"
strsplit(path,"/")
path <- c("C:/home/mike/data/trials1.csv",
"C:/home/mike/data/errors2.txt",
"C:/home/mike/data/report3.doc")
strsplit(path,"/")
# The split pattern is a regex: character classes, ".", "\\." (literal dot),
# repetition "r{2}", and POSIX classes like [[:digit:]] all work.
strsplit(path, "om")
strsplit(path, "[hm]")
strsplit(path, "i.e")
strsplit(path, "\\.")
strsplit(path, "r{2}")
strsplit(path, "[[:digit:]]")

# sub() replaces the first match only; gsub() replaces all matches.
tmpstring <- "Kim is stupid and Kang is stupid too"
sub("stupid", "smart", tmpstring)
gsub("stupid", "smart", tmpstring)

# grep() returns the indices of elements matching the pattern.
grep("mike", path)
grep("errors", path)
|
39c0e8287a256af2300a4b14f1cda5a4659892ff
|
6af844903b9d066581408450a1e743c1283468e5
|
/R/windowed_scalogram.R
|
1db8bdc4a183c3f73ac5b55ebc09bd3d0a672bd8
|
[] |
no_license
|
rbensua/wavScalogram
|
8ab5766639ec0154383b0ecc43858cedfe52019c
|
b811b3382de8190c0ed8249bcee04394a007ceda
|
refs/heads/master
| 2021-06-24T13:45:41.879344
| 2019-06-12T09:53:42
| 2019-06-12T09:53:42
| 136,003,978
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,507
|
r
|
windowed_scalogram.R
|
#' @title Windowed scalograms of a signal
#'
#' @description This function computes the normalized windowed scalograms of a signal for
#' the scales given. It is computed using time windows with radius \code{windowrad}
#' centered at a vector of central times with increment of time \code{delta_t}. It is
#' important to note that the notion of scalogram here is analogous to the spectrum of the
#' Fourier transform. It gives the contribution of each scale to the total energy of the
#' signal. For each scale \eqn{s} and central time \eqn{tc}, it is defined as the square
#' root of the integral of the squared modulus of the wavelet transform w.r.t the time
#' variable \eqn{t}, i.e.
#'
#' \deqn{WS_{windowrad}(tc,s):=
#' (\int_{tc-windowrad}^{tc+windowrad}|Wf(t,s)|^2 dt)^{1/2}.}{WS_{windowrad}(tc,s):=
#' (integral_{tc-windowrad}^{tc+windowrad}|Wf(t,s)|^2 dt)^{1/2}.}
#'
#' "Normalized" means that the windowed scalograms are divided by the square root of the
#' length of the respective time windows in order to be comparable between them.
#'
#'
#' @usage windowed_scalogram(signal,
#' dt = 1,
#' scales = NULL,
#' powerscales = TRUE,
#' windowrad = NULL,
#' delta_t = NULL,
#' wname = c("MORLET", "DOG", "PAUL", "HAAR", "HAAR2"),
#' wparam = NULL,
#' waverad = NULL,
#' border_effects = c("BE", "INNER", "PER", "SYM"),
#' energy_density = TRUE,
#' makefigure = TRUE,
#' time_values = NULL,
#' figureperiod = TRUE,
#' xlab = "Time",
#' ylab = NULL,
#' main = "Windowed Scalogram")
#'
#' @param signal A vector containing the signal whose windowed scalogram is wanted.
#' @param dt Numeric. The time step of the signal.
#' @param scales A vector containing the wavelet scales at which the windowed scalograms
#' are computed. This can be either a vector with all the scales or, following Torrence
#' and Compo 1998, a vector of 3 elements with the minimum scale, the maximum scale and
#' the number of suboctaves per octave (in this case, \code{powerscales} must be TRUE in
#' order to construct power 2 scales using a base 2 logarithmic scale). If \code{scales}
#' is NULL, they are automatically constructed.
#' @param powerscales Logical. It must be TRUE (default) in these cases:
#' \itemize{
#' \item If \code{scales} are power 2 scales, i.e. they use a base 2 logarithmic scale.
#' \item If we want to construct power 2 scales automatically. In this case, \code{scales}
#' must be \code{NULL}.
#' \item If we want to construct power 2 scales from \code{scales}. In this case,
#' \code{length(scales)} must be 3.
#' }
#' @param windowrad Integer. Time radius for the windows, measured in \code{dt}. By
#' default, it is set to \eqn{ceiling(length(signal) / 20)}.
#' @param delta_t Integer. Increment of time for the construction of windows central
#' times, measured in \code{dt}. By default, it is set to
#' \eqn{ceiling(length(signal) / 256)}.
#' @param wname A string, equal to "MORLET", "DOG", "PAUL", "HAAR" or "HAAR2". The
#' difference between "HAAR" and "HAAR2" is that "HAAR2" is more accurate but slower.
#' @param wparam The corresponding nondimensional parameter for the wavelet function
#' (Morlet, DoG or Paul).
#' @param waverad Numeric. The radius of the wavelet used in the computations for the cone
#' of influence. If it is not specified, it is assumed to be \eqn{\sqrt{2}} for Morlet and DoG,
#' \eqn{1/\sqrt{2}} for Paul and 0.5 for Haar.
#' @param border_effects String, equal to "BE", "INNER", "PER" or "SYM", which indicates
#' how to manage the border effects which arise usually when a convolution is performed on
#' finite-lenght signals.
#' \itemize{
#' \item "BE": With border effects, padding time series with zeroes.
#' \item "INNER": Normalized inner scalogram with security margin adapted for each
#' different scale. Although there are no border effects, it is shown as a regular COI
#' the zone in which the length of \eqn{J(s)} (see Benítez et al. 2010) is smaller and
#' it has to be normalized.
#' \item "PER": With border effects, using boundary wavelets (periodization of the
#' original time series).
#' \item "SYM": With border effects, using a symmetric catenation of the original time
#' series.
#' }
#' @param energy_density Logical. If TRUE (default), divide the scalograms by the square
#' root of the scales for convert them into energy density.
#' @param makefigure Logical. If TRUE (default), a figure with the scalograms is plotted.
#' @param time_values A numerical vector of length \code{length(signal)} containing custom
#' time values for the figure. If NULL (default), it will be computed starting at 0.
#' @param figureperiod Logical. If TRUE (default), periods are used in the figure instead
#' of scales.
#' @param xlab A string giving a custom X axis label.
#' @param ylab A string giving a custom Y axis label. If NULL (default) the Y label is
#' either "Scale" or "Period" depending on the value of \code{figureperiod} if
#' \code{length(scales) > 1}, or "Windowed Scalogram" if \code{length(scales) == 1}.
#' @param main A string giving a custom main title for the figure.
#'
#' @return A list with the following fields:
#' \itemize{
#' \item \code{wsc}: A matrix of size \code{length(tcentral)} x \code{length(scales)}
#' containing the values of the windowed scalograms at each scale and at each time window.
#' \item \code{tcentral}: The vector of central times at which the windows are centered.
#' \item \code{scales}: The vector of the scales.
#' \item \code{windowrad}: Radius for the time windows, measured in \code{dt}.
#' \item \code{fourierfactor}: A factor for converting scales into periods.
#' \item \code{coi_maxscale}: A vector of length \code{length(tcentral)} containing the
#' values of the maximum scale from which there are border effects for the respective
#' central time.
#' }
#'
#' @importFrom graphics abline
#'
#' @examples
#' dt <- 0.1
#' time <- seq(0, 50, dt)
#' signal <- c(sin(pi * time), sin(pi * time / 2))
#' wscalog <- windowed_scalogram(signal = signal, dt = dt)
#'
#'
#' @section References:
#'
#' C. Torrence, G. P. Compo. A practical guide to wavelet analysis. B. Am. Meteorol. Soc.
#' 79 (1998), 61–78.
#'
#' V. J. Bolós, R. Benítez, R. Ferrer, R. Jammazi. The windowed scalogram difference: a
#' novel wavelet tool for comparing time series. Appl. Math. Comput., 312 (2017), 49-65.
#'
#' R. Benítez, V. J. Bolós, M. E. Ramírez. A wavelet-based tool for studying
#' non-periodicity. Comput. Math. Appl. 60 (2010), no. 3, 634-641.
#'
#' @export
#'
# See the roxygen block above for the full contract.  Inline comments below
# annotate the main computational stages.
windowed_scalogram <-
  function(signal,
           dt = 1,
           scales = NULL,
           powerscales = TRUE,
           windowrad = NULL,
           delta_t = NULL,
           wname = c("MORLET", "DOG", "PAUL", "HAAR", "HAAR2"),
           wparam = NULL,
           waverad = NULL,
           border_effects = c("BE", "INNER", "PER", "SYM"),
           energy_density = TRUE,
           makefigure = TRUE,
           time_values = NULL,
           figureperiod = TRUE,
           xlab = "Time",
           ylab = NULL,
           main = "Windowed Scalogram") {

    # require(zoo)
    # require(Matrix)

    wname <- toupper(wname)
    wname <- match.arg(wname)

    # Default wavelet radius per wavelet family (used for the cone of influence).
    if (is.null(waverad)) {
      if ((wname == "MORLET") || (wname == "DOG")) {
        waverad <- sqrt(2)
      } else if (wname == "PAUL") {
        waverad <- 1 / sqrt(2)
      } else { # HAAR
        waverad <- 0.5
      }
    }

    border_effects <- toupper(border_effects)
    border_effects <- match.arg(border_effects)
    # "INNER" is handled here, not in cwt_wst: the CWT itself is computed with
    # plain border effects ("BE") and the inner normalization is applied below.
    if (border_effects == "INNER") {
      border_effects_cwt <- "BE"
    } else {
      border_effects_cwt <- border_effects
    }

    nt <- length(signal)

    # Defaults: ~256 window centers and a window radius of ~5% of the signal.
    if (is.null(delta_t)) {
      delta_t <- ceiling(nt / 256)
    }

    if (is.null(windowrad)) {
      windowrad <- ceiling(nt / 20)
    } else {
      windowrad <- min(windowrad, floor((nt - 1) / 2))
    }

    fourierfactor <- fourier_factor(wname = wname, wparam = wparam)

    # Construct the scales if not supplied; otherwise normalize/sort them.
    # Internally `scales` is measured in samples and `scalesdt` in time units.
    if (is.null(scales)) {
      scmin <- 2 / fourierfactor
      scmax <- floor((nt - 2 * windowrad) / (2 * waverad))
      if (powerscales) {
        scales <- pow2scales(c(scmin, scmax, ceiling(256 / log2(scmax / scmin))))
      } else {
        scales <- seq(scmin, scmax, by = (scmax - scmin) / 256)
      }
      scalesdt <- scales * dt
    } else {
      if (powerscales && length(scales) == 3) {
        scales <- pow2scales(scales)
      } else {
        if (is.unsorted(scales)) {
          warning("Scales were not sorted.")
          scales <- sort(scales)
        }
      }
      scalesdt <- scales
      scales <- scales / dt
    }

    ns <- length(scales)

    # Continuous wavelet transform of the whole signal, computed once.
    cwt <- cwt_wst(signal = signal,
                   dt = dt,
                   scales = scalesdt,
                   powerscales = FALSE,
                   wname = wname,
                   wparam = wparam,
                   waverad = waverad,
                   border_effects = border_effects_cwt,
                   makefigure = FALSE)

    coefs <- cwt$coefs

    if (border_effects == "INNER") {
      # Inner scalogram: at each scale, restrict the window to the region free
      # of border effects (a margin of wrs[j] samples at each end) and
      # normalize by the actual number of samples used.
      wrs <- ceiling(waverad * scales)
      tcentral_ini <- max(1 + windowrad, 1 + wrs[1] - windowrad)
      tcentral_end <- min(nt - windowrad, nt - wrs[1] + windowrad)
      if (tcentral_ini > tcentral_end) {
        stop("We need a larger signal")
      }
      tcentral <- seq(from = tcentral_ini, to = tcentral_end, by = delta_t)
      ntcentral <- length(tcentral)
      wsc <- matrix(NA, nrow = ntcentral, ncol = ns)
      abscoefs2 <- matrix(abs(coefs) ^ 2, nrow = nt, ncol = ns)
      # Regular version
      for (i in 1:ntcentral) {
        for (j in 1:ns) {
          t_ini <- max(tcentral[i] - windowrad, 1 + wrs[j])
          t_end <- min(tcentral[i] + windowrad, nt - wrs[j])
          if (t_ini <= t_end) {
            wsc[i, j] <- sqrt(abs(sum(abscoefs2[t_ini:t_end, j]))) # abs: sometimes wsc is negative due to numerical errors
            wsc[i, j] <- wsc[i, j] / sqrt(t_end - t_ini + 1) # Normalization
          }
        }
      }
      wsc <- as.matrix(wsc)
    } else {
      tcentral_ini <- 1 + windowrad
      tcentral_end <- nt - windowrad
      tcentral <- seq(from = tcentral_ini, to = tcentral_end, by = delta_t)
      ntcentral <- length(tcentral)
      wsc <- matrix(0, nrow = ntcentral, ncol = ns)
      abscoefs2 <- matrix(abs(coefs) ^ 2, nrow = nt, ncol = ns)
      if (delta_t < windowrad) { # Fast version
        # Sliding-window update: each window sum is obtained from the previous
        # one by subtracting the samples that left and adding those that entered.
        for (j in 1:ns) {
          wsc[1, j] <- sum(abscoefs2[1:(1 + 2 * windowrad), j])
          for (i in 2:ntcentral) {
            wsc[i, j] <- wsc[i-1, j] - sum(abscoefs2[(tcentral[i] - windowrad - delta_t):(tcentral[i] - windowrad - 1), j]) + sum(abscoefs2[(tcentral[i] + windowrad - delta_t + 1):(tcentral[i] + windowrad), j])
          }
        }
      } else { # Regular version
        for (i in 1:ntcentral) {
          for (j in 1:ns) {
            wsc[i, j] <- sum(abscoefs2[(tcentral[i] - windowrad):(tcentral[i] + windowrad), j])
          }
        }
      }
      wsc <- as.matrix(sqrt(abs(wsc))) # abs: sometimes wsc is negative due to numerical errors
      wsc <- wsc / sqrt(2 * windowrad + 1) # Normalization
    }

    # COI: for each window center, the maximum scale free of border effects.
    coi_maxscale <- numeric(ntcentral)
    for (i in 1:ntcentral) {
      coi_maxscale[i] <- dt * min(tcentral[i] - windowrad - 1, nt - tcentral[i] - windowrad) / waverad
    }

    tcentraldt <- tcentral * dt

    # Energy density: divide by sqrt(scale), column-wise.
    if (energy_density) {
      wsc <- t(t(wsc) / sqrt(scalesdt))
    }

    # Make figure
    if (makefigure) {
      if (figureperiod) {
        Y <- fourierfactor * scalesdt
        coi <- fourierfactor * coi_maxscale
        if (is.null(ylab)) ylab <- "Period"
      } else {
        Y <- scalesdt
        coi <- coi_maxscale
        if (is.null(ylab)) ylab <- "Scale"
      }
      if (is.null(time_values)) {
        X <- tcentraldt
      } else {
        if (length(time_values) != nt) {
          warning("Invalid length of time_values vector. Changing to default.")
          X <- tcentraldt
        } else {
          X <- time_values[tcentral]
        }
      }
      # With several scales, draw a 2-D image; with a single scale, a line plot
      # with dashed lines marking the border-effect (COI) region.
      if (length(Y) > 1) {
        wavPlot(
          Z = wsc,
          X = X,
          Y = Y,
          Ylog = powerscales,
          coi = coi,
          Xname = xlab,
          Yname = ylab,
          Zname = main
        )
      } else {
        if (is.null(ylab)) ylab <- "Windowed Scalogram"
        plot(X, wsc, type = "l", xlab = xlab, ylab = ylab, main = main, xaxt = "n")
        axis(side = 1, at = X[1 + floor((0:8) * (ntcentral - 1) / 8)])
        abline(v = range(X[(coi > Y)]), lty = 2)
      }
    }

    return(list(
      wsc = wsc,
      tcentral = tcentraldt,
      scales = scalesdt,
      windowrad = windowrad,
      fourierfactor = fourierfactor,
      coi_maxscale = as.numeric(coi_maxscale)
    ))
  }
|
11469b63781763b45a00f2cb1f1201d6d87a70dc
|
e53f8d45dac571308c04cbd2f06e04c6ce332696
|
/code/animateOptimization.R
|
0c2eaeafffa97479e2b2144e0527df59aa29a0bd
|
[] |
no_license
|
jeanlucj/BO_Budgets
|
beb6963a409be387c4d11fc469ee2d6daf992dce
|
7396b22b55c4ccae5df4abffd8acd1eceda7978c
|
refs/heads/main
| 2023-08-06T06:40:15.752144
| 2021-10-07T18:44:40
| 2021-10-07T18:44:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,409
|
r
|
animateOptimization.R
|
##################################################################################################
# A script with animation examples using R
# by: Isaac J. Faber, examples derived from https://yihui.name/animation/examples/
# WARNING!!!! without dependencies installed you can only render to HTML
# Make sure you install FFmpeg (for videos) and Graphics Magick (for gifs) on the terminal (here in RStudio) first.
# Use these commands for FFmpeg
# -----------------------------------------------
# sudo add-apt-repository ppa:jonathonf/ffmpeg-4
# sudo apt-get update
# sudo apt-get install ffmpeg
# -----------------------------------------------
# Use these commands for Graphics Magick
# -----------------------------------------------
# sudo apt-get install python-software-properties
# sudo apt-get install software-properties-common
# sudo add-apt-repository ppa:rwky/graphicsmagick
# sudo apt-get update
# sudo apt-get install graphicsmagick
#################################################################################################
# NOTE: everything inside this if (FALSE) block is intentionally disabled --
# it is kept as runnable example code only (it also references objects such
# as gdp_filtered and plotUpto that are not defined in this file).
if (FALSE){
  # The package is written by Yihui with a companion website here: https://yihui.name/animation/
  library(animation)
  # Some helper libraries for data munging
  library(tidyverse)

  # check the animation options with
  ani.options()
  # set some of the options by specifying the commands
  ani.options(interval = 0.5,
              nmax = 200)

  ## The good animation as a simple GIF
  saveGIF({
    for (whichOpt in 1:6){
      for (nSim in 36 + 0:30 * 50){
        plotUpto(whichOpt=whichOpt, nSim=nSim, degree=0)
      }
    }
  },
  #close the animation builder
  movie.name = '~/optimizationDeg0.gif'
  )

  ## The good animation as a simple GIF
  saveGIF({
    end_year = 2017 #last year of the plot
    num_years = 30 #number of years in the animation

    #create a loop that does the subsetting
    for(i in 1:num_years){

      gdp_subset <- gdp_filtered %>% filter(year <= end_year-(num_years-i))

      #write the plot with a subset
      p<-ggplot(gdp_subset,aes(x=year,y=gdp,group=`Country Name`,colour=`Country Name`)) +
        geom_line(size = 2) +
        scale_x_continuous(breaks=c(1960,1980,2000,2017)) +
        ylim(0,(2*10^13))+
        xlab('') +
        ylab('Gross Domestic Product') +
        theme_bw() +
        theme(legend.title=element_blank())

      print(p)

    }#close the for loop
    #close the animation builder
  }, convert = 'gm convert', movie.name = 'good_animation.gif')
}#END FALSE
# ptCol: what colors to have HiMn, HiCt, HiPredGain
# addPoints: Three column matrix with x, y, color
# addLegend: I think it's generally ugly so would want to remove
# Modifying this in a way that doesn't make sense for the general package
# Plot a hexbin summary of simulation results, optionally marking the bins
# with the highest mean, highest count and highest predicted gain.
#
# resultMat: matrix of simulation results passed to
#   BreedSimCost::loessPredCount().
# nSim: number of simulations to use (defaults to all rows of resultMat).
# xlim, ylim: optional axis limits forwarded to loessPredCount().
# budg1, budg2: column indices of the two budget dimensions.
# binMeanContrast: exponent applied to bin means to stretch color contrast.
# plotMn: if TRUE, recode the hexbin counts so color encodes the bin MEAN
#   (rescaled to 1..100) rather than the raw count.
# plotHiMn, plotHiCt, plotHiPredGain: mark the highest-mean / highest-count /
#   highest-predicted-gain bin, respectively.
# ptCol: colors for those three marker points (HiMn, HiCt, HiPredGain).
# addPoints: not used in this body -- kept for interface compatibility.
# addLegend: show the hexbin legend (only effective when plotMn is FALSE).
# giveRange: include the bin-mean range in the plot title.
# degree: loess degree forwarded to loessPredCount().
#
# Returns the list produced by loessPredCount().
plotLoessPred_ <- function(resultMat, nSim=nrow(resultMat),
                           xlim=NULL, ylim=NULL,
                           budg1=1, budg2=2, binMeanContrast=3,
                           plotMn=T, plotHiMn=T, plotHiCt=F, plotHiPredGain=F,
                           ptCol=2:4, addPoints=NULL, addLegend=F, giveRange=T,
                           degree=1){
  require(hexbin)
  require(grid)
  lpc <- BreedSimCost::loessPredCount(resultMat=resultMat, nSim=nSim,
                                      xlim=xlim, ylim=ylim,
                                      budg1=budg1, budg2=budg2,
                                      degree=degree)
  rangeTitle <- " Simulations"
  if (giveRange) rangeTitle <- paste0(" Sims. Range: ", paste(round(range(lpc$binMean), 1), collapse=" to "))
  # Left-pad with spaces so titles stay aligned as nSim gains digits.
  prefTitle <- paste0(rep(" ", 4 - ceiling(log10(nSim))), collapse="")
  main <- paste0(prefTitle, nSim, rangeTitle)
  if (plotMn){
    # Overwrite the hexbin counts with contrast-stretched bin means,
    # rescaled into the 1..100 range the count palette expects.
    bmc <- binMeanContrast
    binRange <- diff(range(lpc$binMean))^bmc
    meanAsCount <- round(99*(lpc$binMean - min(lpc$binMean))^bmc / binRange) + 1
    lpc$bins@count <- meanAsCount
  }
  p <- hexbin::gplot.hexbin(lpc$bins, main=main,
                            legend=ifelse(addLegend & !plotMn, 1, 0))
  # Enter the hexbin viewport so grid.points coordinates match the plot.
  pushHexport(p$plot.vp)
  if (plotHiMn){
    grid::grid.points(lpc$hiMeanXY[1], lpc$hiMeanXY[2], gp=gpar(col=ptCol[1]),
                      pch=16, size = unit(1, "char"))
  }
  if (plotHiCt){
    grid::grid.points(lpc$hiCtXY[1], lpc$hiCtXY[2], gp=gpar(col=ptCol[2]),
                      pch=16, size = unit(1, "char"))
  }
  if (plotHiPredGain){
    grid::grid.points(lpc$hiPredXY[1], lpc$hiPredXY[2], gp=gpar(col=ptCol[3]),
                      pch=16, size = unit(1, "char"))
  }
  upViewport()
  return(lpc)
}#END plotLoessPred_
|
1d0d31b4ef766eda514e790f17523d2abf6893c7
|
7eb128f9b7899c33d4854009edbd38dd566cba72
|
/R Tutorials/data mining/arules.R
|
8127182aa92cd017e07ae20bc25a6d3a815c76c2
|
[] |
no_license
|
chengjun/Research
|
1149add090ec563f544c4b5a886c01b1392a25d4
|
c01e3d2eac2bca74671abb9cd63e1b06e5566fc8
|
refs/heads/master
| 2021-06-15T16:35:15.005107
| 2019-08-06T09:03:29
| 2019-08-06T09:03:29
| 11,498,113
| 4
| 6
| null | 2021-04-15T09:27:11
| 2013-07-18T08:36:12
|
Mathematica
|
GB18030
|
R
| false
| false
| 3,354
|
r
|
arules.R
|
library(arules)
# Load the Adult census data set and inspect its shape.
data("AdultUCI"); names(AdultUCI); dim(AdultUCI)
# Drop columns not useful for rule mining.
AdultUCI[["fnlwgt"]] <- NULL
AdultUCI[["education-num"]] <- NULL
# Discretize the continuous columns into ordered factors so the data frame
# can be coerced to a "transactions" object.
AdultUCI[[ "age"]] <- ordered(cut(AdultUCI[[ "age"]], c(15,25,45,65,100)),
                              labels = c("Young", "Middle-aged", "Senior", "Old"))
AdultUCI[[ "hours-per-week"]] <- ordered(cut(AdultUCI[[ "hours-per-week"]],
                                             c(0,25,40,60,168)),
                                         labels = c("Part-time", "Full-time", "Over-time", "Workaholic"))
# Capital gain/loss: split positive values at their median.
AdultUCI[[ "capital-gain"]] <- ordered(cut(AdultUCI[[ "capital-gain"]],
                                           c(-Inf,0,median(AdultUCI[[ "capital-gain"]][AdultUCI[[ "capital-gain"]]>0]),Inf)),
                                       labels = c("None", "Low", "High"))
AdultUCI[[ "capital-loss"]] <- ordered(cut(AdultUCI[[ "capital-loss"]],
                                           c(-Inf,0,
                                             median(AdultUCI[[ "capital-loss"]][AdultUCI[[ "capital-loss"]]>0]),Inf)),
                                       labels = c("none", "low", "high"))
Adult <- as(AdultUCI, "transactions")
## data("Adult") # Actually, it has been stored in arules library
# Single-panel layout, then plot item frequencies above 10% support.
nf <- layout(matrix(c(1,1,1,1), 2, 2, byrow = TRUE), respect = TRUE)
layout.show(nf)
itemFrequencyPlot(Adult, support = 0.1, cex.names=0.8)
## Mine association rules.
rules <- apriori(Adult,
                 parameter = list(supp = 0.5, conf = 0.9,
                                  target = "rules"))
summary(rules)
inspect(rules)
#####################################################################
'arules: Mining association rules'
#####################################################################
library(arules)
library(arulesViz)
# NOTE(review): `dt` must already exist in the workspace; columns 2:6 are
# coerced to a matrix and then to transactions.
dtmm<- as(as.matrix(dt[,2:6]), "transactions")
rules <- apriori(dtmm, parameter = list(supp = 0.5, conf = 0.9, minlen=1, target = "rules"))
itemsets<- apriori(dtmm,parameter = list(supp = 0.5, minlen=1,target = "frequent itemsets"))
# plot itemsets
it = as(itemsets, "data.frame")
subset2<-itemsets[size(itemsets)>1]  # itemsets with at least two items
subset2<-sort(subset2) # [1:100]
subset3<-itemsets[size(itemsets)>2]  # itemsets with at least three items
par(mfrow=c(1,2))
plot(subset2, method="graph",control=list(main="至少包含两个词语的前100个项集"))
plot(subset3, method="graph",control=list(main="至少包含三个词语的所有项集"))
# plot items; high-resolution PNG with a CJK-capable font
png(paste("d:/chengjun/honglou/arulesViz_3", ".png", sep = ''),
    width=10, height=10,
    units="in", res=700,
    family="MOESung-Regular",type="cairo")
plot(rules, method="graph", control=list(type="items")) # "items" or "itemsets"
dev.off()
# list(lhs(rules) , decode=T) [[1]]
# list(rhs(rules) , decode=T) [[1]]
# Extract single-item labels from one side of a rule set.
#
# x: accessor function for one side of the rules (e.g. arules::lhs or
#    arules::rhs).
# y: the rules object the accessor is applied to.
#
# `labels(x(y))$elements` yields strings such as "{a}" or "{a,b}"; the braces
# are stripped, each label is split on commas, and any side holding two or
# more items is replaced by 0 so only single-item sides survive (the caller
# later filters edges on item membership).
#
# Returns a list with one element per rule: a single item label, or 0.
parseAssociationItems <- function(x, y){
  items <- labels(x(y))$elements # items, e.g. "{a}", "{a,b}"
  # fixed = TRUE (a logical, not the string "TRUE") so "{" and "}" are
  # matched literally instead of being parsed as regular expressions;
  # the stray empty positional argument is also removed.
  items <- gsub("{", "", items, fixed = TRUE)
  items <- gsub("}", "", items, fixed = TRUE)
  items <- strsplit(items, ",")
  for (i in seq_along(items)) {
    # Plain if/else instead of scalar ifelse(): the test is length 1, and
    # ifelse() would return NA for a zero-length (empty-label) element.
    items[[i]] <- if (length(items[[i]]) >= 2) 0 else items[[i]]
  }
  items
}
# Build an edge list (lhs item -> rhs item) from the rules and draw it as a
# directed graph (graph.data.frame comes from igraph).
lhss = unlist(parseAssociationItems(lhs, rules))
rhss = unlist(parseAssociationItems(rhs, rules))
uniItems = labels(rhs(rules))$items
ru = as(rules, "data.frame")
edgelist = data.frame(lhss, rhss, ru)
# Keep only edges whose endpoints are genuine item labels; this drops the
# 0 placeholders produced for multi-item rule sides.
edgelist = subset(edgelist, edgelist$lhss%in%uniItems & edgelist$rhss%in%uniItems)
g <- graph.data.frame(edgelist, directed=TRUE)
plot(g)
|
4fe4c4e00ee651d3be54fb08407aa1afbd1f661e
|
d008d74a9c473ca61b96923409811ccb47b96406
|
/R/utils.R
|
2371e74fed432b5ba895a9a19028031e0acee4fa
|
[] |
no_license
|
AustralianAntarcticDivision/raadtools
|
0246d25a1480888780aa23ea27c1f9985bdaafa0
|
d8cc9c553f52e4b5a14c7f2fd738d5bc1318c304
|
refs/heads/main
| 2023-07-24T23:03:01.099186
| 2023-07-07T01:28:52
| 2023-07-07T01:28:52
| 34,773,324
| 19
| 5
| null | 2023-03-14T10:06:42
| 2015-04-29T05:00:19
|
HTML
|
UTF-8
|
R
| false
| false
| 5,098
|
r
|
utils.R
|
# Read one complete variable from a NetCDF file.
#
# x: path to a NetCDF file.
# var: name of the variable to extract.
#
# The connection is closed via on.exit() even if ncvar_get() fails.
# Returns the value from ncdf4::ncvar_get() for `var`.
nc_rawdata <- function(x, var) {
  nc <- ncdf4::nc_open(x)
  on.exit(ncdf4::nc_close(nc))
  ncdf4::ncvar_get(nc, var)
}
# Two-element c(min, max) extent helpers for raster-like objects
# (xmin/xmax/ymin/ymax generics are provided by the raster package).
xrange <- function(x) c(xmin(x), xmax(x))
yrange <- function(x) c(ymin(x), ymax(x))
# Print the command used to reinstall the development version of raadtools.
# Purely informational; returns cat()'s NULL invisibly.
# NOTE(review): the name shadows stats::update -- confirm this is intended.
update <- function() {
  cat('\ndevtools::install_github("AustralianAntarcticDivision/raadtools")\n\n')
}
# Tag a date-time vector so it prints/formats in UTC.
#
# x: a POSIXct (or similar) date-time vector.
#
# Fix: the display time zone of POSIXct objects is carried by the "tzone"
# attribute (see ?DateTimeClasses); the previous code set a "tz" attribute,
# which base R's format/print methods ignore, so the function had no
# observable effect on output.
#
# Returns `x` with its display time zone set to UTC (the underlying
# instant is unchanged).
set_utc_format <- function(x) {
  attr(x, "tzone") <- "UTC"
  x
}
## internal rotate to match old behaviour
## https://r-forge.r-project.org/scm/viewvc.php/pkg/raster/R/rotate.R?root=raster&r1=2782&r2=2981
#' @importFrom raster merge
# Internal rotate between [0, 360) and [-180, 180) longitude conventions,
# matching raster's old rotate() behaviour (see the r-forge link above).
#
# x: a Raster* object on a longitude/latitude grid.
# ...: passed on to raster::merge().
#
# The raster is split at its middle longitude, one half's extent is shifted
# by the full x-range, and the halves are merged back. When the input
# already spans roughly [-180, 180], the shift runs in the inverse direction.
# Returns the rotated Raster* with names and the z slot preserved.
.rotate <- function(x, ...) {
  e <- extent(x)
  xrange <- e@xmax - e@xmin
  inverse <- FALSE
  # Heuristic sanity checks on the longitude span: warn when the extent
  # looks like neither [0, 360] nor [-180, 180].
  if (xrange < 350 | xrange > 370 | e@xmin < -10 | e@xmax > 370) {
    if (xrange < 350 | xrange > 370 | e@xmin < -190 | e@xmax > 190) {
      warning('this does not look like an appropriate object for this function')
    } else {
      inverse <- TRUE
    }
  }
  # Split at the middle longitude.
  hx <- e@xmin + xrange / 2
  r1 <- crop(x, extent(e@xmin, hx, e@ymin, e@ymax))
  r2 <- crop(x, extent(hx, e@xmax, e@ymin, e@ymax))
  # Shift one half's extent so the halves abut in the target convention.
  if (inverse) {
    r1@extent@xmin <- r2@extent@xmax
    r1@extent@xmax <- r1@extent@xmin + 0.5 * xrange
  } else {
    r2@extent@xmin <- r2@extent@xmin - xrange
    r2@extent@xmax <- r2@extent@xmax - xrange
  }
  ln <- names(x)  # NOTE(review): `ln` is unused; names are restored below.
  out <- merge(r1, r2, overlap=FALSE, ...)
  names(out) <- names(x)
  out@z <- x@z
  # suggested by Mike Sumner:
  # drop the "+over" PROJ flag once the data no longer wraps past 180
  p <- projection(out)
  if (length(grep("\\+over", p)) > 0) {
    projection(out) <- gsub("[[:space:]]\\+over", "", p)
  }
  return(out)
}
## shared stuff
## datadir
## normalize input dates - need index and value
## private, but common
## dims, projection, bbox
## files
# Subset a file index to the rows matching the requested dates.
#
# dt: requested date-times.
# f: data.frame of candidate files with a `date` column.
# tr: time-resolution keyword understood by .processDates()
#     (e.g. "daily", "monthly").
#
# Returns the rows of `f` matched (in order) to the requested dates.
.processFiles <- function(dt, f, tr) {
  findex <- .processDates(dt, f$date, tr)
  f[findex, ]
}
# .fastNCvar <- function(x, varname) {
# require(ncdf4)
# ncvar_get(nc_open(x), varname)
# }
# Expand a vector of multi-band (time-stamped) raster files into a long
# data.frame with one row per (file, band) combination.
#
# x: character vector of file paths readable by raster::brick().
#
# Returns a data.frame with columns file, date (POSIXct via timedateFrom
# applied to each brick's z values) and band (1-based band index).
.expandFileDateList <- function(x) {
  # Preallocate one list slot per file, then row-bind once at the end.
  vl <- vector("list", length(x))
  for (i in seq_along(x)) {
    b <- brick(x[i], quick = TRUE)
    dates <- timedateFrom(getZ(b))
    vl[[i]] <- data.frame(file = rep(x[i], length(dates)), date = dates, band = seq_along(dates),
                          stringsAsFactors = FALSE)
  }
  do.call("rbind", vl)
}
# Coerce input dates to POSIXct, dropping (or refusing) invalid ones.
#
# x: candidate dates (anything timedateFrom/as.POSIXct understands).
# allOK: if TRUE (default), any invalid date is an error; if FALSE,
#   invalid dates are dropped with a warning.
#
# Errors when no date is valid. Returns the valid dates as POSIXct.
.valiDates <- function(x, allOK = TRUE) {
  stamps <- timedateFrom(x)
  invalid <- is.na(stamps)
  if (all(invalid)) {
    stop("no input dates are valid")
  }
  if (any(invalid)) {
    problem <- "not all input dates are valid"
    if (allOK) stop(problem) else warning(problem)
  }
  stamps[!invalid]
}
# Ensure dates are in ascending order.
#
# x: vector of dates (any orderable type).
# resortOK: if TRUE, out-of-order input is sorted with a warning;
#   if FALSE (default), out-of-order input is an error.
#
# Returns `x`, sorted ascending when resorting was required and allowed.
.sortDates <- function(x, resortOK = FALSE) {
  perm <- order(x)
  if (is.unsorted(perm)) {
    msg <- "dates out of order and will be sorted"
    if (!resortOK) stop(msg)
    warning(msg)
    x <- x[perm]
  }
  x
}
# Map each query date to the index of the latest file date not after it,
# clamped into the valid range 1..length(filedate).
#
# xdate: query date-times.
# filedate: non-decreasing vector of available file date-times.
#
# Returns an index vector the same length as xdate.
.indexDates <- function(xdate, filedate) {
  n <- length(filedate)
  # findInterval() already returns values in 0..n; clamp 0 up to 1
  # (queries before the first file date) and cap at n for safety.
  pmin(pmax(findInterval(xdate, filedate), 1), n)
}
# Drop (or refuse) query dates that resolved to the same file index.
#
# index: file indices, one per query date.
# date: the query dates themselves.
# removeDupes: if TRUE (default), duplicates are dropped with a warning;
#   if FALSE, duplicates are an error.
#
# Returns list(index = ..., date = ...) with duplicates removed in step.
.dedupe <- function(index, date, removeDupes = TRUE) {
  dup <- duplicated(index)
  if (any(dup)) {
    if (!removeDupes) stop("duplicated dates not allowed")
    warning("duplicated dates will be dropped")
    index <- index[!dup]
    date <- date[!dup]
  }
  list(index = index, date = date)
}
# Enforce a maximum time gap between query dates and their matched files.
#
# querydate: the query date-times.
# refdate: the date-times of the matched files (same length).
# index: the candidate file indices, one per query date.
# daytest: maximum allowed |gap| in days (default 7).
#
# Errors when every match exceeds the tolerance; warns and drops the
# offenders when only some do. Returns the surviving indices.
.matchFiles <- function(querydate, refdate, index, daytest = 7) {
  ##
  gap_days <- abs(difftime(querydate, refdate, units = "days"))
  too_far <- gap_days > daytest
  if (all(too_far)) {
    message(sprintf("\nnearest available date is %s", as.Date(refdate)))
    stop(sprintf("no data file within %.1f days of %s", daytest, format(querydate)))
  }
  if (any(too_far)) {
    warning(sprintf("%i input dates have no corresponding data file within %f days of available files", sum(too_far), daytest))
    index <- index[!too_far]
  }
  index
}
# Resolve query dates against the available file dates for a time series.
#
# qdate: requested dates.
# fdate: dates of the available files.
# timeres: time-resolution keyword; selects how far (in days) a query date
#   may be from its matched file before the match is rejected.
#
# Pipeline: validate -> sort -> nearest-index -> de-duplicate -> tolerance
# check, using the private helpers defined above.
#
# Returns indices into `fdate` for the accepted query dates.
.processDates <- function(qdate, fdate, timeres) {
  ## checks on dates, we drop any that are NA
  qdate <- .valiDates(qdate, allOK = FALSE)
  ## sort dates if need be
  qdate <- .sortDates(qdate, resortOK = TRUE)
  ## mapping of files/dates, so we can process time series
  findex <- .indexDates(qdate, fdate)
  ## check for duplicates
  dedupedates <- .dedupe(findex, qdate, removeDupes = TRUE)
  findex <- dedupedates$index
  date <- dedupedates$date
  # Per-resolution matching tolerance in days.
  .matchFiles(date, fdate[findex], findex,
              daytest = switch(timeres, "4hourly" = 1/6, "12hourly" = 1/2, "3hourly" = 1/8, "6hourly" = 0.25, daily = 1.5, weekly = 4, monthly = 15, weekly3 = 26, "8daily" = 5, "8D" = 5))
}
##' Stable conversion to POSIXct from character and Date
##'
##' Conversion to POSIXct ensuring no local time zone applied. Currently supported is character, Date and
##' anything understood by \code{\link[base]{as.POSIXct}}.
##'
##' @param x input date-time stamp, character, Date or other supported type.
##' @param \dots passed on to \code{\link[base]{as.POSIXct}}.
##' @return the vector \code{x} converted (if necessary) to \code{POSIXct}
##' @export
timedateFrom <- function(x, ...) {
  # Fixed UTC tz so character/Date input is never shifted by the local zone.
  as.POSIXct(x, tz = "UTC", ...)
}
|
9278e60377f6fd8091f39d2000a8919b52190d54
|
9326d857c238ff56f993437fb44a5c90961d0753
|
/R/radio_button_input.R
|
7103864e9cd266e3765158f8505f39511ae41e7f
|
[] |
no_license
|
moj-analytical-services/shinyGovstyle
|
e1e9b4062710b229f269f9b0bb58c1398383f7e1
|
a033342e971b9f090c06b6e17b82b20d27dce50c
|
refs/heads/master
| 2023-07-11T05:45:21.430131
| 2022-02-22T10:36:38
| 2022-02-22T10:36:38
| 192,864,104
| 34
| 4
| null | 2022-02-07T12:41:32
| 2019-06-20T06:41:58
|
CSS
|
UTF-8
|
R
| false
| false
| 8,786
|
r
|
radio_button_input.R
|
#' Radio Button Function
#'
#' This function create radio buttons
#' @param inputId The \code{input} slot that will be used to access the value.
#' @param label Input label.
#' @param choices List of values to select from (if elements of the list are
#' named then that name rather than the value is displayed to the user)
#' @param selected The initially selected value.
#' @param inline If you want the radio inline or not, Default is FALSE
#' @param small If you want the smaller versions of radio buttons, Default
#' is FALSE
#' @param choiceNames,choiceValues Same as in
#' \code{\link[shiny]{checkboxGroupInput}}. List of names and values,
#' respectively, that are displayed to the user in the app and correspond to
#' the each choice (for this reason they must have the same length). If either
#' of these arguments is provided, then the other must be provided and choices
#' must not be provided. The advantage of using both of these over a named list
#' for choices is that choiceNames allows any type of UI object to be passed
#' through (tag objects, icons, HTML code, ...), instead of just simple text.
#' @param hint_label Additional hint text you may want to display below the
#' label. Defaults to NULL
#' @param error Whenever you want to include error handle on the component.
#' @param error_message If you want a default error message.
#' @param custom_class If you want to add additional classes to the radio
#' buttons
#' @return radio buttons html shiny object
#' @keywords radiobuttons
#' @export
#' @examples
#' if (interactive()) {
#'
#' ui <- fluidPage(
#' # Required for error handling function
#' shinyjs::useShinyjs(),
#' shinyGovstyle::header(
#' main_text = "Example",
#' secondary_text = "User Examples",
#' logo="shinyGovstyle/images/moj_logo.png"),
#' shinyGovstyle::banner(
#' inputId = "banner", type = "beta", 'This is a new service'),
#' shinyGovstyle::gov_layout(size = "two-thirds",
#' #Simple radio
#' shinyGovstyle::radio_button_Input(
#' inputId = "radio1",
#' choices = c("Yes", "No", "Maybe"),
#' label = "Choice option"),
#' # Error radio
#' shinyGovstyle::radio_button_Input(
#' inputId = "radio2",
#' choices = c("Yes", "No", "Maybe"),
#' label = "Choice option",
#' hint_label = "Select the best fit",
#' inline = TRUE,
#' error = TRUE,
#' error_message = "Select one"),
#' # Button to trigger error
#' shinyGovstyle::button_Input(inputId = "submit", label = "Submit")
#' ),
#' shinyGovstyle::footer(full = TRUE)
#' )
#'
#' server <- function(input, output, session) {
#' #Trigger error on blank submit of eventId2
#' observeEvent(input$submit, {
#' if (is.null(input$radio2)){
#' shinyGovstyle::error_on(inputId = "radio2")
#' } else {
#' shinyGovstyle::error_off(
#' inputId = "radio2")
#' }
#' })
#' }
#' shinyApp(ui = ui, server = server)
#' }
radio_button_Input <- function (inputId, label, choices = NULL,
                                selected = NULL, inline = FALSE, small = FALSE,
                                choiceNames = NULL, choiceValues = NULL,
                                hint_label = NULL, error = FALSE,
                                error_message = NULL, custom_class = ""){
  # Reconcile `choices` vs the choiceNames/choiceValues pair into one form.
  args <- normalizeChoicesArgs2(choices, choiceNames, choiceValues)
  # Restore a bookmarked/previous selection when shiny has one for this id.
  selected <- shiny::restoreInput(id = inputId, default = selected)
  # selected <- if (is.null(selected))
  #   args$choiceValues[[1]]
  # else
  selected <- as.character(selected)
  if (length(selected) > 1)
    stop("The 'selected' argument must be of length 1")
  options <- generateOptions2(inputId, selected, inline, small, "radio",
                              args$choiceNames, args$choiceValues)
  divClass <- paste("govuk-form-group govuk-radios", custom_class)
  # Assemble the GOV.UK markup: label, optional hint, optional (initially
  # hidden) error message, then the radio options themselves.
  govRadio <- shiny::tags$div(id = inputId, class = divClass,
    shiny::tags$div(class="govuk-form-group", id=paste0(inputId,"div"),
      controlLabel2(inputId, label),
      shiny::tags$div(hint_label ,class="govuk-hint"),
      # The error paragraph is rendered hidden; error_on()/error_off()
      # toggle it from the server (see the roxygen example above).
      if (error == TRUE){
        shinyjs::hidden(
          shiny::tags$p(error_message,
                        class="govuk-error-message",
                        id= paste0(inputId, "error"),
                        shiny::tags$span("Error:",
                                         class="govuk-visually-hidden")
          )
        )
      },
      options))
  # Attach the JS/CSS dependency bundle for the radios component.
  attachDependency(govRadio, "radio")
}
# Build a GOV.UK-styled <label> tied to `controlName`, or NULL when `label`
# is NULL/NA (short-circuited by the %AND% helper below).
controlLabel2 <- function(controlName, label) {
  label %AND% htmltools::tags$label(class = "govuk-label",
                                    `for` = controlName, label)
}
# Build one govuk-styled option <div> per choice.
#
# inputId: shared `name` attribute so the browser groups the inputs.
# selected: character value(s) to pre-check.
# inline, small: layout modifiers mapped to govuk-radios--inline/--small.
# type: input type attribute; callers in this file pass "radio".
# choiceNames, choiceValues: parallel lists from normalizeChoicesArgs2().
# session: shiny session used when resolving HTML dependencies in names.
#
# Returns a shiny div containing all option items.
generateOptions2 <- function (inputId, selected, inline, small,
                              type = "checkbox", choiceNames,
                              choiceValues,
                              session = shiny::getDefaultReactiveDomain()){
  options <- mapply(choiceValues, choiceNames, FUN = function(value, name) {
    inputTag <- shiny::tags$input(type = type, name = inputId,
                                  value = value, class = "govuk-radios__input")
    # Pre-check this input when its value is among `selected`.
    if (is.null(selected) == FALSE & value %in% selected)
    {inputTag$attribs$checked <- "checked"}
    # Choice names may be arbitrary UI objects; render them and keep any
    # HTML dependencies they carry.
    pd <- processDeps2(name, session)
    shiny::tags$div(class = "govuk-radios__item",
                    shiny::tags$label(inputTag, shiny::tags$span(
                      pd$html,
                      pd$deps,
                      class = "govuk-label govuk-radios__label")))
  },
  SIMPLIFY = FALSE, USE.NAMES = FALSE)
  # Accumulate the container's modifier classes.
  class_build <- "govuk-radios"
  if (inline){
    class_build <- paste(class_build, "govuk-radios--inline")
  }
  if (small){
    class_build <- paste(class_build, "govuk-radios--small")
  }
  shiny::div(class = class_build, options)
}
# Guarded "and": return `y` only when both `x` and `y` are non-NULL and
# NA-free; otherwise return NULL. Used to suppress optional UI pieces.
`%AND%` <- function(x, y) {
  if (is.null(x) || anyNA(x) || is.null(y) || anyNA(y)) {
    return(NULL)
  }
  y
}
# Render tag objects to HTML and collect their web dependencies
# (appears adapted from a shiny internal -- keep in sync if shiny changes).
#
# tags: tag object(s) to render.
# session: shiny session whose singleton registry is consulted; each
#   dependency is registered via shiny::createWebDependency.
#
# Returns list(html = rendered HTML string, deps = unnamed dependency list).
processDeps2 <- function (tags, session)
{
  ui <- htmltools::takeSingletons(
    tags, session$singletons, desingleton = FALSE)$ui
  ui <- htmltools::surroundSingletons(ui)
  dependencies <- lapply(
    htmltools::resolveDependencies(htmltools::findDependencies(ui)),
    shiny::createWebDependency
  )
  names(dependencies) <- NULL
  list(html = htmltools::doRenderTags(ui), deps = dependencies)
}
# Reconcile the `choices` argument with the choiceNames/choiceValues pair
# (appears adapted from shiny's internal normalizeChoicesArgs).
#
# Exactly one form must be supplied: either `choices`, or both
# `choiceNames` and `choiceValues` of equal, unnamed length. When
# mustExist = FALSE, all-NULL input is allowed and yields NULL components.
#
# Returns list(choiceNames = <list>, choiceValues = <list of character>).
normalizeChoicesArgs2 <- function (choices, choiceNames,
                                   choiceValues, mustExist = TRUE)
{
  if (is.null(choices)) {
    if (is.null(choiceNames) || is.null(choiceValues)) {
      if (mustExist) {
        stop("Please specify a non-empty vector for `choices` (or, ",
             "alternatively, for both `choiceNames` AND `choiceValues`).")
      }
      else {
        if (is.null(choiceNames) && is.null(choiceValues)) {
          return(list(choiceNames = NULL, choiceValues = NULL))
        }
        else {
          stop("One of `choiceNames` or `choiceValues` was set to ",
               "NULL, but either both or none should be NULL.")
        }
      }
    }
    if (length(choiceNames) != length(choiceValues)) {
      stop("`choiceNames` and `choiceValues` must have the same length.")
    }
    if (anyNamed2(choiceNames) || anyNamed2(choiceValues)) {
      stop("`choiceNames` and `choiceValues` must not be named.")
    }
  }
  else {
    # `choices` wins when both forms are given.
    if (!is.null(choiceNames) || !is.null(choiceValues)) {
      warning("Using `choices` argument; ignoring `choiceNames`
              and `choiceValues`.")
    }
    choices <- choicesWithNames2(choices)
    choiceNames <- names(choices)
    choiceValues <- unname(choices)
  }
  return(list(choiceNames = as.list(choiceNames),
              choiceValues = as.list(as.character(choiceValues))))
}
# Ensure every choice has a display name (appears adapted from a shiny
# internal).
#
# Coerces `choices` to a (possibly nested) named list; any element without
# an explicit name gets its own value as the name. Nested sub-lists are
# recursed into and must themselves be named.
choicesWithNames2 <- function (choices)
{
  # Recursively convert vectors to lists, guaranteeing a names attribute.
  listify <- function(obj) {
    makeNamed <- function(x) {
      if (is.null(names(x)))
        names(x) <- character(length(x))
      x
    }
    res <- lapply(obj, function(val) {
      if (is.list(val))
        listify(val)
      else if (length(val) == 1 && is.null(names(val)))
        as.character(val)
      else makeNamed(as.list(val))
    })
    makeNamed(res)
  }
  choices <- listify(choices)
  if (length(choices) == 0)
    return(choices)
  # Recurse into sub-lists; these must carry a name of their own.
  choices <- mapply(choices, names(choices), FUN = function(choice,
                                                            name) {
    if (!is.list(choice))
      return(choice)
    if (name == "")
      stop("All sub-lists in \"choices\" must be named.")
    choicesWithNames2(choice)
  }, SIMPLIFY = FALSE)
  # Fall back to the value itself for any still-unnamed choice.
  missing <- names(choices) == ""
  names(choices)[missing] <- as.character(choices)[missing]
  choices
}
# TRUE when `x` has at least one non-empty element name, FALSE otherwise
# (including for zero-length or unnamed input).
anyNamed2 <- function(x) {
  if (length(x) == 0) {
    return(FALSE)
  }
  nms <- names(x)
  !is.null(nms) && any(nzchar(nms))
}
|
1be20bb31febaeae61fa9e024a2786366bcaba0d
|
f6c7dd6052f342a919c4f26731ebaace434efb03
|
/man/h5DF.Rd
|
52d2e0e4ff5d4f96d324ae2189db04dad8c28862
|
[
"MIT"
] |
permissive
|
ActiveAnalytics/activeH5
|
9ec5aac0f05a87b8302f013289c4d7ae056bc38c
|
d43bfbb6110a9be5698f3e63eae55023f4ba3fd5
|
refs/heads/master
| 2021-01-10T20:29:00.289895
| 2014-11-30T23:07:49
| 2014-11-30T23:07:49
| 23,437,558
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,192
|
rd
|
h5DF.Rd
|
\name{h5DF}
\alias{h5DF}
\title{Specification of h5DF reference class}
\usage{
h5DF(...)
}
\description{
The h5DF object is an interface to a special type of HDF5
file that holds the data for large data frames in chunks.
Data frames are converted to numeric matrices, characters
are converted to factors and factors to numeric data. It
is designed to allow fast access to chunks of data from
the HDF5 file.
}
\section{Methods in the h5DF object}{
\itemize{ \item h5DF$new() creates a new h5DF object.
Users should use the functions ?newH5DF() and
?openH5DF(). \item h5DF$createH5DF(DF, filePath,
chunkSize, ...) populates a h5DF object with data from a
data frame or csv file containing a data frame DF. It
writes the meta data to the h5DF object and the dataset
to a h5 file. Users should use the functions ?newH5DF()
and ?openH5DF(). \itemize{ \item DF a data frame or path
to a csv file containing a data frame \item filePath path
to a file where the h5 object will be written \item
chunkSize the number of rows that will be written per
chunk \item ... arguments that will be passed to the
read.csv() function } \item h5DF$append(DF, ...) appends
a data frame to the current h5 file and updates the meta
data in the file and on the object \itemize{ \item DF a
data frame or path to a csv file containing a data frame
\item h5DF$readChunk(chunkName) reads chunkName from the
h5DF object returning a data frame chunk. \item
chunkName character name of the chunk to be returned }
\item h5DF$readChunks(chunks) reads the chunkNames from
the h5DF object returning a data frame containing all the
chunks that have been read. \itemize{ \item chunks
character vector name of the chunks to be binded together
and returned as a data frame } \item h5DF$readTable()
reads the whole table back into R. This is a convenience
function and the user must know whether their system has
sufficient memory to accommodate the data frame. \item
h5DF$memorize() this function converts the h5DF object to
a h5MEMDF object. It reads each chunk to memory and
supplies pointers in R to access each chunk. Very useful
when you have lots of memory in your system and need an
efficient way to access chunks of data. }
}
\section{Fields in the h5DF object}{
These are the fields in the h5DF object, they are not to
be directly modified by the user but can be accessed by
h5DF$fieldName.
\itemize{ \item nChunks: The number of chunks in the h5DF
object \item chunkNames: The names of the chunks in the
h5DF object \item colNames: The column names in the
submitted data frame \item colClasses: The classes of the
submitted data frame \item nrows: The number of rows in
the data frame \item ncols: The number of columns in the
data frame \item filePath: A character denoting the path
to the h5 file \item nfactors: The number of factor
columns in the h5DF object \item factors: A list
containing the factor levels for each factor in the data
frame \item chunkSize: The number of rows each chunk will
contain \item MAT: For internal use when manipulating the
data frame \item oldNChunks: For internal use. }
}
|
0808d4fe35a446f9c8e63166f735ce8a76d75cb3
|
b797f4e2b9e0cfe2d9e1a913842f202bc2bfc7fd
|
/mallet/man/MalletLDA.Rd
|
82c19cf9d27f3dffa496f9ee51d5995c7d1ec135
|
[] |
no_license
|
mimno/RMallet
|
680596989de8c4896102cd17f8a6714ab8989bb9
|
cff720fe1a50e2ca6016aed89a18296bdd11cec2
|
refs/heads/master
| 2022-07-31T13:45:38.152176
| 2022-07-21T05:41:28
| 2022-07-21T05:41:28
| 19,386,227
| 36
| 10
| null | 2022-07-21T05:13:00
| 2014-05-02T19:30:23
|
R
|
UTF-8
|
R
| false
| true
| 2,363
|
rd
|
MalletLDA.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mallet.R
\name{MalletLDA}
\alias{MalletLDA}
\title{Create a Mallet topic model trainer}
\usage{
MalletLDA(num.topics = 10, alpha.sum = 5, beta = 0.01)
}
\arguments{
\item{num.topics}{The number of topics to use. If not specified, this defaults to 10.}
\item{alpha.sum}{This is the magnitude of the Dirichlet prior over the topic distribution of a document.
The default value is 5.0. With 10 topics, this setting leads to a Dirichlet with
parameter \eqn{\alpha_k = 0.5}. You can intuitively think of this parameter as a
number of "pseudo-words", divided evenly between all topics, that are present in
every document no matter how the other words are allocated to topics. This is an
initial value, which may be changed during training if hyperparameter
optimization is active.}
\item{beta}{This is the per-word weight of the Dirichlet prior over topic-word distributions.
The magnitude of the distribution (the sum over all words of this parameter) is
determined by the number of words in the vocabulary. Again, this value may change
due to hyperparameter optimization.}
}
\value{
a \code{cc.mallet.topics.RTopicModel} object
}
\description{
This function creates a java cc.mallet.topics.RTopicModel object that wraps a
Mallet topic model trainer java object, cc.mallet.topics.ParallelTopicModel.
Note that you can call any of the methods of this java object as properties.
In the example below, I make a call directly to the
\code{topic.model$setAlphaOptimization(20, 50)} java method,
which passes this update to the model itself.
}
\examples{
\dontrun{
# Read in sotu example data
data(sotu)
sotu.instances <-
mallet.import(id.array = row.names(sotu),
text.array = sotu[["text"]],
stoplist = mallet_stoplist_file_path("en"),
token.regexp = "\\\\p{L}[\\\\p{L}\\\\p{P}]+\\\\p{L}")
# Create topic model
topic.model <- MalletLDA(num.topics=10, alpha.sum = 1, beta = 0.1)
topic.model$loadDocuments(sotu.instances)
# Train topic model
topic.model$train(200)
# Extract results
doc_topics <- mallet.doc.topics(topic.model, smoothed=TRUE, normalized=TRUE)
topic_words <- mallet.topic.words(topic.model, smoothed=TRUE, normalized=TRUE)
top_words <- mallet.top.words(topic.model, word.weights = topic_words[2,], num.top.words = 5)
}
}
|
51bd6d051366850a28b4f6da93cb1c44195590d1
|
3cc4ca2dd309b93fd261a9502830d0955315ef86
|
/R/element_utils.R
|
b979413207cf28fe1430459ffb7d458490a30f8c
|
[
"MIT"
] |
permissive
|
rstudio/remarker
|
198820ee027018c4a524df0e080d7c7484072edc
|
8f46d2463a32e097c2c9c90a9644590cb25cb707
|
refs/heads/main
| 2023-08-15T01:48:19.757759
| 2021-10-05T15:04:13
| 2021-10-05T15:04:13
| 354,125,173
| 11
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,325
|
r
|
element_utils.R
|
#' Extract information from the Attr of an Element
#'
#' @param x A Block or Inline Element object which has an Attr child.
#'
#' @examples
#' h <- Header(
#' attr = Attr(
#' classes = Texts("x", "y"),
#' attributes = TextText_s(list("a", "a_value"), list("b", "b_value"))
#' )
#' )
#'
#' el_get_class(h)
#'
#' el_get_keyvals(h)
#'
#' @export
# Return the classes stored in the Attr child of a Block/Inline Element.
# Errors when `x` is not a Block/Inline or its type has no Attr child.
el_get_class <- function(x) {
  if (!inherits(x, c("Block", "Inline"))) {
    stop("x is not a Block or Inline object")
  }
  el_type <- x[["t"]]
  # Locate the Attr child position for this element type.
  children <- ast_types[[el_type]]$children
  attr_pos <- which(children == "Attr")
  if (length(attr_pos) == 0) {
    stop("Block or Inline Element with type ", el_type,
         " does not have a child with type Attr.")
  }
  attr_node <- x$c[[attr_pos]]
  # Attr's second slot holds the classes.
  as.character(attr_node[[2]])
}
#' @rdname el_get_class
#' @export
# Return the key-value attributes stored in the Attr child of a
# Block/Inline Element, as a named object via namify().
# Errors when `x` is not a Block/Inline or its type has no Attr child.
el_get_keyvals <- function(x) {
  if (!inherits(x, c("Block", "Inline"))) {
    stop("x is not a Block or Inline object")
  }
  el_type <- x[["t"]]
  # Locate the Attr child position for this element type.
  children <- ast_types[[el_type]]$children
  attr_pos <- which(children == "Attr")
  if (length(attr_pos) == 0) {
    stop("Block or Inline Element with type ", el_type,
         " does not have a child with type Attr.")
  }
  # Attr's third slot holds the key-value pairs.
  namify(x$c[[attr_pos]][[3]])
}
|
7e4b8d1f94385a2860a0753806578c9c79db3910
|
8d43decd9ab0411d01e45a539315beef646a9c73
|
/tests/testthat/test-cache.R
|
0ae65c2b8b67e1a6448f9b8827f38cdd9b06dd1d
|
[
"MIT"
] |
permissive
|
robertzk/cachemeifyoucan
|
2137fbaba0c5f21c8ba671815d444af7682983aa
|
7ce4915224d10526c2efdae41c692116304caba8
|
refs/heads/master
| 2021-01-17T09:29:27.531909
| 2016-05-11T18:07:08
| 2016-05-11T18:07:08
| 23,096,482
| 8
| 9
| null | 2017-09-12T20:01:06
| 2014-08-19T04:11:40
|
R
|
UTF-8
|
R
| false
| false
| 4,820
|
r
|
test-cache.R
|
# Tests for the cache() wrapper: verifies that results are written to and read
# back from a database-backed cache, covering NA blacklisting, partial hits,
# row re-ordering, big tables, non-numeric keys, and forced re-population.
context('cache function')
# Set up test fixture
# Set up local database for now
# https://github.com/hadley/dplyr/blob/master/vignettes/notes/postgres-setup.Rmd
# NOTE(review): db_test_that(), expect_cached(), cached_fcn, batch_data(),
# batch_huge_data(), expect_almost_equal() and without_rownames() are not
# defined in this file -- presumably they live in the testthat helper files;
# confirm there for the exact caching semantics each test relies on.
describe("cache function", {
db_test_that("blacklisting NAs retains caching when values are NA", {
expect_cached(blacklist = list(NA), {
df_ref <- batch_data(1:5, na = TRUE)
df_cached <- cached_fcn(key = 1:5, model_version, type, na = TRUE)
# Rows with a single NA value should have been cached.
df_cached <- cached_fcn(key = 1:5, model_version, type)
expect_true(any(is.na(df_cached)))
})
})
db_test_that("blacklisting NAs does not retain caching when all rows are NA", {
expect_cached(blacklist = list(NA), {
df_ref <- batch_data(1:5)
df_cached <- cached_fcn(key = 1:5, model_version, type, na = "all")
# Rows with all NA value should not have been cached.
df_cached <- cached_fcn(key = 1:5, model_version, type)
expect_false(any(is.na(df_cached)))
})
})
db_test_that("calling the cached function for the first time populated a new table", {
# First remove all tables in the local database.
expect_cached({
df_ref <- batch_data(1:5)
df_cached <- cached_fcn(key = 1:5, model_version, type)
})
})
db_test_that("We can cache big tables", {
# Wide/large result sets must survive a cache round-trip with their
# dimensions intact.
cached_fcn <- cache(batch_huge_data, key = c(key = "id"), c("version"), con = test_con, prefix = "huge_data")
lapply(list(1:10, 1:20), function(ids) {
# Populate the cache and make sure that the results are equal
expect_equal(dim(bd <- batch_huge_data(ids)), dim(cached_fcn(ids)))
tmp <- cached_fcn(ids)
# And the results are still correct
expect_equal(dim(bd), dim(tmp))
})
# And now everything is so cached
tmp <- cached_fcn(1:20)
})
db_test_that("retrieving partial result from cache works", {
expect_cached({
df_ref <- batch_data(1:5)
cached_fcn(key = 1:5, model_version, type)
# A single-key lookup should be served from the rows cached above.
expect_almost_equal(df_ref[1, ], cached_fcn(key = 1, model_version, type))
})
})
db_test_that("attempting to populate a new row with a different value fails due to cache hit", {
expect_cached({
df_ref <- batch_data(1:5, switch = TRUE, flip = 4:5)
cached_fcn(key = 1:5, model_version, type, switch = TRUE, flip = 4:5)
cached_fcn(key = 4, model_version, type)
cached_df <- cached_fcn(1:5, switch = TRUE, flip = 4:5)
})
})
db_test_that("appending partially overlapped table adds to cache", {
expect_cached({
df_ref <- batch_data(1:5, model_version, type, switch = TRUE, flip = 1)
df_ref <- rbind(df_ref, batch_data(6, model_version, type))
cached_fcn(key = 1:5, model_version, type, switch = TRUE, flip = 1)
cached_fcn(key = 5:6, model_version, type)
})
})
db_test_that("re-arranging in the correct order happens when using the cache", {
expect_cached({
df_ref <- batch_data(1:5, model_version, type)
cached_fcn(key = 1:5, model_version, type)
# Keys requested in reverse order must come back in request order, not
# in the order the cache stored them.
expect_almost_equal(without_rownames(df_ref[5:1, ]),
without_rownames(cached_fcn(key = 5:1, model_version, type)))
}, no_check = TRUE)
})
db_test_that("re-arranging in the correct order happens when using the cache with partially new results", {
expect_cached({
df_ref <- batch_data(1:5, model_version, type)
cached_fcn(key = 1:3, model_version, type)
expect_almost_equal(without_rownames(df_ref[5:1, ]),
without_rownames(cached_fcn(key = 5:1, model_version, type)))
}, no_check = TRUE)
})
db_test_that("non-numeric primary keys are supported", {
expect_cached({
df_ref <- batch_data(letters[1:5])
cached_fcn(key = letters[1:5], model_version, type)
expect_almost_equal(df_ref[1, ], cached_fcn(key = 'a', model_version, type))
})
})
db_test_that("if bind_rows doesn't work, rbind.fill will be used", {
# Force the primary row-binding path to fail so the fallback is exercised.
with_mock(`dplyr::bind_rows` = function(...) stop("bind_rows is broken!"), {
expect_cached({
df_ref <- batch_data(1:5)
df_cached <- cached_fcn(key = 1:5, model_version, type)
})
})
})
db_test_that("the force. parameter triggers cache re-population", {
# First remove all tables in the local database.
expect_cached({
df_ref <- batch_data(1:5)
# Stub the write layer so any attempt to touch the cache errors loudly.
testthatsomemore::package_stub("cachemeifyoucan", "write_data_safely", function(...) {
stop("Caching layer should not be used")
}, {
expect_error(df_cached <- cached_fcn(key = 1:5, model_version, type, force. = FALSE),
"Caching layer should not be used")
})
df_cached <- cached_fcn(key = 1:5, model_version, type, force. = TRUE)
})
})
})
|
14ded03659fce5bfb105664d2a515ee1b83bf8d9
|
e1373ea202ba95aeb29cc31d42e23bf4463c147f
|
/01_geocoode_addresses.R
|
9b50d68a438e240b767ca487c9ccf5c739f29d57
|
[
"MIT"
] |
permissive
|
michaelqmaguire/geocoding-with-r
|
a7a70c7004e125a570a0a4550f141f9836d0847f
|
e639e1ea4807ba91f46e0bc1da645d38ca19d2c2
|
refs/heads/main
| 2023-05-05T14:26:31.239383
| 2021-05-26T14:58:43
| 2021-05-26T14:58:43
| 369,616,341
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,368
|
r
|
01_geocoode_addresses.R
|
# Geocode a hand-entered list of area medical facilities and plot them on a
# leaflet map.
library(tidyverse)
library(tidygeocoder)
# Raw facility list: one row per site, with city/state/zip packed into a
# single column as originally entered.
pharmacies <-
  tribble(
    ~ Name, ~ Address, ~ CityStateZip, ~ Phone,
    "Malcom Randall VA Medical Ctr", "1601 SW Archer Rd", "Gainesville,FL 32608", "(352)376-1611",
    "Shands Hospital-University FL", "1600 SW Archer Rd", "Gainesville,FL 32610", "(352)265-0111",
    "Tacachale", "1621 NE Waldo Rd", "Gainesville,FL 32609", "(352)955-5000",
    "North Florida Evaluation/Trtmnt", "1200 NE 55th Blvd", "Gainesville,FL 32641", "(352)375-8484",
    "Shands Rehabilitation Hospital", "4101 NW 89th Blvd", "Gainesville,FL 32606", "(352)265-5491",
    "Shands Vista", "4101 NW 89th Blvd", "Gainesville,FL 32606", "(352)265-5497",
    "N Florida Reg Med Ctr", "6500 W Newberry Rd", "Gainesville,FL 32605", "(352)333-4000",
    "Tri County Hospital Williston", "125 SW 7th St", "Williston,FL 32696", "(352)528-2801",
    "Reception & Medical Center", "7765 S County Road 231", "Lake Butler,FL 32054", "(386)496-6000",
    "Shands Starke Medical Center", "922 E Call St", "Starke,FL 32091", "(904)368-2300",
    "Lake Butler Hospital", "850 E Main St", "Lake Butler,FL 32054", "(386)496-2323",
    "Ocala Regional Medical Center", "1431 SW 1st Ave", "Ocala,FL 34471", "(352)401-1000",
    "Munroe Regional Medical Center", "1500 SW 1st Ave", "Ocala,FL 34471", "(352)351-7200"
  )
pharmacies
# Split "City,ST ZIP" into separate City / State / Zip columns so the
# geocoder can use its structured street/city/state/postalcode interface.
pharmaciesCSZSeparate <-
  pharmacies %>%
  tidyr::separate(
    data = .,
    col = CityStateZip,
    into = c("City", "StateZip"),
    sep = "\\,",
  ) %>%
  tidyr::separate(
    data = .,
    col = StateZip,
    into = c("State", "Zip"),
    sep = "\\s"
  )
pharmaciesCSZSeparate
# method = "cascade" tries the US Census geocoder first and falls back to
# OSM/Nominatim; adds lat and long columns to the data frame.
pharmaciesGeocoded <-
  pharmaciesCSZSeparate %>%
  tidygeocoder::geocode(
    .,
    street = Address,
    city = City,
    state = State,
    postalcode = Zip,
    method = "cascade"
  )
library(leaflet)
# FIX: addMarkers() already receives the map (and its attached data) through
# the pipe. The original call passed pharmaciesGeocoded again -- which landed
# in the lng argument -- and referenced bare long/lat symbols that do not
# exist outside the data frame. Use leaflet's formula interface, which
# evaluates ~long/~lat against the map's data.
leaflet(pharmaciesGeocoded) %>%
  addTiles() %>%
  addMarkers(lng = ~long, lat = ~lat)
# NOTE(review): disp_info is not defined anywhere in this script --
# presumably it was loaded interactively before this chunk was run; confirm
# before sourcing the file end-to-end.
disp_info_02 <-
  disp_info %>%
  mutate(
    id = 1:n(),
    state = "FL",
    single_address = paste0(address, ", ", city, ", ", state)
  ) %>%
  tidygeocoder::geocode(street = address, state = state, city = city, method = "cascade")
|
6b291b47090860e5e45616e7cf2139407d3d64b7
|
867b80d5b06016d0ac49856f99cc07318f41dc02
|
/label_mesa_data_with_analysis.R
|
dfd2b022ce05985820e08eed0cef88e535d5d284
|
[] |
no_license
|
sechilds/cirpa2017
|
51e39a70ecd6a05945e931cf1e3aa1a646477ac6
|
fb0d41c236f78b4a23dfdea789158d356fec878a
|
refs/heads/master
| 2021-07-16T19:02:36.219852
| 2017-10-22T22:46:08
| 2017-10-22T22:46:08
| 106,300,275
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,420
|
r
|
label_mesa_data_with_analysis.R
|
library(tidyverse)
library(labelled)
# work through and label the data from the mesa project
# list of useful variables for the workshop
# Naming scheme (inferred from the labels applied further down -- confirm
# against the MESA codebook): proi* = attitudes toward post-secondary
# education, inf* = social influences, enrl* = enrolment status, ad_* =
# student-aid administrative fields; _y1/_y2 suffixes denote the survey year.
workshop_vars = c('x_id',
'x_idregion_y1',
'proi1_y1',
'proi2_y1',
'proi3_y1',
'proi4_y1',
'proi5_y1',
'proi6_y1',
'proi7_y1',
'proi8_y1',
'inf1_y1',
'inf2_y1',
'inf3_y1',
'enrl1_y1',
'enrl_epi',
'enrl_epi2',
'ad_age',
'ad_gender',
'ad_field',
'ad_inst1',
'ad_need1',
'ad_need2',
'ad_inc1',
'ad_inc2',
'ad_award1',
'ad_award2',
'ad_unmet1',
'ad_unmet2',
'ad_inc3',
'ad_inc4',
'ad_loan1',
'ad_grant1',
'ad_asset1',
'ad_tuition1',
'ad_recip1',
'ad_ncb1',
'ad_cag1',
'ad_loan2',
'ad_bursary1',
'ad_loan3',
'ad_award3',
'enrl1_y2'
)
# Value-label lookup vectors used with labelled::add_value_labels() below.
# Five-point agreement scale; codes 6 and 7 are non-substantive responses.
agree_disagree = c(`Strongly Disagree` = 1,
`Somewhat Disagree` = 2,
`Neutral` = 3,
`Somewhat Agree` = 4,
`Strongly Agree` = 5,
`No Opinion` = 6,
`Refused` = 7)
# Province codes; these appear to follow Statistics Canada's standard
# geographic classification -- confirm against the codebook.
provinces = c(`Newfoundland and Labrador` = 10,
`Prince Edward Island` = 11,
`Nova Scotia` = 12,
`New Brunswick` = 13,
`Quebec` = 24,
`Ontario` = 35,
`Manitoba` = 46,
`Saskatchewan` = 47,
`Alberta` = 48,
`British Columbia` = 59,
`Yukon` = 60,
`Northwest Territories` = 61,
`Nunavut` = 62)
gender_lbl = c(`Female` = 0, `Male` = 1)
# NOTE(review): these two set_variable_labels() calls run BEFORE df is read in
# (read_csv() overwrites df immediately below), so any labels applied here are
# discarded -- and if df does not already exist in the session, they error.
# The sib2_y2 key also looks like a typo for sib2_y1 given the question text.
# Presumably leftover scratch code; confirm and remove.
df <- df %>% set_variable_labels(x_id = "Survey ID",
sib2_y2 = "Have any of these brothers or sisters attended university or community college?")
df <- df %>% set_variable_labels(x_id = "Survey ID",
sib1_y1 = "How many brothers and sisters do you have that are older or the same age as you? Include half-, step- and adoptive brothers and sisters",
sib2_y2 = "Have any of these brothers or sisters attended university or community college?",
sib3_y1 = "How many brothers and sisters do you have that are younger than you? Include half-, step and adoptive brothers and sisters",
par1_y1 = "Who were the parents or guardians that you lived with MOST of the time during HIGH SCHOOL? Was it")
# Read the raw year-1 survey extract.
df <- read_csv('data/cmsf-mesa-E-2005-08_F1.csv')
# Build the analysis file: attach question text as variable labels, attach
# value labels, then restrict to linked year-1 enrollees with a year-2
# enrolment response, keeping only the workshop variables.
df_clean <- df %>% set_variable_labels(x_id = "Survey ID",
cit1_y1 = "Are you a Canadian citizen?",
cit2_y1 = "Are you a laanded immigrant?",
ori1_y1 = "In what country were you born?",
ori2_y1 = "In what year did you come to Canada to live permanently?",
eth1_y1 = "People in Canada come from many different cultural or racial backgrounds. Could yo udescribe your background:",
lang1_y1 = "What language do you speak when you are at home with your parents?",
sib1_y1 = "How many brothers and sisters do you have that are older or the same age as you? Include half-, step- and adoptive brothers and sisters",
sib2_y1 = "Have any of these brothers or sisters attended university or community college?",
sib3_y1 = "How many brothers and sisters do you have that are younger than you? Include half-, step and adoptive brothers and sisters",
par1_y1 = "Who were the parents or guardians that you lived with MOST of the time during HIGH SCHOOL? Was it",
par2_y1 = "You indicated that you mostly lived with just one of your parents during high school. How frequently did you have contact with your other parent?",
paed1_y1 = "What was the highest level of education completed by your female guardian?",
paed2_y1 = "What was the highest level of education completed by your male guardian?",
dwel1_y1 = "Do you currently live...",
proi1_y1 = "Even if a person has to go deep into debt to get a PSE, it will still likely be worth it in the long run in terms of a better job and higher salary.",
proi2_y1 = "The time and money put into a PSE is a good investment in today's job market.",
proi3_y1 = "People who have a PSE get jobs that are much more satisfying.",
proi4_y1 = "The best way to get a prestigious job is through PSE.",
proi5_y1 = "I'm not sure that a PSE would pay off even in the long run, given how costly it is these days.",
proi6_y1 = "People would be better off putting their money into investments like real estate and the stock market than bothering with a PSE",
proi7_y1 = "You can learn enough about the real world without a PSE",
proi8_y1 = "Good jobs can be found without a PSE",
inf1_y1 = "Most of my friends think it's important to get PSE",
inf2_y1 = "My parents would be very disappointed in my if I didn't get a university or college degree",
inf3_y1 = "I have role models at home or at school that reprsent where I hope to go in my career based on my schooling",
enrl1_y1 = "Are you still enrolled in College or University?",
enrl_epi = "CIP Program Code",
enrl_epi2 = "Two Digit CIP Code",
ad_age = "Age",
ad_gender = "Gender",
ad_field = "Field of Study",
ad_inst1 = "Institution Code",
ad_need1 = "Value of Assessed Need",
ad_need2 = "Assessed Need - Total Millenium Bursary",
ad_inc1 = "Spousal Income",
ad_inc2 = "Parental Income",
ad_award1 = "Total Student Aid",
ad_award2 = "Total Aid with Bursary",
ad_unmet1 = "Unmet Need",
ad_unmet2 = "Unmet Need with Bursary",
ad_inc3 = "Reported Previous Year Income",
ad_inc4 = "Expected Work Income",
ad_loan1 = "Loan Amount",
ad_grant1 = "Grant Amount",
ad_asset1 = "Student Assets",
ad_tuition1 = "Tuition",
ad_recip1 = "Recipient status",
ad_ncb1 = "National Child Benefit",
ad_cag1 = "Canada Access Grant Status",
ad_loan2 = "Bursary loan remission",
ad_bursary1 = "Bursary cash award",
ad_loan3 = "Loan amount minus loan reduction",
ad_award3 = "Total award minus loan reduction",
enrl1_y2 = "Are you still enrolled in college or university (year 2)") %>%
# Attach the value-label vectors defined near the top of the script.
add_value_labels(proi1_y1 = agree_disagree,
proi2_y1 = agree_disagree,
proi3_y1 = agree_disagree,
proi4_y1 = agree_disagree,
proi5_y1 = agree_disagree,
proi6_y1 = agree_disagree,
proi7_y1 = agree_disagree,
proi8_y1 = agree_disagree,
inf1_y1 = agree_disagree,
inf2_y1 = agree_disagree,
inf3_y1 = agree_disagree,
x_idregion_y1 = provinces,
ad_gender = gender_lbl) %>%
# Treat missing link2_y1 as 1 and drop code 2 -- presumably a record-linkage
# or consent flag where 2 means not linked; confirm in the codebook.
mutate(link2_y1 = ifelse(is.na(link2_y1), 1, link2_y1)) %>%
filter(link2_y1!=2) %>%
filter(enrl1_y1==1) %>%
filter(!is.na(enrl1_y2)) %>%
select(one_of(workshop_vars))
# Build an 8-item "attitude to PSE" scale: items 1-4 are positively worded,
# items 5-8 negatively worded and therefore reverse-scored as (6 - item).
# NOTE(review): response codes 6 (No Opinion) and 7 (Refused) are not
# excluded first, so they leak into the sum (reverse-scoring yields 0 or -1
# for them) -- confirm this is intended.
df %>% mutate(proi_scale = proi1_y1 +
proi2_y1 +
proi3_y1 +
proi4_y1 +
(6 - proi5_y1) +
(6 - proi6_y1) +
(6 - proi7_y1) +
(6 - proi8_y1)) %>%
select(proi_scale) %>%
summary()
# Add the same scale to the cleaned data; value labels are stripped so the
# sum behaves as a plain numeric score.
df_clean <- df_clean %>% mutate(proi_scale = proi1_y1 +
proi2_y1 +
proi3_y1 +
proi4_y1 +
(6 - proi5_y1) +
(6 - proi6_y1) +
(6 - proi7_y1) +
(6 - proi8_y1)) %>%
set_value_labels(proi_scale = NULL) %>%
set_variable_labels(proi_scale = 'Attitude to PSE Scale')
# Quick visual audit of the cleaned file.
# NOTE(review): vis_dat() comes from the visdat package and model_matrix()
# from modelr; neither is attached above -- confirm they are loaded in the
# session before running this section.
vis_dat(df_clean)
df_clean %>%
group_by(enrl1_y2) %>%
tally()
df_clean %>%
select(matches("inf[123]._y1"))
# NOTE(review): this mutate_at() call is missing the vars() wrapper used in
# the calls below -- as written it presumably errors; confirm.
df_clean %>%
mutate_at(matches("inf[123]{1}_y1"), as_factor)
df_clean %>%
mutate_at(vars(matches("inf[123]{1}_y1")), as_factor) %>%
model_matrix(enrl1_y1 ~ proi_scale + inf1_y1)
df_clean %>%
mutate_at(vars(matches("inf[123]{1}_y1")), as_factor) %>%
model_matrix(enrl1_y1 ~ proi_scale + inf1_y1)
model_matrix(df_clean, enrl1_y2 ~ proi_scale + as_factor(inf1_y1))
# Keep a factor-converted copy of the influence items for modelling below.
inf_factor <- df_clean %>%
mutate_at(vars(matches("inf[123]{1}_y1")), as_factor)
model_matrix(df, enrl1_y2 ~ proi_scale)
model_matrix(df, ~as_factor(proi1_y1))
# Linear model of year-2 enrolment on the first attitude item (as a factor).
mod1 <- lm(enrl1_y2 ~ as_factor(proi1_y1), data=inf_factor)
summary(mod1)
broom::tidy(mod1)
mod1 <- df_clean %>%
mutate_if(is.labelled, as_factor) %>%
lm(enrl1_y2 ~ ad_gender + proi_scale + x_idregion_y1, data=.)
summary(mod1)
# list the columns in the data frame
spec(df)
# have a look at the data for a categorical variable.
# generally these numeric ones don't have variable labels at all.
df %>%
group_by(lang1_y1) %>%
tally()
# So, in order to convert them to factors, you need to actually transform them.
# I will probably have to write some transformation code and set it up.
# This one is a 5 point likert scale
df %>%
group_by(proi1_y1) %>%
tally()
# These are the retention variables
df$enrl1_y1
df$enrl1_y2
df$enrl1_y3
# get the number of observations in my education priority scale & y2 enrolment
# I filter out the NAs because those are non-respondents to Y2
df %>%
filter(!is.na(enrl1_y2)) %>%
group_by(proi1_y1, enrl1_y2) %>%
tally()
# We do a similar thing just using the summarise function
df %>%
filter(!is.na(enrl1_y2)) %>%
filter(!is.na(proi1_y1)) %>%
group_by(proi1_y1, enrl1_y2) %>%
summarise(freq = n())
# then we turn that observation count into a percentage
# the trick here is that the data remain GROUPED after the group_by
# so the sum() function really looks at things within that group.
df %>%
filter(!is.na(enrl1_y2)) %>%
filter(!is.na(proi1_y1)) %>%
group_by(proi1_y1, enrl1_y2) %>%
summarise(freq = n()) %>%
mutate(freq = freq/sum(freq) * 100)
# Here's the same code with an ungroup() inserted,
# so we can look at the percentages over the whole thing.
df %>%
filter(!is.na(enrl1_y2)) %>%
filter(!is.na(proi1_y1)) %>%
group_by(proi1_y1, enrl1_y2) %>%
summarise(freq = n()) %>%
ungroup() %>%
mutate(freq = freq/sum(freq) * 100)
# Or we can just look at the proportion of returners in each group.
# it does look like there is a bit of a relationship there!
df %>%
filter(!is.na(enrl1_y2)) %>%
filter(!is.na(proi1_y1)) %>%
group_by(proi1_y1, enrl1_y2) %>%
summarise(freq = n()) %>%
mutate(freq = freq/sum(freq) * 100) %>%
filter(enrl1_y2==1) %>%
select(proi1_y1, freq)
# running some t-tests
# let's see if there are differences between returners and non-returners
# so let's put together two different vectors
returners <- df %>%
filter(enrl1_y2==1 & !is.na(proi1_y1)) %>%
select(proi1_y1)
nonreturners <- df %>%
filter(enrl1_y2==2 & !is.na(proi1_y1)) %>%
select(proi1_y1)
# so, these are tibbles
# so lets pop out the individual column
returners <- returners$proi1_y1
nonreturners <- nonreturners$proi1_y1
# if you try to get the means of these, it doesn't work
# until you convert them to just vectors.
mean(returners)
mean(nonreturners)
t.test(returners, nonreturners)
# you don't need to create separate vectors
# you can get dplyr to do that for you!!
# note the use of formula syntax AND
# the rest of it!
df %>%
filter(!is.na(enrl1_y2) & !is.na(proi1_y1)) %>%
t.test(proi1_y1 ~ enrl1_y2, data=.)
# The nice thing about the fact that you can pipe things into t.test
# is that now you can actually do that for particular groups of
# your data
# the problem is -- in order to aggregate the results, you need data frames
df %>%
filter(!is.na(enrl1_y2) & !is.na(proi1_y1)) %>%
group_by(x_idregion_y1) %>%
do(t.test(proi1_y1 ~ enrl1_y2, data=.))
# that's where the broom package comes in -- it turns your results into a data frame
df %>%
filter(!is.na(enrl1_y2) & !is.na(proi1_y1)) %>%
group_by(x_idregion_y1) %>%
do(broom::tidy(t.test(proi1_y1 ~ enrl1_y2, data=.)))
# Sanity check: count of year-1 enrollees with a year-2 response.
df %>%
filter(enrl1_y1==1 & !is.na(enrl1_y2)) %>%
dim()
|
615ef82b93b54ec62bb1603662e042b11056f218
|
97932fb906650536ff644f4b57e1b05a74695e1d
|
/R/fees.R
|
7f6ba3529105ffaee886cf424828f9ae90a69e1e
|
[] |
no_license
|
muschellij2/squareupr
|
350ed186d711182abfb6ad5dde0c068e5325ee29
|
37bf28750127235c09f7f57278faf484b04aac0d
|
refs/heads/master
| 2021-05-26T23:23:03.404648
| 2019-07-11T20:50:36
| 2019-07-11T20:50:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 435
|
r
|
fees.R
|
#' List Fees
#'
#' Retrieves every fee defined for a single location.
#'
#' @template location
#' @template verbose
#' @return \code{tbl_df} with one row per fee
#' @details Required permissions: \code{ITEMS_READ}
#' @examples
#' \dontrun{
#' my_fees <- sq_list_fees(location)
#' }
#' @export
sq_list_fees <- function(location, verbose=FALSE){
  # Thin wrapper around the shared v1 list endpoint helper.
  sq_list_generic_v1(
    endpoint = "fees",
    location = location,
    verbose = verbose
  )
}
#TODO
# Create Fee
# Update Fee
# Delete Fee
|
cd2eb4c7222f606ebdde1c4b80cd3c26f83cca97
|
95abece5eaa8ee7b85a680dc255cd7c657e693d4
|
/pileup/mpileup_wrapper.R
|
9aa0a9ebd145635f3ae28902ed51b0ff57c42cba
|
[] |
no_license
|
inambioinfo/MSK_LeukGen
|
49525d343a68715614da5e1c6537ab23fbbc0395
|
6d0f03a4b19d4257c9a7128dccbb09b17616838d
|
refs/heads/master
| 2020-03-14T13:30:32.020170
| 2016-04-12T19:56:50
| 2016-04-12T19:56:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,512
|
r
|
mpileup_wrapper.R
|
# R wrapper script for mpileup -> nucleotide frequencies
# mpileup_wrapper.R
# Author: Komal S Rathi
# Institute: Memorial Sloan Kettering Cancer Center
# Created: ##------ Fri Nov 6 14:27:25 2015 ------ #
# Last Modified: ##------ Wed Nov 11 16:48:00 2015 ------##
# Function: This script takes in a number of bam files and generates pileups for a given list of positions.
# The pileups are then converted to easy to read format of nucleotide frequencies.
# Usage:
# Rscript mpileup_wrapper.R <input_dir> <positions_file> <reference_fasta> <max_depth> <min_base_quality> <min_mapq> <include_deletions> <include_insertions>
# only the first three arguments are mandatory
# (argument 7 is include_deletions and argument 8 is include_insertions,
# matching the assignments below)
timestamp()
suppressMessages(library(Rsamtools, quietly = TRUE))
suppressMessages(library(reshape2, quietly = TRUE))
# Positional command-line arguments; the first three are required.
args <- commandArgs(trailingOnly = TRUE)
input_dir      <- toString(args[1])
positions_file <- toString(args[2])
fasta_file     <- toString(args[3])
# Optional arguments start from their defaults; each default is overridden
# only when the caller supplied enough positional arguments to reach it.
max_depth          <- 1000
min_base_quality   <- 0
min_mapq           <- 0
include_insertions <- FALSE
include_deletions  <- FALSE
if (length(args) >= 4) max_depth          <- as.integer(args[4])
if (length(args) >= 5) min_base_quality   <- as.integer(args[5])
if (length(args) >= 6) min_mapq           <- as.integer(args[6])
if (length(args) >= 7) include_deletions  <- as.logical(args[7])
if (length(args) >= 8) include_insertions <- as.logical(args[8])
# read positions file & bam files
# Positions file is headerless: V1 = chromosome, V2 = 1-based position.
positions_file <- read.delim(positions_file,header=F)
files <- list.files(path=input_dir, pattern="*.bam$", full.names=T, recursive=T)
# read fasta reference
# get reference base
# Pull the reference base at every requested single-base (start == end) range.
fasta_file <- FaFile(file=fasta_file)
refbase <- getSeq(fasta_file,GRanges(positions_file$V1,IRanges(start=as.numeric(positions_file$V2),end=as.numeric(positions_file$V2))))
refbase <- as.data.frame(refbase)$x
positions_file$REF <- refbase
# get pileup for each file
for(i in files){
print(paste("Processing...",basename(i),sep=''))
bamfile <- i
bf <- BamFile(bamfile)
# Restrict the pileup to the requested single-base positions.
param <- ScanBamParam(which=GRanges(positions_file$V1,IRanges(start=as.numeric(positions_file$V2),end=as.numeric(positions_file$V2))))
# change max depth, strand specification, various cut-offs
p_param <- PileupParam(distinguish_strand=TRUE,distinguish_nucleotides=TRUE,
max_depth=max_depth,include_deletions=include_deletions,
include_insertions=include_insertions,min_base_quality=min_base_quality,min_mapq=min_mapq)
# call pileup function
res <- pileup(bf, scanBamParam=param, pileupParam=p_param)
# get reference base
res <- merge(res,positions_file,by.x=c('seqnames','pos'),by.y=c('V1','V2'))
# process and write the output
# Cast to wide: one zero-filled count column per nucleotide/strand combo.
# NOTE(review): the grep('D', ...) / grep('[+-]', ...) calls below select
# columns by NAME, which relies on no nucleotide label containing 'D' --
# confirm against pileup()'s nucleotide levels when deletions/insertions
# are enabled.
results <- dcast(res,seqnames+pos+REF~nucleotide+strand,value.var="count",fill=0)
results$D <- apply(results[,4:ncol(results)],1,sum)
results$D_forward <- apply(results[,grep('[+]',colnames(results))],1,sum)
results$D_reverse <- apply(results[,grep('[-]',colnames(results))],1,sum)
results <- results[,c(1,2,3,grep('D',colnames(results)),grep('[+]',colnames(results)),grep('[-]',colnames(results)))]
colnames(results) <- sub('[+]','forward',colnames(results))
colnames(results) <- sub('[-]','reverse',colnames(results))
colnames(results)[1:2] <- c('CHR','POS')
# temporary file generation
# outfile <- sub('[.]bam','.out',i)
# One tab-delimited .out file per input bam, written to the working directory.
outfile <- sub('[.]bam','.out',sub('.*/','',i))
write.table(x = results, file = outfile, quote = F, row.names = F, sep = '\t')
}
print('Total time taken...')
time <- proc.time()
print(paste(time[[1]],'secs',sep=' '))
timestamp()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.