blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a4e72b6573ef5434ab13ab53c05c65a3ef510dd0 | 3042c6d7464ff109944848fd78c6e6f1b260afb4 | /enhancer_rna/base.r | a09d94f54d29e76a025b025d43aa39ec49e251fb | [
"MIT"
] | permissive | yuifu/Hayashi2018 | b425d84471bda21b9fed4869d39c2226ad6b89d1 | 11456678e6536aac72e35c48564baaa240f344a5 | refs/heads/master | 2021-05-11T12:33:54.863946 | 2018-01-16T09:00:45 | 2018-01-16T09:00:45 | 117,661,111 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,561 | r | base.r |
# path_rdata <- "products/_02_Rdata_NEB_es_enhancer_01_extend/rdata.Rdata"
# odir_base <- "products/_03_count_ngsplot_layered_NEB__es_enhancer_01"
# pathLabel
# experiment_name <- "_01_ngsplot_NEB__es_enhancer_01"
# Parse positional command-line arguments (exactly 7, in this order).
args=(commandArgs(TRUE))
path_rdata = args[1]       # .Rdata file providing the coverage arrays arr1 and arr2
odir_base = args[2]        # output directory (created below if absent)
pathLabel = args[3]        # tab-separated table with at least "label" and "group" columns
experiment_name = args[4]  # not used in this visible section -- consumed by sourced scripts? TODO confirm
metalabelPrefix = args[5]  # not used in this visible section -- consumed by sourced scripts? TODO confirm
pathSelectBed = args[6]    # BED (.bed) or tabular file selecting the enhancer rows to keep
pathOutliers = args[7]     # table of outlier samples to exclude (columns "label", "group")
#########################
timestamp()
library(data.table); library(dplyr); library(magrittr); library(dtplyr)
library(scales)
# Create the output directory (including parents) if it does not exist yet.
if(!file.exists(odir_base)){
  dir.create(odir_base, recursive = T)
}
# Columns of the coverage matrices to keep (positions 41..120).
selCol = 41:120
# Axis annotations; presumably consumed by the plotting helpers sourced
# below -- TODO confirm, they are not referenced in this visible section.
xl <- "Position relative to enhancer center (bp)"
yl <- "Averaged coverage"
at <- c(1, 20, 40.5, 60, 80)
lb <- c(-2000, -1000, 0, 1000, 2000)
# Read the sample label table and the outlier table (data.table syntax below).
dtLabel = fread(pathLabel, header = T)
dtOutlier = fread(pathOutliers, header=TRUE)
# For cell-cycle groups, the outlier labels carry a "_mcf_1" suffix in the
# label table, so append it before matching.
dtOutlier[group %in% c("G1", "S", "G2M"), label := paste0(label, "_mcf_1")]
# Drop outlier samples, then extract the remaining labels and groups.
dtLabel = dtLabel[! label %in% dtOutlier[, label]]
labels = dtLabel[, label]
groups <- dtLabel[, group]
groups_unique <- unique(groups)
##############
# Loads arr1 and arr2 (3-D coverage arrays) into the workspace.
load(path_rdata)
##############
# Plotting/analysis helpers used below (e.g. layeredFGPlotTrimNormalize).
source("scripts/_functionsDeepTools.r")
source("scripts/_functionsHeatmap.r")
##############
# arr1 dimensions, as established by the indexing below:
# [region, position, sample].  Keep only the retained samples ...
str(arr1)
arr1 = arr1[,,labels]
str(arr1)
gc(); gc();
# ... and only the selected position columns, for both foreground (arr1)
# and background (arr2) arrays.  str()/gc() calls log progress and free
# memory between the large subsetting copies.
str(arr1)
arr1 = arr1[,selCol,]
str(arr1)
gc(); gc();
str(arr2)
arr2 = arr2[,selCol,]
str(arr2)
gc(); gc();
##############
# Region selection: a .bed file has no header and region IDs in column 4
# (V4); any other file is read with a header and must provide an "Id" column.
if(grepl("\\.bed$", pathSelectBed)){
  dtSelectBed = fread(pathSelectBed, header = FALSE, sep = "\t")
  selRow = dtSelectBed[, V4]
}else{
  dtSelectBed = fread(pathSelectBed, header = TRUE, sep = "\t")
  selRow = dtSelectBed[, Id]
}
# Log array shape before and after restricting to the selected regions.
str(arr1[,,])
str(arr1[selRow,,])
######################
#
# for(trimRatio in c(0)){
# for(maxv in c(50, 100, 150, 200)){
# metalabel = sprintf("%s_heatmap_%.2f_%.2f", "timeSeries", trimRatio, maxv)
# heatmapTrimFromArray(arr1[selRow,,], c("00h", "12h", "24h", "48h", "72h"), metalabel, top = trimRatio, max_value = maxv)
# metalabel = sprintf("%s_heatmap_bg_%.2f_%.2f", "timeSeries", trimRatio, maxv)
# heatmapTrimFromArray(arr2, c("00h", "12h", "24h", "48h", "72h"), metalabel, top = trimRatio, max_value = maxv)
# }
# }
# for(trimRatio in c(0)){
# for(maxv in c(50, 100, 150, 200)){
# metalabel = sprintf("%s_heatmap_%.2f_%.2f", "esCellCycle", trimRatio, maxv)
# heatmapTrimFromArray(arr1[selRow,,], c("G1", "S", "G2M", "AvgG1", "10pg"), metalabel, top = trimRatio, max_value = maxv)
# metalabel = sprintf("%s_heatmap_bg_%.2f_%.2f", "esCellCycle", trimRatio, maxv)
# heatmapTrimFromArray(arr2, c("G1", "S", "G2M", "AvgG1", "10pg"), metalabel, top = trimRatio, max_value = maxv)
# }
# }
######################
# time series
# for(trimRatio in c(1, 0)){
# metalabel = sprintf("%s_ratio_%.2f", "timeSeries", trimRatio)
# layeredRatioPlotTrim(arr1[selRow,,], arr2, c("00h", "12h", "24h", "48h", "72h"), metalabel, top = trimRatio)
# }
# for(trimRatio in c(1, 0)){
# metalabel = sprintf("%s_fg_%.2f", "timeSeries", trimRatio)
# layeredFGPlotTrim(arr1[selRow,,], arr2, c("00h", "12h", "24h", "48h", "72h"), metalabel, top = trimRatio)
# }
# Time-series normalized foreground plots: one plot per combination of
# trimming ratio and expression-top ratio.
for(trimRatio in c(1, 0)){
  for(expTopRatio in c(0.5, 0.6, 0.7, 0.75, 0.8, 0.9, 1.0)){
    # Tag embedded in output names, e.g. "timeSeries_fgNorm_1.00_0.50".
    metalabel = sprintf("%s_fgNorm_%.2f_%.2f", "timeSeries", trimRatio, expTopRatio)
    # layeredFGPlotTrimNormalize() is defined in one of the two scripts
    # sourced above -- TODO confirm which; it receives the selected-region
    # foreground array, the background array, and the time-point group names.
    layeredFGPlotTrimNormalize(arr1[selRow,,], arr2, c("00h", "12h", "24h", "48h", "72h"), metalabel, top = trimRatio, expTopRatio=expTopRatio)
  }
}
# for(trimRatio in c(1, 0)){
# metalabel = sprintf("%s_fracCount_%.2f", "timeSeries", trimRatio)
# layeredFracCountPlotTrim(arr1[selRow,,], c("00h", "12h", "24h", "48h", "72h"), metalabel, top = trimRatio)
# # metalabel = sprintf("%s_fracCount_%.2f_neg", "timeSeries", trimRatio)
# # layeredFracCountPlotTrim(arr1[selRow,,], c("00hminus", "12hminus", "24hminus", "48hminus", "72hminus"), metalabel, top = trimRatio)
# }
# for(trimRatio in c(1, 0)){
# metalabel = sprintf("%s_count_%.2f", "timeSeries", trimRatio)
# layeredCountPlotTrim(arr1[selRow,,], c("00h", "12h", "24h", "48h", "72h"), metalabel, top = trimRatio)
# # metalabel = sprintf("%s_count_%.2f_neg", "timeSeries", trimRatio)
# # layeredCountPlotTrim(arr1[selRow,,], c("00hminus", "12hminus", "24hminus", "48hminus", "72hminus"), metalabel, top = trimRatio)
# }
# ######################
# # ES cell cycle
# for(trimRatio in c(1, 0)){
# metalabel = sprintf("%s_ratio_%.2f", "esCellCycle", trimRatio)
# layeredRatioPlotTrim(arr1[selRow,,], arr2, c("G1", "S", "G2M", "AvgG1", "10pg"), metalabel, top = trimRatio)
# }
# for(trimRatio in c(1, 0)){
# metalabel = sprintf("%s_fg_%.2f", "esCellCycle", trimRatio)
# layeredFGPlotTrim(arr1[selRow,,], arr2, c("G1", "S", "G2M", "AvgG1", "10pg"), metalabel, top = trimRatio)
# }
# for(trimRatio in c(1, 0)){
# metalabel = sprintf("%s_fracCount_%.2f", "esCellCycle", trimRatio)
# layeredFracCountPlotTrim(arr1[selRow,,], c("G1", "S", "G2M", "AvgG1", "10pg"), metalabel, top = trimRatio)
# # metalabel = sprintf("%s_fracCount_%.2f_neg", "esCellCycle", trimRatio)
# # layeredFracCountPlotTrim(arr1[selRow,,], c("G1minus", "G2Mminus", "Sminus", "Ntc"), metalabel, top = trimRatio)
# }
# for(trimRatio in c(1, 0)){
# metalabel = sprintf("%s_count_%.2f", "esCellCycle", trimRatio)
# layeredCountPlotTrim(arr1[selRow,,], c("G1", "S", "G2M", "AvgG1", "10pg"), metalabel, top = trimRatio)
# # metalabel = sprintf("%s_count_%.2f_neg", "esCellCycle", trimRatio)
# # layeredCountPlotTrim(arr1[selRow,,], c("G1minus", "G2Mminus", "Sminus", "Ntc"), metalabel, top = trimRatio)
# }
#####################
sessionInfo()
timestamp()
|
8db37797a96014924d15c87343e0d78734b40fad | c115602521d415d90914c31c3a7b160964e7d0fd | /man/getConfMatrix.Rd | 49a9b71818ca3ce687acfd15dc9cb24dbdf11a06 | [] | no_license | praneesh12/mlr | 295b5aefa72b8c56b070ac768de828d044ce0087 | 6069b98e8edb79148c005af08037ef89ea0d7cd0 | refs/heads/master | 2021-04-29T23:04:30.865508 | 2018-02-13T22:05:00 | 2018-02-13T22:05:00 | 121,545,775 | 1 | 0 | null | 2018-02-14T18:28:00 | 2018-02-14T18:28:00 | null | UTF-8 | R | false | true | 1,332 | rd | getConfMatrix.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getConfMatrix.R
\name{getConfMatrix}
\alias{getConfMatrix}
\title{Confusion matrix.}
\usage{
getConfMatrix(pred, relative = FALSE)
}
\arguments{
\item{pred}{([Prediction])\cr
Prediction object.}
\item{relative}{(\code{logical(1)})\cr
If \code{TRUE} rows are normalized to show relative frequencies.
Default is \code{FALSE}.}
}
\value{
(\link{matrix}). A confusion matrix.
}
\description{
\code{getConfMatrix} is deprecated. Please use \link{calculateConfusionMatrix}.
Calculates confusion matrix for (possibly resampled) prediction.
Rows indicate true classes, columns predicted classes.
The marginal elements count the number of classification
errors for the respective row or column, i.e., the number of errors
when you condition on the corresponding true (rows) or predicted
(columns) class. The last element in the margin diagonal
displays the total amount of errors.
Note that for resampling no further aggregation is currently performed.
All predictions on all test sets are joined to a vector yhat, as are all labels
joined to a vector y. Then yhat is simply tabulated vs y, as if both were computed on
a single test set. This probably mainly makes sense when cross-validation is used for resampling.
}
\seealso{
\link{predict.WrappedModel}
}
|
b793b749d39485b65fb798319925fc51b7762e2f | 3a7a307ccbff0706bc0f3ed8411f289484eb9df5 | /Curso_R_Aula_1/Aula_1.R | 96a3341bf40b291bc3c76152df1d96b8f7039a55 | [] | no_license | vanderfsouza/cursor.github.io | 65150da7ec88eea8e3de30f9073923d0be1c722b | 461345b4fa35456ee0bd537b189e17607e3200d2 | refs/heads/master | 2020-04-09T20:56:53.980862 | 2019-04-27T14:12:12 | 2019-04-27T14:12:12 | 160,588,448 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,675 | r | Aula_1.R | ########################################
## ##
## INTRODUÇÃO AO PROGRAMA ESTATÍSCO R ##
## ##
########################################
#### Limpar Banco de Dados do R ####
ls()
rm(list=ls())
ls()
##########################
#### Operações Básicas #####
# DICAS:
# use '#' para realizar comentários. O que é escrito após o '#' não é "lido" como comando pelo R.
# abuse dos comentários, um código brilhante não tem utilidade se você não lembrar para que servem os comandos!
4+3
7-3
7*3
7^2; 7**2 # mesmo resultado (PONTO E VIRGULA TERMINA UM COMANDO)
7/2
7%/%2 #divisão de inteiro
1:10
##
x<-7/3
x
x<-round(x,3)
x
sqrt(9) #raiz quadrada
factorial(4)#fatorial
0/0 ### NaN (not a number)
a<- 5; b=5 #ATRIBUIÇÃO!
a; b
a+b #não armazena na memória
a*b
ab<-a+b #objeto criado e armazenado
ab
(axb<-a*b) ##
axb
exemplo<-1
Exemplo<-2
EXEMPLO<-3
exemplo<-4 #sobreposição
ls() # observar workspace
rm(exemplo) # remoção de objeto
ls() # observar workspace
rm(list=ls()) #remover todos os objetos
#### Indentação ####
abc<- (5+3 ### "+" demanda um complemento no código
+4+5)
def<-(3*11+(4-2+
22-11)-
1+100-21-10+1)
def
### Gerar sequencias:
x<-seq(1, 5, 0.5) #começa em 1 e vai até 5, variando em 0.5
x
y<-seq(length=9, from=1, to=5)
y
z<-c(1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5) #concatenar
z
y <- c("A","B","C")
y
rep(1, 30)
rep(1:3, each =10)
rep(1:3, times =10)
##### Estrutura de Dados #####
x <- 10:15
x
y<- c("W","Y","K")
y
g<-c(TRUE,TRUE,FALSE)
g
## Atributos.
class(x) ;class(y); class(g)
mode(x); mode(y) # The (Storage) Mode of an Object
# types "integer" and "double" are returned as "numeric".
# types "special" and "builtin" are returned as "function".
# type "symbol" is called mode "name".
typeof(x)
length(x)
str(x)
attributes(x)
is.vector(x)
is.numeric(x)
is.integer(x)
is.factor(x) # O que é um fator?
x[3:5]
x[-2]
x[c(2,4)]
x[c(2,4)]<-30
x
## Vetor nomeado.
x <- c(Flamengo=7.8, Atletico=4, Santos=8.5, Cruzeiro=10, Democrata=7.1)
x
## Atributos.
class(x)
mode(x)
typeof(x) #double (tem casa decimais)
length(x)
str(x)
attributes(x)
is.vector(x)
is.numeric(x)
is.integer(x)
is.double(x)
x[3:5]
x[-4]
x[c(2,4)]
x[-2]
x["Atletico"]
x[c("Atletico","Cruzeiro")]
##Matriz
## Um arranjo de duas dimensões.
X <- matrix(1:9, nrow=3, ncol=3)
X
class(X)
mode(X)
typeof(X) # tipo de armazenamento é inteiro (integer)
dim(X) ## ncol(X) e nrow(X).
str(X)
attributes(X)
is.matrix(X)
is.array(X)
is.numeric(X)
is.integer(X)
## Como indexar
X[1,]
X[1:2,]
X[-2,]
X[,-2]
X[,c(3,2,1)]
X[3:1,c(3,2,1)]
###########
## Um arranjo de 3 dimensões (cúbico).
X <- array(1:24, dim=c(3,4,2))
X
# uma matriz 3 por 4 na frente e outra atraz
class(X)
mode(X)
typeof(X)
dim(X) ## ncol(X) e nrow(X).
str(X)
attributes(X)
is.matrix(X)
is.array(X)
is.numeric(X)
is.integer(X)
##Como indexar
X
X[1,,] # fixa a primeira linha das duas paginas
X[,1,] # fixa a primeira coluna das duas paginas
X[,,1] # fixa a primeira pagina
######
## Exercícios
##
# 1) Crie 3 objetos com as respectivas sequências:
# a) 1,2,3,4,5,6,7,8,9,10
# b) A,B,C,D,E,F
# c) 10,20,30,40,50
#
# 1.1) Qual o tipo da cada objeto?
# 1.2) Transforme o vetor b em um fator
# 1.3) Acesse o quarto elemento do conjunto b.
# 1.4) Usando a função length, obtenha o tamanho de cada um dos vetores
# 1.5) Crie um objeto D com os vetores a b e c.
#
# 2) Crie um vetor de tamanho 100, sendo uma sequência de 1 a 10 repetida 10 vezes.
# Dica use a função rep().
rm(list=ls())
ls()
#### Data Frame ####
brasileiro <- data.frame(id=1:4,
times=c("Atlético", "Cruzeiro", "São Paulo","Internacional"),
pontos=c(45,80,52,50),
sg=c(6,26,12,9))
class(brasileiro)
mode(brasileiro)
typeof(brasileiro) # tipo de conteudo
dim(brasileiro) ## ncol() e nrow().
str(brasileiro)
attributes(brasileiro)
is.data.frame(brasileiro)
is.list(brasileiro)
##como indexar
brasileiro[1,]
brasileiro[,2]
brasileiro[,"pontos"]
brasileiro[,c("pontos", "sg")]
brasileiro[1:3,c("pontos", "sg")]
brasileiro$pontos
#### Lista ####
X <- array(1:24, dim=c(3,4,2))
X
##
x<-1:20
x
L <- list(item1=x,
item2=X,
item3=brasileiro)
class(L)
mode(L)
typeof(L)
length(L) #mostra o numero de itens (cada item tem um objeto diferente)
str(L)
attributes(L)
is.data.frame(L)
is.list(L)
##como indexar
L$item1
L[[1]]
L$item3
L[["item3"]]
L[["item3"]][,3]
L[["item3"]][1,3]
max(L[["item3"]][,3])
L$item3[,"pontos"]
L[-2] # exlcuir o segundo item da lista
#######################
#### Funções ####
## Mostra informações da sessão, versões e pacotes.
sessionInfo()
## Mais informações.
cbind(Sys.info())
# Criando Funções
# Arithmetic mean of two numbers; both arguments default to 0,
# so media() returns 0.
media <- function(x = 0, y = 0) {
  (x + y) / 2
}
media(9,-10)
media()
# Classify a single number by sign, printing (and invisibly returning)
# one of "nulo", "negativo" or "positivo".
negativo_ou_positivo <- function(x) {
  classificacao <- if (x < 0) {
    "negativo"
  } else if (x > 0) {
    "positivo"
  } else {
    "nulo"
  }
  print(classificacao)
}
negativo_ou_positivo(3)
negativo_ou_positivo(-3)
negativo_ou_positivo(0)
negativo_ou_positivo(media(20,-30))
# Generate a number of the requested sign.
#
# valor: one of "nulo" (returns 0), "positivo" (uniform draw in [1, 10])
#        or "negativo" (uniform draw in [-10, -1]).  Default "nulo".
#
# Bug fix: the original if/else chain silently returned an invisible NULL
# for any unrecognized `valor`; match.arg() now raises a clear error
# instead (and, as a bonus, accepts unambiguous partial matches).
gerar_numero <- function(valor = "nulo") {
  valor <- match.arg(valor, c("nulo", "positivo", "negativo"))
  switch(valor,
    nulo     = 0,
    positivo = runif(n = 1, min = 1, max = 10),
    negativo = -runif(n = 1, min = 1, max = 10)
  )
}
gerar_numero("positivo")
gerar_numero("negativo")
# Print the whole-number steps between menor_numero and maior_numero,
# one per line: ascending when crescente is TRUE (default), descending
# otherwise.  Returns NULL invisibly; prints nothing for an empty range.
#
# Improvements over the original: TRUE instead of the T shorthand,
# plain `if (crescente)` instead of `crescente == TRUE` comparisons, and
# a single seq()-driven loop instead of two duplicated while loops that
# mutated their own arguments.
sequencia <- function(menor_numero, maior_numero, crescente = TRUE) {
  if (menor_numero > maior_numero) {
    # Empty range: the original while conditions never fired either.
    return(invisible(NULL))
  }
  valores <- if (crescente) {
    seq(menor_numero, maior_numero)
  } else {
    seq(maior_numero, menor_numero)
  }
  for (valor in valores) {
    print(valor)
  }
  invisible(NULL)
}
sequencia(1,10,crescente = F)
sequencia(1,10,F)
# Obs.: Falar sobre pacotes
###################################
#### Carregando Banco de Dados ####
##Exemplo de milho pipoca
## Mostra o diretório de trabalho.
getwd()
setwd("C:\\Users\\Desktop\\...")
getwd()
## Mostra os arquivos dentro no diretório.
list.files()
####Importar arquivo de dados
pipoca<-read.table("pipoca.txt", header=TRUE, dec=".")
str(pipoca)
####
pipoca<-read.table("pipoca.txt", header=TRUE, dec=",", na.string="-", sep="\t")
str(pipoca)
pipoca$CE
mean(pipoca$CE)
mean(pipoca$CE, na.rm=T)
sd(pipoca$CE, na.rm=T)
pipoca1<-na.omit(pipoca)
## Selecionando as pipocas com CE maior que 30.
sel <- pipoca$CE>200; sel
pipoca[sel,]
## Selecionando as pipocas com CE maior que 200
#com peso maior ou igual a 70.
sel <- pipoca$CE>200 & pipoca$peso>=70
sel
pipoca[sel,]
## O operador ! inverte um vetor lógico.
!sel # como se multiplicar por -1
pipoca[!sel,]
sel <- pipoca$CE<200 & pipoca$peso<=70; sel
all(pipoca$CE>100) #todos
any(pipoca$CE<10) # pelo menos 1
any(pipoca$CE>10)
sum(pipoca$CE>200)
which(pipoca$CE>100)
############################
#### Conversão de Dados ####
## o que é um fator?
apropos("^as\\.") # lista de funções q fazem conversões
# a função apropos lista as funções (mais para os pacotes que já foram carregados)
#################
#### Atenção ####
## Nunca converter fator em numerico diretamente
# Factor==>character
# character==>numeric
## Conversão.
##################
## Salva a imagem (objetos criados) para serem carregados no futuro ou
## compartilhados.
save.image("Embrapa.RData")
rm(list=ls());ls()
## Carrega sessão.
load("Embrapa.RData")
ls() #carrega os objetos que ficou na memoria
##################
###### HELP ######
##################
#RSiteSearch ("qtls")# procura no site
#auto-completar + help
help(rep)
?rep
rep ##TAB + F1
??rep #lista pacote e função
#internet [R]
citation()
######
## Exercícios
## 3) Importar banco de dados "phosphorus.txt"...
# a) Qual a classe desse banco de dados?
# b) Quais os nomes das variáveis presentes nesse arquivo?
# c) Qual é o tipo de cada variável?
# d) Qual o valor da linha 5 e coluna 3?
# e) Transforme a coluna 2 em caracter.
# f) Transforme em numérico novamente.
#
## 4) Importar banco de dados "phosphorus.txt"...
# a) Qual a média de cada genotipo para cada variavel resposta?
# b) Subistituir os valores de zero na coluna 2 por 30.
##
|
8c8521674b73b6f3b5b76bb70b81cc89eaf852ac | e330c841b3f4e3f726dbc7e91bbb886d6186fc08 | /STUDY_INFO.R | e920f8f1fe14e3d58f545d4e1e7b87e2f66f097f | [] | no_license | crcox/SatelliteLearning | 64058c2ed933574da34c314b343a7cb3a4e5dba6 | 41080071612ead6555230ce1b4596e5930285b4c | refs/heads/master | 2020-05-30T00:26:50.687379 | 2015-03-08T23:48:42 | 2015-03-08T23:48:42 | 31,185,412 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 263 | r | STUDY_INFO.R | STUDY_INFO = list(
datadir="Spring2015/data"
# datadir="/Volumes/Data/Experiments/Behavioral/satelliteLearning/Spring2015/data"
#datadir="/mnt/RogersLab/Experiments/Behavioral/satelliteLearning/Spring2015/data"
)
save(STUDY_INFO, file='STUDY_INFO.Rdata')
|
6c45896445938aed7e73b2979506040678f11633 | 0ff06478c18026955ebf512cd3dcaef7293e1c30 | /R/hanabiPlot.R | 8998e18c30f07eb5c9041af58864db36e8c869ab | [
"CC0-1.0"
] | permissive | charles-plessy/smallCAGEqc | 83d19b21890eed9455eaca13c87455bd53f45950 | e3642f25b43314779c33388129b5d47a5a1538ec | refs/heads/master | 2021-03-13T01:36:47.956099 | 2018-01-25T04:27:20 | 2018-01-25T04:27:20 | 34,089,765 | 1 | 1 | null | 2017-03-22T05:47:31 | 2015-04-17T01:24:16 | R | UTF-8 | R | false | false | 8,294 | r | hanabiPlot.R | #' hanabi
#'
#' Rarefy data at multiple sample sizes, in preparation for plotting.
#'
#' The computation can be long, so the steps of rarefaction and plotting
#' are kept separate.
#'
#' @details The input must be a data frame, or a vector or a matrix, which
#' will be coerced into a matrix. The data must be counts (tag
#' counts, molecule counts, ...).
#'
#' @param expr_data An expression table where columns are samples and rows
#' are features such as genes, TSS, etc, or a vector of counts.
#' @param npoints The maximum number of rarefactions per sample.
#' @param step Subsample sizes are calculated by taking the largest sample
#' and multiplying it by the step "npoints" times.
#' @param from Add one sample size (typically "0") in order to extend the
#' plot on the left-hand side.
#'
#' @return A list-based object of class "hanabi".
#'
#' @family Hanabi functions
#' @seealso `[.hanabi`, as.list.hanabi, and vegan::rarecurve.
#'
#' @importFrom vegan rarefy
#' @export hanabi
#'
#' @examples
#'
#' bedFiles <- system.file(package = "smallCAGEqc", "extdata") %>%
#' list.files("*BED", full.names = TRUE)
#' bed <- loadBED12(bedFiles)
#' rar <- tapply(bed$score, bed$library, hanabi, from = 0) %>%
#' structure(class = "hanabi") # tapply discards the class !
#' hanabiPlot(rar, GROUP = levels(bed$library))
hanabi <- function( expr_data
                  , npoints = 20
                  , step = 0.75
                  , from = NULL) {
  # Coerce the input to a data frame: bare integer vectors and matrices are
  # accepted, anything else that is not already a data frame is rejected.
  # NOTE(review): a plain double vector passes neither test and hits the
  # stop() below -- confirm whether that is intended.
  if (is.null(dim(expr_data)) & is.integer(expr_data))
    expr_data %<>% data.frame
  if (is.matrix(expr_data)) expr_data %<>% data.frame
  if (! is.data.frame(expr_data)) stop("Input must be a data frame.")
  # Geometric series of subsample sizes: the largest library size multiplied
  # by step^0 .. step^npoints, rounded to whole counts.
  ns <- step ^ (0:npoints)
  ns <- round(max(colSums(expr_data)) * ns)
  # Optionally append one extra size (typically 0) to extend the curves on
  # the left; sizes at or below "from" are dropped first.
  if (! is.null(from))
    ns <- c( ns[ns > from], from)
  # Rarefy one library (one column): keep only sizes below its own total,
  # prepend the total itself so each curve ends at its real depth, and
  # return plotting coordinates (x = subsample size, y = expected number of
  # detected features from vegan::rarefy).
  nraref <- function(lib) {
    ntags <- sum(lib)
    ns <- c(ntags, ns[ns < ntags])
    rarefy(lib, ns) %>% xy.coords(x = ns)
  }
  # One rarefaction curve per sample (column), wrapped in the "hanabi" class
  # so the plot/points/lines methods below dispatch on it.
  x <- lapply(expr_data, nraref)
  structure(x, class = "hanabi")
}
# Convert a hanabi object back to a plain list: the data is already a
# list of per-sample coordinates, so only the class attribute is dropped.
as.list.hanabi <- function(h) {
  unclass(h)
}
# Subset a hanabi object by sample index/name while keeping its class,
# so the result still dispatches to the hanabi plot methods.
`[.hanabi` <- function(h, i) {
  subset_list <- as.list(h)[i]
  class(subset_list) <- "hanabi"
  subset_list
}
#' points.hanabi
#'
#' Add a final point in hanabi plots.
#'
#' Will only add a point for the final, non-subsampled value of each
#' sample of in a hanabi object.
#'
#' @param h The hanabi object.
#' @param ... Other parameters passed to the generic points function
#'
#' @family Hanabi functions
#' @seealso hanabi, plot.hanabi
#'
#' @export points.hanabi lines.hanabi
# Add one point per sample at the tip of its rarefaction curve: the largest
# subsample size (x) and the corresponding number of detected features (y),
# i.e. the final, non-subsampled value.
#
# h:   a hanabi object (list of per-sample xy.coords).
# ...: forwarded to graphics::points (e.g. col, pch).
#
# vapply replaces the original sapply so the result is guaranteed to be a
# numeric vector even for zero-length input.
points.hanabi <- function(h, ...) {
  xmax <- vapply(h, function(sample) max(sample$x), numeric(1))
  ymax <- vapply(h, function(sample) max(sample$y), numeric(1))
  points(xmax, ymax, ...)
}
# Draw one rarefaction curve per sample by applying graphics::lines to each
# element of the hanabi list; the accumulated NULLs are returned invisibly
# since the method is used purely for its side effect.
lines.hanabi <- function(h, ...) {
  invisible(Map(lines, h, ...))
}
#' plot.hanabi
#'
#' Plotting Hanabi objects
#'
#' @param h The hanabi object to plot.
#' @param alpha The alpha transparency of the plot lines.
#' @param col A vector indicating a color per sample (or a vector that
#' can be recycled that way).
#' @param xlab Horizontal axis label.
#' @param ylab Vertical axis label.
#' @param main Plot title.
#' @param pch Plot character at the tip of the lines.
#' @param ... other arguments passed to the generic plot function.
#'
#' @family Hanabi functions
#' @seealso hanabi
#'
#' @export plot.hanabi
plot.hanabi <-
  function( h
          , alpha = 0.5
          , col = "black"
          , xlab = "Total counts"
          , ylab = "Unique features"
          , main = "Hanabi plot"
          , pch = 1
          , ...) {
    # Overall axis ranges across every sample's rarefaction curve.
    xmax <- sapply(h, function(x) max(x$x))
    xmin <- sapply(h, function(x) min(x$x))
    ymax <- sapply(h, function(x) max(x$y))
    ymin <- sapply(h, function(x) min(x$y))
    # Accessory function to make the lines a little transparent.
    # See https://gist.github.com/mages/5339689#file-add-alpha-r
    add.alpha <- function(col, alpha)
      apply( sapply(col, col2rgb) / 255
           , 2
           , function(x)
             rgb(x[1], x[2], x[3], alpha=alpha))
    # Empty frame spanning all curves; ... lets callers override par-style
    # graphics arguments.
    plot( c(min(xmin), max(xmax))
        , c(min(ymin), max(ymax))
        , type="n"
        , xlab = xlab
        , ylab = ylab
        , main = main
        , ...)
    # Semi-transparent curve per sample (dispatches to lines.hanabi) ...
    lines( h
         , col = add.alpha(col, alpha))
    # ... and an opaque endpoint marker per sample (points.hanabi), which
    # gives the plot its firework ("hanabi") look.
    points( h
          , col = col
          , pch = pch)
  }
#' hanabiPlot
#'
#' Plot feature discovery curves
#'
#' Plots the number of features (genes, transcripts, ...) detected for a
#' given number of counts (reads, unique molecules, ...). Each library is
#' sub-sampled by rarefaction at various sample sizes, picked to provide
#' enough points so that the curves look smooth. The final point is plotted
#' as an open circle, hence the name "hanabi", which means fireworks in
#' Japanese.
#'
#' The rarefactions take time to do, so this step is done by a separate
#' function, so that the result is easily cached.
#'
#' @param RAR A rarefaction table, or a hanabi object.
#' @param S A vector of subsample sizes.
#' @param GROUP A vector grouping the samples. Coerced to factor.
#' @param ... Further arguments to be passed to the first plot function,
#' that plots the empty frame.
#' @param legend.pos Position of the legend, passed as "x" parameter to the
#' "legend" function.
#' @param pch Plot character at the tip of the lines.
#' @param col A vector of colors
#'
#' @seealso vegan, plot.hanabi, hanabi
#'
#' @examples
#' \dontrun{
#' hanabi(genes, npoints = 20, step = 0.8, from = 0) %>% hanabiPlot
#' hanabi(genes, npoints = 20, step = 0.9) %>% hanabiPlot
#' }
#' bedFiles <- system.file(package = "smallCAGEqc", "extdata") %>%
#' list.files("*BED", full.names = TRUE)
#' bed <- loadBED12(bedFiles)
#' rar <- tapply(bed$score, bed$library, hanabi, from = 0) %>%
#' structure(class = "hanabi") # tapply discards the class !
#' hanabiPlot(rar, GROUP = levels(bed$library))
#' hanabiPlot(rar, GROUP = levels(bed$library), col=c("red", "green", "blue"))
#' hanabiPlot(rar, col="purple")
#'
#' @family Hanabi functions
#'
#' @importFrom vegan rarefy
#' @export hanabiPlot
# Plot feature discovery curves for a hanabi object (modern path) or a plain
# rarefaction table (deprecated legacy path); see the roxygen block above.
#
# Fixes relative to the original:
# - When GROUP is NULL, `col[as.numeric(GROUP)]` produced a zero-length
#   color vector, which broke the documented calls hanabiPlot(rar) and
#   hanabiPlot(rar, col = "purple"); the palette is now used as given.
# - inherits() replaces `class(RAR) == "hanabi"` (robust to multi-class
#   objects, which would error in the scalar if() on R >= 4.2).
# - Scalar && instead of vector &, TRUE/FALSE instead of T/F, and
#   seq_len() instead of 1:n.
hanabiPlot <- function ( RAR, S, GROUP=NULL
                       , legend.pos = "topleft", pch = 1
                       , col = "black", ...) {
  # Accessory function to make the lines a little transparent.
  # See https://gist.github.com/mages/5339689#file-add-alpha-r
  add.alpha <- function(col, alpha=1)
    apply( sapply(col, col2rgb) / 255
         , 2
         , function(x)
           rgb(x[1], x[2], x[3], alpha=alpha))
  # Coerce GROUP to factor (base assignment; no magrittr needed here).
  if (! is.null(GROUP)) GROUP <- factor(GROUP)
  # Build the color map:
  # col  is the palette (one entry per group level, or as user-provided);
  # cols is the color assigned to each individual sample.
  if (missing(col) && ! is.null(GROUP)) {
    col <- 1:nlevels(GROUP)
    cols <- as.numeric(GROUP)
  } else if (is.null(GROUP)) {
    # No grouping: recycle the palette as given.
    cols <- col
  } else {
    cols <- col[as.numeric(GROUP)]
  }
  if (inherits(RAR, "hanabi")) {
    # Modern path: delegate to plot.hanabi, then add a legend if grouped.
    plot(RAR, pch = pch, col = cols, ...)
    if (! is.null(GROUP)) {
      legend( x = legend.pos
            , legend = levels(GROUP)
            , col = col
            , pch = pch)
    }
    return(invisible())
  }
  # Legacy path: RAR is a plain table (rows = samples, columns = the
  # subsample sizes in S).  Deprecated; kept for backward compatibility.
  warning(c( "Running hanabiPlot on non-hanabi objects is deprecated\n"
           , "This will be removed in after smallCAGEqc 1.0."))
  # Accessory function to prepare an empty frame.
  emptyFrame <- function ()
    plot( range(S)
        , range(RAR, na.rm = TRUE)
        , type='n'
        , ...)
  # Accessory function to plot the curve of one sample (row X).
  rarLines <- function (X)
    lines( S
         , RAR[X,]
         , col=add.alpha(cols[X],0.5))
  # Eliminate data points past a cell's sampling size.
  # Insert NAs everytime the subsampled number of genes
  # equals the total number of genes (since it means that
  # subsampling size was larger than real sampling size),
  # except for the first occurence, which is the last
  # point of the curve.
  shiftTrueLeft <- function(TABLE) {
    n <- ncol(TABLE)
    TABLE <- cbind(FALSE, TABLE)
    TABLE[, seq_len(n)]
  }
  RAR[shiftTrueLeft(RAR==apply(RAR, 1, max))] <- NA
  emptyFrame()
  # Side effects only; the simplified return value of the original sapply
  # was never used.
  invisible(lapply(seq_len(nrow(RAR)), rarLines))
  points( x = apply(RAR, 1, function(X) max(S[!is.na(X)])) # sampling sizes for each cell
        , y = apply(RAR, 1, max, na.rm=TRUE) # num. of detected feat. at max. sampl. size
        , col=cols)
}
|
29a499995a214a6d86c4b697cffb650375eafb02 | 6249a9849904a3d584ffd4b4b0fd602d842a8d57 | /rhandsontable/rhandsontable editable table/ui.R | c5e7dd5984042a2f26fabe702b416d3621793163 | [] | no_license | kumar-sameer/R-Shinyapp-Tutorial | 737da46ffeb48db27326ed4876f58340e595d7c0 | 2d30b20212c1f1bf19f96ba8b319b110344fb816 | refs/heads/master | 2020-03-18T10:29:30.453843 | 2018-05-22T08:24:53 | 2018-05-22T08:24:53 | 134,615,528 | 0 | 1 | null | 2018-05-23T19:19:48 | 2018-05-23T19:19:48 | null | UTF-8 | R | false | false | 849 | r | ui.R | library(shiny)
# install.packages("rhandsontable") # install the package
library(rhandsontable) # load the package
shinyUI(fluidPage(
fluidRow(
titlePanel(title = "Demo rhandsontable & shiny - example app - editable data table"),
h4("rhandsontable is an htmlwidget wrapper package based on the handsontable.js library created by Jonathan Owen. It produces a data grid with excel like appearance and features. "),
h4("# how to create a rhandsontable object and use in shiny"),
h4("# renderRHandsontable()"),
h4("# rHandsontableOutput()"),
hr(),
column(4,
helpText("non-editable table"),
tableOutput("table1")),
column(4,
helpText("editable table"),
rHandsontableOutput("table"),
br(),
actionButton("saveBtn","Save"))
)
)) |
1318658eb042f83474cd945fdb45a7a77a50c0c3 | 81c98978bcff538fa2343e6bf9320293c7abda27 | /lab1/part2_ggplot.R | 48a411cf6e5d86fd436e2661c9a16a1130322603 | [] | no_license | aniksh/DataAnalytics2021_Anik_Saha | 2f654312e6ca047e0a1c173b5c062a7293542765 | a2911893522b3ca05621acb4c87d73b92954b6b1 | refs/heads/master | 2023-03-05T10:43:35.642008 | 2021-02-18T21:22:53 | 2021-02-18T21:22:53 | 333,891,117 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,770 | r | part2_ggplot.R | # mtcars dataset
plot(mtcars$wt,mtcars$mpg)
library(ggplot2)
qplot(mtcars$wt,mtcars$mpg)
qplot(wt, mpg, data = mtcars)
ggplot(mtcars, aes(x=wt,y=mpg)) + geom_point()
# pressure dataset
plot(pressure$temperature, pressure$pressure, type = "l")
plot(pressure$temperature, pressure$pressure)
lines(pressure$temperature, pressure$pressure/2, col = "red")
points(pressure$temperature, pressure$pressure/2, col = "blue")
qplot(pressure$temperature, pressure$pressure, geom="line")
qplot(temperature, pressure, data = pressure, geom = "line")
ggplot(pressure, aes(x=temperature,y=pressure)) + geom_line() + geom_point()
# Creating bar graphs
barplot(BOD$demand, names.arg = BOD$Time)
table(mtcars$cyl)
barplot(table(mtcars$cyl))
qplot(mtcars$cyl)
qplot(factor(mtcars$cyl))
qplot(factor(cyl), data = mtcars)
ggplot(mtcars, aes(x=factor(cyl))) + geom_bar()
# Creating Histogram
hist(mtcars$mpg)
hist(mtcars$mpg, breaks = 10)
hist(mtcars$mpg, breaks = 5)
hist(mtcars$mpg, breaks = 12)
qplot(mpg, data = mtcars, binwidth=4)
ggplot(mtcars, aes(x=mpg)) + geom_histogram(binwidth=4)
ggplot(mtcars, aes(x=mpg)) + geom_histogram(binwidth=5)
# Creating Box-plot
plot(ToothGrowth$supp, ToothGrowth$len)
boxplot(len ~ supp, data = ToothGrowth)
boxplot(len ~ supp + dose, data = ToothGrowth) # Interaction two separate variables on x-axis
qplot(ToothGrowth$supp, ToothGrowth$len, geom = "boxplot")
qplot(supp, len, data = ToothGrowth, geom = "boxplot")
ggplot(ToothGrowth, aes(x=supp, y=len)) + geom_boxplot()
# Interaction of three separate vectors
qplot(interaction(ToothGrowth$supp, ToothGrowth$dose), ToothGrowth$len, geom="boxplot")
qplot(interaction(supp,dose), len, data = ToothGrowth, geom = "boxplot")
ggplot(ToothGrowth, aes(x=interaction(supp,dose), y=len)) + geom_boxplot()
|
18688ead611b97ef50fe081769e34f5177068de1 | e9a14f3d06022ffda16ad454486d7f1f79788bc6 | /Rcpp_and_armadillo/app_roll_portf.R | 58cbba3d0be633258aecfc5ec1a15612c128766b | [] | no_license | Karagul/Algorithmic-Portfolio-Management-in-R-programming-language | 19b146847a3c003e290566b35746e339c0706449 | cbcaecb49659a33a5d3b1b4b70467fe072344331 | refs/heads/master | 2020-06-19T20:28:18.149882 | 2018-08-19T06:02:57 | 2018-08-19T06:02:57 | 196,860,196 | 1 | 0 | null | 2019-07-14T16:37:21 | 2019-07-14T16:37:21 | null | UTF-8 | R | false | false | 2,703 | r | app_roll_portf.R | ##############################
# This is a shiny app for simulating rolling portfolio
# optimization strategies, which produces an interactive
# dygraphs plot.
# Just press the "Run App" button on upper right of this panel.
##############################
## Below is the setup code that runs once when the shiny app is started
# load packages
library(shiny)
library(dygraphs)
library(rutils)
# Model and data setup
# source the model function (defines roll_portf_r, called in the server below)
source("C:/Develop/R/lecture_slides/scripts/roll_portf.R")
# Number of eigenvalues for the portfolio optimization (passed to roll_portf_r).
max_eigen <- 2
# ETF symbols from the rutils environment, excluding VXX.
sym_bols <- colnames(rutils::env_etf$re_turns)
sym_bols <- sym_bols[!(sym_bols=="VXX")]
n_weights <- NROW(sym_bols)
# Returns matrix: carry the last observation forward, then drop leading NAs.
re_turns <- rutils::env_etf$re_turns[, sym_bols]
re_turns <- zoo::na.locf(re_turns)
re_turns <- na.omit(re_turns)
# Daily risk-free rate (3% annual over 260 trading days).
risk_free <- 0.03/260
ex_cess <- re_turns - risk_free
# calculate equal weight portfolio
# NOTE(review): weights are 1/sqrt(n), not 1/n, so this is a scaled
# benchmark rather than a true equal-dollar portfolio -- confirm intended.
equal_portf <- cumsum(re_turns %*% rep(1/sqrt(NCOL(re_turns)), NCOL(re_turns)))
# Define end_points (month-end row indices; keep only those past row 50 so
# every window has enough history).
end_points <- rutils::calc_endpoints(re_turns, inter_val="months")
end_points <- end_points[end_points>50]
len_gth <- NROW(end_points)
# End setup code
## Define elements of the UI user interface
inter_face <- shiny::shinyUI(fluidPage(
  titlePanel("Max Sharpe Strategy"),
  sidebarLayout(
    sidebarPanel(
      # Define look_back interval (in months, i.e. number of end points)
      sliderInput("look_back", label="lookback interval:",
                  min=6, max=30, value=12, step=1),
      # Define the shrinkage intensity
      sliderInput("al_pha", label="shrinkage intensity alpha:",
                  min=0.01, max=0.99, value=0.1, step=0.05)
    ),
    mainPanel(
      dygraphOutput("dygraph")
    )
  )
)) # end shinyUI interface
## Define the server code
ser_ver <- shiny::shinyServer(function(input, output) {
  # Re-calculate the data and rerun the model whenever a slider changes.
  da_ta <- reactive({
    # get model parameters from input
    look_back <- input$look_back
    al_pha <- input$al_pha
    # define start_points: each window starts look_back end points earlier;
    # the first (look_back - 1) windows are anchored at row 1.
    start_points <- c(rep_len(1, look_back-1), end_points[1:(len_gth-look_back+1)])
    # rerun the model and append the equal-weight benchmark column.
    strat_rets <- cbind(
      roll_portf_r(ex_cess, re_turns, start_points, end_points, al_pha, max_eigen),
      equal_portf) # end cbind
    colnames(strat_rets) <- c("strat_rets", "equal weight")
    strat_rets
  }) # end reactive code
  # Create the output plot (interactive dygraphs time series).
  output$dygraph <- renderDygraph({
    dygraph(da_ta(), main="Max Sharpe Strategy") %>%
      dySeries("strat_rets", label="max Sharpe", strokeWidth=1, color=c("blue", "red"))
  }) # end output plot
}) # end server code
## Return a Shiny app object
shiny::shinyApp(ui=inter_face, server=ser_ver)
|
28951ba5e475c4532389ee26d186804c3af0fac0 | 06eff7cef9e88eaad3d9f128efd509d67c7cef87 | /man/to_ChemoSpec.Rd | 5cb0c57d4303ce8247bd7899ae850d6b6c909107 | [
"MIT"
] | permissive | anastasiyaprymolenna/AlpsNMR | 384ef42297232710175b0d25382b4247697a04e5 | c870f9aa07b4a47646f742a13152ca747608f01d | refs/heads/master | 2022-12-03T10:48:41.554384 | 2020-08-26T14:43:34 | 2020-08-26T14:43:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,573 | rd | to_ChemoSpec.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{to_ChemoSpec}
\alias{to_ChemoSpec}
\title{Convert to ChemoSpec Spectra class}
\usage{
to_ChemoSpec(nmr_dataset, desc = "A nmr_dataset")
}
\arguments{
\item{nmr_dataset}{An \link{nmr_dataset_1D} object}
\item{desc}{a description for the dataset}
}
\value{
A Spectra object from the ChemoSpec package
}
\description{
Convert to ChemoSpec Spectra class
}
\examples{
dir_to_demo_dataset <- system.file("dataset-demo", package = "AlpsNMR")
dataset <- nmr_read_samples_dir(dir_to_demo_dataset)
dataset_1D <- nmr_interpolate_1D(dataset, axis = c(min = -0.5, max = 10, by = 2.3E-4))
chemo_spectra <- to_ChemoSpec(dataset_1D)
}
\seealso{
Other import/export functions:
\code{\link{Pipelines}},
\code{\link{files_to_rDolphin_blood}()},
\code{\link{files_to_rDolphin_cell}()},
\code{\link{files_to_rDolphin_urine}()},
\code{\link{load_and_save_functions}},
\code{\link{nmr_data}()},
\code{\link{nmr_meta_export}()},
\code{\link{nmr_read_bruker_fid}()},
\code{\link{nmr_read_samples}()},
\code{\link{nmr_zip_bruker_samples}()},
\code{\link{save_files_to_rDolphin}()},
\code{\link{save_profiling_output}()}
Other nmr_dataset_1D functions:
\code{\link{[.nmr_dataset_1D}()},
\code{\link{computes_peak_width_ppm}()},
\code{\link{file_lister}()},
\code{\link{files_to_rDolphin_blood}()},
\code{\link{files_to_rDolphin_cell}()},
\code{\link{files_to_rDolphin_urine}()},
\code{\link{format.nmr_dataset_1D}()},
\code{\link{is.nmr_dataset_1D}()},
\code{\link{load_and_save_functions}},
\code{\link{new_nmr_dataset_1D}()},
\code{\link{nmr_align_find_ref}()},
\code{\link{nmr_baseline_removal}()},
\code{\link{nmr_baseline_threshold}()},
\code{\link{nmr_exclude_region}()},
\code{\link{nmr_integrate_regions}()},
\code{\link{nmr_interpolate_1D}()},
\code{\link{nmr_meta_add}()},
\code{\link{nmr_meta_export}()},
\code{\link{nmr_meta_get_column}()},
\code{\link{nmr_meta_get}()},
\code{\link{nmr_normalize}()},
\code{\link{nmr_pca_build_model}()},
\code{\link{nmr_pca_outliers_filter}()},
\code{\link{nmr_pca_outliers_plot}()},
\code{\link{nmr_pca_outliers_robust}()},
\code{\link{nmr_pca_outliers}()},
\code{\link{nmr_ppm_resolution}()},
\code{\link{plot.nmr_dataset_1D}()},
\code{\link{plot_webgl}()},
\code{\link{print.nmr_dataset_1D}()},
\code{\link{rdCV_PLS_RF_ML}()},
\code{\link{rdCV_PLS_RF}()},
\code{\link{save_files_to_rDolphin}()},
\code{\link{validate_nmr_dataset_peak_table}()},
\code{\link{validate_nmr_dataset}()}
}
\concept{import/export functions}
\concept{nmr_dataset_1D functions}
|
a2a02c472625de578c0ed7b5976f6df121020a6b | 31b542164a8c46473af45d6dfd591c18191e8f70 | /mkt-project/src/R/Scripts/trim_wiki_eod.R | ebd1ef51282cd022385a84ba5c751942c5b66f0a | [] | no_license | saridhi/fin-ml | 4fc45dda275b4df436ffd574173ebaa5d65e5e19 | a5d60c20336807e1b388bbe12898e6452c729d76 | refs/heads/master | 2022-11-29T16:31:33.758742 | 2020-05-02T11:52:00 | 2020-05-02T11:52:00 | 161,928,872 | 2 | 0 | null | 2022-11-21T21:10:12 | 2018-12-15T17:34:50 | JavaScript | UTF-8 | R | false | false | 580 | r | trim_wiki_eod.R |
# Download a time series per ticker; failed downloads yield NULL instead of
# aborting the whole lapply.
# NOTE(review): `trim_tickers` and `dr` are not defined in this file --
# presumably created by an earlier script; confirm before running standalone.
all_ts <- lapply(as.vector(trim_tickers), function(x) {
      # progress indicator
      print(x)
      tryCatch({
            getTimeSeries(dr, ticker=x)
      }, error = function(e) {
            return (NULL)
      })
})
# NOTE(review): `ts` is never assigned in this file, so index(ts) resolves to
# the stats::ts *function* and will error -- a specific xts/zoo series object
# was presumably meant here; confirm.
dates <- index(ts)
#Script to trim wiki_eod.csv
# For each date keep the tickers with the ten largest adjusted volumes.
# NOTE(review): assumes `csv_contents` exists with columns date, adj_volume,
# ticker; %in% against the top-10 values can return more than ten tickers
# when volumes tie -- confirm both upstream.
ticker_list <- lapply(dates, function(x) {
  all_volumes <- csv_contents[csv_contents$date==as.character(x),"adj_volume"]
  all_tickers <- csv_contents[csv_contents$date==as.character(x),"ticker"]
  ticker_index <- which(all_volumes %in% sort(as.numeric(all_volumes), decreasing = TRUE)[1:10])
  # progress indicator
  print(x)
  all_tickers[ticker_index]
})
# NOTE(review): unfinished -- lapply() with no arguments errors at run time;
# the intended inputs were never filled in.
series.volumes <- lapply()
|
e0411b9af592ad97d4a260bf2433bf5db9ac3ad2 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.compute/man/ec2_replace_route_table_association.Rd | 8d620987b6095ba668a096124bd99685c603391f | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,348 | rd | ec2_replace_route_table_association.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_replace_route_table_association}
\alias{ec2_replace_route_table_association}
\title{Changes the route table associated with a given subnet, internet
gateway, or virtual private gateway in a VPC}
\usage{
ec2_replace_route_table_association(AssociationId, DryRun = NULL, RouteTableId)
}
\arguments{
\item{AssociationId}{[required] The association ID.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{RouteTableId}{[required] The ID of the new route table to associate with the subnet.}
}
\description{
Changes the route table associated with a given subnet, internet gateway, or virtual private gateway in a VPC. After the operation completes, the subnet or gateway uses the routes in the new route table. For more information about route tables, see \href{https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Route_Tables.html}{Route tables} in the \emph{Amazon VPC User Guide}.
See \url{https://www.paws-r-sdk.com/docs/ec2_replace_route_table_association/} for full documentation.
}
\keyword{internal}
|
2723036b82437dddaba7bab031ca7cbfd904868b | cf3f5c4af0ea3b3b80d582020ceccd7efa4b4960 | /Capstone Project/Shiny app Source code/ui.R | 33054241b2915abc8a49c53334875b654f7271b6 | [] | no_license | Mrugankakarte/Coursera-Projects | 2b2f62e115598504bc3f0be4d60c78ea965ae95a | edf36e0d21aeaf1d7b647ec16279d0a64a76e477 | refs/heads/master | 2021-01-16T18:02:22.507888 | 2018-10-20T14:33:12 | 2018-10-20T14:33:12 | 78,918,933 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,511 | r | ui.R | # ui.R
# ui.R of a shiny word-prediction app: the fluidPage() value returned by this
# file IS the user interface object shiny loads.
library(shinythemes)
fluidPage(
        theme = shinytheme("slate"),
        # Three-tab layout: Home (blurb), Prediction (text input + results
        # table filled by the server's output$table), and a More... menu
        # that renders the ReadME markdown.
        navbarPage(title = "Word Prediction!",
                   tabPanel("Home",
                            h4("This application was developed for Capstone Project of Data Science Specialization offered by John Hopkins University."),
                            h4("*Please read the ReadME section under the tab 'More..")
                            ), # home tab
                   tabPanel("Prediction",
                            sidebarLayout(
                                    sidebarPanel(
                                            wellPanel(
                                                    # Free text whose next word is predicted server-side.
                                                    textInput("input_vec", label = "Enter text :", value = "")
                                            )
                                    ), #edit sidebarpanel
                                    mainPanel(
                                            h3("Predictions"),
                                            dataTableOutput("table")
                                    ) #edit mainpanel
                            ) # sidebarlayout
                   ), #prediction tab
                   navbarMenu("More...",
                              tabPanel("ReadME",
                                       includeMarkdown("ReadME.Rmd")
                                       )
                   ) #more tab
        ) #navbarpage
) # fluid page
eaba325b635dcf13c28a69802b1501f56b3d39a4 | 3fbd06f8bd2bd8c23e2ed0d630a0687828d9de5f | /InfluentialScores_SensitivityAnalysis/SingleNodeRemove_GRNAddLink_001To020.r | 6ef24110df05b5c0d029fef44f23dbbd58b9933b | [] | no_license | anu-bioinfo/Robust-Multilyer-BioNetworks | a2ae6b79541282761f7d757eaf94a99e90c06994 | 9e4ea0c595439f4f910ac51baf44c40f193b0f36 | refs/heads/master | 2022-11-30T19:02:38.771423 | 2020-08-07T02:30:24 | 2020-08-07T02:30:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,308 | r | SingleNodeRemove_GRNAddLink_001To020.r | # Sensitivity analysis after add a fraction (0.01:0.01:0.2) of links in the GRN
# Output: the final functional network size in the PPI network
library(igraph)
#g_PPI_Ori <- read.graph("PPI_Net_Num.txt", format = c("edgelist"))
g_PPI_Ori <- read.graph("PPI_NoSelfLoop_WCC.txt", format = c("edgelist"))
wccb <- clusters(g_PPI_Ori, mode="weak")
largest_wcc_vb <- which(wccb$membership == which.max(wccb$csize))
OriFailNodes <- setdiff(V(g_PPI_Ori), largest_wcc_vb)
g_PPI <- delete.edges(g_PPI_Ori, E(g_PPI_Ori)[V(g_PPI_Ori)[OriFailNodes] %--% V(g_PPI_Ori)])
N_PPI <- length(largest_wcc_vb)
PPI_Nodes <- largest_wcc_vb
g_PPI_Read <- g_PPI
PPI_Deg <- degree(g_PPI, V(g_PPI), mode = c("all"))
PPI_Isolates <- which(PPI_Deg==0)
g_GRN_ori <- read.graph("Gene_Net_Num.txt", format = c("edgelist"))
for(i_Rep in 1:100)
{
for(Add_Frac in (1:20)/100)
{
g_ori <- g_GRN_ori
GRN_Deg <- degree(g_ori, V(g_ori), mode = c("all"))
GRN_OutDeg <- degree(g_ori, V(g_ori), mode = c("out"))
TF_Nodes <- which(GRN_OutDeg!=0)
GRN_Nodes <- which(GRN_Deg!=0)
N_GRN_link <- length(E(g_ori))
Add_Num <- 1
while(Add_Num<=round(N_GRN_link*Add_Frac)) {
Nodes_ids <- igraph.sample(1, length(GRN_Nodes), 1)
Pick_TargetNodes <- GRN_Nodes[Nodes_ids]
Nodes_ids2 <- igraph.sample(1, length(TF_Nodes), 1)
Pick_SourceNodes <- TF_Nodes[Nodes_ids2]
if(Pick_TargetNodes!=Pick_SourceNodes)
{
AConnectC <- which(Pick_SourceNodes %in% as_ids(neighbors(g_ori, Pick_TargetNodes, mode=c("in"))))
if(length(AConnectC)==0)
{
g_ori <- add_edges(g_ori, c(Pick_SourceNodes, Pick_TargetNodes))
Add_Num <- Add_Num + 1
}
}
}
G_GRN_Read <- g_ori
Gene_Deg <- degree(g_ori, V(g_ori), mode = c("all"))
Isolates <- union(which(Gene_Deg==0),which(PPI_Deg==0))
N_gene <- length(V(g_ori))-length(which(Gene_Deg==0))
Gene_Nodes <- setdiff(V(g_ori),which(Gene_Deg==0))
num_gfunc_max <- 10
pmax <- length(V(g_ori))
Siz_funca <- array(0, dim = c(pmax, num_gfunc_max))
Siz_funcb <- array(0, dim = c(pmax, num_gfunc_max))
Size_FuncGene <- array(rep(0,pmax*(1)), dim=c(pmax,1))
Size_FuncPPI <- array(rep(0,pmax*(1)), dim=c(pmax,1))
for(p_num in 1:pmax)
{
g_ori <- G_GRN_Read
g_PPI <- g_PPI_Read
Del_Ver1 <- p_num
Del_Ver2 <- neighborhood(g_ori, 1, nodes=Del_Ver1, "out")
Del_Ver <- unique(unlist(Del_Ver2))
Rand_Ver <- setdiff(V(g_ori),Del_Ver)
g_ori <- delete.edges(g_ori, E(g_ori)[V(g_ori)[Del_Ver] %--% V(g_ori)])
Fail2PPI <- setdiff(Del_Ver, intersect(Del_Ver, Isolates))
Siz_funca[p_num, 1] <- N_gene
Siz_funcb[p_num, 1] <- N_PPI
####Degrees distribution of the original network
EROri_Deg <- degree(g_ori, V(g_ori), mode = c("all"))
Siz_funca[p_num, 2] <- length(which(EROri_Deg!=0))
####### PPI
Del_Vertices_ori <- which(EROri_Deg==0)
Del_Vertices <- setdiff(Del_Vertices_ori, Isolates)
g_PPI <- delete.edges(g_PPI, E(g_PPI)[V(g_PPI)[Del_Vertices] %--% V(g_PPI)])
wccb <- clusters(g_PPI, mode="weak")
largest_wcc_vb <- which(wccb$membership == which.max(wccb$csize))
Siz_funcb[p_num, 2] <- length(largest_wcc_vb)
k_while <- 2
while((Siz_funca[p_num, k_while]!=Siz_funca[p_num, k_while-1]) || (Siz_funcb[p_num, k_while]!=Siz_funcb[p_num, k_while-1])) {
k_while <- k_while+1
Del_Ver0 <- setdiff(V(g_PPI), largest_wcc_vb)
Del_Ver1 <- setdiff(Del_Ver0, intersect(Del_Ver0, Isolates))
Del_Ver2 <- neighborhood(g_ori, 1, nodes=Del_Ver1, "out")
Del_Ver <- unique(unlist(Del_Ver2))
g_ori <- delete.edges(g_ori, E(g_ori)[V(g_ori)[Del_Ver] %--% V(g_ori)])
EROri_Deg <- degree(g_ori, V(g_ori), mode = c("all"))
Siz_funca[p_num, k_while] <- length(which(EROri_Deg!=0))
####### PPI
Del_Vertices_ori <- which(EROri_Deg==0)
Del_Vertices <- setdiff(Del_Vertices_ori, Isolates)
g_PPI <- delete.edges(g_PPI, E(g_PPI)[V(g_PPI)[Del_Vertices] %--% V(g_PPI)])
wccb <- clusters(g_PPI, mode="weak")
largest_wcc_vb <- which(wccb$membership == which.max(wccb$csize))
Siz_funcb[p_num, k_while] <- length(largest_wcc_vb)
}
Size_FuncGene[p_num] <- Siz_funca[p_num, k_while]
Size_FuncPPI[p_num] <- Siz_funcb[p_num, k_while]
}
Gene_PPI_Nodes <- intersect(which(Gene_Deg!=0),which(PPI_Deg!=0))
PPIFunc_NonIso <- Size_FuncPPI[Gene_PPI_Nodes]
PPI_Nodes_Robust <- array(rep(0,2*length(Gene_PPI_Nodes)),dim=c(length(Gene_PPI_Nodes),2))
PPI_Nodes_Robust[,1] <- Gene_PPI_Nodes[order(PPIFunc_NonIso)]
PPI_Nodes_Robust[,2] <- sort(PPIFunc_NonIso)
FileName1 <- paste("AddLinkFrac",Add_Frac*100, sep="")
FileName2 <- paste(FileName1,"_Rep", sep="")
FileName3 <- paste(FileName2,i_Rep+3, sep="")
FileName4 <- paste(FileName3, "_CoupledPPI_Robust.txt", sep="")
write.table(PPI_Nodes_Robust, FileName4, row.names = FALSE, col.names = FALSE)
}
}
|
e1181b31b2606e71eefa34de637304676c15f1bd | e5fcf1aeff9cbfab3f19a862c7d80145ce68dbe4 | /R/plots.confints.bootpls.R | 538c4661313cc98942975210c026f8780dfa9efe | [] | no_license | fbertran/plsRglm | 7d7294101829065f4d1672d26af42b09b577464a | 058296cd0c1e1488265b87573d524a61f538809b | refs/heads/master | 2023-04-08T02:43:19.921763 | 2023-03-14T22:28:43 | 2023-03-14T22:28:43 | 18,454,150 | 16 | 6 | null | 2021-03-14T15:41:24 | 2014-04-04T22:09:56 | R | UTF-8 | R | false | false | 9,910 | r | plots.confints.bootpls.R | #' Plot bootstrap confidence intervals
#'
#' This function plots the confidence intervals derived using the function
#' \code{confints.bootpls} from from a \code{bootpls} based object.
#'
#'
#' @param ic_bootobject an object created with the \code{confints.bootpls}
#' function.
#' @param indices vector of indices of the variables to plot. Defaults to
#' \code{NULL}: all the predictors will be used.
#' @param legendpos position of the legend as in
#' \code{\link[graphics:legend]{legend}}, defaults to \code{"topleft"}
#' @param prednames do the original names of the predictors shall be plotted ?
#' Defaults to \code{TRUE}: the names are plotted.
#' @param articlestyle do the extra blank zones of the margin shall be removed
#' from the plot ? Defaults to \code{TRUE}: the margins are removed.
#' @param xaxisticks do ticks for the x axis shall be plotted ? Defaults to
#' \code{TRUE}: the ticks are plotted.
#' @param ltyIC lty as in \code{\link[graphics:plot]{plot}}
#' @param colIC col as in \code{\link[graphics:plot]{plot}}
#' @param typeIC type of CI to plot. Defaults to \code{typeIC=c("Normal",
#' "Basic", "Percentile", "BCa")} if BCa intervals limits were computed and to
#' \code{typeIC=c("Normal", "Basic", "Percentile")} otherwise.
#' @param las numeric in 0,1,2,3; the style of axis labels. 0: always parallel
#' to the axis [default], 1: always horizontal, 2: always perpendicular to the
#' axis, 3: always vertical.
#' @param mar A numerical vector of the form \code{c(bottom, left, top, right)}
#' which gives the number of lines of margin to be specified on the four sides
#' of the plot. The default is \code{c(5, 4, 4, 2) + 0.1.}
#' @param mgp The margin line (in mex units) for the axis title, axis labels
#' and axis line. Note that \code{mgp[1]} affects title whereas \code{mgp[2:3]}
#' affect axis. The default is \code{c(3, 1, 0)}.
#' @param \dots further options to pass to the
#' \code{\link[graphics:plot]{plot}} function.
#' @return \code{NULL}
#' @author Frédéric Bertrand\cr
#' \email{frederic.bertrand@@utt.fr}\cr
#' \url{https://fbertran.github.io/homepage/}
#' @seealso \code{\link{confints.bootpls}}
#' @keywords regression models
#' @examples
#'
#' data(Cornell)
#' modpls <- plsR(Y~.,data=Cornell,3)
#'
#' # Lazraq-Cleroux PLS (Y,X) bootstrap
#' set.seed(250)
#' Cornell.bootYX <- bootpls(modpls, R=250, verbose=FALSE)
#' temp.ci <- confints.bootpls(Cornell.bootYX,2:8)
#'
#' plots.confints.bootpls(temp.ci)
#' plots.confints.bootpls(temp.ci,prednames=FALSE)
#' plots.confints.bootpls(temp.ci,prednames=FALSE,articlestyle=FALSE,
#' main="Bootstrap confidence intervals for the bj")
#' plots.confints.bootpls(temp.ci,indices=1:3,prednames=FALSE)
#' plots.confints.bootpls(temp.ci,c(2,4,6),"bottomright")
#' plots.confints.bootpls(temp.ci,c(2,4,6),articlestyle=FALSE,
#' main="Bootstrap confidence intervals for some of the bj")
#'
#' temp.ci <- confints.bootpls(Cornell.bootYX,typeBCa=FALSE)
#' plots.confints.bootpls(temp.ci)
#' plots.confints.bootpls(temp.ci,2:8)
#' plots.confints.bootpls(temp.ci,prednames=FALSE)
#'
#'
#' # Bastien CSDA 2005 (Y,T) bootstrap
#' Cornell.boot <- bootpls(modpls, typeboot="fmodel_np", R=250, verbose=FALSE)
#' temp.ci <- confints.bootpls(Cornell.boot,2:8)
#'
#' plots.confints.bootpls(temp.ci)
#' plots.confints.bootpls(temp.ci,prednames=FALSE)
#' plots.confints.bootpls(temp.ci,prednames=FALSE,articlestyle=FALSE,
#' main="Bootstrap confidence intervals for the bj")
#' plots.confints.bootpls(temp.ci,indices=1:3,prednames=FALSE)
#' plots.confints.bootpls(temp.ci,c(2,4,6),"bottomright")
#' plots.confints.bootpls(temp.ci,c(2,4,6),articlestyle=FALSE,
#' main="Bootstrap confidence intervals for some of the bj")
#'
#' temp.ci <- confints.bootpls(Cornell.boot,typeBCa=FALSE)
#' plots.confints.bootpls(temp.ci)
#' plots.confints.bootpls(temp.ci,2:8)
#' plots.confints.bootpls(temp.ci,prednames=FALSE)
#'
#'
#' \donttest{
#' data(aze_compl)
#' modplsglm <- plsRglm(y~.,data=aze_compl,3,modele="pls-glm-logistic")
#'
#' # Lazraq-Cleroux PLS (Y,X) bootstrap
#' # should be run with R=1000 but takes much longer time
#' aze_compl.bootYX3 <- bootplsglm(modplsglm, typeboot="plsmodel", R=250, verbose=FALSE)
#' temp.ci <- confints.bootpls(aze_compl.bootYX3)
#'
#' plots.confints.bootpls(temp.ci)
#' plots.confints.bootpls(temp.ci,prednames=FALSE)
#' plots.confints.bootpls(temp.ci,prednames=FALSE,articlestyle=FALSE,
#' main="Bootstrap confidence intervals for the bj")
#' plots.confints.bootpls(temp.ci,indices=1:33,prednames=FALSE)
#' plots.confints.bootpls(temp.ci,c(2,4,6),"bottomleft")
#' plots.confints.bootpls(temp.ci,c(2,4,6),articlestyle=FALSE,
#' main="Bootstrap confidence intervals for some of the bj")
#' plots.confints.bootpls(temp.ci,indices=1:34,prednames=FALSE)
#' plots.confints.bootpls(temp.ci,indices=1:33,prednames=FALSE,ltyIC=1,colIC=c(1,2))
#'
#' temp.ci <- confints.bootpls(aze_compl.bootYX3,1:34,typeBCa=FALSE)
#' plots.confints.bootpls(temp.ci,indices=1:33,prednames=FALSE)
#'
#'
#' # Bastien CSDA 2005 (Y,T) Bootstrap
#' # much faster
#' aze_compl.bootYT3 <- bootplsglm(modplsglm, R=1000, verbose=FALSE)
#' temp.ci <- confints.bootpls(aze_compl.bootYT3)
#'
#' plots.confints.bootpls(temp.ci)
#' plots.confints.bootpls(temp.ci,typeIC="Normal")
#' plots.confints.bootpls(temp.ci,typeIC=c("Normal","Basic"))
#' plots.confints.bootpls(temp.ci,typeIC="BCa",legendpos="bottomleft")
#' plots.confints.bootpls(temp.ci,prednames=FALSE)
#' plots.confints.bootpls(temp.ci,prednames=FALSE,articlestyle=FALSE,
#' main="Bootstrap confidence intervals for the bj")
#' plots.confints.bootpls(temp.ci,indices=1:33,prednames=FALSE)
#' plots.confints.bootpls(temp.ci,c(2,4,6),"bottomleft")
#' plots.confints.bootpls(temp.ci,c(2,4,6),articlestyle=FALSE,
#' main="Bootstrap confidence intervals for some of the bj")
#' plots.confints.bootpls(temp.ci,prednames=FALSE,ltyIC=c(2,1),colIC=c(1,2))
#'
#' temp.ci <- confints.bootpls(aze_compl.bootYT3,1:33,typeBCa=FALSE)
#' plots.confints.bootpls(temp.ci,prednames=FALSE)
#' }
#'
#' @export plots.confints.bootpls
plots.confints.bootpls = function (ic_bootobject, indices = NULL, legendpos = "topleft",
    prednames = TRUE, articlestyle = TRUE, xaxisticks = TRUE, ltyIC = c(2, 4, 5, 1),
    colIC = c("darkgreen", "blue", "red", "black"), typeIC, las = par("las"), mar, mgp, ...)
{
    # Default to plotting every interval type present in the object; the
    # BCa columns only exist when they were computed (attr "typeBCa").
    if (missing(typeIC)) {
        if (attr(ic_bootobject, "typeBCa")) {
            typeIC <- c("Normal", "Basic", "Percentile", "BCa")
        }
        else {
            typeIC <- c("Normal", "Basic", "Percentile")
        }
    }
    if ((!attr(ic_bootobject, "typeBCa")) && ("BCa" %in% typeIC)) {
        stop("BCa intervals were not computed, hence cannot be plotted.")
    }
    # ltyIC/colIC are indexed by the *fixed* slot of each interval type
    # (1 = Normal, 2 = Basic, 3 = Percentile, 4 = BCa), so they must cover
    # all four slots.  Recycling only to length(typeIC), as previously done,
    # produced NA line types/colours when typeIC was a subset such as
    # c("Percentile", "BCa").
    if (length(ltyIC) < 4) {
        ltyIC <- rep_len(ltyIC, 4)
    }
    if (length(colIC) < 4) {
        colIC <- rep_len(colIC, 4)
    }
    nr <- nrow(ic_bootobject)
    if (is.null(indices)) {
        indices <- 1:nr
    }
    # One x position per plotted coefficient.
    plotpos <- seq_along(indices)
    if (articlestyle) {
        # Compact margins for publication-style figures; restore the saved
        # graphical parameters even if an error interrupts the plotting.
        oldparmar <- par("mar")
        oldparmgp <- par("mgp")
        on.exit(par(mar = oldparmar, mgp = oldparmgp), add = TRUE)
        if (missing(mar)) {
            mar = c(2, 2, 1, 1) + 0.1
        }
        if (missing(mgp)) {
            mgp = c(2, 1, 0)
        }
        par(mar = mar)
        par(mgp = mgp)
    }
    # Empty frame sized to hold every selected interval.
    plot(c(1, 1), xlab = "", ylab = "", type = "n", xlim = c(1,
        length(indices) + 0.5), ylim = c(min(ic_bootobject[indices, ]),
        max(ic_bootobject[indices,])), xaxt = "n", ...)
    legendtxt <- NULL
    indictypeIC <- rep(FALSE, 4)  # which of the four slots were drawn
    nbIC <- 0                     # intervals drawn so far -> 0.15 x-offset each
    if ("Normal" %in% typeIC) {
        indictypeIC[1] <- TRUE
        arrows(plotpos + nbIC * 0.15, ic_bootobject[indices, 1],
            plotpos + nbIC * 0.15, ic_bootobject[indices, 2],
            lend = "butt", lwd = 2, lty = ltyIC[1], col = colIC[1],
            code = 3, angle = 90, length = 0.1)
        legendtxt <- c(legendtxt, "Normal")
        nbIC <- nbIC + 1
    }
    if ("Basic" %in% typeIC) {
        indictypeIC[2] <- TRUE
        arrows(plotpos + nbIC * 0.15, ic_bootobject[indices, 3],
            plotpos + nbIC * 0.15, ic_bootobject[indices, 4],
            lend = "butt", lwd = 2, lty = ltyIC[2], col = colIC[2],
            code = 3, angle = 90, length = 0.1)
        legendtxt <- c(legendtxt, "Basic")
        nbIC <- nbIC + 1
    }
    if ("Percentile" %in% typeIC) {
        indictypeIC[3] <- TRUE
        arrows(plotpos + nbIC * 0.15, ic_bootobject[indices, 5],
            plotpos + nbIC * 0.15, ic_bootobject[indices, 6],
            lend = "butt", lwd = 2, lty = ltyIC[3], col = colIC[3],
            code = 3, angle = 90, length = 0.1)
        legendtxt <- c(legendtxt, "Percentile")
        nbIC <- nbIC + 1
    }
    if (("BCa" %in% typeIC) && (attr(ic_bootobject, "typeBCa"))) {
        indictypeIC[4] <- TRUE
        arrows(plotpos + nbIC * 0.15, ic_bootobject[indices, 7],
            plotpos + nbIC * 0.15, ic_bootobject[indices, 8],
            lend = "butt", lwd = 2, lty = ltyIC[4], col = colIC[4],
            code = 3, angle = 90, length = 0.1)
        legendtxt <- c(legendtxt, "BCa")
        nbIC <- nbIC + 1
    }
    # x-axis labels: predictor names or generic x1, x2, ...; labels are
    # centered across the offset group, and ticks are suppressed
    # (lwd.ticks = 0) when xaxisticks is FALSE.
    atpos <- plotpos + (nbIC - 1) * 0.15/2
    if (prednames) {
        xlabels <- rownames(ic_bootobject)[indices]
    }
    else {
        xlabels <- paste("x", (1:nr)[indices], sep = "")
    }
    if (xaxisticks) {
        axis(1, at = atpos, labels = xlabels, las = las)
    }
    else {
        axis(1, at = atpos, labels = xlabels, lwd.ticks = 0, las = las)
    }
    abline(h = 0, lty = 3, lwd = 2)
    # Legend lists only the interval types actually drawn, with matching
    # line types and colours.
    legend(legendpos, legend = legendtxt, lty = ltyIC[indictypeIC],
        col = colIC[indictypeIC], lwd = 2)
    # Documented return value is NULL.
    invisible(NULL)
}
|
c5a94c186ad148e9f5f395a32e9b713444cfd8ec | 8dbd9ecd7929913d2095f1ec10b5ee98b2ef1032 | /man/prop_10.Rd | 4cf53fcf5ac117692181c31b91b5ee6cd16da688 | [] | no_license | cran/mazeinda | 587fe727a3609f03dba44d0376d99fa7ee61f189 | 285b026ecef5f7d31d6c75a10291c39c4af1c9e9 | refs/heads/master | 2022-05-12T16:21:15.096059 | 2022-05-09T06:10:07 | 2022-05-09T06:10:07 | 117,666,015 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 357 | rd | prop_10.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tau.R
\name{prop_10}
\alias{prop_10}
\title{p_10 estimator}
\usage{
prop_10(x, y)
}
\arguments{
\item{x, y}{vectors to be correlated. Must be numeric.}
}
\value{
p_10 estimator
}
\description{
computes estimate of parameter p_10 based on sample proportions.
}
\keyword{internal}
|
b2a08362460ca353c27db79c5b22e23723dcddb7 | 618d4648b27ead670fdcc155a0720afefcd29ff3 | /run_analysis.R | 4b8afea1fd296afd16ee40320f3ef10bc478bdfc | [] | no_license | THJacobsen/Cleaning-peer-assignemnet | f4b78abb490e49ff04a6ef22f652b18e0ccc67af | 33df19117a20a0dd6abbb2be557e1184ed625863 | refs/heads/main | 2023-07-02T21:00:55.882317 | 2021-08-03T07:33:18 | 2021-08-03T07:33:18 | 392,201,530 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,089 | r | run_analysis.R | # First we load in some of the packages often used in this course
library(tidyverse)
library(lubridate)
library(data.table)
# We need to unzip the files. Setting the working directory where files (unzipped) are placed and then unzipping is first step
wd<-setwd("~/coursera/R data science/cleaning data/peer_folder1/Cleaning-peer-assignemnet")
unzip(zipfile="~/coursera/R data science/cleaning data/peer_folder1/Cleaning-peer-assignemnet/getdata_projectfiles_UCI HAR Dataset.zip",exdir=wd)
# path to the zipped files need to be set and files need to be listed
pathdata = file.path("~/coursera/R data science/cleaning data/peer_folder1/Cleaning-peer-assignemnet", "UCI HAR Dataset")
files = list.files(pathdata, recursive=TRUE)
# step 1 loading in all the data- the headers are not given in raw data
xtrain = read.table(file.path(pathdata, "train", "X_train.txt"),header = FALSE)
ytrain = read.table(file.path(pathdata, "train", "y_train.txt"),header = FALSE)
subjecttrain = read.table(file.path(pathdata, "train", "subject_train.txt"),header = FALSE)
xtest = read.table(file.path(pathdata, "test", "X_test.txt"),header = FALSE)
ytest = read.table(file.path(pathdata, "test", "y_test.txt"),header = FALSE)
subjecttest = read.table(file.path(pathdata, "test", "subject_test.txt"),header = FALSE)
features = read.table(file.path(pathdata, "features.txt"),header = FALSE)
activitylabels = read.table(file.path(pathdata, "activity_labels.txt"),header = FALSE)
#The data needs to be tidy- mainly there should be names to the columns- column features are used for x
colnames(xtrain) = features[,2]
colnames(ytrain) = "activityId"
colnames(subjecttrain) = "subjectId"
colnames(xtest) = features[,2]
colnames(ytest) = "activityId"
colnames(subjecttest) = "subjectId"
colnames(activitylabels) <- c('activityId','activityType')
# next the datasets are merge by cbind followed by rbind
train_m = cbind(ytrain, subjecttrain, xtrain)
test_m = cbind(ytest, subjecttest, xtest)
all = rbind(train_m, test_m)
# step 2:next we need to only extract the relevant measures: mean and std
# reading all available variables
colNames = colnames(all)
colNames
#get mean and sd- as logical vector
mean_and_std = (grepl("activityId" , colNames) | grepl("subjectId" , colNames) | grepl("mean.." , colNames) | grepl("std.." , colNames))
mean_and_std
#apply vector to all dataset
meanstd_with <- all[ , mean_and_std == TRUE]
meanstd_with
#now in step 3 using descriptive names to name the activities in the dataset
a_names_with = merge(meanstd_with, activitylabels, by='activityId', all.x=TRUE)
#all,meanstd_with are the answers to question 4
# finally in step 5 with previous datasets an independent tidy data set with the average of each variable for each activity and each subject is created
tidy<- a_names_with %>% group_by(subjectId,activityId) %>% summarise_all(mean,na.rm=TRUE) %>% arrange(subjectId, activityId)
#Saving the new data
#and keeping format as to begin with storing to folder
write.table(tidy, "tidy.csv", row.name=FALSE)
write.table(tidy, "tidy.txt", row.name=FALSE)
|
3320902c42e92d5a953a4d3dda9053469177ea5c | b2be4e6387b31880b74a4c08adf6e207c1c9c33a | /mgmt_table.R | f171168d08026daea41e85aac1635ef40477cb0c | [] | no_license | michaelrahija/EAR_pilot | ed5317c4f921186cf55722fa19642cf34eba652a | f4159f7c4cb1486cc7c10babd60eab25337a61b5 | refs/heads/master | 2021-01-21T13:41:24.337796 | 2016-05-15T08:57:49 | 2016-05-15T08:57:49 | 43,822,846 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,869 | r | mgmt_table.R |
library(dplyr)
library(xlsx)
library(tidyr)
library(countrycode)
library(XLConnect)
sys <- Sys.info()
if(sys[7] == "josh"){
dir = "~/Documents/Github/EAR_pilot/"
} else if(sys[5] == "x86_64"){
dir = "~/Dropbox/FAO_ESS_STUFF/EAR_pilot" #Mac
data.dir = "~/Dropbox/FAO_ESS_STUFF/EAR_pilot_data/"
} else if (sys[5] == "Michael"){
dir = "C:/Users/Michael/Dropbox/FAO_ESS_STUFF/EAR_pilot"#HOME PC
data.dir = "C:/Users/Michael/Dropbox/FAO_ESS_STUFF/EAR_pilot_data/"
} else if (sys[6]=="Rahija") {
dir = "C:/Users/rahija/Dropbox/FAO_ESS_STUFF/EAR_pilot" #FAO PC
data.dir = "C:/Users/rahija/Dropbox/FAO_ESS_STUFF/EAR_pilot_data/"
} else {
stop("Implement location for current user!")
}
setwd(dir)
#Import data, and filter for operationally active, active pipeline, final consultation
active <- read.csv(file = paste0(data.dir,"operationally_active_06_05_16.csv"),
stringsAsFactors = FALSE)
active <- filter(active, Project.Status == "Operationally Active")
pipe <- read.csv(file = paste0(data.dir,"pipeline_06_05_16.csv"),
stringsAsFactors = FALSE)
pipe <- filter(pipe, Project.Status == "Active Pipeline" | Project.Status == "Final Consultation")
#Select only relevant columns
active <- select(active,
Actual.NTE,
Country.Name,
Geographical.Coverage,#for filtering later
Project.Staff...Funding.Officer,
Project.Staff...LTO.Officer,
Project.Staff...LTU.Officer,
Project.Status,
Project.Symbol,
Project.Title, #to apply addTeam function
Total.Budget..FPMIS.)
pipe <-select(pipe,
Actual.NTE,
Country.Name,
Geographical.Coverage,#for filtering later
Project.Staff...Funding.Officer,
Project.Staff...LTO.Officer,
Project.Staff...LTU.Officer,
Project.Status,
Project.Symbol,
Project.Title, #to apply addTeam function
Total.Budget..FPMIS.)
master <- rbind(active, pipe) #some column names not matching....
#create staff column, to use addTeam function
# Concatenate the three officer columns into one semicolon-separated field.
# Fixed: the previous call passed `sep = ";"` to paste0(), which has no
# `sep` argument -- the ";" was simply appended as an extra string, so the
# three names were glued together with no separator at all.  paste() with
# sep = ";" gives the intended "funding;LTO;LTU" layout.
master$staff <- paste(master$Project.Staff...Funding.Officer,
                      master$Project.Staff...LTO.Officer,
                      master$Project.Staff...LTU.Officer,
                      sep = ";")
# Drop the trailing separator left when the last officer field is empty.
master$staff <- gsub(";$", "", master$staff)
#clean up column names and project titles
cols <- colnames(master)
cols <- gsub("\\.","",cols)
colnames(master) <- cols
#clean up project names - still some to do!
# Repair mojibake from the source export: presumably accented characters
# mis-encoded as single bytes / multi-byte sequences -- TODO confirm the
# mappings against the raw FPMIS files.
master$ProjectTitle <- gsub("\\\x82","e",master$ProjectTitle)
master$ProjectTitle <- gsub("\\?","'",master$ProjectTitle)
master$ProjectTitle <- gsub("\xfc\xbe\x8c\xa6\x84\xbc","o", master$ProjectTitle)
master$ProjectTitle <- gsub("\xfc\xbe\x8c\xb6\x84\xbc","u",master$ProjectTitle)
#clean country
master$CountryName <- gsub("\\\x93","o",master$CountryName)
master$CountryName <- gsub("\\\x82","e",master$CountryName)
#-Add Team column
#Add team - DOUBLE CHECK TEAMS!
source("R/addTeam.R")
master <- addTeam(master)
#-Filter for country projects, clean name
x <- filter(master, GeographicalCoverage == "Country")
# Standardize country spellings by round-tripping through countrycode;
# anything it cannot match becomes NA and is dropped just below.
x$CountryName <- countrycode(x$CountryName,
                             origin = "country.name",
                             destination = "country.name")
x <- filter(x, !is.na(CountryName))
x <- select(x, CountryName, team, ActualNTE, ProjectSymbol, TotalBudgetFPMIS)
#-id duplicated rows, concatenate w/ letter, put back in df
x$dup <- duplicated(paste0(x$CountryName, x$team))
xx <- filter(x, dup == FALSE)
y <- filter(x, dup == TRUE)
# NOTE(review): sample(letters, nrow(y)) draws without replacement, so this
# errors when more than 26 duplicated rows exist, and the random suffixes
# are not reproducible without set.seed() -- confirm both are acceptable.
y$CountryName <- paste0(y$CountryName,"_",sample(letters,nrow(y)))
x <- rbind(xx,y)
if(sum(duplicated(paste0(x$CountryName,x$team)))) stop("duplicated country names")
#-create df for each team
teams <- unique(x$team)
team.list <- list()
# One data frame per team, value columns prefixed with the team name so the
# frames can later be merged side by side on Country.
for(i in 1:length(teams)){
  temp <- select(x,-dup)
  team.list[[i]] <- filter(temp, team == teams[i])
  colnames(team.list[[i]]) <- c("Country",
                                "team",
                                paste0(teams[i],"_","NTE"),
                                paste0(teams[i],"_","Symbol"),
                                paste0(teams[i],"_","Budget"))#,
                                #paste0(teams[i],"_","dup"))
  team.list[[i]] <- select(team.list[[i]],
                           -team)
}
#- merge dfs from list into 1 data frame
master <- team.list[[1]]
for(i in 2:length(team.list)){
master <- merge(master, team.list[[i]],
by = "Country",
all = TRUE)
}
#-Clean up Project Symbols and create 1 column
symbol.cols <- grep("symbol",
colnames(master),
ignore.case = TRUE)
test<- master[, symbol.cols]
|
a428890b53cde3127d2418c2031ae91c9bc82308 | 2c1de14b2e972f654709b85fbe1aee4d36666758 | /hw1/problem1.r | dbcf72975f4fb72f4c30bd6836ddd0c9ec27f251 | [] | no_license | antoncoding/Statiscal-Learning | d6b737d6f400debaded197d84262e8bedc779877 | fc9fdf18bbde8585f8493f1db6e45e991a0bd0b5 | refs/heads/master | 2021-04-03T02:56:24.907731 | 2018-06-11T15:59:55 | 2018-06-11T15:59:55 | 124,864,925 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,089 | r | problem1.r | options(scipen=10)
# Training data: the response is in column 1, predictors in the remaining
# columns (gpredict() below treats column 1 as the target).
df1_train = read.csv('df1_train.csv')
# Test predictors -- gpredict() checks that this column count matches the
# training predictors.
df1_test1 = read.csv('df1_test1.csv')
# True test responses, used at the bottom of the script to compute the MAE.
df1_test1y = read.csv('df1_test1y.csv')
gpredict <- function(dftrain, dftest) {
  # Gaussian conditional-mean predictor.
  #
  # Column 1 of dftrain is the response (a); the remaining columns are the
  # predictors (b).  Estimates the joint-normal moments and, when test data
  # is supplied, returns the conditional mean
  #   E[a | b] = mu_a + Sigma_ab %*% Sigma_bb^{-1} %*% (b - mu_b)
  #
  # Returns a list (mua, mub, s_ab, s_bb, predict); `predict` is NULL when
  # dftest is omitted.  Returns plain NULL when dftest does not have the
  # same number of columns as the training predictors.
  y <- dftrain[, 1]
  X <- data.matrix(dftrain[, 2:ncol(dftrain)])

  mu_y  <- mean(y)
  mu_X  <- colMeans(X)
  cov_yX <- cov(y, X)           # 1 x p cross-covariance
  cov_XX <- cov(X)              # p x p predictor covariance
  cov_XX_inv <- solve(cov_XX)

  predictions <- NULL
  if (!missing(dftest)) {
    if (ncol(dftest) != ncol(X)) {
      # Test predictors do not line up with the training predictors.
      return(NULL)
    }
    centred <- t(sweep(dftest, 2, mu_X))  # p x n_test, one (b - mu_b) per column
    predictions <- as.vector(cov_yX %*% (cov_XX_inv %*% centred) + mu_y)
  }

  list(mua = mu_y, mub = mu_X, s_ab = cov_yX, s_bb = cov_XX, predict = predictions)
}
# Fit on the first 200 training rows and predict the held-out test inputs.
out1 = gpredict(df1_train[1:200,],df1_test1)
# Inspect the estimated moments (first few entries only).
print(out1$mua)
print(out1$mub[1:5])
print(out1$s_ab[1:5])
print(out1$s_bb[1:5,1:5])
# Mean absolute error of the conditional-mean predictions against the true
# test responses (first column of df1_test1y).
mae1a = mean(abs(df1_test1y[,1] - out1$predict))
cat("MAE1a=", mae1a, "\n")
|
67dcf04a5205d62906cab17c82d6609085be13b7 | db9f4506ddd16a3ef9af346a892c85a586aef4dc | /project1DA.R | 3df50c83913752a990839caea82c263b3e7fbcc6 | [] | no_license | pbshaw/ExData_Plotting1 | 568e4a98b4e7914d36ee0a5aba7adbeb28da450a | 3d52e125cb3cbd2ff82d60dd7999d3e6a816bbd1 | refs/heads/master | 2021-01-16T19:15:17.902622 | 2015-03-07T23:32:34 | 2015-03-07T23:32:34 | 31,753,519 | 0 | 0 | null | 2015-03-06T05:06:02 | 2015-03-06T05:06:01 | null | UTF-8 | R | false | false | 2,646 | r | project1DA.R | ## Common functuins used to read data files for project 1
## plotting assignment.
getDataFrame <- function()
{
## Returns the power-consumption data frame in the format required by the
## plotting functions.  Uses the cached copy ("powerConsumption.Rda") if
## present; otherwise builds the frame from the raw csv and caches it.
print(paste("Getting Data Frame ", date()))
# NOTE(review): hard-coded machine-specific path; the working directory is
# also changed without being restored afterwards.
setwd("E:/Documents/education/RData/DataAnalysis/exdata-data-household_power_consumption")
if (file.exists("powerConsumption.Rda"))
{
print(paste("getting data frame from cache ", date()))
# load() restores the object saved below under the name "powerConsumption"
# into this function's environment, so the final expression can return it.
load("powerConsumption.Rda")
}
else
{
powerConsumption <- buildDataFrame()
save(powerConsumption,file="powerConsumption.Rda")
# NOTE(review): dateBuilt is assigned but never used anywhere visible.
dateBuilt <- date()
}
print(paste("Getting Data Frame Complete", date()))
powerConsumption
}
buildDataFrame <- function()
{
## Builds the power-consumption data frame in the format required by the
## plotting functions: reads the date-filtered raw rows, recodes missing
## values, and converts column types.
print(paste("Building Data Frame ", date()))
# Force the pure-R engine for gsubfn (used internally by sqldf).
options(gsubfn.engine = "R")
library(sqldf)
library(plyr)
library(datasets)
powerConsumption <- readDataSet()
if (nrow(powerConsumption) > 0)
{
# Recode "?" entries as NA ("?" is presumably the raw file's
# missing-value marker -- consistent with the as.numeric conversions below).
powerConsumption[powerConsumption == "?" ] = NA
}
print(paste("Building Data Frame NA replacement complete ", date()))
# Combine Date + Time into a POSIXct DateTime and convert the measurement
# columns from character to numeric.  The trailing [,3:10] drops the raw
# Date and Time text columns (positions 1:2), keeping the seven measurement
# columns plus the newly appended DateTime.
powerConsumption <- mutate(powerConsumption,DateTime=as.POSIXct(paste(Date,Time, sep=" "),format="%d/%m/%Y %H:%M:%S"),
Global_active_power=as.numeric(Global_active_power),
Global_reactive_power=as.numeric(Global_reactive_power),Voltage=as.numeric(Voltage),Global_intensity=as.numeric(Global_intensity),
Sub_metering_1=as.numeric(Sub_metering_1),Sub_metering_2=as.numeric(Sub_metering_2),Sub_metering_3=as.numeric(Sub_metering_3))[,3:10]
print(paste("Building Data Frame data type conversion complete ", date()))
powerConsumption
}
readDataSet <- function()
{
## Reads the raw household power csv, keeping only the rows for
## 1 and 2 February 2007 (dates are d/m/Y strings in the raw file).
print(paste("Reading DataSet ", date()))
options(gsubfn.engine = "R")
library(sqldf)
library(plyr)
library(datasets)
# NOTE(review): hard-coded machine-specific path; the working directory is
# changed without being restored.
setwd("E:/Documents/education/RData/DataAnalysis/exdata-data-household_power_consumption")
#targetFile = "./household_power_consumption.sample.txt"
targetFile = "./household_power_consumption.txt"
# Filter at read time via sqldf so the full file never has to be loaded.
# NOTE(review): '"character"[9]' indexes a length-1 vector at position 9 and
# therefore evaluates to NA -- this was almost certainly meant to be
# rep("character", 9) so all nine columns are read as character.  Confirm
# before changing; the conversions in buildDataFrame() currently compensate.
powerConsumption <- read.csv2.sql(targetFile,
colClasses= c("character"[9]),
sql = "select * from file where Date in ('1/2/2007', '2/2/2007')"
)
# Close any database connections sqldf may have left open.
closeAllConnections()
print(paste("Done Reading DataSet ", date()))
powerConsumption
}
|
254481146ab0fe8a041ccc4ebda0ae0a7fd69a39 | bee9610b99b6e045b078a1a2503d60dba2c56e49 | /tests/testthat/test_hello.R | f8be42fadd5df630429ac3d1f8dd9007f19522f7 | [] | no_license | HLeviel/hello | 79eff30b1e376ec81375689f421136e42028e2c8 | ec523b01cabbed75f8119d98f9c1dab0e7b02ae4 | refs/heads/master | 2021-01-22T19:55:04.991115 | 2017-03-17T21:13:10 | 2017-03-17T21:13:10 | 85,263,091 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 181 | r | test_hello.R | library(testthat)
# Works with one name
expect_that(hello("Hubert"), equals("Hello Hubert"))
# Works with two names
# NOTE(review): the expected value below is identical to the single-name case
# above and looks copy-pasted.  If hello() is vectorised this should
# presumably be c("Hello Hubert", "Hello Toto") -- confirm against hello()'s
# implementation.  (expect_that()/equals() are also deprecated in modern
# testthat; expect_equal() is the current idiom.)
expect_that(hello(c("Hubert", "Toto")), equals("Hello Hubert"))
|
2efd3255d748bc5af350acdf59a5330abdfee570 | 43f6d2a89e611f49d1bff870e6381fa182184ce2 | /man/binary_range_to_letter_code_list.Rd | eb1ee88b6370fcea2a48827175384e4ba183a552 | [] | no_license | pedroreys/BioGeoBEARS | 23bab5299c44b4cfa2ab0e9dbe0de4ecf2196f69 | 9aef25ebf57b854e6f02d5a3a2ca420e31833123 | refs/heads/master | 2021-01-17T17:12:08.479890 | 2013-08-04T02:35:28 | 2013-08-04T02:35:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,413 | rd | binary_range_to_letter_code_list.Rd | \name{binary_range_to_letter_code_list}
\alias{binary_range_to_letter_code_list}
\title{Convert binary presence/absence codes (1/0) to a list of text area names}
\usage{
binary_range_to_letter_code_list(tipranges_row,
areanames)
}
\arguments{
\item{tipranges_row}{row of a \code{tipranges} object.}
\item{areanames}{a list of the names of the areas}
}
\value{
\code{list_of_areas_in_the_state} A list of the name(s)
of the areas corresponding to the presence/absence coding
in the row
}
\description{
Given a row of a \code{tipranges} object, this function converts
it to a list of the corresponding area name(s). E.g., if the areas
were \code{(A,B,C,D)} and the tipranges row was \code{(1
0 1 0)}, the output state name would be ("A","C").
}
\note{
Go BEARS!
}
\examples{
testval=1
tipranges_row = c(1, 0, 1, 0)
areanames = c("A", "B", "C", "D")
list_of_areas_in_the_state = binary_range_to_letter_code_list(tipranges_row,
areanames)
list_of_areas_in_the_state
}
\author{
Nicholas J. Matzke \email{matzke@berkeley.edu}
}
\references{
\url{http://phylo.wikidot.com/matzke-2013-international-biogeography-society-poster}
Matzke_2012_IBS
}
\seealso{
\code{\link{binary_ranges_to_letter_codes}},
\code{\link{letter_string_to_binary}},
\code{\link{letter_strings_to_tipranges_df}},
\code{\link{binary_range_to_letter_code_txt}},
\code{\link{tipranges_to_tip_condlikes_of_data_on_each_state}}
}
|
8b50b78da0ac82ce237242f720c0cca324573399 | de7ab3b174b92a50645ab8c238775c59ace0e168 | /man/xxirt.Rd | 48d31c09d9cb6616a9c520c29c3a412c1647c9df | [] | no_license | SanVerhavert/sirt | 27f4ccc0fe55f56e0cd9b91598c22b72d48dc128 | c2ec5d6d38a53f4d081bb3011163657a9aff233a | refs/heads/master | 2021-06-29T03:23:51.834988 | 2017-09-20T14:32:48 | 2017-09-20T14:32:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,096 | rd | xxirt.Rd | %% File Name: xxirt.Rd
%% File Version: 0.41
%% File Last Change: 2017-03-01 19:30:55
\name{xxirt}
\alias{xxirt}
\alias{summary.xxirt}
\alias{print.xxirt}
\alias{logLik.xxirt}
\alias{anova.xxirt}
\alias{coef.xxirt}
\alias{vcov.xxirt}
\alias{confint.xxirt}
\alias{IRT.se.xxirt}
\alias{IRT.expectedCounts.xxirt}
\alias{IRT.irfprob.xxirt}
\alias{IRT.likelihood.xxirt}
\alias{IRT.posterior.xxirt}
\alias{IRT.modelfit.xxirt}
\alias{summary.IRT.modelfit.xxirt}
\alias{IRT.factor.scores.xxirt}
\alias{xxirt_hessian}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
User Defined Item Response Model
}
\description{
Estimates a user defined item response model. Both, item response functions
and latent trait distributions can be specified by the user (see Details).
}
\usage{
xxirt(dat, Theta = NULL, itemtype = NULL, customItems = NULL, partable = NULL,
customTheta = NULL, group = NULL, weights = NULL, globconv = 1e-06, conv = 1e-04,
maxit = 200, mstep_iter = 4, mstep_reltol = 1e-06, h = 1E-4 , use_grad = TRUE ,
verbose = TRUE)
\method{summary}{xxirt}(object, digits = 3, file = NULL, ...)
\method{print}{xxirt}(x, ...)
\method{anova}{xxirt}(object,...)
\method{coef}{xxirt}(object,...)
\method{logLik}{xxirt}(object,...)
\method{vcov}{xxirt}(object,...)
\method{confint}{xxirt}(object, parm, level = .95, ... )
\method{IRT.expectedCounts}{xxirt}(object,...)
\method{IRT.factor.scores}{xxirt}(object, type = "EAP", ...)
\method{IRT.irfprob}{xxirt}(object,...)
\method{IRT.likelihood}{xxirt}(object,...)
\method{IRT.posterior}{xxirt}(object,...)
\method{IRT.modelfit}{xxirt}(object,...)
\method{summary}{IRT.modelfit.xxirt}(object,...)
\method{IRT.se}{xxirt}(object,...)
# computes Hessian matrix
xxirt_hessian(object)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dat}{
Data frame with item responses
}
\item{Theta}{
Matrix with \eqn{\bold{\theta}} grid vector of latent trait
}
\item{itemtype}{
Vector of item types
}
\item{customItems}{
List containing types of item response functions created by
\code{\link{xxirt_createDiscItem}}.
}
\item{partable}{
Item parameter table which is initially created by
\code{\link{xxirt_createParTable}} and which can be modified by
\code{\link{xxirt_modifyParTable}}.
}
\item{customTheta}{
User defined \eqn{\bold{\theta}} distribution created by
\code{\link{xxirt_createThetaDistribution}}.
}
\item{group}{
Optional vector of group indicators
}
\item{weights}{
Optional vector of person weights
}
\item{globconv}{
Convergence criterion for relative change in deviance
}
\item{conv}{
Convergence criterion for absolute change in parameters
}
\item{maxit}{
Maximum number of iterations
}
\item{mstep_iter}{
Maximum number of iterations in M-step
}
\item{mstep_reltol}{
Convergence criterion in M-step
}
\item{h}{Numerical differentiation parameter}
\item{use_grad}{Logical indicating whether the gradient should be supplied
to \code{\link[stats:optim]{stats::optim}}}
\item{verbose}{
Logical indicating whether iteration progress should be displayed
}
\item{object}{Object of class \code{xxirt}}
\item{digits}{Number of digits to be rounded}
\item{file}{Optional file name to which \code{summary} output is written}
\item{parm}{Optional vector of parameters}
\item{level}{Confidence level}
\item{x}{Object of class \code{xxirt}}
\item{type}{Type of person parameter estimate. Currently, only
\code{EAP} is implemented.}
\item{\dots}{
Further arguments to be passed
}
}
\details{
Item response functions can be specified as functions of unknown parameters
\eqn{\bold{\delta}_i} such that
\eqn{P(X_{i}=x | \bold{\theta}) = f_i( x | \bold{\theta} ; \bold{\delta}_i )}
The item response model is estimated under the assumption of
local stochastic independence of items. Equality constraints of
item parameters \eqn{\bold{\delta}_i} among items are allowed.
Probability distribution \eqn{P(\bold{\theta})} are specified as functions
of an unknown parameter vector \eqn{\bold{\gamma}}.
}
\value{
List with following entries
\item{partable}{Item parameter table}
\item{par_items}{Vector with estimated item parameters}
\item{par_items_summary}{Data frame with item parameters}
\item{par_items_bounds}{Data frame with summary on bounds of estimated
item parameters}
\item{par_Theta}{Vector with estimated parameters of theta distribution}
\item{Theta}{Matrix with \eqn{\bold{\theta}} grid}
\item{probs_items}{Item response functions}
\item{probs_Theta}{Theta distribution}
\item{deviance}{Deviance}
\item{loglik}{Log likelihood value}
\item{ic}{Information criteria}
\item{item_list}{List with item functions}
\item{customItems}{Used customized item response functions}
\item{customTheta}{Used customized theta distribution}
\item{p.xi.aj}{Individual likelihood}
\item{p.aj.xi}{Individual posterior}
\item{n.ik}{Array of expected counts}
\item{EAP}{EAP person parameter estimates}
\item{dat}{Used dataset with item responses}
\item{dat_resp}{Dataset with response indicators}
\item{weights}{Vector of person weights}
\item{G}{Number of groups}
\item{group}{Integer vector of group indicators}
\item{group_orig}{Vector of original group_identifiers}
\item{ncat}{Number of categories per item}
\item{converged}{Logical whether model has converged}
\item{iter}{Number of iterations needed}
}
%\references{
%% ~put references to the literature/web site here ~
%}
\author{
Alexander Robitzsch
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
See the \code{\link[mirt:createItem]{mirt::createItem}} and
\code{\link[mirt:mirt]{mirt::mirt}} functions in the \pkg{mirt}
package for similar functionality.
}
\examples{
\dontrun{
#############################################################################
## EXAMPLE 1: Unidimensional item response functions
#############################################################################
data(data.read)
dat <- data.read
#------ Definition of item response functions
#*** IRF 2PL
P_2PL <- function( par, Theta , ncat){
a <- par[1]
b <- par[2]
TP <- nrow(Theta)
P <- matrix( NA , nrow=TP , ncol=ncat)
P[,1] <- 1
for (cc in 2:ncat){
P[,cc] <- exp( (cc-1) * a * Theta[,1] - b )
}
P <- P / rowSums(P)
return(P)
}
#*** IRF 1PL
P_1PL <- function( par, Theta , ncat){
b <- par[1]
TP <- nrow(Theta)
P <- matrix( NA , nrow=TP , ncol=ncat)
P[,1] <- 1
for (cc in 2:ncat){
P[,cc] <- exp( (cc-1) * Theta[,1] - b )
}
P <- P / rowSums(P)
return(P)
}
#** created item classes of 1PL and 2PL models
par <- c( "a"= 1 , "b" = 0 )
# define some slightly informative prior of 2PL
item_2PL <- sirt::xxirt_createDiscItem( name = "2PL" , par = par , est = c(TRUE,TRUE),
P = P_2PL, prior = c(a = "dlnorm"), prior_par1 = c( a = 0 ),
prior_par2 = c(a=5) )
item_1PL <- sirt::xxirt_createDiscItem( name = "1PL" , par = par[2] , est = c(TRUE),
P = P_1PL )
customItems <- list( item_1PL , item_2PL )
#---- definition theta distribution
#** theta grid
Theta <- matrix( seq(-6,6,length=21) , ncol=1 )
#** theta distribution
P_Theta1 <- function( par , Theta , G){
mu <- par[1]
sigma <- max( par[2] , .01 )
TP <- nrow(Theta)
pi_Theta <- matrix( 0 , nrow=TP , ncol=G)
pi1 <- dnorm( Theta[,1] , mean = mu , sd = sigma )
pi1 <- pi1 / sum(pi1)
pi_Theta[,1] <- pi1
return(pi_Theta)
}
#** create distribution class
par_Theta <- c( "mu"=0, "sigma" = 1 )
customTheta <- sirt::xxirt_createThetaDistribution( par=par_Theta , est=c(FALSE,TRUE),
P=P_Theta1 )
#****************************************************************************
#******* Model 1: Rasch model
#-- create parameter table
itemtype <- rep( "1PL" , 12 )
partable <- sirt::xxirt_createParTable( dat , itemtype = itemtype,
customItems = customItems )
# estimate model
mod1 <- sirt::xxirt( dat = dat , Theta=Theta , partable = partable,
customItems = customItems, customTheta = customTheta)
summary(mod1)
# estimate Rasch model by providing starting values
partable1 <- sirt::xxirt_modifyParTable( partable , parname = "b",
value = - stats::qlogis( colMeans(dat) ) )
# estimate model again
mod1b <- sirt::xxirt( dat = dat , Theta=Theta , partable = partable1,
customItems = customItems, customTheta = customTheta )
summary(mod1b)
# extract coefficients, covariance matrix and standard errors
coef(mod1b)
vcov(mod1b)
IRT.se(mod1b)
#****************************************************************************
#******* Model 2: 2PL Model with three groups of item discriminations
#-- create parameter table
itemtype <- rep( "2PL" , 12 )
partable <- sirt::xxirt_createParTable( dat, itemtype=itemtype, customItems=customItems)
# modify parameter table: set constraints for item groups A, B and C
partable1 <- sirt::xxirt_modifyParTable(partable, item=paste0("A",1:4),
parname="a", parindex=111)
partable1 <- sirt::xxirt_modifyParTable(partable1, item=paste0("B",1:4),
parname="a", parindex=112)
partable1 <- sirt::xxirt_modifyParTable(partable1, item=paste0("C",1:4),
parname="a", parindex=113)
# delete prior distributions
partable1 <- sirt::xxirt_modifyParTable(partable1, parname="a", prior=NA)
#-- fix sigma to 1
customTheta1 <- customTheta
customTheta1$est <- c("mu"=FALSE,"sigma"=FALSE )
# estimate model
mod2 <- sirt::xxirt( dat = dat, Theta=Theta, partable = partable1,
customItems=customItems, customTheta = customTheta1 )
summary(mod2)
#****************************************************************************
#******* Model 3: Cloglog link function
#*** IRF cloglog
P_1N <- function( par, Theta , ncat){
b <- par
TP <- nrow(Theta)
P <- matrix( NA , nrow=TP , ncol=ncat)
P[,2] <- 1 - exp( - exp( Theta - b ) )
P[,1] <- 1 - P[,2]
return(P)
}
par <- c("b"=0)
item_1N <- sirt::xxirt_createDiscItem( name = "1N" , par = par , est = c(TRUE),
P = P_1N )
customItems <- list( item_1N )
itemtype <- rep( "1N" , 12 )
partable <- sirt::xxirt_createParTable( dat , itemtype = itemtype,
           customItems = customItems )
partable <- sirt::xxirt_modifyParTable( partable=partable , parname = "b" ,
        value = - stats::qnorm( colMeans(dat) ) )
#*** estimate model
mod3 <- sirt::xxirt( dat=dat, Theta=Theta, partable=partable, customItems=customItems,
customTheta= customTheta )
summary(mod3)
IRT.compareModels(mod1,mod3)
#****************************************************************************
#******* Model 4: Latent class model
K <- 3 # number of classes
Theta <- diag(K)
#*** Theta distribution
P_Theta1 <- function( par , Theta , G ){
logitprobs <- par[1:(K-1)]
l1 <- exp( c( logitprobs , 0 ) )
probs <- matrix( l1/sum(l1) , ncol=1)
return(probs)
}
par_Theta <- stats::qlogis( rep( 1/K , K-1 ) )
names(par_Theta) <- paste0("pi",1:(K-1) )
customTheta <- sirt::xxirt_createThetaDistribution( par=par_Theta,
est= rep(TRUE,K-1) , P=P_Theta1)
#*** IRF latent class
P_lc <- function( par, Theta , ncat){
b <- par
TP <- nrow(Theta)
P <- matrix( NA , nrow=TP , ncol=ncat)
P[,1] <- 1
for (cc in 2:ncat){
P[,cc] <- exp( Theta \%*\% b )
}
P <- P / rowSums(P)
return(P)
}
par <- seq( -1.5 , 1.5 , length=K )
names(par) <- paste0("b",1:K)
item_lc <- sirt::xxirt_createDiscItem( name = "LC", par=par,
est=rep(TRUE,K) , P=P_lc )
customItems <- list( item_lc )
# create parameter table
itemtype <- rep( "LC" , 12 )
partable <- sirt::xxirt_createParTable( dat, itemtype=itemtype, customItems=customItems)
partable
#*** estimate model
mod4 <- sirt::xxirt( dat = dat, Theta=Theta, partable = partable, customItems = customItems,
customTheta= customTheta)
summary(mod4)
# class probabilities
mod4$probs_Theta
# item response functions
imod4 <- IRT.irfprob( mod4 )
round( imod4[,2,] , 3 )
#****************************************************************************
#******* Model 5: Ordered latent class model
K <- 3 # number of classes
Theta <- diag(K)
Theta <- apply( Theta , 1 , cumsum )
#*** Theta distribution
P_Theta1 <- function( par , Theta , G ){
logitprobs <- par[1:(K-1)]
l1 <- exp( c( logitprobs , 0 ) )
probs <- matrix( l1/sum(l1) , ncol=1)
return(probs)
}
par_Theta <- stats::qlogis( rep( 1/K , K-1 ) )
names(par_Theta) <- paste0("pi",1:(K-1) )
customTheta <- sirt::xxirt_createThetaDistribution( par=par_Theta ,
est= rep(TRUE,K-1) , P=P_Theta1 )
#*** IRF ordered latent class
P_olc <- function( par, Theta , ncat){
b <- par
TP <- nrow(Theta)
P <- matrix( NA , nrow=TP , ncol=ncat)
P[,1] <- 1
for (cc in 2:ncat){
P[,cc] <- exp( Theta \%*\% b )
}
P <- P / rowSums(P)
return(P)
}
par <- c( -1 , rep( .5 , , length=K-1 ) )
names(par) <- paste0("b",1:K)
item_olc <- sirt::xxirt_createDiscItem( name = "OLC" , par = par , est = rep(TRUE,K) ,
P = P_olc , lower=c( -Inf , 0 , 0 ) )
customItems <- list( item_olc )
itemtype <- rep( "OLC" , 12 )
partable <- sirt::xxirt_createParTable( dat, itemtype = itemtype, customItems = customItems)
partable
#*** estimate model
mod5 <- sirt::xxirt( dat=dat, Theta=Theta, partable = partable, customItems = customItems,
customTheta= customTheta )
summary(mod5)
# estimated item response functions
imod5 <- IRT.irfprob( mod5 )
round( imod5[,2,] , 3 )
#############################################################################
## EXAMPLE 2: Multiple group models with xxirt
#############################################################################
data(data.math)
dat <- data.math$data
items <- grep( "M[A-Z]" , colnames(dat) , value=TRUE )
I <- length(items)
Theta <- matrix( seq(-8,8,len=31) , ncol=1 )
#****************************************************************************
#******* Model 1: Rasch model, single group
#*** Theta distribution
P_Theta1 <- function( par , Theta , G ){
mu <- par[1]
sigma <- max( par[2] , .01 )
p1 <- stats::dnorm( Theta[,1] , mean = mu , sd = sigma)
p1 <- p1 / sum(p1)
probs <- matrix( p1 , ncol=1)
return(probs)
}
par_Theta <- c(0,1)
names(par_Theta) <- c("mu","sigma")
customTheta <- sirt::xxirt_createThetaDistribution( par=par_Theta ,
est= c(FALSE,TRUE) , P=P_Theta1 )
customTheta
#*** IRF 1PL logit
P_1PL <- function( par, Theta , ncat){
b <- par
TP <- nrow(Theta)
P <- matrix( NA , nrow=TP , ncol=ncat)
P[,2] <- plogis( Theta - b )
P[,1] <- 1 - P[,2]
return(P)
}
par <- c("b"=0)
item_1PL <- sirt::xxirt_createDiscItem( name = "1PL", par = par, est = c(TRUE), P = P_1PL)
customItems <- list( item_1PL )
itemtype <- rep( "1PL" , I )
partable <- sirt::xxirt_createParTable( dat[,items] , itemtype = itemtype ,
customItems = customItems )
partable <- sirt::xxirt_modifyParTable( partable=partable , parname = "b" ,
value = - stats::qlogis( colMeans(dat[,items] )) )
#*** estimate model
mod1 <- sirt::xxirt( dat = dat[,items] , Theta=Theta , partable = partable,
customItems = customItems, customTheta= customTheta )
summary(mod1)
#****************************************************************************
#******* Model 2: Rasch model, multiple groups
#*** Theta distribution
P_Theta2 <- function( par , Theta , G ){
mu1 <- par[1]
mu2 <- par[2]
sigma1 <- max( par[3] , .01 )
sigma2 <- max( par[4] , .01 )
TP <- nrow(Theta)
probs <- matrix( NA , nrow=TP , ncol=G)
p1 <- stats::dnorm( Theta[,1] , mean = mu1 , sd = sigma1)
probs[,1] <- p1 / sum(p1)
p1 <- stats::dnorm( Theta[,1] , mean = mu2 , sd = sigma2)
probs[,2] <- p1 / sum(p1)
return(probs)
}
par_Theta <- c(0,0,1,1)
names(par_Theta) <- c("mu1","mu2","sigma1","sigma2")
customTheta2 <- sirt::xxirt_createThetaDistribution( par=par_Theta ,
est= c(FALSE,TRUE,TRUE,TRUE) , P=P_Theta2 )
customTheta2
#*** estimate model
mod2 <- sirt::xxirt( dat=dat[,items], group=dat$female, Theta=Theta, partable=partable,
customItems = customItems , customTheta= customTheta2 , maxit=40)
summary(mod2)
IRT.compareModels(mod1, mod2)
#*** compare results with TAM package
library(TAM)
mod2b <- TAM::tam.mml( resp=dat[,items] , group = dat$female )
summary(mod2b)
IRT.compareModels(mod1, mod2, mod2b)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ ~kwd1 }
%\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
8d4235422b4b63d9a503acc8f482fb8959e0c99a | 78b6410be67a167fde91abb6a039847a45ce46cc | /man/perc01.Rd | 30d883085ec08bcb6469ec1c889f3c796712fbe8 | [] | no_license | reyesem/IntroAnalysis | fea3283abc4bd995339acfc7e74f2193812317e2 | 54cf3930879303fb128faf81bd1710b385300d6c | refs/heads/master | 2023-07-12T08:45:27.546965 | 2023-06-29T22:07:02 | 2023-06-29T22:07:02 | 123,822,392 | 0 | 0 | null | 2022-08-15T14:34:13 | 2018-03-04T19:42:24 | HTML | UTF-8 | R | false | true | 1,846 | rd | perc01.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variable_summaries.R
\name{perc01}
\alias{perc01}
\alias{perc05}
\alias{perc10}
\alias{perc20}
\alias{perc80}
\alias{perc85}
\alias{perc90}
\alias{perc95}
\alias{perc99}
\alias{Q1}
\alias{Q3}
\title{Compute specific percentiles of a numeric variable.}
\usage{
perc01(x, na.rm = FALSE, type = 7)
perc05(x, na.rm = FALSE, type = 7)
perc10(x, na.rm = FALSE, type = 7)
perc20(x, na.rm = FALSE, type = 7)
perc80(x, na.rm = FALSE, type = 7)
perc85(x, na.rm = FALSE, type = 7)
perc90(x, na.rm = FALSE, type = 7)
perc95(x, na.rm = FALSE, type = 7)
perc99(x, na.rm = FALSE, type = 7)
Q1(x, na.rm = FALSE, type = 7)
Q3(x, na.rm = FALSE, type = 7)
}
\arguments{
\item{x}{numeric vector whose sample quantiles are wanted. \code{NA} and
\code{NaN} values are not allowed in numeric vectors unless \code{na.rm} is
\code{TRUE}.}
\item{na.rm}{logical; if \code{TRUE}, any \code{NA} or \code{NaN}'s are
removed from \code{x} before the quantiles are computed.}
\item{type}{an integer between 1 and 9 selecting one of the nine quantile
algorithms detailed in \code{\link[stats]{quantile}}.}
}
\description{
These are wrappers for the more common \code{\link[stats]{quantile}} function
which computes the most commonly used percentiles. Functions beginning with
\code{perc} compute the corresponding percentile; functions beginning with
\code{Q} compute the corresponding quartile, and \code{IQR} computes the
interquartile range.
}
\section{Functions}{
\itemize{
\item \code{perc05()}:
\item \code{perc10()}:
\item \code{perc20()}:
\item \code{perc80()}:
\item \code{perc85()}:
\item \code{perc90()}:
\item \code{perc95()}:
\item \code{perc99()}:
\item \code{Q1()}:
\item \code{Q3()}:
}}
\examples{
summarize_variable(mpg ~ 1, data = mtcars, perc01)
}
|
3552b0676abb33087a0c1e9410ea4b81129e45ff | e3080268f304f7e576ef3850ba72c63ca01e1282 | /man/computeLinks.Rd | effddf717c6b33ebccb07ffb87bdc73da8e0ba9c | [] | no_license | Displayr/flipPlots | 4e176be43a2765b31986c26be930cf4eb2962dad | 4dd20ed8e87a0dfa9bf69d8a3f65de2f6a840018 | refs/heads/master | 2023-08-19T03:32:58.337048 | 2023-08-17T00:05:34 | 2023-08-17T00:05:34 | 45,726,921 | 5 | 4 | null | 2023-08-17T00:05:35 | 2015-11-07T07:26:00 | R | UTF-8 | R | false | true | 604 | rd | computeLinks.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sankeydiagram.R
\name{computeLinks}
\alias{computeLinks}
\title{computeLinks}
\usage{
computeLinks(data, weights, show.percentages = FALSE)
}
\arguments{
\item{data}{A \code{\link{data.frame}} or \code{\link{list}} of variables.}
\item{weights}{A numeric vector with length equal to the number of rows in
\code{data}. This is used to adjust the width of the links.}
\item{show.percentages}{Whether to show percentages or counts}
}
\description{
Computes the links between the nodes, so that can be expressed as a network.
}
|
e38cdcf5abfc54af369d523533b615f90cae9dd0 | b1ff01ac33fb04562cf9350b0839bb01cbf7d7a4 | /man/mpspline_fit1.Rd | acf4874535b5f9bb30f5cc6331ac37d62b5a7af0 | [] | no_license | obrl-soil/mpspline2 | 330805adf86b0277bf7b1ab9629b09aa172105dd | 13268e2012446a6c8c0ac3eb5dd49fbe5c055de7 | refs/heads/master | 2023-01-21T14:28:29.904516 | 2023-01-18T07:00:15 | 2023-01-18T07:00:15 | 167,176,801 | 3 | 2 | null | 2022-03-16T23:43:44 | 2019-01-23T12:06:23 | R | UTF-8 | R | false | true | 1,296 | rd | mpspline_fit1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mpspline.R
\name{mpspline_fit1}
\alias{mpspline_fit1}
\title{Fit spline parameters}
\usage{
mpspline_fit1(
s = NULL,
p = NULL,
var_name = NULL,
d = NULL,
vhigh = NULL,
vlow = NULL
)
}
\arguments{
\item{s}{data.frame; data for one site}
\item{p}{list; estimated spline parameters for one site from
\code{\link[mpspline2:mpspline_est1]{mpspline_est1}}}
\item{var_name}{length-1 character or length-1 integer denoting the column in
\code{obj} in which target data is stored. If not supplied, the fourth
column of the input object is assumed to contain the target data.}
\item{d}{sequential integer vector; denotes the output depth ranges in cm.
Defaults to \code{c(0, 5, 15, 30, 60, 100, 200)} after the GlobalSoilMap
specification, giving output predictions over intervals 0-5cm, 5-15cm,
etc.}
\item{vhigh}{numeric; constrains the maximum predicted value to a realistic
number. Defaults to 1000.}
\item{vlow}{numeric; constrains the minimum predicted value to a realistic
number. Defaults to 0.}
}
\value{
list of two vectors: fitted values at 1cm intervals and the average
of same over the requested depth ranges.
}
\description{
Fit spline parameters to data for a single site.
}
\keyword{internal}
|
982b95941646ba999db27a24bfd50fd70d9ec0bb | 90fd571404fa7ccbdbfb7412f198420385ff8c78 | /ML Denis/Course Materials/Section 25 - Iris Dataset/Iris (task).R | e139e7c054716f7d715cda96d109ac883b2ea3b5 | [] | no_license | guptasoumya26/Jupyter-Archives | 9416cebff92c9986610fe04e20bf308f9fda5eff | 8ed9d667e6578e6f2667334b39342e29efe037e4 | refs/heads/master | 2020-05-19T11:55:57.075604 | 2019-07-04T04:30:21 | 2019-07-04T04:30:21 | 185,002,478 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,043 | r | Iris (task).R | # We set the working directory to the folder of this file.
if (!is.null(parent.frame(2)$ofile)) {
  this.dir <- dirname(parent.frame(2)$ofile)
  setwd(this.dir)
}
library(data.table)
library(ggplot2)
library(caret)
# Task: What kind of plant is it?
# Predict the column `Species`.  First split the data into training and
# test sets, then compare the following models:
#
# - Logistic regression (multinom via the nnet package)
# - KNN (with different values for k, e.g. k = 1, k = 3, k = 5)
#
# Note that you have to scale the data for the KNN model.
#
# Which model has the best accuracy on the test data?
#
# Hints:
# - The column Id is not needed for the model; it should not flow into
#   the model fitting.
#   (FIX: two of the hint lines below had lost their leading "#", which made
#   this script a syntax error; they are restored as comments here.)
# - You want to predict the column `Species` here.  This column contains
#   three different values, so convert it to a factor before fitting:
#   iris$Species <- as.factor(iris$Species)
iris <- fread("iris.csv")
iris$Species <- as.factor(iris$Species)
print(iris)
|
776f4f3ffa2d8f537a04160414e5b58fc00e0e70 | 134653874008b705781b768aabfcbb1d2338d844 | /LESSON.R | c9ae7030fdc081d3a1261a8471f1264154d579ce | [
"BSD-3-Clause"
] | permissive | hlshao/politicalMoney3 | f4eefa94849080ab5e97dc88a68232c6818a2636 | a607a6254f9b27234f4a6c72f9a0ad82e80dd7bb | refs/heads/master | 2020-07-26T23:15:14.657092 | 2019-09-16T13:28:11 | 2019-09-16T13:28:11 | 208,794,447 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,583 | r | LESSON.R | #如何用資料科學研究法律與政治:誰給了誰政治獻金?給了多少?
##中央大學工作坊20190930
##邵軒磊(副教授 台師大)
###我們來練習使用「政治獻金資料集」(註一),練習使用「資料科學」方式來找出一些數字上的規律。從而使學員能夠
###1. 初步瞭解資料科學研究政治的方法。
###2.有簡易的數據與程式能力。
###3.初嚐資料科學給研究帶來的便利。
###在以下的篇幅中,學員將使用「做中學」方式回答下列問題:
###1.誰給了政治獻金,給了多少?
###2.誰收了政治獻金,得了多少?
###3.哪種特性的人會有更多政治獻金?「多」出多少?
##1. 資料讀入
library(dplyr)
# Political-donation dataset: per the notes below, each row is a single
# donation record (donor, recipient candidate, amount, date, ...).
A02=read.csv("A_all.csv")
head(A02)
###我們讀入前述「政治獻金資料集」,是2016年公布資料,你可以看到很多熟悉的政治人物,主要是立委候選人。
###注意這些資料的「單位」是什麼?每一筆是什麼?每一欄是什麼?
##2. 資料分析與觀察:table
### 2-1 先看看候選人與政黨
### 這可能是大家第一感覺會想到的。
# How many donation records and columns are there?
dim(A02)
table(A02$候選人)
### First, look at which candidates appear.  Note that each row of A02 is a
### single donation, so this table shows the NUMBER of donation records each
### candidate received, not dollar amounts.
table(A02$推薦政黨.x)
###這些捐款都捐給哪些政黨?
###2-2 這些捐款是什麼時候捐的?
####---diy---
###能看出來捐款都集中在什麼時候。
###延伸思考:為什麼集中在這些時候?
###延伸思考:其實還是沒有完全處理好,你能試試做的更好嗎?
### 2-3 然後是誰捐的?
####---diy---
###同樣的,這也是「筆數」,捐了幾筆。
### 2-4 每次大約捐了多少錢呢?
####---diy---
###這表示什麼?
###想一想,哪些是你可能的研究題目
## 3. 資料分析實做
### 3-1 誰給了政治獻金,給了多少?
### 注意:之前我們的資料是「筆數」,需要調整成「每個人捐了每筆的總和」,才能回答。
# Total amount donated by each donor (A02 is one row per donation),
# sorted largest-first.
# Fix: use TRUE rather than T (T is an ordinary variable that can be
# reassigned) and <- for assignment, per R style conventions.
aa <- A02 %>%
  group_by(捐贈者.支出對象) %>%
  summarise(Total_pMoney = sum(pMoney))
aa <- aa[order(aa$Total_pMoney, decreasing = TRUE), ]
head(aa, 10)
### 3-2. 誰收了政治獻金,得了多少?
####這個可以自己試試看?先不看答案能不能跑出來結果?
####---diy---
### 3-3. 哪些政黨收了政治獻金,得了多少?
####這個也可以自己試試看?先不看答案能不能跑出來結果?
####---diy---
## 4. 資料分析實做:哪種特性的人會有更多政治獻金?怎麼樣「多」?
###思路:對人做整合之後,再依照特性做T檢定。
###先做一個新的表。請先觀察一下。
# One row per candidate: total contributions received (Total_pMoney) and the
# number of donation records (com), carrying along the attributes used in the
# group comparisons below (gender, incumbency, birth year, party).
# Fix: TRUE instead of the reassignable shorthand T; <- for assignment.
aa <- A02 %>%
  group_by(候選人, 性別, 是否現任, 出生年次, 推薦政黨.x) %>%
  summarise(Total_pMoney = sum(pMoney), com = n())
aa <- aa[order(aa$Total_pMoney, decreasing = TRUE), ]
head(aa, 10)
### 4-1 性別不同是不是募款能力不同?
####包括「盒鬚圖」與「t檢定」
boxplot(formula = Total_pMoney ~ 性別,
data = aa)
t.test(Total_pMoney ~ 性別,
data=aa)
### 4-2 「現任立委」是不是募款能力不同?
####這個可以自己試試看?先不看答案能不能跑出來結果?
####---diy---
####這個差距有點明顯。
### 4-3 「黨籍」是不是募款能力不同?
####這個可以自己試試看?先不看答案能不能跑出來結果?
####---diy---
## 5. 資料分析實做:學員自由發揮
####---diy---
## 6. 資料分析實做:與現有研究比較與理論
####---diy---
#(註一)鏡傳媒,政治獻金專題資料,https://github.com/mirror-media/politicalcontribution。
### 建議閱讀文獻:
####邵軒磊、吳國清,法律資料分析與文字探勘:跨境毒品流動要素與結構研究,問題與研究,58卷2期(2019/03),91-114。(研究實例討論)
####黃詩淳、邵軒磊,酌定子女親權之重要因素:以決策樹方法分析相關裁判,臺大法學論叢,47卷1期( 2018/03) ,299-344(研究實例討論)
####邵軒磊,當代西方民主研究論述分析:知識系譜與文字探勘,哲學與文化 46卷2期 ( 2019/02) , 33-56。(研究實例討論)
####王鼎銘,台灣政治獻金法及參選人政治獻金資料之實證研究,選舉研究,14卷2期(2007/11) , 121-144。
####李宗榮,企業權力與民主:台灣企業集團2008年立委選舉的政治獻金分析,台灣社會學,31期 ( 2016/06) , 99-139。 |
01854562dad8a490cdd26ade626c96ee5cbe3928 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/dobson/examples/birthweight.Rd.R | 9e5c4e326ec5ed4f432c1fe084b9fe4968158854 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 191 | r | birthweight.Rd.R | library(dobson)
### Name: birthweight
### Title: Birthweight data from table 2.3
### Aliases: birthweight
### Keywords: datasets
### ** Examples
data(birthweight)
summary(birthweight)
|
4fba2a3179ee500d6c44ef54714acad01650e0a8 | 2ff886cc39d80d7794959d9445f40816c5b9d853 | /man/Mortality.Rd | 4f918095318800d908a8299b65ef88f9efc97b6b | [] | no_license | cran/RkMetrics | 46c74216cb28f8931f35c85f0695da3687e93e8e | 569dd68009612b9e466fabb5ac8323dca023326a | refs/heads/master | 2021-01-01T19:37:38.094653 | 2017-07-29T11:43:55 | 2017-07-29T11:43:55 | 98,632,773 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,038 | rd | Mortality.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Data_Doc.R
\docType{data}
\name{Mortality}
\alias{Mortality}
\title{Switzerland Mortality Data}
\format{A data frame with 6 columns corresponding to:
\describe{
\item{Year}{Corresponding year of data collected}
\item{Age}{Age of the individual}
\item{E.Male}{Male Exposed-to-Risk Population}
\item{E.Female}{Female Exposed-to-Risk Population}
\item{D.Male}{Number of male death counts, for the given year and age}
\item{D.Female}{Number of female death counts, for the given year and age}
}}
\source{
\url{http://www.mortality.org/cgi-bin/hmd/country.php?cntr=CHE&level=1}
}
\usage{
Mortality
}
\description{
Exposed to Risk and number of deaths data.
}
\details{
Mortality data for both males and females in Switzerland, from 1981
to 2014.
These data are freely available from the Human Mortality Database.
}
\references{
Glei, D. and Andreeva, M. (2016). About mortality data for switzerland.
}
\keyword{datasets}
|
5ec36272c242d63756df74107e17aae2bf1d4fbf | 929c1c7a62e838e09ff576a41e96b6799d355508 | /R/getAUC.R | 93491895e0efd3e072b60233ed3c3bb2f83ad867 | [] | no_license | pjiang1105/TimeMeter | be6ac05bcda96f1089387bdde7c6f5ad66b95707 | 970970b5b3b495f789c0e6a532d9436e856300ca | refs/heads/master | 2022-02-11T07:42:49.654558 | 2022-01-27T02:23:15 | 2022-01-27T02:23:15 | 251,404,867 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 772 | r | getAUC.R | #' Area under the curve for a linear regression
#'
#' Computes the area under a straight line (given as slope and intercept)
#' between two x-coordinates. Because the integrand is linear, the integral
#' has an exact closed form, so no numerical quadrature is needed.
#'
#' @param x_start start x-coordinate of the integration interval
#' @param x_end end x-coordinate of the integration interval
#' @param slope slope of the linear regression
#' @param intercept intercept of the linear regression
#'
#' @return A numeric scalar: the signed area under the line between
#'   \code{x_start} and \code{x_end}. The value is negative when
#'   \code{x_end < x_start}, matching the convention of \code{integrate()}.
#'
#' @examples
#' x_start=0
#' x_end=10
#' slope=1
#' intercept=1
#' getAUC(x_start, x_end, slope, intercept)
#'
#' @export
#' @author Peng Jiang \email{PJiang@morgridge.org}
getAUC <- function(x_start, x_end, slope, intercept) {
  stopifnot(is.numeric(x_start), is.numeric(x_end),
            is.numeric(slope), is.numeric(intercept))
  # Closed-form antiderivative of (slope * x + intercept) evaluated over
  # [x_start, x_end]: slope/2 * (b^2 - a^2) + intercept * (b - a).
  # Exact for a line, unlike adaptive quadrature via integrate().
  slope / 2 * (x_end^2 - x_start^2) + intercept * (x_end - x_start)
}
|
420125596cdbdb633ffb50e34c8018cda86268cc | d1c5d49a70f110df9d138296d3b45c2eadf01de6 | /arvore-randomica-credit-data.R | 7893d45f0882014611a27c4a06581c14a8d34ff9 | [] | no_license | wwenceslau/machine-learning-and-data-science-with-R | b8c1b8d5aa02ccceee9b210bcd854309ec9d8742 | 09a66dc35996f71e0f88363b95be23584d25feec | refs/heads/master | 2020-06-01T12:40:05.477850 | 2019-07-03T07:29:24 | 2019-07-03T07:29:24 | 190,781,639 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 978 | r | arvore-randomica-credit-data.R | base = read.csv("credit-data.csv")
base$clientid = NULL
# Inconsistent data: replace negative ages with the mean of the valid ages
base$age = ifelse(base$age < 0, mean(base$age[base$age > 0], na.rm = TRUE), base$age)
# Missing data: impute NA ages with the mean of the observed ages
base$age = ifelse(is.na(base$age), mean(base$age[!is.na(base$age)], na.rm = TRUE), base$age)
# Feature scaling: standardise the three numeric predictors
base[, 1:3] = scale(base[, 1:3])
# Encode the class label as a factor with explicit levels
base$default = factor(base$default, levels = c(0,1))
# Train/test split: 75% training, 25% test, stratified on the label
library("caTools")
set.seed(1)
divisao = sample.split(base$default, SplitRatio = 0.75)
base_treinamento = subset(base, divisao == TRUE)
base_teste = subset(base, divisao == FALSE)
install.packages('randomForest')
library(randomForest)
set.seed(1)
# Fit a random forest with 20 trees; column 4 is the label, columns 1-3 the predictors
classificador = randomForest(x = base_treinamento[-4], y = base_treinamento$default, ntree = 20)
print(classificador)
# Predict on the held-out test set and build the confusion matrix
previsao = predict(classificador, newdata = base_teste[-4])
print(previsao)
matriz_confusao = table(base_teste[,4], previsao)
library(caret)
confusionMatrix(matriz_confusao)
|
41975c3db1138fa34dee569cae11d124f356caba | 72e15264d5adfa09d2971a869e2a89daa670dd70 | /data_analysis/scalability_analysis.R | d045d7d285d9ff7dd24b4cda54bd596b6a763224 | [] | no_license | benjaminmposnick/Fall-Detector | 9e510d48bd857526fcac1eac905c17861e90a720 | ca6136b30f56fc684efb82ca02a2b2579a85e817 | refs/heads/master | 2022-09-03T10:37:59.245298 | 2020-05-23T13:13:46 | 2020-05-23T13:13:46 | 264,302,599 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,815 | r | scalability_analysis.R | # library(ggplot2)
# Read per-configuration response-time data (1 to 4 homes/servers).
# Each CSV is assumed to have `resp_time` and `point` columns, with a
# row labelled "mean" carrying the average -- TODO confirm against the files.
oneServer <- read.csv(file="scalability_test.csv",sep=",",head=TRUE)
twoServer <- read.csv('scalability_test_2.csv', sep = ",",head=TRUE)
threeServer <- read.csv('scalability_test_3.csv', sep = ",",head=TRUE)
fourServer <- read.csv('scalability_test_4.csv', sep = ",",head=TRUE)
# Calculate quantiles and averages
# 95th-percentile response time per configuration
one95 <- quantile(oneServer$resp_time, .95)
two95 <- quantile(twoServer$resp_time, .95)
three95 <- quantile(threeServer$resp_time, .95)
four95 <- quantile(fourServer$resp_time, .95)
Percentiles_95th <- c(one95, two95, three95, four95)
# Mean response time, taken from the precomputed "mean" row of each file
oneMean <- oneServer$resp_time[oneServer$point=="mean"]
twoMean <- twoServer$resp_time[twoServer$point=="mean"]
threeMean <- threeServer$resp_time[threeServer$point=="mean"]
fourMean <- fourServer$resp_time[fourServer$point=="mean"]
Mean_times <- c(oneMean, twoMean, threeMean, fourMean)
# Minimum response time per configuration
oneMin <- min(oneServer$resp_time)
twoMin <- min(twoServer$resp_time)
threeMin <- min(threeServer$resp_time)
fourMin <- min(fourServer$resp_time)
minTimes <- c(oneMin, twoMin, threeMin, fourMin)
# Maximum response time per configuration
oneMax <- max(oneServer$resp_time)
twoMax <- max(twoServer$resp_time)
threeMax <- max(threeServer$resp_time)
fourMax <- max(fourServer$resp_time)
maxTimes <- c(oneMax, twoMax, threeMax, fourMax)
Number_homes <- c(1,2,3,4)
# Plot all four metrics against the number of homes into a PNG file
png(file="scalability.png")
plot(Number_homes, Percentiles_95th,
     main="Scalability Analysis: # Homes vs Response Time Metrics",
     ylab="Response Times (s)",
     xlab="Number of Homes",
     xlim=c(1,4),
     ylim=c(0.043,0.09),
     # ylim=c(0.058,0.065),
     xaxt = "n",
     type="l",
     col="blue")
axis(1, at = 1:4)
lines(Number_homes, Mean_times, col="red")
lines(Number_homes, minTimes, col="green")
lines(Number_homes, maxTimes, col="orange")
legend("topleft",
       c("95th Percentile", "Mean", "Min", "Max"),
       fill=c("blue","red", "green", "orange"))
# Close the png device so the file is actually written to disk.
# (The original script never called dev.off(), leaving the device open
# and the PNG unfinalized.)
dev.off()
e3b884c6b784ebadbf96b2c6443924e845515a44 | 9f8a019d4d19bc763821361499694b606172f7f0 | /R/roxygenize.R | 4c6f87f403e4d9a68c4e024e3c5a3058714f4b83 | [] | no_license | miraisolutions/roxygen2 | 92111663d2cdb1dd5eb73ff933e10c6575078909 | 157e8913036954c6216354ef4aa6d2660b6ef95f | refs/heads/master | 2020-12-25T10:35:39.069369 | 2013-06-12T08:41:15 | 2013-06-12T08:41:15 | 10,638,946 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,969 | r | roxygenize.R | #' Process a package with the Rd, namespace and collate roclets.
#'
#' This is the workhorse function that uses roclets, the built-in document
#' transformation functions, to build all documentation for a package. See
#' the documentation for the individual roclets, \code{\link{rd_roclet}},
#' \code{\link{namespace_roclet}} and \code{\link{collate_roclet}}, for
#' documentation on how to use each one.
#'
#' @param package.dir the package's top directory
#' @param roxygen.dir where to create roxygen output; defaults to
#'   \file{package.dir}.
#' @param copy.package copies the package over before adding/manipulating
#'   files.
#' @param overwrite overwrite target files?
#' @param unlink.target unlink target directory before processing files?
#' @param file.ext extension of source files to be processed
#' @param roclets character vector of roclet names to apply to package
#' @return \code{NULL}
#' @rdname roxygenize
#' @export
roxygenize <- function(package.dir,
                       roxygen.dir=package.dir,
                       copy.package=package.dir != roxygen.dir,
                       overwrite=TRUE,
                       unlink.target=FALSE,
                       file.ext = "[.Rr]",
                       roclets=c("collate", "namespace", "rd")) {
  # Output skeleton: the roxygen directory itself plus its man/ subdirectory
  skeleton <- c(roxygen.dir, file.path(roxygen.dir, "man"))
  # When processing into a separate directory, mirror the package there first
  if (copy.package) {
    copy.dir(package.dir, roxygen.dir, unlink.target = unlink.target,
      overwrite = overwrite, verbose = FALSE)
  }
  for (dir in skeleton) {
    dir.create(dir, recursive=TRUE, showWarnings=FALSE)
  }
  roxygen.dir <- normalizePath(roxygen.dir)
  # All R source files under <roxygen.dir>/R matching file.ext
  r_files <- dir(file.path(roxygen.dir, "R"), paste0(file.ext, "$"), full.names = TRUE)

  # If description present, use Collate to order the files
  # (but still include them all, and silently remove missing)
  DESCRIPTION <- file.path(package.dir, "DESCRIPTION")
  if (file.exists(DESCRIPTION)) {
    desc <- read.description(DESCRIPTION)
    # Collate field is a single space-separated string; may be absent
    raw_collate <- desc$Collate %||% ""

    con <- textConnection(raw_collate)
    on.exit(close(con))  # guarantee the connection is released on any exit
    collate <- scan(con, "character", sep = " ", quiet = TRUE)

    # Keep Collate order for files that exist, then append the rest
    collate_path <- file.path(roxygen.dir, "R", collate)
    collate_exists <- Filter(file.exists, collate_path)
    r_files <- c(collate_exists, setdiff(r_files, collate_exists))

    # load the dependencies
    # Strip version qualifiers like "(>= 1.0)" and attach each package so
    # the parsed code can be evaluated -- skipping the "R" pseudo-dependency
    pkgs <- paste(c(desc$Depends, desc$Imports), collapse = ", ")
    if (pkgs != "") {
      pkgs <- gsub("\\s*\\(.*?\\)", "", pkgs)
      pkgs <- strsplit(pkgs, ",")[[1]]
      pkgs <- gsub("^\\s+|\\s+$", "", pkgs)

      lapply(pkgs[pkgs != "R"], require, character.only = TRUE)
    }
  }

  parsed <- parse.files(r_files)

  # Map roclet names ("rd", ...) to constructor names ("rd_roclet", ...),
  # then run each roclet's process/output pair over the parsed sources
  roclets <- str_c(roclets, "_roclet", sep = "")
  for (roclet in roclets) {
    roc <- match.fun(roclet)()
    results <- roc_process(roc, parsed, roxygen.dir)
    roc_output(roc, results, roxygen.dir)
  }
}
#' @rdname roxygenize
#' @export
roxygenise <- roxygenize
# Recursively copy the contents of one directory into another.
#
# Every file found under `source` has its leading path component stripped
# and is recreated at the same relative location under `target`, creating
# any intermediate directories as needed. The target can optionally be
# unlinked first, existing files overwritten, and each copy echoed.
#
# @note Not tested on non-linux platforms
copy.dir <- function(source,
                     target = source,
                     unlink.target=FALSE,
                     overwrite=FALSE,
                     verbose=FALSE) {
  if (unlink.target) {
    unlink(target, recursive = TRUE)
  }
  all_files <- list.files(source,
                          full.names = TRUE,
                          recursive = TRUE,
                          all.files = TRUE)
  for (src_path in all_files) {
    # Drop the first path component (the source directory itself) so the
    # file lands at the matching relative path under the target.
    relative_path <- sub('[^/\\]*(/|\\\\)', '', src_path)
    dest_path <- file.path(target, relative_path)
    # Could create, instead, a list of unique directories in Theta(n).
    dir.create(dirname(dest_path), recursive = TRUE, showWarnings = FALSE)
    if (verbose) {
      cat(sprintf('%s -> %s', src_path, dest_path), '\n')
    }
    file.copy(src_path, dest_path, overwrite = overwrite)
  }
}
|
8b4a35ddba82facb7716088d4446a7cc46d3e781 | ff09d8ab1b517ea730d301c1fb6167f0102929b3 | /R/AoRSS.R | 4817a85b8aedbca5b51d80b7bf2519a19f4bd0f4 | [] | no_license | IMPRESSPROJECT/AoRSS | cf75cc7e5795b3b59eae4ba00406eff01760786e | e915d7cc8038027b217a7f64fb24310b3d65b0bf | refs/heads/master | 2021-04-17T04:11:28.049028 | 2021-03-02T08:58:39 | 2021-03-02T08:58:39 | 249,411,982 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,573 | r | AoRSS.R | #' @title Analysis of residual sum of squares (AoRSS)
#'
#' @description Returns the test statistic and p-value corresponding to a method of comparing a number of curves called the analysis of residual sum of squares (AoRSS). The null hypothesis of the test is that the K curves can be assumed coincident.
#'
#' @param RSSi a vector containing for each data set the sum of squared residuals calculated after fitting the corresponding curve.
#' @param RSSp the sum of squared residuals for the fit of the curve for the pooled data.
#' @param K the number of curves being compared.
#' @param N the total or pooled sample size.
#' @details The function returns the test statistic and p-value corresponding to the following test. Previously to compute the statistic:\itemize{
#' \item For each data set i, fit a curve and calculate the sum of squared residuals, RSSi.
#' \item Data for all curves are pooled, a new curve is fitted to the combined data, and the total or pooled RSSp is calculated.}
#' Now, all the necessary terms for computing our F-statistic are available and F is equal to:
#' \deqn{((RSSp-sum(RSSi))/(3(K-1)))/((sum(RSSi))/(N-3K)),}
#' where F is the F statistic with 3(K - 1) and (N - 3K) degrees of freedom, K is the number of curves being compared, and N is the total or pooled sample size.
#' Remember that the null hypothesis is that the K curves can be assumed coincident.
#' @return The value of the F-statistic and the corresponding p-value.
#' @references Haddon, Malcolm. (2011). Modelling and Quantitative Methods in Fisheries 2nd Edition.
#' @author
#' \itemize{
#' \item{Marta Cousido-Rocha}
#' \item{Santiago Cerviño López}
#' }
#' @examples
#' # An example based on the age length data relating
#' # to Pacific hake with separate data for both males
#' # and females (Example Table 9.3 of Haddon 2011,
#' # Quantitative Methods of Fisheries). The question
#' # is whether the male and female Pacific hake exhibit
#' # different growth throughout their lives. This is, we
#' # testing if the growth curves for males and females
#' # are coincident or not?
#' RSSi=c(28.8003778903944, 19.4233877094241)
#' RSSp=79.7645155773056
#' K=2
#' N=24
#' AoRSS.test(RSSi,RSSp,K,N)
#' @export
AoRSS.test <- function(RSSi, RSSp, K, N) {
  # Validate inputs: one RSS per curve, and enough observations for the
  # residual degrees of freedom (N - 3K) to be positive.
  stopifnot(is.numeric(RSSi), length(RSSi) == K, N > 3 * K)
  # F statistic: extra residual error of the pooled fit per extra
  # parameter, relative to the per-observation error of the separate fits.
  # Named f.stat rather than `F` -- `F` shadows the FALSE shorthand in R.
  numerator <- (RSSp - sum(RSSi)) / (3 * (K - 1))
  denominator <- sum(RSSi) / (N - 3 * K)
  f.stat <- abs(numerator / denominator)
  # p-value from the F distribution with 3(K-1) and (N-3K) df
  df1 <- 3 * (K - 1)
  df2 <- N - 3 * K
  p.value <- 1 - stats::pf(f.stat, df1, df2)
  # Results
  res <- list(F.statistic = f.stat, p.value = p.value)
  return(res)
}
|
b3724f5229ca8109eb5e496564dcd1780402c53d | da9ed9e8a4a02ec680072f658201a023461737f0 | /Course 3 Week 3 Quiz.R | 74977deda85502c2fe19b01798b46fe2c23924ed | [] | no_license | SiddhantUchil/Course-3-Week-3-Quiz | 04768de3def30d97c9dacb8413ecf404ae336176 | 535e71fa2df6daf7dadf1fe6fe1884d12fa05b69 | refs/heads/master | 2022-07-21T22:42:27.307966 | 2020-05-22T18:38:00 | 2020-05-22T18:38:00 | 266,183,144 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,191 | r | Course 3 Week 3 Quiz.R | getwd()
a = read.csv("hid.csv")
library(dplyr)
library(tidyr)
a = tbl_df(a)
a
agricultureLogical = a %>% drop_na(ACR, AGS) %>% mutate(LOGICAL = ACR == 3 & AGS == 6)
agricultureLogical$LOGICAL
which(agricultureLogical$LOGICAL == TRUE)
?drop_na
df <- tibble(x = c(1, 2, NA), y = c("a", NA, "b"))
df %>% drop_na()
df %>% drop_na(x)
install.packages("jpeg")
library(jpeg)
b = readJPEG("jeff.jpg", native = TRUE)
str(b)
b
quantile(b, probs = 0.8)
c = read.csv("gdp.csv", skip = 3, nrows = 191)
d = read.csv("edstats.csv", nrows = 227)
c = tbl_df(c)
d = tbl_df(d)
names(c)
colnames(c)[1] = "CountryCode"
c = c %>% select(-("X.1"))
c = select(c, -(X.2:X.6))
colnames(c)[4] = "money"
c = c %>% drop_na(money)
c
i = c[-1, ]
View(i)
View(d)
names(i)
names(d)
?match
e = which(i$CountryCode %in% d$CountryCode == TRUE)
f = c$CountryCode %in% d$CountryCode
g = arrange(i, money )
View(g)
j = merge(i,d,by = "CountryCode")
View(j)
names(j)
colnames(j)[2] = "rank"
names(j)
k = arrange(j, desc(rank))
View(k)
groupedDF <- group_by(mergedDF, Income.Group)
avgRankings<- dplyr::summarize(groupedDF, agvGDP = mean(Ranking))
filter(avgRankings, Income.Group %in% c('High income: nonOECD', 'High income: OECD'))
suppressMessages(library(Hmisc))
#using cut2(), need not mention quantile boundaries explicitly, set g=no_of_quantile_groups
cutGDP <- cut2(sortedDF$Ranking, g=5 )
table(cutGDP, sortedDF$Income.Group )
mergedDT[`Income Group` == "High income: OECD"
, lapply(.SD, mean)
, .SDcols = c("Rank")
, by = "Income Group"]
mergedDT[`Income Group` == "High income: nonOECD"
, lapply(.SD, mean)
, .SDcols = c("Rank")
, by = "Income Group"]
# install.packages('dplyr')
library('dplyr')
breaks <- quantile(mergedDT[, Rank], probs = seq(0, 1, 0.2), na.rm = TRUE)
mergedDT$quantileGDP <- cut(mergedDT[, Rank], breaks = breaks)
mergedDT[`Income Group` == "Lower middle income", .N, by = c("Income Group", "quantileGDP")]
##rowwise() is my go-to function. It's like group_by() but it treats each row as an individual group.
##df %>% rowwise() %>% mutate(Sum = sum(c(var1, var2, var3), na.rm = TRUE))
|
65b86f0a344dc55a366382f92b65b59801053293 | f064ecae355e2eada3c7438247ba784129f8f028 | /datasciencecoursera/ml/CaretRandomForests.R | 5ec74d606ae46442f9af5d5e03983cb33f3b54ba | [
"CC0-1.0"
] | permissive | ArnulfoPerez/R | c0a1a43680dd486bd9d0f8379ae05057efcbc235 | 70fb8a57b19d86eceb7a4d5f75aec712bfebfff5 | refs/heads/master | 2022-11-06T16:00:57.404891 | 2020-06-28T07:04:19 | 2020-06-28T07:04:19 | 260,382,530 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,119 | r | CaretRandomForests.R | library(caret)
library(ggplot2)
# Load the iris data and inspect the class balance of the outcome
data("iris")
names(iris)
table(iris$Species)
# 70/30 stratified train/test split on Species
inTrain <- createDataPartition(y=iris$Species,p=0.7,list=FALSE)
training <- iris[inTrain,]
testing <- iris[-inTrain,]
dim(training); dim(testing)
# Exploratory plot of two predictors coloured by class
qplot(Petal.Width,Sepal.Width,colour=Species,data = iris)
# Single classification tree (CART via rpart) fitted through caret
modFit <- train(Species ~ ., method="rpart",data=training)
print(modFit$finalModel)
plot(modFit$finalModel,uniform=TRUE,main="Classification tree")
text(modFit$finalModel,use.n = TRUE, all = TRUE, cex = .8)
# Prettier tree rendering from the rattle package
library(rattle)
fancyRpartPlot(modFit$finalModel)
predict(modFit, newdata = testing)
# Random forest; prox = TRUE keeps the proximity matrix for classCenter below
modFit <- train(Species ~ ., method="rf", prox = TRUE, data=training)
modFit
library(randomForest)
# Inspect the second tree of the fitted forest
getTree(modFit$finalModel,k=2)
# Class centers in (Petal.Length, Petal.Width) space from the proximity matrix
irisP <- classCenter(training[,c(3,4)], training$Species, modFit$finalModel$proximity)
irisP <- as.data.frame(irisP)
irisP$Species <- rownames(irisP)
# Overlay the class centers (crosses) on the training scatter plot
p <- qplot(Petal.Width,Petal.Length,col=Species,data = training)
p + geom_point(aes(x=Petal.Width,y=Petal.Length,col=Species),size=5,shape=4,data = irisP)
# Out-of-sample predictions and confusion table
pred <- predict(modFit,testing)
testing$predRight <- pred==testing$Species
table(pred,testing$Species)
|
bc17896c8ef646b38f5a39e981071c7057376ca8 | 9069a14191ff2e9077de348720eef82b1a8bcf42 | /2015s2-mo444-assignment-04/codes/ep4_after.R | 63f2cee227979f442eaa4c7f7cad471b2bd49f8a | [
"MIT"
] | permissive | rodneyrick/MO444-PatternRecognition-and-MachineLearning | a0fdad638922cf6b1ef4450c5ad732dc66131a71 | 5b0f9968b5b9e5c761cac48675118a9a755a5592 | refs/heads/master | 2020-04-24T01:45:41.999102 | 2016-08-26T22:32:42 | 2016-08-26T22:32:42 | 66,675,858 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,874 | r | ep4_after.R | train <- read.csv(file="D:\\MO444\\2015s2-mo444-assignment-04\\trat_200.csv", sep=",", header = FALSE, nrows=20000)
val <- read.csv(file="D:\\MO444\\2015s2-mo444-assignment-04\\trat_200.csv", sep=",", header = FALSE, nrows=20000, skip = 25001)
# original data
pca <- princomp(train[-1]) # principal components analysis using correlation matrix
pc.comp <- pca$scores
d <- as.data.frame(train$V1)
colnames(d) <- c("V1")
d <- cbind(d,pc.comp)
cut_off <- 0.75
t <- as.matrix(d[,-1])
vars <- apply(t, 2, var) #apply in each column
d <- as.data.frame(t[,vars > quantile(vars, cut_off)]) #get columns have more variance than 90%
d$V1 <- train[,1] # add column with activity
fmla <- as.formula(paste("V1 ~ ",paste(colnames(d[,-dim(d)[2]]),sep=" ", collapse = " + ")))
library(randomForest)
fit <- randomForest(fmla,data=d)
print(fit) # view results
# importance(fit) # importance of each predictor
# original data
pca <- princomp(val[-1]) # principal components analysis using correlation matrix
pc.comp <- pca$scores
d <- as.data.frame(val$V1)
colnames(d) <- c("V1")
d <- cbind(d,pc.comp)
t <- as.matrix(d[,-1])
vars <- apply(t, 2, var) #apply in each column
d <- as.data.frame(t[,vars > quantile(vars, cut_off)]) #get columns have more variance than 90%
d$V1 <- val[,1] # add column with activity
val.pred <- predict(fit, d[,-dim(d)[2]])
table(observed = val[,1], predicted = val.pred)
library(caret)
xtab <- table(val.pred, val[,1])
confusionMatrix(xtab)
confusionMatrix(val.pred, val[,1])
plot(fit, main = "25% dos atributos" )
legend("topright", colnames(fit$err.rate),col=1:4,cex=0.8,fill=1:4)
# rm(list = setdiff(ls(), lsf.str()))
# --------------------------------------------------------------
# Regression Tree Example
library(rpart)
pca <- princomp(train[-1])
pc.comp <- pca$scores
d <- as.data.frame(train$V1)
colnames(d) <- c("V1")
d <- cbind(d,pc.comp)
t <- as.matrix(d[,-1])
vars <- apply(t, 2, var) #apply in each column
d <- as.data.frame(t[,vars > quantile(vars, cut_off)]) #get columns have more variance than 90%
d$V1 <- train[,1] # add column with activity
# grow tree
fit <- rpart(fmla,data=d)
printcp(fit) # display the results
plot(fit)
plotcp(fit) # visualize cross-validation results
summary(fit) # detailed summary of splits
pca <- princomp(val[-1]) # principal components analysis using correlation matrix
pc.comp <- pca$scores
d <- as.data.frame(val$V1)
colnames(d) <- c("V1")
d <- cbind(d,pc.comp)
t <- as.matrix(d[,-1])
vars <- apply(t, 2, var) #apply in each column
d <- as.data.frame(t[,vars > quantile(vars, cut_off)]) #get columns have more variance than 90%
d$V1 <- val[,1] # add column with activity
val.pred <- predict(fit, d[,-dim(d)[2]], type = "class")
xtab <- table(val.pred, d[,dim(d)[2]])
confusionMatrix(xtab)
# create additional plots
par(mfrow=c(1,1)) # two plots on one page
rsq.rpart(fit) # visualize cross-validation results
# plot tree
plot(fit, uniform=TRUE,
main="Regression Tree for Mileage ")
text(fit, use.n=TRUE, all=TRUE, cex=.8)
# create attractive postcript plot of tree
post(fit, file = "D:\\MO444\\2015s2-mo444-assignment-04\\tree2.ps",
title = "Regression Tree for Mileage")
# --------------------------------------------------------------
library(rattle)
library(rpart.plot)
library(RColorBrewer)
prp(fit)
fancyRpartPlot(fit, main = "Rpart com 100% das colunas para treino", sub = "Jogging: 0.4, LyingDown: 0.53, Sitting: 0.5, Stairs: 0.45, Standing: 0.52, Walking:0.61")
# --------------------------------------------------------------
# original data
pca <- princomp(train[-1]) # principal components analysis using correlation matrix
pc.comp <- pca$scores
d <- as.data.frame(train$V1)
colnames(d) <- c("V1")
d <- cbind(d,pc.comp)
fmla <- as.formula(paste("V1 ~ ",paste(colnames(d[,-1]),sep=" ", collapse = " + ")))
# http://www.ats.ucla.edu/stat/r/dae/logit.htm
mylogit <- glm(fmla, data = d, family = "binomial")
# original data
pca <- princomp(val[-1]) # principal components analysis using correlation matrix
pc.comp <- pca$scores
d <- as.data.frame(val$V1)
colnames(d) <- c("V1")
val <- cbind(d,pc.comp)
val.pred <- predict.glm(mylogit, newdata = val[,-1], type = "response")
xtab <- table(val.pred,val[,1])
confusionMatrix(xtab)
# Confusion matrix for a fitted binomial glm.
#
# With no new data, predictions are made on the model's own training data
# and compared against the model's response (model$y). When des.mat is
# supplied, predictions are made on that data and compared against the
# logical vector `response`.
#
# Args:
#   model:    fitted glm; predictions use type = 'response' (probabilities).
#   des.mat:  optional data frame of new observations to predict on.
#   response: logical vector of observed outcomes; required (and only
#             checked) when des.mat is given.
#   cutoff:   probability threshold above which a prediction counts as TRUE.
# Returns:
#   data.frame with columns 'FALSE', 'TRUE' (counts) and 'class.error'
#   (per-class misclassification rate).
confusion.glm <- function(model, des.mat=NULL, response=NULL, cutoff=0.5) {
  if (missing(des.mat)) {
    prediction <- predict(model, type='response') > cutoff
    confusion <- table(as.logical(model$y), prediction)
  } else {
    # !is.logical() instead of class(response) != "logical": the class()
    # comparison yields a length-> 1 condition (and an error in modern R)
    # for objects with more than one class attribute, e.g. logical matrices.
    if (missing(response) || !is.logical(response)) {
      stop("Must give logical vector as response when des.mat given")
    }
    prediction <- predict(model, des.mat, type='response') > cutoff
    confusion <- table(response, prediction)
  }
  # Append the per-class error rate: 1 - (correct in row) / (row total)
  confusion <- cbind(confusion,
                     c(1 - confusion[1,1] / rowSums(confusion)[1],
                       1 - confusion[2,2] / rowSums(confusion)[2]))
  confusion <- as.data.frame(confusion)
  names(confusion) <- c('FALSE', 'TRUE', 'class.error')
  return(confusion)
}
confusion.glm(mylogit, des.mat = val[,-1], response = val[,1], cutoff=0.9)
# ------------------------------------------------------------------------
library(Hmisc)
t <- var(as.matrix(val))
t <- as.matrix(d[,-1])
vars <- apply(t, 2, var)
d <- as.data.frame(t[,vars > quantile(vars, 0.95)])
names(d)
d$V1 <- train[,1]
# original data
pca <- princomp(train[-1]) # principal components analysis using correlation matrix
pc.comp <- pca$scores
d <- as.data.frame(train$V1)
colnames(d) <- c("V1")
d <- cbind(d,pc.comp)
fmla <- as.formula(paste("V1 ~ ",paste(colnames(d[,-16]),sep=" ", collapse = " + ")))
# ----------------------------------------------------------------------------------------
train <- read.csv(file="D:\\MO444\\2015s2-mo444-assignment-04\\trat_100.csv", sep=",", header = TRUE, nrows=25000)
val <- read.csv(file="D:\\MO444\\2015s2-mo444-assignment-04\\trat_100.csv", sep=",", header = FALSE, nrows=25000, skip = 25001)
# original data
pca <- princomp(train[-1]) # principal components analysis using correlation matrix
pc.comp <- pca$scores
d <- as.data.frame(train$V1)
colnames(d) <- c("V1")
d <- cbind(d,pc.comp)
cut_off <- 0.75
t <- as.matrix(d[,-1])
vars <- apply(t, 2, var) #apply in each column
d <- as.data.frame(t[,vars > quantile(vars, cut_off)]) #get columns have more variance than 90%
d$V1 <- train[,1] # add column with activity
fmla <- as.formula(paste("V1 ~ ",paste(colnames(d[,-dim(d)[2]]),sep=" ", collapse = " + ")))
## classification mode
# default with factor response:
library(e1071) # Support Vector Machines
fit <- svm(fmla, data = d)
print(fit) # view results
# importance(fit) # importance of each predictor
# original data
pca <- princomp(val[-1]) # principal components analysis using correlation matrix
pc.comp <- pca$scores
d <- as.data.frame(val$V1)
colnames(d) <- c("V1")
d <- cbind(d,pc.comp)
t <- as.matrix(d[,-1])
vars <- apply(t, 2, var) #apply in each column
d <- as.data.frame(t[,vars > quantile(vars, cut_off)]) #get columns have more variance than 90%
d$V1 <- val[,1] # add column with activity
val.pred <- predict(fit, d[,-dim(d)[2]])
table(observed = val[,1], predicted = val.pred)
library(caret)
xtab <- table(val.pred, val[,1])
confusionMatrix(xtab)
plot(fit,d[,1:2,76],fmla)
m <- as.matrix(d[,-76])
x <- data.frame(t(m[1,]))
library(moments)
# original data
pca <- princomp(train[-1]) # principal components analysis using correlation matrix
pc.comp <- pca$scores
d <- as.data.frame(train$V1)
colnames(d) <- c("V1")
d <- cbind(d,pc.comp)
fmla <- as.formula(paste("V1 ~ ",paste(colnames(ss[,-dim(ss)[2]]),sep=" ", collapse = " + ")))
library(randomForest)
fit <- randomForest(fmla,data=ss)
print(fit) # view results
# importance(fit) # importance of each predictor
# original data
pca <- princomp(val[-1]) # principal components analysis using correlation matrix
pc.comp <- pca$scores
d <- as.data.frame(val$V1)
colnames(d) <- c("V1")
d <- cbind(d,pc.comp)
s <- apply(d[,-1], 1, skewness)
k <- apply(d[,-1], 1, kurtosis)
m <- apply(d[,-1], 1, mean)
mi <- apply(d[,-1], 1, min)
ma <- apply(d[,-1], 1, max)
ss <- as.data.frame(s)
ss$k <- k
ss$m <- m
ss$mi <- mi
ss$ma <- ma
ss$V1 <- d[,1]
val.pred <- predict(fit, ss[,-1])
table(observed = val[,1], predicted = val.pred)
library(caret)
xtab <- table(val.pred, val[,1])
confusionMatrix(xtab)
confusionMatrix(val.pred, val[,1])
plot(fit, main = "100% dos atributos" )
legend("topright", colnames(fit$err.rate),col=1:4,cex=0.8,fill=1:4)
# -------------------------------------------------------------------------------
train <- read.csv(file="D:\\MO444\\2015s2-mo444-assignment-04\\trat_100.csv", sep=",", header = TRUE, nrows=20000)
val <- read.csv(file="D:\\MO444\\2015s2-mo444-assignment-04\\trat_100.csv", sep=",", header = FALSE, nrows=20000, skip = 25001)
# original data
pca <- princomp(train[,-1]) # principal components analysis using correlation matrix
pc.comp <- pca$scores
d <- as.data.frame(train$V1)
colnames(d) <- c("V1")
d <- cbind(d,pc.comp)
cut_off <- 0.75
t <- as.matrix(d[,-1])
vars <- apply(t, 2, var) #apply in each column
d <- as.data.frame(t[,vars > quantile(vars, cut_off)]) #get columns have more variance than 90%
d$V1 <- train[,1] # add column with activity
fft_train <- d[1,]
apply(d[1:20,], 1, function(x) {
s <- as.numeric(as.vector(x[-target]))
test <- fft(s)
# extract magnitudes and phases
magn <- Mod(test) # sqrt(Re(test)*Re(test)+Im(test)*Im(test))
# plot(test, magn)
new_row <- c(x[target],magn)
fft_train <- rbind(fft_train, new_row)
})
plot(fft_train[1,])
|
4323bcf6931942dd7446963a3968ed7ccd174489 | c31544cebf23c91ab8ed91a181a679aa15046091 | /reports/08_landuse_intensity.R | 69784064c3dec242866ffca1785a81f4be9a32e3 | [
"MIT"
] | permissive | CecSve/InsectMobile_Biomass | ea5c95b125ff2448a588c2e221345df5690a2480 | b72dfc200e14e2be109db3bf06c71824862d69ed | refs/heads/master | 2022-10-13T02:06:19.386263 | 2022-10-11T12:14:59 | 2022-10-11T12:14:59 | 229,278,989 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 28,058 | r | 08_landuse_intensity.R | #run 03mergeData script
### Load required libraries ###########################
library(cowplot)
library(ggplot2)
library(wesanderson)
library(ggpubr)
library(scales)
library(psych)
library(ggfortify)
library(lme4)
library(lmerTest)
library(effects)
library(corrplot)
library(MuMIn)
library(sjPlot)
#### Set colour scheme ##############################################################
landuseCols <- c("#CC79A7", "#E69F00", "#D55E00", "#56B4E9", "#009E73", "darkgrey") # colour friendly, ordered by land cover
### DK urban##############################################
# Load the cleaned Danish insect-sampling dataset (one row per sampling route/run).
allInsects <- read.delim("cleaned-data/DK_allInsects.txt")
# change land covers to be 0-100 instead of 0-1
# (column positions 26:49, 70:137 and 139 hold proportional covers -- assumes a
# fixed column order in the cleaned-data file; TODO confirm the indices)
allInsects[, c(26:49, 70:137,139)] <- allInsects[, c(26:49, 70:137,139)]*100
### urban greenness analysis (1st review) ####
# since we find a strong negative effect of urban cover in the main land cover analysis, we wish to explore whether green space might remedy some of this negative effect. To test this, we need to extract the urban green land use variables, hedges and urban green areas, and calculate their porportional cover within urban land cover.
# first examine general correlations
names(allInsects)
# Select biomass, sampling effort and the urban land-use columns by position
# (verify against the names(allInsects) output above).
someInsects <- allInsects[,c(12,22, 48, 62, 70, 74:77)]
colnames(someInsects)
colnames(someInsects) <- c("Biomass", "Stops", "Urban", "Hedges", "Urban green", "Inner city", "Commercial", "Residential", "Multistory")
# Pairwise Pearson correlations, ignoring missing pairs.
p <- cor(someInsects, use="pairwise.complete.obs")
# add significance
res1 <- cor.mtest(someInsects, conf.level = .95)
res2 <- cor.mtest(someInsects, conf.level = .99)
# NOTE(review): png() writes PNG data even though the file name ends in .jpeg;
# also, two corrplot() pages are drawn on a single-file png device, so the
# second (AOE-ordered) page likely does not end up in the file -- confirm the
# intended output.
png(height=7, width=7, units="in", file="plots/corr_urban_landuse.jpeg", type = "cairo-png", res = 300)
# with correlation coefficient instead of p-values, coloured boxes = significant at a 0.05 level
corrplot(p, method = "color",
type = "upper", order = "original", number.cex = .7,
addCoef.col = "black", # Add coefficient of correlation
tl.col = "black", tl.srt = 90, # Text label color and rotation
# Combine with significance
p.mat = res1$p, sig.level = 0.05, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag = FALSE, mar=c(0,0,1,0))
# Same matrix, variables re-ordered by angular order of eigenvectors (AOE).
corrplot(p, method = "color",
type = "upper", order = "AOE", number.cex = .7,
addCoef.col = "black", # Add coefficient of correlation
tl.col = "black", tl.srt = 90, # Text label color and rotation
# Combine with significance
p.mat = res1$p, sig.level = 0.05, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag = FALSE, mar=c(0,0,1,0))
dev.off()
### PCA urban ##################
names(someInsects)
# PCA on the urban land-use columns only (drops Biomass, Stops and Hedges).
mydata <- someInsects[,c(3,5:9)]
fit <- princomp(mydata, cor=TRUE)
biplot(fit)
#with ggplot
autoplot(fit)
autoplot(fit, data = mydata,
loadings = TRUE,
loadings.colour = 'black',
loadings.label = TRUE,
loadings.label.size = 5) +
scale_colour_manual(values = landuseCols[1:6])+
theme_bw() + labs(colour = "Land cover")
# the main axis explains most of the variation - the general urban cover effect.
#cowplot::save_plot("plots/pca_landuse_urban.png", dk_autoplot, base_height = 8, base_width = 12)
# Varimax-rotated 2-factor solution for easier interpretation of the axes.
pca_rotated <- psych::principal(mydata, rotate="varimax", nfactors=2, scores=TRUE)
biplot(pca_rotated, main = "")
print(pca_rotated)
### subset to urban routes #########
# we choose a % cover threshold
quantile(allInsects$Urban_1000)
# Keep only routes above the urban-cover threshold (6.67550 -- presumably a
# quantile of Urban_1000 printed above; TODO confirm which one).
data <- subset(allInsects, allInsects$Urban_1000 > 6.67550)
#make data proportional
#data$propHedge <- (data$byHegnMeterPerHa_1000/data$Urban_1000)
# Express each urban land-use type as a % of total urban cover (1000 m buffer).
data$propurbGreen <- (data$urbGreenPropArea_1000/data$Urban_1000)*100
data$propLargecity <- (data$Bykerne_1000/data$Urban_1000)*100
data$propCommercial <- (data$Erhverv_1000/data$Urban_1000)*100
data$propMultistory <- (data$Høj.bebyggelse_1000/data$Urban_1000)*100
tail(data)
# merge greenness
data$propGreen <- data$propurbGreen
#data$propMajorcity <- data$propLargecity + data$propCommercial #+ data$propMultistory
mean(data$propGreen)
max(data$propGreen)
median(data$propGreen)
#mean(data$propMajorcity)
#max(data$propMajorcity)
#median(data$propMajorcity)
# Division by a zero urban cover yields Inf; recode infinite values to 0.
data <- data %>% mutate_if(is.numeric, function(x) ifelse(is.infinite(x), 0, x))
### correlation plot proportional urban ###################
someInsects <- data[,c("Biomass", "cStops", "Urban_1000", "propGreen")]
colnames(someInsects)
colnames(someInsects) <- c("Biomass", "Stops", "Urban", "propGreen")
p <- cor(someInsects, use="pairwise.complete.obs")
# add significance
res1 <- cor.mtest(someInsects, conf.level = .95)
res2 <- cor.mtest(someInsects, conf.level = .99)
# with correlation coefficient instead of p-values, coloured boxes = significant at a 0.05 level
png(units="in", width=7, height=7, file="plots/corr_propurban_landuse.jpeg", type = "cairo-png", res = 300)
corrplot(p, method = "color",
type = "upper", order = "original", number.cex = .7,
addCoef.col = "black", # Add coefficient of correlation
tl.col = "black", tl.srt = 60, # Text label color and rotation
# Combine with significance
p.mat = res1$p, sig.level = 0.05, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag = FALSE, mar=c(0,0,1,0))
dev.off()
# now, after we have calculated the proportional green cover within urban, there is not a strong correlation between urban and greenness variable
# Mixed model: log biomass ~ proportional urban green cover + sampling
# covariates, with random intercepts for route and pilot (driver).
lme1000 <- lmer(log(Biomass+1) ~
#Urban_1000 +
propGreen +
Time_band +
Time_band:cnumberTime + cStops + cyDay +
(1|RouteID_JB) + (1|PilotID), data=data)
summary(lme1000)
tab_model(lme1000, digits = 3, show.intercept = F, collapse.ci = T, pred.labels = c("Prop. green cover", "Evening vs midday", "Potential stops", "Day of year", "Time within midday", "Time within evening"))
# Check multicollinearity among the fixed effects.
car::vif(lme1000)
# Effect displays (package 'effects') used for plotting model predictions below.
gls1.alleffects <- allEffects(lme1000)
effectdata <- as.data.frame(gls1.alleffects, row.names=NULL, optional=TRUE)
eall.lm1 <- predictorEffects(lme1000)
plot(eall.lm1, lines=list(multiline=TRUE))
### ggplot effect plot ####
# Urbangreen_gradient
# Reshape the propGreen effect display into a long data frame with a common
# column layout (landuse, propcover, fit, se, lower, upper) for plotting.
temp <- effectdata$propGreen
temp$landuse <- "propGreen"
propGreen <- temp %>%
dplyr::rename(
propcover = propGreen
)%>% dplyr::select(landuse, propcover, fit, se, lower, upper)
prop_data <- propGreen
str(prop_data)
# Visualization
# Panel A: predicted log biomass vs proportional urban green cover with a
# +/- 1 SE ribbon. NOTE(review): theme_minimal_grid() is from cowplot -- the
# package must be attached for this to run; confirm it is loaded earlier.
landuse_urban <- prop_data %>% ggplot(aes(x = propcover, y = fit, fill = landuse)) +
geom_line(aes(color = landuse), size = 2) +
scale_color_manual(
values = c("#FA0081"), labels = "Prop. urban green cover"
) + theme_minimal_grid() + theme(
plot.subtitle = element_text(size = 20, face = "bold"),
legend.title = element_blank(),
legend.text = element_text(size = 8),
legend.position = "bottom"
) + scale_x_continuous(
limits = c(20, 70),
labels = function(x)
paste0(x, "%")) + geom_ribbon(
aes(
ymin = fit-se,
ymax = fit+se,
group = landuse
),
linetype = 2,
alpha = 0.2,
show.legend = F
) + labs(
x = "",
y = "log Predicted biomass (mg)",
subtitle = "A",
colour = "Land use type"
) + scale_fill_manual(values = c("#FA0081")) + guides(colour = guide_legend(nrow = 1))
#save_plot("plots/DK_urbanlanduse_cover_greenness.png", landuse_urban, base_width = 8, base_height = 6)
### PCA propUrban ##############
names(data)
# Biomass, effort and raw urban land-use columns, selected by position
# (the indices assume the column order of allInsects -- verify with names(data)).
someInsects <- data[,c(12,22, 48, 70, 74:77)]
colnames(someInsects)
#colnames(someInsects) <- c("Biomass", "Stops", "Urban", "Urban green", "Inner city", "Commercial", "Residential", "Multistory")
#make data proportional
# Express each urban land-use type as a % of total urban cover (1000 m buffer).
someInsects$propurbGreen <- (someInsects$urbGreenPropArea_1000/someInsects$Urban_1000)*100
someInsects$propInnercity <- (someInsects$Bykerne_1000/someInsects$Urban_1000)*100
someInsects$propCommercial <- (someInsects$Erhverv_1000/someInsects$Urban_1000)*100
someInsects$propResidential <- (someInsects$Lav.bebyggelse_1000/someInsects$Urban_1000)*100
someInsects$propMultistory <- (someInsects$Høj.bebyggelse_1000/someInsects$Urban_1000)*100
names(someInsects)
# PCA on total urban cover plus the five proportional land-use variables.
fit <- princomp(someInsects[,c(3, 9:13)], cor=TRUE)
#with ggplot
biplot(fit)
autoplot(fit)
# FIX: annotate the biplot with the same rows the PCA was fitted on. The
# original passed `data = mydata` (the unsubsetted urban table from the earlier
# PCA), whose row count does not match this fit on the urban-route subset.
dk_autoplot <- autoplot(fit, data = someInsects,
loadings = TRUE,
loadings.colour = 'black',
loadings.label = TRUE,
loadings.label.size = 5) +
scale_colour_manual(values = landuseCols[1:6])+
theme_bw() + labs(colour = "Land cover")
cowplot::save_plot("plots/pca_proplanduse_urban.png", dk_autoplot, base_height = 10, base_width = 12)
# Varimax-rotated 2-factor solution: axis 1 is used below as the urbanization
# gradient, axis 2 as the greening gradient.
pca_rotated <- psych::principal(someInsects[,c(3, 9:13)],
rotate="varimax", nfactors=2, scores=TRUE)
biplot(pca_rotated, main = "")
print(pca_rotated)
#model selection with these pca axes
data$Urbanization_gradient <- pca_rotated$scores[,1]
data$Greening_gradient <- pca_rotated$scores[,2]
### plotting #############################################
#plot gradient (split into factor just for plotting purposes)
#general gradient
data$General_gradient_factor <- cut(data$Urbanization_gradient,3)
ggplot(data,aes(x=Urban_1000,y=log(Biomass+1),
group=General_gradient_factor))+
geom_smooth(method=lm,aes(colour=General_gradient_factor),se=F)+
scale_colour_viridis_d()
#greening gradient
data$Greening_gradient_factor <- cut(data$Greening_gradient,3)
ggplot(data,aes(x=Urban_1000,y=log(Biomass+1),
group=Greening_gradient_factor))+
geom_smooth(method=lm,aes(colour=Greening_gradient_factor),se=F)+
scale_colour_viridis_d()
#both gradients
ggplot(data,aes(x=Urban_1000,y=log(Biomass+1)))+
geom_smooth(method=lm,se=F)+
facet_grid(Greening_gradient_factor~General_gradient_factor)
#urban gradient alone
# FIX: the original referenced data$Urban, which is not created anywhere in this
# script (the unscaled urban cover column is Urban_1000 throughout) -- using a
# missing column would make cut()/aes() fail. TODO confirm against the data.
data$urban_gradient_factor <- cut(data$Urban_1000,5)
ggplot(data,aes(x=Urban_1000,y=log(Biomass+1),
group=urban_gradient_factor))+
geom_smooth(method="lm",aes(colour=urban_gradient_factor),se=F)
ggplot(data,aes(x=Urban_1000,y=log(Biomass+1)))+
geom_smooth(method="loess")
#add PCA axes scores to the dataset
# Mixed model: log biomass ~ the two rotated PCA gradients + sampling covariates.
lme1000 <- lmer(log(Biomass+1) ~
Greening_gradient +
Urbanization_gradient +
Time_band +
Time_band:cnumberTime + cStops + cyDay +
(1|RouteID_JB) + (1|PilotID), data=data)
summary(lme1000)
tab_model(lme1000, show.intercept = F, collapse.ci = T, pred.labels = c("Urban green gradient", "Urbanization gradient", "Time band: evening vs midday", "Potential stops", "Day of year", "Time within midday", "Time within evening"))
#r.squaredGLMM(lme1000)
car::vif(lme1000)
# pairwise comparison
# NOTE(review): glht() is from 'multcomp', which is not attached in the visible
# library() calls -- confirm it is loaded elsewhere in the project.
pair.ht <- glht(lme1000, linfct = c("Greening_gradient - Urbanization_gradient = 0"))
summary(pair.ht)
confint(pair.ht)
### DK farmland ##########################################
# Biomass vs individual farmland land-use covers, each on a log10 y-axis.
#
# FIX: subset() was originally called as
#   subset(allInsects, maxLand_use = "Agriculture_1000")
# with `=` instead of `==`. A named argument that matches none of subset()'s
# parameters is silently swallowed by `...`, so NO subsetting happened and all
# routes were plotted. Use `==` so only agriculture-dominated routes are shown
# (assumes a maxLand_use column exists -- TODO confirm against the data).
# The sqrt() transform was also written as sqrt(x = var) inside aes(); it is now
# applied explicitly on the x aesthetic (same values either way).
farm_routes <- subset(allInsects, maxLand_use == "Agriculture_1000")

# One scatter + lm-smooth panel per land-use cover; shared styling lives here.
farm_panel <- function(df, x, xlab, ylab = "") {
  ggplot(df, aes(x = {{ x }}, y = Biomass + 1)) +
    geom_point(col = landuseCols[3]) +
    scale_y_log10() +
    theme_bw() +
    geom_smooth(method = "lm", color = "grey70") +
    xlab(xlab) +
    ylab(ylab)
}

g1 <- farm_panel(farm_routes, sqrt(hegnMeterPerHa_1000), "Hedgerows m per ha", "Biomass")
g2 <- farm_panel(farm_routes, sqrt(Ekstensiv_organic_1000), "Organic extensive farmland cover")
g3 <- farm_panel(farm_routes, sqrt(Ekstensiv_1000), "Extensive farmland cover")
g4 <- farm_panel(farm_routes, sqrt(Semi.intensiv_1000), "Semi-intensive farmland cover", "Biomass")
g5 <- farm_panel(farm_routes, Intensiv_1000, "Intensive farmland cover")
g6 <- farm_panel(farm_routes, sqrt(Markblok_1000), "Unspecified crop cover")
cowplot::plot_grid(g1,g2,g3,g4,g5,g6)
### PCA farmland ####
# Gather effort plus all 1000 m-buffer land-use columns, then keep only the
# farmland intensity classes (positions 2, 18:21, 24:25 of that selection --
# assumes a fixed column order; verify with the names() output below).
mydata <- allInsects[,c("cStops",names(allInsects)[grepl("_1000",names(allInsects))])]
names(mydata)
mydata <- mydata[,c(2, 18:21,24:25)]
names(mydata) <- gsub("_1000","",names(mydata))
# Rows with NA in the intensity columns cannot enter the PCA.
mydata <- mydata %>% drop_na(Ekstensiv)
#mydata <- plyr::rename(mydata, c("hegnMeterPerHa" = "Hedgerows"))
# Translate the Danish land-use labels to English for plotting.
mydata <- plyr::rename(mydata, c("Ekstensiv" = "Extensive"))
mydata <- plyr::rename(mydata, c("Ekstensiv_organic" = "Organic extensive"))
mydata <- plyr::rename(mydata, c("Intensiv" = "Intensive"))
mydata <- plyr::rename(mydata, c("Intensiv_organic" = "Organic intensive"))
mydata <- plyr::rename(mydata, c("Semi.intensiv" = "Semi-intensive"))
mydata <- plyr::rename(mydata, c("Semi.intensiv_organic" = "Organic semi-intensive cover"))
fit <- princomp(mydata, cor=TRUE)
# fit_data mirrors the drop_na() filter above so its rows align with the scores.
fit_data <- allInsects %>% drop_na(Ekstensiv_1000)
#with ggplot
autoplot(fit)
dk_autoplot <- autoplot(fit, data = fit_data,
loadings = TRUE,
loadings.colour = 'black',
loadings.label = TRUE,
loadings.label.size = 5) +
scale_colour_manual(values = landuseCols[1:6])+
theme_bw() + labs(colour = "Land cover")
cowplot::save_plot("plots/pca_landuse_farmland.png", dk_autoplot, base_height = 8, base_width = 12)
#packageurl <- "https://cran.r-project.org/src/contrib/Archive/mnormt/mnormt_1.5-7.tar.gz"
#install.packages(packageurl, repos=NULL, type="source")
# Varimax-rotated 2-factor solution.
pca_rotated <- psych::principal(mydata, rotate="varimax", nfactors=2, scores=TRUE)
biplot(pca_rotated, main = "")
print(pca_rotated)
#### correlation plot farmland use (not proportional) ############
names(data)
# NOTE(review): `data` at this point is still the urban-route subset built in
# the urban section above, yet the columns selected here are farmland variables
# -- confirm that correlating farmland use within urban routes is intended.
someInsects <- data[,c(12,142, 44, 90:93, 96:97)]
colnames(someInsects)
colnames(someInsects) <- c("Biomass", "Stops", "Farmland", "Extensive", "OrganicExtensive", "Intensive", "OrganicIntensive", "semiIntensive", "semiOrganicIntensive")
p <- cor(someInsects, use="pairwise.complete.obs")
# add significance
res1 <- cor.mtest(someInsects, conf.level = .95)
res2 <- cor.mtest(someInsects, conf.level = .99)
png(height=7, width=7, units = "in", file="plots/DK_farmland_use.jpeg", type = "cairo-png", res = 300)
# with correlation coefficient instead of p-values, coloured boxes = significant at a 0.05 level
corrplot(p, method = "color",
type = "upper", order = "original", number.cex = .7,
addCoef.col = "black", # Add coefficient of correlation
tl.col = "black", tl.srt = 90, # Text label color and rotation
# Combine with significance
p.mat = res1$p, sig.level = 0.05, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag = FALSE, mar=c(0,0,1,0))
dev.off()
# with correlation coefficient instead of p-values, coloured boxes = significant at a 0.05 level
# NOTE(review): the png device was already closed above, so this AOE-ordered
# plot is drawn on the default device and the trailing dev.off() closes that
# device -- this looks like a leftover duplicate; confirm whether a second
# output file was intended.
corrplot(p, method = "color",
type = "upper", order = "AOE", number.cex = .7,
addCoef.col = "black", # Add coefficient of correlation
tl.col = "black", tl.srt = 90, # Text label color and rotation
# Combine with significance
p.mat = res1$p, sig.level = 0.05, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag = FALSE, mar=c(0,0,1,0))
dev.off()
### DK farmland proportional cover ######################################
# calculate the proportional cover of the land use intensity variables within the most land cover routes
quantile(allInsects$Agriculture_1000)
# Keep the most agricultural routes. 60.07200 is presumably a quantile of
# Agriculture_1000 printed above -- TODO confirm which, and consider deriving
# it from quantile() directly instead of hard-coding the value.
data <- subset(allInsects, allInsects$Agriculture_1000 > 60.07200)
# Express each farming-intensity class as a % of total agricultural cover.
data$propOrgExtensive <- (data$Ekstensiv_organic_1000/data$Agriculture_1000)*100
data$propExtensive <- (data$Ekstensiv_1000/data$Agriculture_1000)*100
data$propSemiIntensive <- (data$Semi.intensiv_1000/data$Agriculture_1000)*100
data$propOrgSemiIntensive <- (data$Semi.intensiv_organic_1000/data$Agriculture_1000)*100
data$propIntensive <- (data$`Intensiv_1000`/data$Agriculture_1000)*100
data$propOrgIntensive <- (data$Intensiv_organic_1000/data$Agriculture_1000)*100
tail(data)
# Division by a zero cover yields Inf; recode infinite values to 0.
data <- data %>% mutate_if(is.numeric, function(x) ifelse(is.infinite(x), 0, x))
# make NAs zeros
# FIX: funs() has been deprecated in dplyr since 0.8.0 and errors in current
# releases; a purrr-style lambda is the drop-in replacement (same behavior).
data <- data %>%
  dplyr::mutate_all(~ ifelse(is.na(.), 0, .))
# merge covers that have similar farming practices
data$propOrganic_farmland <- data$propOrgExtensive + data$propOrgIntensive + data$propOrgSemiIntensive
max(data$propOrganic_farmland) # does not exceed 100
mean(data$propOrganic_farmland)
median(data$propOrganic_farmland)
data$propConventional_farmland <- data$propIntensive + data$propExtensive + data$propSemiIntensive
max(data$propConventional_farmland) # does not exceed 100
mean(data$propConventional_farmland)
median(data$propConventional_farmland)
### correlation plot proportional agriculture ###################
names(data)
# Biomass, effort, total farmland cover and the six proportional intensity
# columns just created (positions 145:150 were appended to `data` above).
someInsects <- data[,c(12,142, 44, 145:150)]
colnames(someInsects)
colnames(someInsects) <- c("Biomass", "Stops", "Farmland", "propOrgExtensive", "propExtensive", "propSemiIntensive", "propOrgSemiIntensive", "propIntensive", "propOrgIntensive")
p <- cor(someInsects, use="pairwise.complete.obs")
# add significance
res1 <- cor.mtest(someInsects, conf.level = .95)
res2 <- cor.mtest(someInsects, conf.level = .99)
png(height=7, width=7, units = "in", file="plots/prop_farmland_use.jpeg", type = "cairo-png", res = 300)
# with correlation coefficient instead of p-values, coloured boxes = significant at a 0.05 level
corrplot(p, method = "color",
type = "upper", order = "original", number.cex = .7,
addCoef.col = "black", # Add coefficient of correlation
tl.col = "black", tl.srt = 90, # Text label color and rotation
# Combine with significance
p.mat = res1$p, sig.level = 0.05, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag = FALSE, mar=c(0,0,1,0))
dev.off()
# the combined farming practices
names(data)
# Same correlation, using the merged organic/conventional practice covers.
someInsects <- data[,c(12,142, 44, 151:152)]
colnames(someInsects)
colnames(someInsects) <- c("Biomass", "Stops", "Farmland", "propOrganic", "propConventional")
p <- cor(someInsects, use="pairwise.complete.obs")
# add significance
res1 <- cor.mtest(someInsects, conf.level = .95)
res2 <- cor.mtest(someInsects, conf.level = .99)
png(height=7, width=7, units = "in", file="plots/prop_farmland_practice.jpeg", type = "cairo-png", res = 300)
# with correlation coefficient instead of p-values, coloured boxes = significant at a 0.05 level
corrplot(p, method = "color",
type = "upper", order = "original", number.cex = .7,
addCoef.col = "black", # Add coefficient of correlation
tl.col = "black", tl.srt = 90, # Text label color and rotation
# Combine with significance
p.mat = res1$p, sig.level = 0.05, insig = "blank",
# hide correlation coefficient on the principal diagonal
diag = FALSE, mar=c(0,0,1,0))
dev.off()
names(data)
mean(data$Intensiv_1000)
# model
# Mixed model: log biomass ~ proportional farming-intensity covers + sampling
# covariates, with random intercepts for route and pilot. propOrgIntensive is
# commented out (excluded from the fixed effects).
lmer1000 <- lmer(
log(Biomass + 1) ~
propOrgExtensive +
propExtensive +
propOrgSemiIntensive +
propSemiIntensive +
propIntensive +
#propOrgIntensive +
Time_band + Time_band:cnumberTime + cStops + cyDay +
(1 | RouteID_JB) + (1 | PilotID),
data = data
)
summary(lmer1000)
car::vif(lmer1000)
tab_model(lmer1000, show.intercept = F, digits = 3, collapse.ci = T, pred.labels = c("Prop. organic extensive", "Prop. extensive", "Prop. organic semi-intensive", "Prop. semi-intensive", "Prop. intensive", "Evening vs midday", "Potential stops", "Day of year", "Time within time band"))
# pairwise comparison between interaction terms
# NOTE(review): glht() is from 'multcomp', which is not attached in the visible
# library() calls -- confirm it is loaded elsewhere in the project.
pair.ht <- glht(lmer1000, linfct = c("propIntensive - propSemiIntensive = 0", "propIntensive - propOrgSemiIntensive = 0", "propIntensive - propExtensive = 0", "propIntensive - propOrgExtensive = 0"))
summary(pair.ht) # trend toward higher biomass in organic farmland
confint(pair.ht)
# effect
# Effect displays for plotting model predictions below.
gls1.alleffects <- allEffects(lmer1000)
effectdata <- as.data.frame(gls1.alleffects, row.names=NULL, optional=TRUE)
eall.lm1 <- predictorEffects(lmer1000)
#effectdata <- as.data.frame(eall.lm1, row.names=NULL, optional=TRUE)
plot(eall.lm1, lines=list(multiline=TRUE))
#plot(predictorEffects(lmeurban1000, ~ urbGreenPropArea_1000 + byHegnMeterPerHa_1000 + Bykerne_1000 + Lav.bebyggelse_1000 + Høj.bebyggelse_1000 + Erhverv_1000 + cnumberTime, residuals = T), partial.residuals=list(smooth=TRUE, span=0.50, lty = "dashed"))
### ggplot effect plot ####
names(effectdata)

# Reshape one predictor-effect data frame into the common long format
# (landuse, propcover, fit, se, lower, upper) used by the plots below.
# This replaces five near-identical copy-pasted blocks; the stacked result
# (`test`, same name as before) is byte-for-byte equivalent: same rows in the
# same order, same columns in the same order.
tidy_effect <- function(var) {
  eff <- effectdata[[var]]
  eff$landuse <- var
  eff$propcover <- eff[[var]]
  eff[, c("landuse", "propcover", "fit", "se", "lower", "upper")]
}

prop_vars <- c("propOrgExtensive", "propExtensive", "propOrgSemiIntensive",
               "propSemiIntensive", "propIntensive")
test <- do.call(rbind, lapply(prop_vars, tidy_effect))
# Visualization
# Panel B: predicted log biomass per intensity class over the 0-15% cover
# range, one coloured line + SE ribbon per class; legend suppressed here.
# NOTE(review): theme_minimal_grid() is from cowplot and fct_relevel() from
# forcats -- both packages must be attached; confirm they are loaded earlier.
effectplot_farmland_1 <- test %>% mutate(
landuse = fct_relevel(
landuse,
"propOrgExtensive",
"propExtensive",
"propOrgSemiIntensive",
"propSemiIntensive",
"propIntensive"
)
)%>% ggplot(aes(x = propcover, y = fit, fill = landuse)) +
geom_line(aes(color = landuse), size = 2) +
scale_color_manual(
values = c("#29994B", "#44A1E3", "#55E680", "#3E7AFA", "#3C46F0"),
labels = c(
"Prop. organic extensive",
"Prop. extensive",
"Prop. organic semi-intensive",
"Prop. semi-intensive", "Prop. intensive"
)
) + theme_minimal_grid() + theme(
plot.subtitle = element_text(size = 20, face = "bold"),
legend.title = element_blank(),
legend.text = element_text(size = 8),
legend.position = "none"
) + scale_x_continuous(
limits = c(0, 15),
labels = function(x)
paste0(x, "%")) + geom_ribbon(
aes(
ymin = fit-se,
ymax = fit+se,
group = landuse
),
linetype = 2,
alpha = 0.2,
show.legend = F
) + labs(
x = "",
y = "log Predicted biomass (mg)",
subtitle = "B",
colour = "Land use type"
) + scale_fill_manual(values = c("#29994B", "#44A1E3", "#55E680", "#3E7AFA", "#3C46F0")) + guides(colour = guide_legend(nrow = 1))
# Panel C: same effects over the 40-100% cover range, with the legend shown.
effectplot_farmland_2 <- test %>% mutate(
landuse = fct_relevel(
landuse,
"propOrgExtensive",
"propExtensive",
"propOrgSemiIntensive",
"propSemiIntensive",
"propIntensive"
)
)%>% ggplot(aes(x = propcover, y = fit, fill = landuse)) +
geom_line(aes(color = landuse), size = 2) +
scale_color_manual(
values = c("#29994B", "#44A1E3", "#55E680", "#3E7AFA", "#3C46F0"),
labels = c(
"Prop. organic extensive",
"Prop. extensive",
"Prop. organic semi-intensive",
"Prop. semi-intensive", "Prop. intensive"
)
) + theme_minimal_grid() + theme(
plot.subtitle = element_text(size = 20, face = "bold"),
legend.title = element_blank(),
legend.text = element_text(size = 8),
legend.position = "bottom"
) + scale_x_continuous(
limits = c(40, 100),
labels = function(x)
paste0(x, "%")) + geom_ribbon(
aes(
ymin = fit-se,
ymax = fit+se,
group = landuse
),
linetype = 2,
alpha = 0.2,
show.legend = F
) + labs(
x = "Land use intensity cover",
y = "log Predicted biomass (mg)",
subtitle = "C",
colour = "Land use type"
) + scale_fill_manual(values = c("#29994B", "#44A1E3", "#55E680", "#3E7AFA", "#3C46F0")) + guides(colour = guide_legend(nrow = 1))
#cowplot::save_plot("plots/farmland_landuse_prop_farming_practice.jpeg", effectplot_farmland)
# Stack panel A (urban green, from the urban section) above panels B and C.
landuse_plots <- cowplot::plot_grid(landuse_urban, effectplot_farmland_1, effectplot_farmland_2, ncol = 1)
cowplot::save_plot("plots/propLanduse_plot.jpeg", landuse_plots, base_height = 12, base_width = 10)
### PCA propFarmland ####
mydata <- data
names(mydata)
# Total farmland cover plus the six proportional intensity columns
# (selected by position; verify against the names(mydata) output above).
mydata <- mydata[,c(44,145:150)]
names(mydata)
colnames(mydata) <- c("Farmland", "propOrgExtensive", "propExtensive", "propSemiIntensive", "propOrgSemiIntensive", "propIntensive", "propOrgIntensive")
fit <- princomp(mydata, cor=TRUE)
#with ggplot
autoplot(fit)
# FIX: annotate the biplot with the same rows the PCA was fitted on. The
# original passed `data = fit_data` (the drop_na() table from the farmland-PCA
# section above), whose rows do not match this fit on the agriculture-route
# subset; use mydata itself.
dk_autoplot <- autoplot(fit, data = mydata,
loadings = TRUE,
loadings.colour = 'black',
loadings.label = TRUE,
loadings.label.size = 5) +
scale_colour_manual(values = landuseCols[1:6])+
theme_bw() + labs(colour = "Land cover")
cowplot::save_plot("plots/pca_landuse_propfarmland.png", dk_autoplot, base_height = 8, base_width = 12)
# Varimax-rotated 2-factor solution; the two score columns are named below as
# the organic and conventional extensive/semi-intensive gradients.
pca_rotated <- psych::principal(mydata, rotate="varimax", nfactors=2, scores=TRUE)
biplot(pca_rotated, main = "")
print(pca_rotated)
#ggsave("plots/pca_with_rotation_propfarmland_1000_DK.png", width = 12, height = 10)
#add PCA axes scores to the dataset
data$Organic_gradient <- pca_rotated$scores[,1]
data$Conventional_extensive_gradient <- pca_rotated$scores[,2]
# Mixed model: log biomass ~ the two rotated PCA gradients + sampling covariates.
lme1000 <- lmer(log(Biomass+1) ~
Organic_gradient + Conventional_extensive_gradient +
Time_band +
Time_band:cnumberTime + cStops + cyDay +
(1|RouteID_JB) + (1|PilotID), data= data)
summary(lme1000)
tab_model(lme1000, show.intercept = F, collapse.ci = T, digits = 3, pred.labels = c("Organic gradient", "Conventional extensive/semi-intensive gradient", "Time band: evening vs midday", "Potential stops", "Day of year", "Time within time band"))
car::vif(lme1000)
# pairwise comparison between the two farming-practice gradients
# (the original comment here was copy-pasted from another analysis and did not
# describe this contrast; glht() is from 'multcomp' -- confirm it is attached)
pair.ht <- glht(lme1000, linfct = c("Conventional_extensive_gradient - Organic_gradient = 0"))
summary(pair.ht)
confint(pair.ht)
|
9562651e344c425b9fa5ec3ca29a5c09823adae9 | 45a43d0bc94c0e18cbc58b45ad39d4e4a6171b7c | /rscripts/gbdrawer03.R | a6796981eae95a24d56a2ef518420593c799de09 | [] | no_license | rdosreis/rdosreis.source | a34d0dce9a4eb811905aeeddcc9c51e670364821 | aad8619786aff18a51981acb64192b2e27410115 | refs/heads/master | 2023-02-20T21:28:40.083052 | 2023-02-08T14:17:24 | 2023-02-08T14:17:24 | 221,798,187 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,532 | r | gbdrawer03.R | library(readr)
# Read the IHME GBD 2019 extract for COPD (DALYs by age group, sex and year).
gbd_copd <- read_csv(file = here::here("data",
"IHME-GBD_2019_DATA-4fea7ca1-1.csv"))
# Recode age as a factor with display labels: the first three neonatal groups
# get explicit names, groups 4:19 keep their original labels, groups 20:22 get
# a " years" suffix, the last is kept as-is (assumes exactly 23 age groups in
# file order -- TODO confirm against the extract).
gbd_copd$age <- factor(
x = gbd_copd$age,
levels = unique(gbd_copd$age),
labels = c(
c("Early Neonatal", "Late Neonatal", "Post Neonatal"),
unique(gbd_copd$age)[4:19],
paste(unique(gbd_copd$age)[20:22], "years"),
unique(gbd_copd$age)[23]
)
)
# Negate male values so the population-pyramid bars extend left of zero.
gbd_copd$val[gbd_copd$sex == "Male"] <- -gbd_copd$val[gbd_copd$sex == "Male"]
library(dplyr)
library(ggplot2)
library(RColorBrewer)
# 2019-only subset for the static pyramid below.
gbd_copd_19 <- gbd_copd %>%
filter (year == 2019)
# Static pyramid of DALYs by age group and sex for 2019. Symmetric y limits
# keep the male/female bars comparable; labels = abs hides the sign trick.
p <- ggplot(data = gbd_copd_19,
mapping = aes(x = age,
y = val,
fill = sex)) +
geom_bar(stat = "identity") +
scale_y_continuous(labels = abs,
limits =
max(max(gbd_copd_19$val, na.rm = TRUE),
abs(min(gbd_copd_19$val))) * c(-1,1)) +
scale_fill_brewer(palette = "Set1") +
guides(fill = guide_legend(reverse = TRUE)) +
labs(x = "Age group", y = "DALYs", fill = "Sex",
title = "Carga global de COPD. DALYs por faixa etária, ano: 2019",
caption = "Global Burden of Disease Study 2019 (GBD 2019) Results.
Seattle, United States: Institute for Health Metrics and Evaluation (IHME), 2020.
Available from https://vizhub.healthdata.org/gbd-results/.") +
theme_minimal() +
theme(legend.position = "bottom") +
coord_flip()
p
# Faceted pyramid: one panel per year, plotting ALL years of data.
# FIX: the y limits were computed from the 2019 subset (gbd_copd_19) even
# though every year is plotted here, which could silently drop bars outside
# the 2019 range; derive them from the full data instead (the animated version
# below already does this), and pass na.rm = TRUE to min() as well as max().
p <- ggplot(data = gbd_copd,
            mapping = aes(x = age,
                          y = val,
                          fill = sex)) +
  geom_bar(stat = "identity") +
  scale_y_continuous(labels = abs,
                     limits =
                       max(max(gbd_copd$val, na.rm = TRUE),
                           abs(min(gbd_copd$val, na.rm = TRUE))) * c(-1,1)) +
  scale_fill_brewer(palette = "Set1") +
  guides(fill = guide_legend(reverse = TRUE)) +
  labs(x = "Age group", y = "DALYs", fill = "Sex",
       title = "Carga global de COPD. DALYs por faixa etária",
       caption = "Global Burden of Disease Study 2019 (GBD 2019) Results.
Seattle, United States: Institute for Health Metrics and Evaluation (IHME), 2020.
Available from https://vizhub.healthdata.org/gbd-results/.") +
  theme_minimal() +
  theme(legend.position = "bottom") +
  coord_flip() +
  facet_wrap( ~ year)
p
library(gganimate)
# FIX: the original called integer(gbd_copd$year), which constructs a vector of
# zeros (and errors for a length > 1 argument) -- given the
# "{as.integer(frame_time)}" frame title below, the intent is a type
# conversion: as.integer().
gbd_copd$year <- as.integer(gbd_copd$year)
# Animated pyramid: one frame per year, interpolated linearly between years.
p <- ggplot(data = gbd_copd,
            mapping = aes(x = age,
                          y = val,
                          fill = sex)) +
  geom_bar(stat = "identity") +
  scale_y_continuous(labels = abs,
                     limits =
                       max(max(gbd_copd$val, na.rm = TRUE),
                           abs(min(gbd_copd$val))) * c(-1,1)) +
  scale_fill_brewer(palette = "Set1") +
  guides(fill = guide_legend(reverse = TRUE)) +
  labs(x = "Age group", y = "DALYs", fill = "Sex",
       title = "Carga global de COPD. DALYs por faixa etária, ano: {as.integer(frame_time)}",
       caption = "Global Burden of Disease Study 2019 (GBD 2019) Results.
Seattle, United States: Institute for Health Metrics and Evaluation (IHME), 2020.
Available from https://vizhub.healthdata.org/gbd-results/.") +
  theme_minimal() +
  theme(legend.position = "bottom") +
  coord_flip() +
  transition_time(year) +
  ease_aes('linear')
p
# gganimate(p, filename = "images/daly-copd.gif",
# ani.width = 1000, ani.height = 1600, ani.res = 200)
|
58e12760b71ee2ab7299ff4369692587d09a57eb | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.media.services/R/mediastore_operations.R | 8a4c14672e1e5b94979930e8c977ba5086264027 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | false | 34,318 | r | mediastore_operations.R | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include mediastore_service.R
NULL
#' Creates a storage container to hold objects
#'
#' @description
#' Creates a storage container to hold objects. A container is similar to a
#' bucket in the Amazon S3 service.
#'
#' @usage
#' mediastore_create_container(ContainerName, Tags)
#'
#' @param ContainerName [required] The name for the container. The name must be from 1 to 255 characters.
#' Container names must be unique to your AWS account within a specific
#' region. As an example, you could create a container named `movies` in
#' every region, as long as you don’t have an existing container with that
#' name.
#' @param Tags An array of key:value pairs that you define. These values can be
#' anything that you want. Typically, the tag key represents a category
#' (such as "environment") and the tag value represents a specific value
#' within that category (such as "test," "development," or "production").
#' You can add up to 50 tags to each container. For more information about
#' tagging, including naming and usage conventions, see [Tagging Resources
#' in
#' MediaStore](https://docs.aws.amazon.com/mediastore/latest/ug/tagging.html).
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Container = list(
#' Endpoint = "string",
#' CreationTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ARN = "string",
#' Name = "string",
#' Status = "ACTIVE"|"CREATING"|"DELETING",
#' AccessLoggingEnabled = TRUE|FALSE
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$create_container(
#' ContainerName = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_create_container
mediastore_create_container <- function(ContainerName, Tags = NULL) {
  # Describe the CreateContainer API call: a POST against the service root,
  # with no pagination.
  operation <- new_operation(
    name = "CreateContainer",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's arguments and prepare the expected response shape.
  payload <- .mediastore$create_container_input(
    ContainerName = ContainerName,
    Tags = Tags
  )
  result_shape <- .mediastore$create_container_output()
  # Build a service client from the active configuration and dispatch.
  client <- .mediastore$service(get_config())
  send_request(new_request(client, operation, payload, result_shape))
}
.mediastore$operations$create_container <- mediastore_create_container
#' Deletes the specified container
#'
#' @description
#' Deletes the specified container. Before you make a
#' [`delete_container`][mediastore_delete_container] request, delete any
#' objects in the container or in any folders in the container. You can
#' delete only empty containers.
#'
#' @usage
#' mediastore_delete_container(ContainerName)
#'
#' @param ContainerName [required] The name of the container to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_container(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_delete_container
mediastore_delete_container <- function(ContainerName) {
  # Describe the DeleteContainer API call: a POST against the service root,
  # with no pagination.
  operation <- new_operation(
    name = "DeleteContainer",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's argument and prepare the (empty) response shape.
  payload <- .mediastore$delete_container_input(ContainerName = ContainerName)
  result_shape <- .mediastore$delete_container_output()
  # Build a service client from the active configuration and dispatch.
  client <- .mediastore$service(get_config())
  send_request(new_request(client, operation, payload, result_shape))
}
.mediastore$operations$delete_container <- mediastore_delete_container
#' Deletes the access policy that is associated with the specified
#' container
#'
#' @description
#' Deletes the access policy that is associated with the specified
#' container.
#'
#' @usage
#' mediastore_delete_container_policy(ContainerName)
#'
#' @param ContainerName [required] The name of the container that holds the policy.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_container_policy(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_delete_container_policy
mediastore_delete_container_policy <- function(ContainerName) {
  # Describe the DeleteContainerPolicy API call: a POST against the service
  # root, with no pagination.
  operation <- new_operation(
    name = "DeleteContainerPolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  # Serialize the caller's argument and prepare the (empty) response shape.
  payload <- .mediastore$delete_container_policy_input(ContainerName = ContainerName)
  result_shape <- .mediastore$delete_container_policy_output()
  # Build a service client from the active configuration and dispatch.
  client <- .mediastore$service(get_config())
  send_request(new_request(client, operation, payload, result_shape))
}
.mediastore$operations$delete_container_policy <- mediastore_delete_container_policy
#' Deletes the cross-origin resource sharing (CORS) configuration
#' information that is set for the container
#'
#' @description
#' Deletes the cross-origin resource sharing (CORS) configuration
#' information that is set for the container.
#'
#' To use this operation, you must have permission to perform the
#' `MediaStore:DeleteCorsPolicy` action. The container owner has this
#' permission by default and can grant this permission to others.
#'
#' @usage
#' mediastore_delete_cors_policy(ContainerName)
#'
#' @param ContainerName [required] The name of the container to remove the policy from.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_cors_policy(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_delete_cors_policy
mediastore_delete_cors_policy <- function(ContainerName) {
  # Describe the DeleteCorsPolicy call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "DeleteCorsPolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$delete_cors_policy_input(ContainerName = ContainerName)
  shape <- .mediastore$delete_cors_policy_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$delete_cors_policy <- mediastore_delete_cors_policy
#' Removes an object lifecycle policy from a container
#'
#' @description
#' Removes an object lifecycle policy from a container. It takes up to 20
#' minutes for the change to take effect.
#'
#' @usage
#' mediastore_delete_lifecycle_policy(ContainerName)
#'
#' @param ContainerName [required] The name of the container that holds the object lifecycle policy.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_lifecycle_policy(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_delete_lifecycle_policy
mediastore_delete_lifecycle_policy <- function(ContainerName) {
  # Describe the DeleteLifecyclePolicy call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "DeleteLifecyclePolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$delete_lifecycle_policy_input(ContainerName = ContainerName)
  shape <- .mediastore$delete_lifecycle_policy_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$delete_lifecycle_policy <- mediastore_delete_lifecycle_policy
#' Deletes the metric policy that is associated with the specified
#' container
#'
#' @description
#' Deletes the metric policy that is associated with the specified
#' container. If there is no metric policy associated with the container,
#' MediaStore doesn't send metrics to CloudWatch.
#'
#' @usage
#' mediastore_delete_metric_policy(ContainerName)
#'
#' @param ContainerName [required] The name of the container that is associated with the metric policy that
#' you want to delete.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$delete_metric_policy(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_delete_metric_policy
mediastore_delete_metric_policy <- function(ContainerName) {
  # Describe the DeleteMetricPolicy call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "DeleteMetricPolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$delete_metric_policy_input(ContainerName = ContainerName)
  shape <- .mediastore$delete_metric_policy_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$delete_metric_policy <- mediastore_delete_metric_policy
#' Retrieves the properties of the requested container
#'
#' @description
#' Retrieves the properties of the requested container. This request is
#' commonly used to retrieve the endpoint of a container. An endpoint is a
#' value assigned by the service when a new container is created. A
#' container's endpoint does not change after it has been assigned. The
#' [`describe_container`][mediastore_describe_container] request returns a
#' single `Container` object based on `ContainerName`. To return all
#' `Container` objects that are associated with a specified AWS account,
#' use [`list_containers`][mediastore_list_containers].
#'
#' @usage
#' mediastore_describe_container(ContainerName)
#'
#' @param ContainerName The name of the container to query.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Container = list(
#' Endpoint = "string",
#' CreationTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ARN = "string",
#' Name = "string",
#' Status = "ACTIVE"|"CREATING"|"DELETING",
#' AccessLoggingEnabled = TRUE|FALSE
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$describe_container(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_describe_container
mediastore_describe_container <- function(ContainerName = NULL) {
  # Describe the DescribeContainer call: a non-paginated POST to "/".
  # ContainerName is optional for this operation, hence the NULL default.
  operation <- new_operation(
    name = "DescribeContainer",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$describe_container_input(ContainerName = ContainerName)
  shape <- .mediastore$describe_container_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$describe_container <- mediastore_describe_container
#' Retrieves the access policy for the specified container
#'
#' @description
#' Retrieves the access policy for the specified container. For information
#' about the data that is included in an access policy, see the [AWS
#' Identity and Access Management User
#' Guide](https://docs.aws.amazon.com/iam/index.html).
#'
#' @usage
#' mediastore_get_container_policy(ContainerName)
#'
#' @param ContainerName [required] The name of the container.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Policy = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_container_policy(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_get_container_policy
mediastore_get_container_policy <- function(ContainerName) {
  # Describe the GetContainerPolicy call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "GetContainerPolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$get_container_policy_input(ContainerName = ContainerName)
  shape <- .mediastore$get_container_policy_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$get_container_policy <- mediastore_get_container_policy
#' Returns the cross-origin resource sharing (CORS) configuration
#' information that is set for the container
#'
#' @description
#' Returns the cross-origin resource sharing (CORS) configuration
#' information that is set for the container.
#'
#' To use this operation, you must have permission to perform the
#' `MediaStore:GetCorsPolicy` action. By default, the container owner has
#' this permission and can grant it to others.
#'
#' @usage
#' mediastore_get_cors_policy(ContainerName)
#'
#' @param ContainerName [required] The name of the container that the policy is assigned to.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' CorsPolicy = list(
#' list(
#' AllowedOrigins = list(
#' "string"
#' ),
#' AllowedMethods = list(
#' "PUT"|"GET"|"DELETE"|"HEAD"
#' ),
#' AllowedHeaders = list(
#' "string"
#' ),
#' MaxAgeSeconds = 123,
#' ExposeHeaders = list(
#' "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_cors_policy(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_get_cors_policy
mediastore_get_cors_policy <- function(ContainerName) {
  # Describe the GetCorsPolicy call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "GetCorsPolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$get_cors_policy_input(ContainerName = ContainerName)
  shape <- .mediastore$get_cors_policy_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$get_cors_policy <- mediastore_get_cors_policy
#' Retrieves the object lifecycle policy that is assigned to a container
#'
#' @description
#' Retrieves the object lifecycle policy that is assigned to a container.
#'
#' @usage
#' mediastore_get_lifecycle_policy(ContainerName)
#'
#' @param ContainerName [required] The name of the container that the object lifecycle policy is assigned
#' to.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' LifecyclePolicy = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_lifecycle_policy(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_get_lifecycle_policy
mediastore_get_lifecycle_policy <- function(ContainerName) {
  # Describe the GetLifecyclePolicy call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "GetLifecyclePolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$get_lifecycle_policy_input(ContainerName = ContainerName)
  shape <- .mediastore$get_lifecycle_policy_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$get_lifecycle_policy <- mediastore_get_lifecycle_policy
#' Returns the metric policy for the specified container
#'
#' @description
#' Returns the metric policy for the specified container.
#'
#' @usage
#' mediastore_get_metric_policy(ContainerName)
#'
#' @param ContainerName [required] The name of the container that is associated with the metric policy.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' MetricPolicy = list(
#' ContainerLevelMetrics = "ENABLED"|"DISABLED",
#' MetricPolicyRules = list(
#' list(
#' ObjectGroup = "string",
#' ObjectGroupName = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_metric_policy(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_get_metric_policy
mediastore_get_metric_policy <- function(ContainerName) {
  # Describe the GetMetricPolicy call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "GetMetricPolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$get_metric_policy_input(ContainerName = ContainerName)
  shape <- .mediastore$get_metric_policy_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$get_metric_policy <- mediastore_get_metric_policy
#' Lists the properties of all containers in AWS Elemental MediaStore
#'
#' @description
#' Lists the properties of all containers in AWS Elemental MediaStore.
#'
#' You can query to receive all the containers in one response. Or you can
#' include the `MaxResults` parameter to receive a limited number of
#' containers in each response. In this case, the response includes a
#' token. To get the next set of containers, send the command again, this
#' time with the `NextToken` parameter (with the returned token as its
#' value). The next set of responses appears, with a token if there are
#' still more containers to receive.
#'
#' See also [`describe_container`][mediastore_describe_container], which
#' gets the properties of one container.
#'
#' @usage
#' mediastore_list_containers(NextToken, MaxResults)
#'
#' @param NextToken Only if you used `MaxResults` in the first command, enter the token
#' (which was included in the previous response) to obtain the next set of
#' containers. This token is included in a response only if there actually
#' are more containers to list.
#' @param MaxResults Enter the maximum number of containers in the response. Use from 1 to
#' 255 characters.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Containers = list(
#' list(
#' Endpoint = "string",
#' CreationTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' ARN = "string",
#' Name = "string",
#' Status = "ACTIVE"|"CREATING"|"DELETING",
#' AccessLoggingEnabled = TRUE|FALSE
#' )
#' ),
#' NextToken = "string"
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_containers(
#' NextToken = "string",
#' MaxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_list_containers
mediastore_list_containers <- function(NextToken = NULL, MaxResults = NULL) {
  # Describe the ListContainers call: a POST to "/". Pagination is driven by
  # the caller via NextToken/MaxResults; no paginator config is declared here.
  operation <- new_operation(
    name = "ListContainers",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$list_containers_input(NextToken = NextToken, MaxResults = MaxResults)
  shape <- .mediastore$list_containers_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$list_containers <- mediastore_list_containers
#' Returns a list of the tags assigned to the specified container
#'
#' @description
#' Returns a list of the tags assigned to the specified container.
#'
#' @usage
#' mediastore_list_tags_for_resource(Resource)
#'
#' @param Resource [required] The Amazon Resource Name (ARN) for the container.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#' Resource = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_list_tags_for_resource
mediastore_list_tags_for_resource <- function(Resource) {
  # Describe the ListTagsForResource call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$list_tags_for_resource_input(Resource = Resource)
  shape <- .mediastore$list_tags_for_resource_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$list_tags_for_resource <- mediastore_list_tags_for_resource
#' Creates an access policy for the specified container to restrict the
#' users and clients that can access it
#'
#' @description
#' Creates an access policy for the specified container to restrict the
#' users and clients that can access it. For information about the data
#' that is included in an access policy, see the [AWS Identity and Access
#' Management User Guide](https://docs.aws.amazon.com/iam/index.html).
#'
#' For this release of the REST API, you can create only one policy for a
#' container. If you enter
#' [`put_container_policy`][mediastore_put_container_policy] twice, the
#' second command modifies the existing policy.
#'
#' @usage
#' mediastore_put_container_policy(ContainerName, Policy)
#'
#' @param ContainerName [required] The name of the container.
#' @param Policy [required] The contents of the policy, which includes the following:
#'
#' - One `Version` tag
#'
#' - One `Statement` tag that contains the standard tags for the policy.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$put_container_policy(
#' ContainerName = "string",
#' Policy = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_put_container_policy
mediastore_put_container_policy <- function(ContainerName, Policy) {
  # Describe the PutContainerPolicy call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "PutContainerPolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$put_container_policy_input(ContainerName = ContainerName, Policy = Policy)
  shape <- .mediastore$put_container_policy_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$put_container_policy <- mediastore_put_container_policy
#' Sets the cross-origin resource sharing (CORS) configuration on a
#' container so that the container can service cross-origin requests
#'
#' @description
#' Sets the cross-origin resource sharing (CORS) configuration on a
#' container so that the container can service cross-origin requests. For
#' example, you might want to enable a request whose origin is
#' http://www.example.com to access your AWS Elemental MediaStore container
#' at my.example.container.com by using the browser's XMLHttpRequest
#' capability.
#'
#' To enable CORS on a container, you attach a CORS policy to the
#' container. In the CORS policy, you configure rules that identify origins
#' and the HTTP methods that can be executed on your container. The policy
#' can contain up to 398,000 characters. You can add up to 100 rules to a
#' CORS policy. If more than one rule applies, the service uses the first
#' applicable rule listed.
#'
#' To learn more about CORS, see [Cross-Origin Resource Sharing (CORS) in
#' AWS Elemental
#' MediaStore](https://docs.aws.amazon.com/mediastore/latest/ug/cors-policy.html).
#'
#' @usage
#' mediastore_put_cors_policy(ContainerName, CorsPolicy)
#'
#' @param ContainerName [required] The name of the container that you want to assign the CORS policy to.
#' @param CorsPolicy [required] The CORS policy to apply to the container.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$put_cors_policy(
#' ContainerName = "string",
#' CorsPolicy = list(
#' list(
#' AllowedOrigins = list(
#' "string"
#' ),
#' AllowedMethods = list(
#' "PUT"|"GET"|"DELETE"|"HEAD"
#' ),
#' AllowedHeaders = list(
#' "string"
#' ),
#' MaxAgeSeconds = 123,
#' ExposeHeaders = list(
#' "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_put_cors_policy
mediastore_put_cors_policy <- function(ContainerName, CorsPolicy) {
  # Describe the PutCorsPolicy call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "PutCorsPolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$put_cors_policy_input(ContainerName = ContainerName, CorsPolicy = CorsPolicy)
  shape <- .mediastore$put_cors_policy_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$put_cors_policy <- mediastore_put_cors_policy
#' Writes an object lifecycle policy to a container
#'
#' @description
#' Writes an object lifecycle policy to a container. If the container
#' already has an object lifecycle policy, the service replaces the
#' existing policy with the new policy. It takes up to 20 minutes for the
#' change to take effect.
#'
#' For information about how to construct an object lifecycle policy, see
#' [Components of an Object Lifecycle
#' Policy](https://docs.aws.amazon.com/mediastore/latest/ug/policies-object-lifecycle-components.html).
#'
#' @usage
#' mediastore_put_lifecycle_policy(ContainerName, LifecyclePolicy)
#'
#' @param ContainerName [required] The name of the container that you want to assign the object lifecycle
#' policy to.
#' @param LifecyclePolicy [required] The object lifecycle policy to apply to the container.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$put_lifecycle_policy(
#' ContainerName = "string",
#' LifecyclePolicy = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_put_lifecycle_policy
mediastore_put_lifecycle_policy <- function(ContainerName, LifecyclePolicy) {
  # Describe the PutLifecyclePolicy call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "PutLifecyclePolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$put_lifecycle_policy_input(ContainerName = ContainerName, LifecyclePolicy = LifecyclePolicy)
  shape <- .mediastore$put_lifecycle_policy_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$put_lifecycle_policy <- mediastore_put_lifecycle_policy
#' The metric policy that you want to add to the container
#'
#' @description
#' The metric policy that you want to add to the container. A metric policy
#' allows AWS Elemental MediaStore to send metrics to Amazon CloudWatch. It
#' takes up to 20 minutes for the new policy to take effect.
#'
#' @usage
#' mediastore_put_metric_policy(ContainerName, MetricPolicy)
#'
#' @param ContainerName [required] The name of the container that you want to add the metric policy to.
#' @param MetricPolicy [required] The metric policy that you want to associate with the container. In the
#' policy, you must indicate whether you want MediaStore to send
#' container-level metrics. You can also include up to five rules to define
#' groups of objects that you want MediaStore to send object-level metrics
#' for. If you include rules in the policy, construct each rule with both
#' of the following:
#'
#' - An object group that defines which objects to include in the group.
#' The definition can be a path or a file name, but it can't have more
#' than 900 characters. Valid characters are: a-z, A-Z, 0-9, _
#' (underscore), = (equal), : (colon), . (period), - (hyphen), ~
#' (tilde), / (forward slash), and * (asterisk). Wildcards (*) are
#' acceptable.
#'
#' - An object group name that allows you to refer to the object group.
#' The name can't have more than 30 characters. Valid characters are:
#' a-z, A-Z, 0-9, and _ (underscore).
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$put_metric_policy(
#' ContainerName = "string",
#' MetricPolicy = list(
#' ContainerLevelMetrics = "ENABLED"|"DISABLED",
#' MetricPolicyRules = list(
#' list(
#' ObjectGroup = "string",
#' ObjectGroupName = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_put_metric_policy
mediastore_put_metric_policy <- function(ContainerName, MetricPolicy) {
  # Describe the PutMetricPolicy call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "PutMetricPolicy",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$put_metric_policy_input(ContainerName = ContainerName, MetricPolicy = MetricPolicy)
  shape <- .mediastore$put_metric_policy_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$put_metric_policy <- mediastore_put_metric_policy
#' Starts access logging on the specified container
#'
#' @description
#' Starts access logging on the specified container. When you enable access
#' logging on a container, MediaStore delivers access logs for objects
#' stored in that container to Amazon CloudWatch Logs.
#'
#' @usage
#' mediastore_start_access_logging(ContainerName)
#'
#' @param ContainerName [required] The name of the container that you want to start access logging on.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$start_access_logging(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_start_access_logging
mediastore_start_access_logging <- function(ContainerName) {
  # Describe the StartAccessLogging call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "StartAccessLogging",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$start_access_logging_input(ContainerName = ContainerName)
  shape <- .mediastore$start_access_logging_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$start_access_logging <- mediastore_start_access_logging
#' Stops access logging on the specified container
#'
#' @description
#' Stops access logging on the specified container. When you stop access
#' logging on a container, MediaStore stops sending access logs to Amazon
#' CloudWatch Logs. These access logs are not saved and are not
#' retrievable.
#'
#' @usage
#' mediastore_stop_access_logging(ContainerName)
#'
#' @param ContainerName [required] The name of the container that you want to stop access logging on.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$stop_access_logging(
#' ContainerName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_stop_access_logging
mediastore_stop_access_logging <- function(ContainerName) {
  # Describe the StopAccessLogging call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "StopAccessLogging",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$stop_access_logging_input(ContainerName = ContainerName)
  shape <- .mediastore$stop_access_logging_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$stop_access_logging <- mediastore_stop_access_logging
#' Adds tags to the specified AWS Elemental MediaStore container
#'
#' @description
#' Adds tags to the specified AWS Elemental MediaStore container. Tags are
#' key:value pairs that you can associate with AWS resources. For example,
#' the tag key might be "customer" and the tag value might be "companyA."
#' You can specify one or more tags to add to each container. You can add
#' up to 50 tags to each container. For more information about tagging,
#' including naming and usage conventions, see [Tagging Resources in
#' MediaStore](https://docs.aws.amazon.com/mediastore/latest/ug/tagging.html).
#'
#' @usage
#' mediastore_tag_resource(Resource, Tags)
#'
#' @param Resource [required] The Amazon Resource Name (ARN) for the container.
#' @param Tags [required] An array of key:value pairs that you want to add to the container. You
#' need to specify only the tags that you want to add or update. For
#' example, suppose a container already has two tags (customer:CompanyA and
#' priority:High). You want to change the priority tag and also add a third
#' tag (type:Contract). For TagResource, you specify the following tags:
#' priority:Medium, type:Contract. The result is that your container has
#' three tags: customer:CompanyA, priority:Medium, and type:Contract.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$tag_resource(
#' Resource = "string",
#' Tags = list(
#' list(
#' Key = "string",
#' Value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_tag_resource
mediastore_tag_resource <- function(Resource, Tags) {
  # Describe the TagResource call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$tag_resource_input(Resource = Resource, Tags = Tags)
  shape <- .mediastore$tag_resource_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$tag_resource <- mediastore_tag_resource
#' Removes tags from the specified container
#'
#' @description
#' Removes tags from the specified container. You can specify one or more
#' tags to remove.
#'
#' @usage
#' mediastore_untag_resource(Resource, TagKeys)
#'
#' @param Resource [required] The Amazon Resource Name (ARN) for the container.
#' @param TagKeys [required] A comma-separated list of keys for tags that you want to remove from the
#' container. For example, if your container has two tags
#' (customer:CompanyA and priority:High) and you want to remove one of the
#' tags (priority:High), you specify the key for the tag that you want to
#' remove (priority).
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$untag_resource(
#' Resource = "string",
#' TagKeys = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname mediastore_untag_resource
mediastore_untag_resource <- function(Resource, TagKeys) {
  # Describe the UntagResource call: a non-paginated POST to "/".
  operation <- new_operation(
    name = "UntagResource",
    http_method = "POST",
    http_path = "/",
    paginator = list()
  )
  payload <- .mediastore$untag_resource_input(Resource = Resource, TagKeys = TagKeys)
  shape <- .mediastore$untag_resource_output()
  svc <- .mediastore$service(get_config())
  # Issue the signed request and hand the parsed response back to the caller.
  send_request(new_request(svc, operation, payload, shape))
}
.mediastore$operations$untag_resource <- mediastore_untag_resource
|
a5866dcd659d8bad7ccd2818286b5417ef3810d8 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.compute/man/ec2_create_transit_gateway_route_table.Rd | bd0b662c6f4d1e4ea68f933cc2b0ff3391ba26b7 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,010 | rd | ec2_create_transit_gateway_route_table.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_create_transit_gateway_route_table}
\alias{ec2_create_transit_gateway_route_table}
\title{Creates a route table for the specified transit gateway}
\usage{
ec2_create_transit_gateway_route_table(
TransitGatewayId,
TagSpecifications = NULL,
DryRun = NULL
)
}
\arguments{
\item{TransitGatewayId}{[required] The ID of the transit gateway.}
\item{TagSpecifications}{The tags to apply to the transit gateway route table.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Creates a route table for the specified transit gateway.
See \url{https://www.paws-r-sdk.com/docs/ec2_create_transit_gateway_route_table/} for full documentation.
}
\keyword{internal}
|
3c581012ca0de72eb35f44a5888702653ec6dfcb | 215702ce00728f74e3a91e605ccf3f0b4500e4bd | /R/totaltest.R | 949678237c220666ed060becbf8bb3e4973cc368 | [] | no_license | zhangyuwinnie/RPEXE.RPEXT | 25ed9b824fb8303a317004a85ddbe4e31bde9909 | 20f6bdd83584861aca2bc8fa9ad9cda8a8247567 | refs/heads/master | 2021-04-28T23:53:47.342865 | 2017-05-04T19:31:42 | 2017-05-04T19:31:42 | 77,723,694 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,037 | r | totaltest.R | #' @title total time on test
#'
#' @description Function 'totaltest' computes total-time-on-test.
#'
#' @param time event/censoring times
#' @param censor censoring status
#'
#' @usage totaltest(time,censor)
#' @return
#' time_die time points where events occur (in ascending order)
#' ttot total time on test corresponding to each time point in "time_die"
#' deaths number of death corresponding to each time point in "time_die"
#' @export
#'
#' @examples
#' t1 <- c(2,3,4,5.5,7,10,12,15)
#' c1 <- c(0,0,1,0,0,1,0,0)
#' totaltest(t1,c1)
totaltest <- function(time,censor)
{
#save the data in the orginal structure
sew=rep(0, length(time))
for(i in 1:length(time))
sew[i]=i
tmpdata=cbind(sew,censor,time)
# sort the tmp data
tmp2 = tmpdata[order(time),]
tmp2 = cbind(tmp2,rep(0,dim(tmp2)[1]))
#Compute alpha's for the sequence
for (i in 1:dim(tmp2)[1])
{
if (tmp2[i,2]==0)
tmp2[i,4]=0
else
tmp2[i,4]=1
}
#Deal with alpha > 1
for (i in 1:(dim(tmp2)[1]-1))
if (tmp2[dim(tmp2)[1]+1-i,3]== tmp2[dim(tmp2)[1]-i,3])
{
tmp2[dim(tmp2)[1]-i,4]= tmp2[dim(tmp2)[1]-i,4] + tmp2[dim(tmp2)[1]+1-i,4]
tmp2[dim(tmp2)[1]+1-i,4]=0
}
# Delete the repeats
k=as.null()
for (i in 1:dim(tmp2)[1])
if (tmp2[i,2] == 1&tmp2[i,4]==0)
k[length(k)+1]=i
tmp3 = tmp2
if (length(k)!=0)
tmp3=tmp3[-k,]
tmp3 = cbind(tmp3,rep(0,dim(tmp3)[1]))
#Compute the number of patients in the study
for(i in 1:dim(tmp3)[1])
{
if (tmp3[i,4]==0)
tmp3[i,5]= 1
else
tmp3[i,5]= tmp3[i,4]
}
tmp3 = cbind(tmp3,rep(0,dim(tmp3)[1]))
for(i in 1:dim(tmp3)[1]-1)
{
tmp3[i,6]= sum(tmp3[,5])-sum(tmp3[1:i,5])
tmp3[dim(tmp3)[1],6]= 0
}
#Compute the survival time of this cohort
tmp3 = cbind(tmp3,rep(0,dim(tmp3)[1]))
for(i in 1:dim(tmp3)[1])
{
if (i==1)
tmp3[i,7]= sum(tmp3[,5])*tmp3[i,3]
else
###survival time == patient number * time difference
tmp3[i,7] = tmp3[i-1,6]* (tmp3[i,3]-tmp3[i-1,3])
}
tmp3 = cbind(tmp3,rep(0,dim(tmp3)[1]))
tmp3[,8] = tmp3[,7]
for (i in 1:dim(tmp3)[1])
if (tmp3[i,2]==0)
{
if (t(tmp3[i:dim(tmp3)[1],2])%*%tmp3[i:dim(tmp3)[1],2]>0)
tmp3[i+1,8] = tmp3[i,8]+tmp3[i+1,8]
if (t(tmp3[i:dim(tmp3)[1],2])%*%tmp3[i:dim(tmp3)[1],2]==0 && tmp3[i-1,2]!=0)
{
### put all the credit to the last noncensered data
k = length(tmp3[i:dim(tmp3)[1],2])
for (j in 1:k)
tmp3[i-1,8] = tmp3[i-1,8]+tmp3[i-1+j,8]
}
}
#Build the survival reaction
tmp3 = cbind(tmp3,rep(0,dim(tmp3)[1]))
tmp3[,9] = tmp3[,6]
for (i in 2:length(tmp3[,9]))
tmp3[i,9]= tmp3[i-1,9]-tmp3[i,4]
#plot (tmp3[,3],tmp3[,9])
###delete all the censered items
k=as.null()
for (i in 1:length(tmp3[,1]))
if (tmp3[i,2]== 0)
k[length(k)+1]=i
tmp4 = tmp3
if (length(k)!=0)
tmp4=tmp4[-k,]
time_die=tmp4[,3]
ttot= tmp4[,8]
deaths= tmp4[,5]
returnv=cbind(time_die,ttot,deaths)
return(returnv)
}
|
deb6614b9c550a8beafce927ba2c8c0c6ebeb930 | 46384d319e28049608000800c8ce514ce1bf7316 | /man/matches_regex.Rd | dfeccdd8a37d7680c1199919ccfc550f01c75791 | [] | no_license | cran/assertive.strings | 060086017cd7163bd3040185c8498debf091c998 | 4f21ca1cb6189e3140731acfa6b320a66fb4df2a | refs/heads/master | 2021-01-21T14:08:10.443325 | 2016-05-10T10:14:32 | 2016-05-10T10:14:32 | 48,076,811 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 787 | rd | matches_regex.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/internal-string.R
\name{matches_regex}
\alias{matches_regex}
\title{Does the input match the regular expression?}
\usage{
matches_regex(x, rx, ignore.case = TRUE, ...)
}
\arguments{
\item{x}{Input to check.}
\item{rx}{A regular expression.}
\item{ignore.case}{Should the case of alphabetic characters be ignored?}
\item{...}{Passed to \code{\link{grepl}}.}
}
\value{
A logical vector that is \code{TRUE} when the input matches the
regular expression.
}
\description{
Checks that the input matches the regular expression.
}
\note{
The default for \code{ignore.case} is different to the default in
\code{grepl}.
}
\seealso{
\code{\link{regex}} and \code{\link{regexpr}}.
}
|
2db8c462b1cdf0e963916680bbb768ba39587071 | 9ec4bb9170a6566ec0e19e3e05963187d5c0dbc3 | /man/summary.RM.Rd | 394917a0b786e23fd9ced64ef7b22c3fa0eadd30 | [] | no_license | smn74/MANOVA.RM | 4cd94f0d0e74863c733be01a2f06740faaad63e9 | 273d2f4bddc2aab5abbc99b89ffbec9e75b3da67 | refs/heads/master | 2023-02-25T12:46:41.101640 | 2023-02-08T11:47:59 | 2023-02-08T11:47:59 | 104,210,872 | 9 | 7 | null | null | null | null | UTF-8 | R | false | true | 489 | rd | summary.RM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{summary.RM}
\alias{summary.RM}
\title{Summarizing an RM object}
\usage{
\method{summary}{RM}(object, ...)
}
\arguments{
\item{object}{An RM object}
\item{...}{Additional parameters (currently not used)}
}
\description{
Returns a summary of the results including mean values, variances
and sample sizes for all groups as well
as test statistics with degrees of freedom and p-values
}
|
ea5483330385ee36381e30d952662eefce7a9f86 | 076d0987347891cb4e0b5c9865e489da8b323909 | /R/div.profile.r | 1c2bde22e8e0d86eeaec24349f8b1f174b28711e | [] | no_license | cylove1112/hilldiv | c3e9ebc819eb678c85c164e81321419840702882 | de70e217007f656a265e02c7fdd0e3a3617d5f6c | refs/heads/master | 2020-04-13T23:17:37.727022 | 2018-12-28T12:41:08 | 2018-12-28T12:41:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,085 | r | div.profile.r | div.profile <- function(abund,tree,order,values,hierarchy,level,log){
	#Quality-check and warnings
	# Defaults for missing arguments: orders of diversity q = 0..5 in steps
	# of 0.1, no value return, "gamma"-level decomposition, no log transform.
	# (values/level/log are handled as the strings "TRUE"/"FALSE".)
	if(missing(abund)) stop("The abundance data is missing")
	if(missing(order)) {order= seq(from = 0, to = 5, by = (0.1))}
	if(missing(values)) {values= "FALSE"}
	if(missing(level)) {level= "gamma"}
	if(missing(log)) {log= "FALSE"}
	#If input data is a vector
	# Single-sample case: draw one diversity-profile line across the orders.
	if(is.null(dim(abund)) == TRUE){
  profile <- c()
  for (o in order){
    # Phylogenetic Hill numbers when a tree is supplied, neutral otherwise.
    if(missing(tree)){
    div.value <- hilldiv::hill.div(abund,o)
    }else{
    div.value <- hilldiv::hill.div(abund,o,tree)
    }
  profile <- c(profile,div.value)
  }
  profile.melted <- as.data.frame(cbind(order,profile))
  plot <- ggplot(profile.melted , aes(x = order, y = profile)) +
    geom_line() +
    xlab("Order of diversity") + ylab("Effective number of OTUs") +
    theme_minimal()
  print(plot)
  if(values == "TRUE"){
  return(profile)
  }
}
	#If input data is an OTU table
	# Multi-sample case: one profile per sample, or per group when a
	# sample-to-group hierarchy table is given.
	if(is.null(dim(abund)) == FALSE){
	if(dim(abund)[1] < 2) stop("The OTU table only less than 2 OTUs")
	if(dim(abund)[2] < 2) stop("The OTU table contains less than 2 samples")
  profile <- c()
  if(missing(hierarchy)){
  # No hierarchy: compute a profile for every sample (column) of the table.
  for (o in order){
    if(missing(tree)){
    div.values <- hilldiv::hill.div(abund,o)
    }else{
    div.values <- hilldiv::hill.div(abund,o,tree)
    }
  profile <- rbind(profile,div.values)
  }
  rownames(profile) <- order
  # Reshape the order-by-sample matrix to long format for ggplot.
  profile.melted <- as.data.frame(melt(profile))
  colnames(profile.melted) <- c("Order","Sample","Value")
  profile.melted[,1] <- as.numeric(as.character(profile.melted[,1]))
  profile.melted[,3] <- as.numeric(as.character(profile.melted[,3]))
  if(log == "TRUE"){profile.melted[,3] <- log(profile.melted[,3])}
  getPalette = colorRampPalette(brewer.pal(ncol(abund), "Paired"))
  plot <- ggplot(profile.melted , aes(x = Order, y = Value, group=Sample, colour=Sample)) +
    geom_line() +
    xlab("Order of diversity") +
    ylab(if(log == "TRUE"){"Effective number of OTUs (log-transformed)" }else{"Effective number of OTUs"}) +
    scale_colour_manual(values = getPalette(ncol(abund))) +
    theme_minimal()
  print(plot)
  }else{
  # Hierarchy given: aggregate samples per group before profiling.
  # Assumes hierarchy has samples in column 1 and groups in column 2.
  colnames(hierarchy) <- c("Sample","Group")
  groups <- sort(unique(hierarchy$Group))
  for (g in groups){
    # Subset the OTU table to the group's samples and drop all-zero OTUs.
    samples <- as.character(hierarchy[which(hierarchy$Group == g),1])
    abund.subset <- abund[,samples]
    abund.subset <- as.data.frame(abund.subset[apply(abund.subset, 1, function(z) !all(z==0)),])
    if(!missing(tree)){
    # Prune tree tips absent from the subset (drop.tip -- presumably ape).
    missing.otus <- setdiff(tree$tip.label,rownames(abund.subset))
    tree.subset <- drop.tip(tree,missing.otus)
    }
    for (o in order){
      # "level" selects gamma/alpha decomposition, or incidence-based
      # diversity computed from the OTU presence frequencies.
      if(missing(tree)){
      if(level == "gamma"){div.value <- hilldiv::gamma.div(abund.subset,o)}
      if(level == "alpha"){div.value <- hilldiv::alpha.div(abund.subset,o)}
      if(level == "incidence"){div.value <- hilldiv::hill.div(rowSums(abund.subset != 0)/sum(rowSums(abund.subset != 0)),o)}
      }else{
      if(level == "gamma"){div.value <- hilldiv::gamma.div(abund.subset,o,tree.subset)}
      if(level == "alpha"){div.value <- hilldiv::alpha.div(abund.subset,o,tree.subset)}
      if(level == "incidence"){div.value <- hilldiv::hill.div(rowSums(abund.subset != 0)/sum(rowSums(abund.subset != 0)),o,tree.subset)}
      }
    profile <- rbind(profile,cbind(g,div.value))
    }
  }
  # Long-format table: one row per group x order combination.
  profile <- as.data.frame(cbind(profile,rep(order,length(groups))))
  profile[,2] <- as.numeric(as.character(profile[,2]))
  profile[,3] <- as.numeric(as.character(profile[,3]))
  colnames(profile) <- c("Group","Value","Order")
  if(log == "TRUE"){profile[,2] <- log(profile[,2])}
  getPalette = colorRampPalette(brewer.pal(length(groups), "Paired"))
  plot <- ggplot(profile , aes(x = Order, y = Value, group=Group, colour=Group)) +
    geom_line() +
    xlab("Order of diversity") +
    ylab(if((log == "TRUE") & missing(tree)){"Effective number of OTUs (log-transformed)"}else if((log == "TRUE") & !missing(tree)){"Effective number of lineages (log-transformed)"}else if((log == "FALSE") & !missing(tree)){"Effective number of lineages"}else{"Effective number of OTUs"}) +
    scale_colour_manual(values = getPalette(length(groups))) +
    theme_minimal()
  print(plot)
  }
  if(values == "TRUE"){
  return(profile)
  }
  }
}
|
a4aa2d8a61a151fb53d228d82b5bfdde298378d9 | c036c7abbd0cb9a8ae7dae5a08d7b052637e28cd | /run_analysis.R | 7eef9d2c001e7fd28aba5cce90b7b0f884ee6614 | [] | no_license | matthewtoney/datasciencecoursera | 532bf6a040f6e114d0d2d9a50bba9f8a4743ec19 | 2466190ecd7142fe30c3c6511d12d8111b03663a | refs/heads/master | 2016-09-06T08:04:01.491898 | 2015-03-22T22:45:38 | 2015-03-22T22:45:38 | 29,829,485 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,144 | r | run_analysis.R | # 1. Merge the training and the test sets to create one data set.
# Set the working directory to the location where the UCI HAR Dataset was
# unzipped before running this script.
setwd('C:/Users/Computer/Downloads/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset')

# ---- 1. Merge the training and the test sets to create one data set ----

# Read the feature names, the activity labels and the training data.
features = read.table('./features.txt', header=FALSE)
activityType = read.table('./activity_labels.txt', header=FALSE)
subjectTrain = read.table('./train/subject_train.txt', header=FALSE)
xTrain = read.table('./train/x_train.txt', header=FALSE)
yTrain = read.table('./train/y_train.txt', header=FALSE)

# Assign descriptive column names to the imported tables.
colnames(activityType) = c('activityId', 'activityType')
colnames(subjectTrain) = "subjectId"
colnames(xTrain) = features[, 2]
colnames(yTrain) = "activityId"

# Activity ids, subject ids and measurements form the full training set.
FullTrain = cbind(yTrain, subjectTrain, xTrain)

# Read the test data and name its columns the same way.
subjectTest = read.table('./test/subject_test.txt', header=FALSE)
xTest = read.table('./test/x_test.txt', header=FALSE)
yTest = read.table('./test/y_test.txt', header=FALSE)
colnames(subjectTest) = "subjectId"
colnames(xTest) = features[, 2]
colnames(yTest) = "activityId"
FullTest = cbind(yTest, subjectTest, xTest)

# Stack the training and test observations into one data set.
FullSet = rbind(FullTrain, FullTest)

# Column names of the combined set, used to pick the desired columns.
Header = colnames(FullSet)

# ---- 2. Extract only the mean and standard deviation measurements ----

# TRUE for the id columns and for the mean()/std() measurement columns.
NewObject = (grepl("activity..", Header) | grepl("subject..", Header) |
             grepl("-mean..", Header) & !grepl("-meanFreq..", Header) &
             !grepl("mean..-", Header) |
             grepl("-std..", Header) & !grepl("-std()..-", Header))

# Keep only the desired columns (logical column subset of a data.frame).
FullSet = FullSet[NewObject == TRUE]

# ---- 3. Use descriptive activity names ----
FullSet = merge(FullSet, activityType, by='activityId', all.x=TRUE)
Header = colnames(FullSet)

# ---- 4. Appropriately label the data set with descriptive variable names ----
for (i in seq_along(Header))
{
  Header[i] = gsub("\\()", "", Header[i])
  # NOTE(review): "-std$" is end-anchored, so only names ending in "-std"
  # (the magnitude columns) are renamed -- confirm that is intended.
  Header[i] = gsub("-std$", "StdDev", Header[i])
  Header[i] = gsub("-mean", "Mean", Header[i])
  Header[i] = gsub("^(t)", "time", Header[i])
  Header[i] = gsub("^(f)", "freq", Header[i])
  Header[i] = gsub("([Gg]ravity)", "Gravity", Header[i])
  Header[i] = gsub("([Bb]ody[Bb]ody|[Bb]ody)", "Body", Header[i])
  Header[i] = gsub("[Gg]yro", "Gyro", Header[i])
  Header[i] = gsub("AccMag", "AccMagnitude", Header[i])
  Header[i] = gsub("([Bb]odyaccjerkmag)", "BodyAccJerkMagnitude", Header[i])
  Header[i] = gsub("JerkMag", "JerkMagnitude", Header[i])
  Header[i] = gsub("GyroMag", "GyroMagnitude", Header[i])
}

# BUG FIX: the cleaned names were previously discarded (the original did
# "Header = colnames(FullSet)", overwriting the cleaned vector); apply them.
colnames(FullSet) = Header

# ---- 5. Tidy data set: average of each variable per activity and subject ----

# Drop the descriptive activityType column before aggregating.
FullSetNoActType = FullSet[, names(FullSet) != 'activityType']

# BUG FIX: the original "!= c('activityId','subjectId')" recycled a
# length-2 vector over the names and only worked because those columns
# happened to sit in positions 1 and 2; %in% expresses the intent robustly.
FinalSet = aggregate(FullSetNoActType[, !(names(FullSetNoActType) %in% c('activityId', 'subjectId'))],
                     by=list(activityId=FullSetNoActType$activityId,
                             subjectId=FullSetNoActType$subjectId),
                     mean)

# Re-attach the descriptive activity names and export the tidy data set.
FinalSet = merge(FinalSet, activityType, by='activityId', all.x=TRUE)
write.table(FinalSet, './FinalSet.txt', row.names=FALSE, sep='\t')
|
914feeead71ffb3556de940c2da8278c3bc61262 | 931fa3013fae888bd094eca978e78e2f3a0acec4 | /simulations/correlnb.R | 337be841944799382c148b06eddbb5695b853a28 | [] | no_license | LTLA/IndependentFilter2015 | 2f08ea27021668ae8184edccb6dce447f68c49a8 | e78afe3e112f19237ce24334ec84fa181821571f | refs/heads/master | 2021-05-16T10:17:49.257083 | 2019-01-01T10:15:21 | 2019-01-01T10:15:23 | 104,686,973 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,671 | r | correlnb.R | # This tests the behaviour of filtering on correlated tests with the NB mean filter.
# Simulation parameters: 100k genes, two groups of three libraries.
ngenes <- 100000
design <- model.matrix(~gl(2, 3))
nlib <- nrow(design)
# functions.R supplies plotAlpha() (and presumably loads edgeR, which
# provides DGEList/aveLogCPM/estimateDisp/glmQLFit below) -- TODO confirm.
source("functions.R")
# NOTE(review): the argument is spelled "showWarnings"; "showWarning"
# works only through partial argument matching.
dir.create("results-misc", showWarning=FALSE)
# Testing the NB.
set.seed(634294)
mu <- 10
disp <- 0.1
filtrate <- 0.1
pdf("results-misc/correl.pdf", width=10, height=5)
par(mfrow=c(1,2))
# Each gene's counts are a sum of 5 additive NB blocks; nshared of them are
# shared between the two data sets, inducing correlation between the tests.
for (nshared in 1:4) {
    nleft <- 5 - nshared
    pval.mean <- pval.sign <- list()
    for (it in 1:10) {
        # Using an additive approach to induce correlations between tests for the same samples.
        shared <- matrix(rnbinom(ngenes*nlib, mu=mu*nshared, size=nshared/disp), nrow=ngenes)
        unique1 <- matrix(rnbinom(ngenes*nlib, mu=mu*nleft, size=nleft/disp), nrow=ngenes)
        unique2 <- matrix(rnbinom(ngenes*nlib, mu=mu*nleft, size=nleft/disp), nrow=ngenes)
        insta1 <- shared + unique1
        insta2 <- shared + unique2
        y1 <- DGEList(insta1)
        y2 <- DGEList(insta2)
        # First filtering by mean.
        # Keep the top `filtrate` fraction of genes by the smaller of the
        # two average abundances, then run quasi-likelihood F-tests.
        criterion <- pmin(aveLogCPM(y1), aveLogCPM(y2))
        keep.mean <- rank(-criterion) < ngenes*filtrate
        y1m <- estimateDisp(y1[keep.mean,], design)
        y2m <- estimateDisp(y2[keep.mean,], design)
        fit1m <- glmQLFit(y1m, design, robust=TRUE)
        fit2m <- glmQLFit(y2m, design, robust=TRUE)
        result1m <- glmQLFTest(fit1m)
        result2m <- glmQLFTest(fit2m)
        pval.mean[[it]] <- c(result1m$table$PValue, result2m$table$PValue) # Symmetry, so it doesn't really matter.
        # Now filtering by sign.
        # Keep genes whose estimated log-fold-changes agree in sign across
        # the two data sets, then subsample to the same number of genes.
        fit1 <- glmFit(y1, design, dispersion=0.05)
        fit2 <- glmFit(y2, design, dispersion=0.05)
        keep.sign <- (fit1$coefficients[,2] > 0)==(fit2$coefficients[,2] > 0)
        keep.sign <- sample(which(keep.sign), filtrate*ngenes) # Picking a subset for speed.
        y1s <- estimateDisp(y1[keep.sign,], design)
        y2s <- estimateDisp(y2[keep.sign,], design)
        fit1s <- glmQLFit(y1s, design, robust=TRUE)
        fit2s <- glmQLFit(y2s, design, robust=TRUE)
        result1s <- glmQLFTest(fit1s)
        result2s <- glmQLFTest(fit2s)
        pval.sign[[it]] <- c(result1s$table$PValue, result2s$table$PValue)
    }
    # One panel per filter: p-value behaviour across the replicates
    # (plotAlpha is defined in functions.R, not visible here).
    plotAlpha(pval.mean, main=paste0(nshared, " shared blocks"))
    legend("topright", bty="n", legend="Mean-filtered")
    plotAlpha(pval.sign, main="", col="red")
    legend("topright", bty="n", legend="Sign-filtered")
}
dev.off()
# The sign-based filter fails with correlations.
# In this case, it's not so bad, but when the filter becomes more stringent (e.g., more than just two tests), you could imagine it would involve more problems.
|
7e8d2f8799252d583f9ceea8f8e356c75528f2f4 | f90115826c0234fbdfe7f580e6654925aa98e6b2 | /R-source/PdfRunApp.R | 2a4e9fe5d907c9c2c9beb23fa865faaae32cf078 | [] | no_license | mizma2k/FX-Git | 9272485edc4f7dfb4f12d3a4fbcaa74d9e2896d9 | b9629452419f30c4cade872928c2dcf119096b64 | refs/heads/master | 2022-12-30T23:11:03.398272 | 2020-09-09T03:40:11 | 2020-09-09T03:40:11 | null | 0 | 0 | null | null | null | null | WINDOWS-1252 | R | false | false | 1,907 | r | PdfRunApp.R | #|------------------------------------------------------------------------------------------|
#| PdfRunApp.R |
#| Copyright © 2012, Dennis Lee |
#| Assert Background |
#| This script is called from a Unix shell as a background process. For example: |
#| $ R -e "source('/home/rstudio/100 FxOption/103 FxOptionVerBack/080 Fx Git/R-source |
#| /PdfRunApp.R')" & |
#| |
#| Assert History |
#| 1.0.0 This script contains ONLY ONE (1) function PdfRunApp() and it requires the |
#| library "PlusPdf" 1.1.2. |
#|------------------------------------------------------------------------------------------|
source("~/100 FxOption/103 FxOptionVerBack/080 Fx Git/R-source/PlusPdf.R")
PdfRunApp <- function(silent=FALSE)
{
  # Endless daemon loop: drain pending Nomura PDF jobs via
  # PdfNomuraSeqNum(), then sleep for four hours before checking again.
  #
  # Args:
  #   silent: when FALSE, print a timestamp and a status message before
  #           each sleep.
  repeat {
    recipientChr <- c("dennislwm@yahoo.com.au")
    pdfFileStr <- paste0(RegGetRNonSourceDir(), "Job_02_pdf.rda")
    mdlFileStr <- paste0(RegGetRNonSourceDir(), "Job_02_mdl.rda")

    # Keep calling the worker until it reports fewer than five items.
    repeat {
      seqNum <- PdfNomuraSeqNum(5, toChr=recipientChr, waitNum=20, silent=silent,
                                fileStr=pdfFileStr, predStr=mdlFileStr)
      if (seqNum < 5) break
    }

    if (!silent) {
      print(Sys.time())
      print("Sleeping for 4 hours...")
    }

    secPerHourNum <- 60 * 60
    Sys.sleep(4 * secPerHourNum)
  }
}
PdfRunApp() |
58ecda5a5dbaab162f77dae625d1978da6d66cbc | 2260919a179172c731c9c0e503b324c97a01a5a7 | /Visualizations/Poster_Plot.R | f262a7a9a4b9ce1da1c65f4e6019fba9ffa5100a | [
"MIT"
] | permissive | victorfeagins/NDVIModeling | f1d34c2b2a0a0ae9ad8d73bbd48dddad02c71ad3 | 25cfece367cb257377ab3580f60b3e65944747b7 | refs/heads/main | 2023-08-22T06:01:21.174234 | 2021-10-28T14:20:56 | 2021-10-28T14:20:56 | 377,971,835 | 0 | 1 | null | 2021-06-28T20:03:55 | 2021-06-17T22:10:33 | R | UTF-8 | R | false | false | 656 | r | Poster_Plot.R |
# Plot one day's raw NDVI time series from the GOES model-input CSVs.
library(ggplot2)
library(stringr)
library(dplyr)
# Directory holding the per-day NDVI model input files.
inputdirectory <- "/projectnb/dietzelab/GOES_DataFTP/InputFilesNDVIModel/2021/"
#sitecodebook <- "/projectnb/dietzelab/vfeagins/Programming/NVDI_Modeling/GOESdownloadSites.csv"
inputfiles <- list.files(inputdirectory)
# 3 is good
# 10
# File index 10 was chosen by hand for the poster figure.
inputfile = inputfiles[[10]]
df <- file.path(inputdirectory, inputfile ) %>%
  read.csv()
# Extract the "_YYYY_DDD_" stamp from the file name and parse it as a date.
# NOTE(review): the result is only auto-printed, not stored; the plot title
# below hard-codes "2020/04/27" instead -- confirm the two agree.
inputfile %>%
  str_extract("_[\\d]+_[\\d]+_") %>%
  as.Date("_%Y_%j_")
# Scatter plot of the raw observations (columns x and y of the CSV).
ggplot(df) +
  geom_point(mapping = aes(x,y)) +
  ggthemes::theme_base() +
  labs(title = "Appalachian State University Raw NDVI Time Series 2020/04/27",
       y = "NDVI",
       x = "Time (Hour)")
85e41ae0b114d0d2fb239718213d38ecb4e0bf3f | 0e27f48be18b30d93bea36ce1182e38e1973c074 | /practicas/AB Testing Practica/Data_AB_Testing.R | 181a8b842f931d5b063a677bb0aefe3962472003 | [] | no_license | joagonzalez/ditella-ventas-marketing | d6074d12d7ee603b677cdb137103cf2f086cc431 | 6df45126d9568e4a85aaec87dfd99fcea4c7d836 | refs/heads/master | 2023-08-14T10:01:46.019752 | 2021-09-27T00:50:54 | 2021-09-27T00:50:54 | 410,687,974 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,037 | r | Data_AB_Testing.R | # load the library
#1. Prepare the dataset and load the tidyverse library which contains the
#   relevant packages used for the analysis.
library(tidyverse)

# Set up your own directory before running.
setwd('/Users/seguram/Documents/AB Test')

# Import the raw A/B test results (one row per visitor).
ABTest <- read.csv("Website Results.csv",
                   header = TRUE)

# Save a copy in your own directory.
# NOTE(review): "~rda\\ABTest.rda" is an unusual path (no separator after
# "~"); confirm the intended target location.
save(ABTest, file = "~rda\\ABTest.rda")

# 2. Conversion rate of variant A: conversions / visitors.
conversion_subset_A <- ABTest %>%
  filter(variant == "A" & converted == "TRUE")
conversions_A <- nrow(conversion_subset_A)
visitors_A <- nrow(ABTest %>%
                     filter(variant == "A"))
conv_rate_A <- (conversions_A/visitors_A)
print(conv_rate_A) # 0.02773925

# Conversion rate of variant B.
conversion_subset_B <- ABTest %>%
  filter(variant == "B" & converted == "TRUE")
conversions_B <- nrow(conversion_subset_B)
visitors_B <- nrow(ABTest %>%
                     filter(variant == "B"))
conv_rate_B <- (conversions_B/visitors_B)
print(conv_rate_B) # 0.05068493

# Relative uplift of B over A, as a percentage increase.
uplift <- (conv_rate_B - conv_rate_A) / conv_rate_A * 100
uplift # 82.72%

# Pooled sample proportion for variants A & B.
p_pool <- (conversions_A + conversions_B) / (visitors_A +
                                               visitors_B)
print(p_pool) # 0.03928325

# Standard error of the difference under the pooled proportion.
SE_pool <- sqrt(p_pool * (1 - p_pool) * ((1 / visitors_A) +
                                           (1 / visitors_B)))
print(SE_pool) # 0.01020014

# Margin of error at the 95% level.
MOE <- SE_pool * qnorm(0.975)
print(MOE) # 0.0199919

# Point estimate: difference in conversion proportions.
d_hat <- conv_rate_B - conv_rate_A

# Z-score of the observed difference, used to determine the p-value.
z_score <- d_hat / SE_pool
print(z_score) # 2.249546

# Two-sided p-value from the standard normal distribution.
p_value <- pnorm(q = -z_score,
                 mean = 0,
                 sd = 1) * 2
print(p_value) # 0.02447777

# 95% confidence interval for the pooled difference in proportions.
ci <- c(d_hat - MOE, d_hat + MOE)
ci # 0.002953777 0.042937584

# Confidence interval for variant A separately.
X_hat_A <- conversions_A / visitors_A
se_hat_A <- sqrt(X_hat_A * (1 - X_hat_A) / visitors_A)
ci_A <- c(X_hat_A - qnorm(0.975) * se_hat_A, X_hat_A
          + qnorm(0.975) * se_hat_A)
print(ci_A) # 0.01575201 0.03972649

# Confidence interval for variant B separately.
X_hat_B <- conversions_B / visitors_B
se_hat_B <- sqrt(X_hat_B * (1 - X_hat_B) / visitors_B)
ci_B <- c(X_hat_B - qnorm(0.975) * se_hat_B,
          X_hat_B + qnorm(0.975) * se_hat_B)
print(ci_B) # 0.03477269 0.06659717

# Summarise all results computed so far in a table.
# BUG FIX: the original referenced undefined variables ci_lower/ci_upper
# (only `ci` exists), which aborted the script here; use ci[1]/ci[2].
vis_result_pool <- data.frame(
  metric = c(
    'Estimated Difference',
    'Relative Uplift(%)',
    'pooled sample proportion',
    'Standard Error of Difference',
    'z_score',
    'p-value',
    'Margin of Error',
    'CI-lower',
    'CI-upper'),
  value = c(
    conv_rate_B - conv_rate_A,
    uplift,
    p_pool,
    SE_pool,
    z_score,
    p_value,
    MOE,
    ci[1],
    ci[2]
  ))
print(vis_result_pool)
d2113e6e1c32adbee0cac53277ee9ead2f734171 | 6cdd53ea5a130d609f9bdf3d1ebf5fd00bb10d1d | /similarity.R | a355359961b1adeb2b66cd7243a9240012737a81 | [] | no_license | lucasbic/dataIC | 73dffaa637cb3045ca9ae31ecbfb8944ee01e8c6 | 53f4ed19749f223f2df8017905794f2ac2cb37fe | refs/heads/master | 2023-04-15T09:45:42.930853 | 2021-04-22T19:17:37 | 2021-04-22T19:17:37 | 187,738,031 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,135 | r | similarity.R | ###########################################
# File:   similarity.R   (header translated from Portuguese)
# Author: Lucas Bicalho
###########################################
# Exploratory clustering of traffic-accident records (ACIDENTES.csv).
data_acid = read.table("data/ACIDENTES.csv", header=T, sep=";")
# Day of week, accident type, deaths and district per record.
df = data.frame(dia_semana=weekdays(strptime(data_acid$data, "%d/%m/%Y %H:%M")),
                tipo_acid=data_acid$tipo_acidente,
                mortos=data_acid$mortos,
                distrito=data_acid$distrito)
# Regression tree for deaths as a function of the other variables.
require(rpart)
mod = rpart(mortos ~ ., data=df, control = rpart.control(cp = 1e-3))
print(summary(mod))
plot(mod)
text(mod)
# Bucket each record into weekday x period-of-day cells.
ds = weekdays(strptime(data_acid$data, "%d/%m/%Y %H:%M"))
library(data.table)
# hour() is provided by data.table's date-time helpers.
period = ceiling(hour(strptime(data_acid$data, "%d/%m/%Y %H:%M")))
h = cut(period, breaks = c(0,6,12,18,24), labels = c("Dawn", "Morning", "Afternoon", "Night"))
f = data_acid$feridos
# NOTE(review): paste0() has no `sep` argument; the `sep=""` below is
# concatenated as an extra (empty) string rather than acting as a separator.
df = data.frame(ds_h = paste0(ds, "_", h, sep=""), feridos=f)
# Injured counts per weekday-period cell, weighted by the number injured.
tab = table(df$ds_h, df$feridos)
mult_tab = t(t(tab) * as.numeric(colnames(tab)))
# Simulated per-street profiles: cell totals randomly scaled per street.
ruas = NULL
for (i in 1:100)
  ruas=rbind(ruas, data.frame(Rua=i, valores=t(rowSums(mult_tab) * runif(12, 1, 5))))
# Compute distances and hierarchical clustering
# NOTE(review): df holds factor/character columns here, so scale(df)/dist()
# will likely fail -- confirm whether mult_tab was the intended input.
dd <- dist(scale(df), method = "euclidean")
hc <- hclust(dd, method = "ward.D2")
res.dist <- dist(mult_tab, method = "euclidean")
as.matrix(res.dist)[1:6, 1:6]
res.hc <- hclust(d = res.dist, method = "ward.D2")
# cex: label size
library("factoextra")
fviz_dend(res.hc, cex = 0.5)
# Compute cophentic distance
res.coph <- cophenetic(res.hc)
# Correlation between cophenetic distance and
# the original distance
cor(res.dist, res.coph)
# Average-linkage tree for comparison with Ward's method.
res.hc2 <- hclust(res.dist, method = "average")
cor(res.dist, cophenetic(res.hc2))
# Cut tree into 4 groups
grp <- cutree(res.hc, k = 4)
head(grp, n = 4)
table(grp)
rownames(df)[grp == 1]
fviz_dend(res.hc, cex = 0.5, k = 4,
          color_labels_by_k = FALSE, rect = TRUE)
# K-means clustering
# +++++++++++++++++++++
# NOTE(review): 16052 centres almost certainly exceeds the number of rows
# in mult_tab, so kmeans() will error -- confirm the intended k.
km.res <- kmeans(mult_tab, 16052)
# Visualize kmeans clustering
# use repel = TRUE to avoid overplotting
# NOTE(review): clusters from mult_tab are plotted against iris data;
# the two inputs do not correspond -- verify.
fviz_cluster(km.res, iris[, -5], ellipse.type = "norm")
aea53430fe57f0d76d8304908f569695a96cf30d | 9345c3bd943697cf136ce1d66e8c954da56db0fd | /man/uwSurvival.Rd | b5afce38c08c7fdef74b746f56395320928724d2 | [] | no_license | jbirstler/biostatrpts | 20967f481a5f3f9ef630da3688260fbdb7d5ae2a | 079190440c9c20b2ffd0a692170e45742c2ba859 | refs/heads/master | 2022-06-09T19:08:39.102037 | 2020-05-06T21:10:11 | 2020-05-06T21:10:11 | 261,236,006 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,697 | rd | uwSurvival.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/uwSurvival.R
\name{uwSurvival}
\alias{uwSurvival}
\title{Create Survival Chart and Table}
\usage{
uwSurvival(
survData,
statusName,
trxName,
trxControl,
timeName,
fun = NULL,
color = NULL,
pTitle = "Survival by Treatment",
yLab = "Percent of Survival Probability",
xLab = "Days from Randomization",
yLim = NULL,
markCensor = FALSE,
Legend = TRUE,
LegendLoc = "topright",
LegendCex = 0.8,
printPVals = TRUE,
pValCex = 0.6,
abbrevN = 1,
printSS = TRUE,
SSinPlot = FALSE,
SScex = 0.5,
Ropen = FALSE,
firstCol.name = NULL,
GraphSSfreq = 1,
TableSSfreq = GraphSSfreq,
LatexFileName = "survival.tex",
LatexCaption = NULL,
numDec = 1,
cexAxis = 0.7,
confInt = FALSE,
...
)
}
\arguments{
\item{survData}{A dataframe that includes columns for treatment, patient
status (living/deceased for survival, event/no event for other
measurements), and the patients' survival days.}
\item{statusName}{String of the name for the status variable in survData}
\item{trxName}{String of the name for the treatment variable in survData}
\item{trxControl}{String of which level is the control level in trxName}
\item{timeName}{String of the name for the time variable in survData}
\item{fun}{String as the function utilized for plot.survfit. Only tested
for default and 'event'}
\item{color}{Color designations. Must be equal to nlevels of trxName}
\item{pTitle}{Title for the graph}
\item{yLab}{Label for y-axis}
\item{xLab}{Label for x-axis}
\item{yLim}{Limits for the y-axis. Defaults to c(-1, 100) when SSinPlot is
TRUE and defaults to c(-10, 100) when SS in Plot is FALSE}
\item{markCensor}{(Logical) TRUE marks where censored data occurs on the
curves}
\item{Legend}{(Logical) TRUE plots legend in spot given in LegendLoc}
\item{LegendLoc}{String of c('topright','top','topleft','left',
'center','right','bottomright','bottom','bottomleft')}
\item{LegendCex}{Numeric magnification of the legend size}
\item{printPVals}{(Logical) TRUE log-rank test p-value is given atop the
graph}
\item{pValCex}{Numeric magnification of the p-value print size}
\item{abbrevN}{(integer) Number of letters to which the treatment levels
are abbreviated when reported in the margins.}
\item{printSS}{(Logical) Prints sample size still at risk at the designated
time points}
\item{SSinPlot}{(Logical) TRUE prints sample size at risk inside graph
itself in negative margins. Useful when survival is high throughout time
period}
\item{SScex}{Numeric magnification of the sample size print size}
\item{Ropen}{(Logical) TRUE collapses data across treatment levels}
\item{firstCol.name}{Sting for name of first column in the LaTeX tables}
\item{GraphSSfreq}{Whole number designating how often sample size should be
shown in the lower margin of the graph}
\item{TableSSfreq}{Whole number designating how often rows of the survfit
output should be printed in the LaTex table}
\item{LatexFileName}{Vector of strings. Individually give file path and
name for each LaTeX table which will be constructed for each treatment
level.}
\item{LatexCaption}{Vector of strings to be used as the caption for the
LaTeX tables}
\item{numDec}{Numeric indicating number of decimal places to be used in the
x-axis for time of follow-up}
\item{cexAxis}{Axis label magnifier}
\item{confInt}{Should confidence interval lines be printed}
\item{...}{Any other arguments that can be passed on to uwLatex()}
}
\description{
Charts survival by treatment
}
\details{
Additional packages required: library(chron)
If LaTeX table does not seem to print fully in the document try using
type='longtable' argument.
Only works well with layout(1)
}
\examples{
death <- sample(c(0, 1), 200, replace = TRUE, prob = c(0.4, 0.6))
trt <- rep(c("ABDC", "ABGH"), c(100, 100))
time <- ceiling(runif(200, 0, 100))
sData <- data.frame(death, trt, time)
layout(1)
uwSurvival(
survData = sData, statusName = "death", trxName = "trt", trxControl = "ABDC",
timeName = "time", pTitle = "Survival by Treatment",
yLab = "Percent of Survival Probability",
xLab = "Days from Randomization", pValCex = 0.8, SScex = 0.7,
markCensor = FALSE, Legend = TRUE, LegendLoc = "topright",
LatexFileName = c(
paste(getwd(), "surv1.tex", sep = ""),
paste(getwd(), "/surv2.tex", sep = "")
),
showTab = FALSE, printPVals = TRUE, printSS = TRUE,
Ropen = FALSE, GraphSSfreq = 5, TableSSfreq = 1, firstCol.name = NULL,
LatexCaption = NULL, SSinPlot = TRUE,
size = "scriptsize"
)
}
\author{
Scott Hetzel, M.S. Department of Biostatistics and Medical
Informatics. University of Wisconsin-Madison.
}
|
d6d8a7c3763f91ffd87c8856e7ace8c3c2f03114 | c5a921726a3805663d26a2dbaa47e49497931d4e | /DataCamp/Data Analysis in R the data.table Way.R | 1b743f9a8f00529633da05f8988f8d6e4dc843e8 | [] | no_license | snowdj/cs_course | a50d07548198b4202e8abde01ec572e2cce38ab3 | fa6504cb5145d10952f4615478fa745f4b35ba13 | refs/heads/master | 2020-03-17T15:18:52.190747 | 2018-05-13T08:08:51 | 2018-05-13T08:08:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,954 | r | Data Analysis in R the data.table Way.R | ## 1 - Data.table novice
##--
## Create and subset a data.table
## Welcome to the interactive exercises for your data.table course. Here you will learn the ins and outs of working with the data.table package.
## While most of the material is covered by Matt and Arun in the videos, you will sometimes need to show some street smarts to get to the right answer. Remember that before using the hint you can always have a look at the official documentation by typing ?data.table in the console.
## Let's start with some warm-up exercises based on the topics covered in the video. Recall from the video that you can use L after a numeric to specify that it is an integer. You can also give columns with different lengths when creating a data.table, and R will "recycle" the shorter column to match the length of the longer one by re-using the first items. In the example below, column x is recycled to match the length of column y:
## data.table(x = c("A", "B"), y = 1:4)
## x y
## 1: A 1
## 2: B 2
## 3: A 3
## 4: B 4
## You can also review the slides used in the videos by pressing the slides button.
## Instructions
## Create a data.table my_first_data_table with a column x = c("a", "b", "c", "d", "e") and a column y = c(1, 2, 3, 4, 5). Use the function data.table().
## Create a two-column data.table DT that contains the four integers 1, 2, 1 and 2 in the first column a and the letters A, B, C and D in the second column b. Use recycling so that the contents of a will be automatically used twice. Note that LETTERS[1] returns "A", LETTERS[2] returns "B", and so on.
## Select the third row of DT and just print the result to the console.
## Select the second and third rows without using commas and print the result to the console.
# The data.table package is preloaded
# Create my_first_data_table
my_first_data_table <- data.table(x = c("a", "b", "c", "d", "e"),
                                  y = c(1, 2, 3, 4, 5))
# Create a data.table using recycling
# (a has length 2 and b has length 4, so a is recycled to c(1, 2, 1, 2))
DT <- data.table(a = c(1L, 2L), b = LETTERS[1:4])
# Print the third row to the console
DT[3]
# Print the second and third row to the console without using commas
DT[2:3]
##--
## Getting to know a data.table
## You can pass a data.table to base R functions like head() and tail() that accept a data.frame because data.tables are also data.frames. Also, keep in mind that the special symbol .N, when used inside square brackets, contains the number of rows. For example, DT[.N] and DT[nrow(DT)] will both return the last row in DT.
## Instructions
## Select the second to last row of the table using .N.
## Return the column names() of the data.table.
## Return the number of rows and number of columns of the data.table using the dim() function.
## Select row 2 twice and row 3 once, returning a data.table with three rows (two of which are identical).
# DT and the data.table package are pre-loaded
# Print the second to last row of DT using .N
DT[.N - 1]
# Print the column names of DT
colnames(DT)
# Print the number or rows and columns of DT
dim(DT)
# Print a new data.table containing rows 2, 2, and 3 of DT
DT[c(2, 2, 3)]
##--
## Subsetting data.tables
## As a reminder, DT[i, j, by] is pronounced
## Take DT, subset rows using i, then calculate j grouped by by.
## In the video, the second argument j was covered. j can be used to select columns by wrapping the column names in .().
## In addition to selecting columns, you can also call functions on them as if the columns were variables. For example, if you had a data.table heights storing people's heights in inches, you could compute their heights in feet as follows:
## name eye_color height_inch
## 1: Tom Brown 69
## 2: Boris Blue 71
## 3: Jim Blue 68
## > heights[, .(name,
## height_ft = height_inch / 12)]
## name height_ft
## 1: Tom 5.750000
## 2: Boris 5.916667
## 3: Jim 5.666667
## Instructions
## Create a subset containing the columns B and C for rows 1 and 3 of DT. Simply print out this subset to the console.
## From DT, create a data.table, ans with two columns: B and val, where val is the product of A and C.
## Fill in the blanks in the assignment of ans2, such that it equals the data.table specified in target. Use columns from the previously defined data.tables to produce the val column.
# DT and the data.table package are pre-loaded
# Subset rows 1 and 3, and columns B and C
DT[c(1, 3), .(B, C)]
# Assign to ans the correct value
ans <- DT[, .(B, val = A * C)]
# Fill in the blanks such that ans2 equals target
target <- data.table(B = c("a", "b", "c", "d", "e",
                     "a", "b", "c", "d", "e"),
         val = as.integer(c(6:10, 1:5)))
# c(C, A) has length 10, so column B (length 5) is recycled to match it
ans2 <- DT[, .(B, val = c(C, A))]
##--
## The by basics
## In this section you were introduced to the last of the main parts of the data.table syntax: by. If you supply a j expression and a by list of expressions, the j expression is repeated for each by group. Time to master the by argument with some hands-on examples and exercises.
## First, just print iris to the console and observe that all rows are printed and that the column names scroll off the top of your screen. This is because iris is a data.frame. Scroll back up to the top to see the column names.
## Instructions
## Convert the iris dataset to a data.table DT. You're now ready to use data.table magic on it!
## Create a new column containing the mean Sepal.Length for each Species. Do not provide a name for this newly created column.
## Do exactly the same as in the instruction above, but this time, group by the first letter of the Species name instead. Use substr() for this.
# iris is already available in your workspace
# Convert iris to a data.table: DT
DT <- as.data.table(iris)
# For each Species, print the mean Sepal.Length
DT[, mean(Sepal.Length), by = Species]
# Print mean Sepal.Length, grouping by first letter of Species
# (by accepts arbitrary expressions, not just column names)
DT[, mean(Sepal.Length), by = substr(Species, 1, 1)]
##--
## Using .N and by
## You saw earlier that .N can be used in i and that it designates the number of rows in DT. There, it is typically used for returning the last row or an offset from it. .N can be used in j too and designates the number of rows in this group. This becomes very powerful when you use it in combination with by.
## DT, a data.table version of iris, is already loaded in your workspace, so you can start experimenting right away. In this exercise, you will group by sepal area. Though sepals aren't rectangles, just multiply the length by the width to calculate the area.
## Instructions
## Group the specimens by Sepal area (Sepal.Length * Sepal.Width) to the nearest 10 cm2
## . Count how many occur in each group by specifying .N in j. Simply print the resulting data.table. Use the template in the sample code by filling in the blanks.
## Copy and adapt the solution to the above question, to name the columns Area and Count, respectively.
# data.table version of iris: DT
DT <- as.data.table(iris)
# Group the specimens by Sepal area (to the nearest 10 cm2) and count how many occur in each group.
DT[, .N, by = 10 * round(Sepal.Length * Sepal.Width / 10)]
# Now name the output columns `Area` and `Count`
DT[, .(Count = .N), by = .(Area = 10 * round(Sepal.Length * Sepal.Width / 10))]
##--
## Return multiple numbers in j
## In the previous exercises, you've returned only single numbers in j. However, this is not necessary. You'll experiment with this via a new data.table DT, which has already been specified in the sample code.
## Instructions
## Create a new data.table DT2 with 3 columns, A, B and C, where C is the cumulative sum of the C column of DT. Call the cumsum() function in the j argument, and group by .(A, B) (i.e. both columns A and B).
## Select from DT2 the last two values of C using the tail() function, and assign that to column C while you group by A alone. Make sure the column names don't change.
# Create the data.table DT
DT <- data.table(A = rep(letters[2:1], each = 4L),
                 B = rep(1:4, each = 2L),
                 C = sample(8))
# Create the new data.table, DT2
# (cumsum() returns one value per input row, so every group keeps all its rows)
DT2 <- DT[, .(C = cumsum(C)), by = .(A, B)]
# Select from DT2 the last two values from C while you group by A
DT2[, .(C = tail(C, 2)), by = A]
## 2 - Data.table yeoman
##--
## Chaining, the basics
## Now that you are comfortable with data.table's DT[i, j, by] syntax, it is time to practice some other very useful concepts in data.table. Here, we'll have a more detailed look at chaining.
## Chaining allows the concatenation of multiple operations in a single expression. It's easy to read because the operations are carried out from left to right. Furthermore, it helps to avoid the creation of unnecessary temporary variables (which could quickly clutter one's workspace).
## Instructions
## In the previous section, you calculated DT2 by taking the cumulative sum of C while grouping by A and B. Next, you selected the last two values of C from DT2 while grouping by A alone. This code is included in the sample code. Use chaining to restructure the code. Simply print out the result of chaining.
# The data.table package has already been loaded
# Build DT
DT <- data.table(A = rep(letters[2:1], each = 4L),
                 B = rep(1:4, each = 2L),
                 C = sample(8))
# Combine the two steps in a one-liner
# (the second [] operates on the result of the first, avoiding a temporary DT2)
DT[, .(C = cumsum(C)), by = .(A, B)][, .(C = tail(C, 2)), by = A]
##--
## Chaining your iris dataset
## In the previous chapter, you converted the iris dataset to a data.table DT. This DT is already available in your workspace. Print DT to the console to remind yourself of its contents. Now, let's see how you can use chaining to simplify manipulations and calculations.
## Instructions
## Get the median of each of the four columns Sepal.Length, Sepal.Width, Petal.Length and Petal.Width, while grouping by Species. Reuse the same column names (e.g. the column containing the median Sepal.Length is still called Sepal.Length). Next, order() Species in descending order using chaining. This is deliberately repetitive, but we have a solution for you in the next exercise!
# The data.table DT is loaded in your workspace
# Perform chained operations on DT
DT[, .(Sepal.Length = median(Sepal.Length),
       Sepal.Width = median(Sepal.Width),
       Petal.Length = median(Petal.Length),
       Petal.Width = median(Petal.Width)),
   by = Species][order(-Species)]
##--
## Programming time vs readability
## It is a good idea to make use of familiar functions from base R to reduce programming time without losing readability.
## The data.table package provides a special built-in variable .SD. It refers to the subset of data for each unique value of the by argument. That is, the number of observations in the output will be equal to the number of unique values in by.
## Recall that the by argument allows us to separate a data.table into groups. We can now use the .SD variable to reference each group and apply functions separately. For example, suppose we had a data.table storing information about dogs:
## Sex Weight Age Height
## M 40 1 12
## F 30 4 7
## F 80 12 9
## M 90 3 14
## M 40 6 12
## We could then use
## dogs[, lapply(.SD, mean), by = Sex]
## to produce average weights, ages, and heights for male and female dogs separately:
## Sex Weight Age Height
## 1: M 56.66667 3.333333 12.66667
## 2: F 55.00000 8.000000 8.00000
## A data.table DT has been created for you and is available in the workspace. Type DT in the console to print it out and inspect it.
## Instructions
## Get the mean of columns y and z grouped by x by using .SD.
## Get the median of columns y and z grouped by x by using .SD.
# A new data.table DT is available
# Mean of columns
# (.SD holds all non-grouping columns for the group; lapply applies mean to each)
DT[, lapply(.SD, mean), by = x]
# Median of columns
DT[, lapply(.SD, median), by = x]
##--
## Introducing .SDcols
## .SDcols specifies the columns of DT that are included in .SD. Using .SDcols comes in handy if you have too many columns and you want to perform a particular operation on a subset of the columns (apart from the grouping variable columns).
## Using .SDcols allows you to apply a function to all rows of a data.table, but only to some of the columns. For example, consider the dog example from the last exercise. If you wanted to compute the average weight and age (the second and third columns) for all dogs, you could assign .SDcols accordingly:
## dogs[, lapply(.SD, mean), .SDcols = 2:3]
## Weight Age
## 1: 56 5.2
## While learning the data.table package, you may want to occasionally refer to the documentation. Have a look at ?data.table for more info on .SDcols.
## Yet another data.table, DT, has been prepared for you in your workspace. Start by printing it to the console.
## Instructions
## Calculate the sum of the columns that start with Q, using .SD and .SDcols. Set .SDcols equal to 2:4.
## Set .SDcols to be the result of a function call. This time, calculate the sum of columns H1 and H2 using paste0() to specify the .SDcols argument.
## Finally, select all but the first row of the groups names 6 and 8, returning only the grp column and the columns that start with Q. Use -1 in i of .SD and use paste0() again. Type desired_result into the console to see what your answer should look like.
# A new data.table DT is available
# Calculate the sum of the Q columns
DT[, lapply(.SD, sum), .SDcols = 2:4]
# Calculate the sum of columns H1 and H2
DT[, lapply(.SD, sum), .SDcols = paste0("H", 1:2)]
# For each group, select all but the first row (-1 in i of .SD), returning only the grp column and the Q columns
DT[, .SD[-1], by = grp, .SDcols = paste0("Q", 1:3)]
##--
## Mixing it together: lapply, .SD, .SDcols and .N
## This exercise is a challenging one, so don't give up! It's important to remember that whenever the j argument is a list (e.g. if it contains .SD or a call to lapply()), a data.table is returned. For example:
## dogs[, lapply(.SD, mean), by = sex, .SDcols = c("weight", "age")]
## will return a data.table containing average weights and ages for dogs of each sex.
## It's also helpful to know that combining a list with a vector results in a new longer list. Lastly, note that when you select .N on its own, it is renamed N in the output for convenience when chaining.
## For this exercise, DT, which contains variables x, y, and z, is loaded in your workspace. You must combine lapply(), .SD, .SDcols, and .N to get your call to return a specific output. Good luck!
## Instructions
## Get the sum of all columns x, y and z and the number of rows in each group while grouping by x. Your answer should be identical to this:
## x x y z N
## 1: 2 8 26 30 4
## 2: 1 3 23 26 3
## Get the cumulative sum of column x and y while grouping by x and z > 8 such that the answer looks like this:
## by1 by2 x y
## 1: 2 FALSE 2 1
## 2: 2 FALSE 4 6
## 3: 1 FALSE 1 3
## 4: 1 FALSE 2 10
## 5: 2 TRUE 2 9
## 6: 2 TRUE 4 20
## 7: 1 TRUE 1 13
# DT is pre-loaded
# Sum of all columns and the number of rows
# (combining the lapply() list with .N appends the per-group row count as column N)
DT[, c(lapply(.SD, sum), .N), by = x, .SDcols = c("x", "y", "z")]
# Cumulative sum of column x and y while grouping by x and z > 8
# (by can contain expressions; z > 8 becomes the logical grouping column by2)
DT[, lapply(.SD, cumsum), by = .(by1 = x, by2 = z > 8), .SDcols = c("x", "y")]
##--
## Adding, updating and removing columns
## As you now know, := is defined for use in j only, and is used to update data.tables by reference. One way of using := is the LHS := RHS form, where LHS is a character vector of columns (referenced by name or number) you wish to update and RHS is the corresponding value for each column (Note: LHS stands for "left hand side" and RHS stands for "right hand side" in what follows).
## For example, the following line multiplies every row of column C by 10 and stores the result in C:
## DT[, C := C * 10]
## This first exercise will thoroughly test your understanding of := used in the LHS := RHS form. It's time for you to show off your knowledge! A data.table DT has been defined for you in the sample code.
## Instructions
## Add a column to DT by reference, named Total, that contains sum(B) for each group in column A.
## Add 1L to the values in column B, but only in the rows 2 and 4.
## Add a new column Total2 that contains sum(B) grouped by A but just over rows 2, 3 and 4.
## Remove the Total column from DT.
## Use [[ to select the third column as a vector. Simply print it out to the console.
# The data.table DT
DT <- data.table(A = letters[c(1, 1, 1, 2, 2)], B = 1:5)
# Add column by reference: Total
DT[, Total := sum(B), by = A]
# Add 1 to column B (1L keeps B's integer type)
DT[c(2, 4), B := B + 1L]
# Add a new column Total2
# (supplying i restricts the update, and the grouping, to rows 2:4)
DT[2:4, Total2 := sum(B), by = A]
# Remove the Total column
DT[, Total := NULL]
# Select the third column using `[[`
DT[[3]]
##--
## The functional form
## You've had practice with using := in the LHS := RHS form. The second way to use := is with functional form:
## DT[, `:=`(colA = colB + colC)]
## Notice that the := is surrounded by two tick marks! Otherwise data.table will throw a syntax error. It is also important to note that in the generic functional form above, my_fun() can refer to any function, including the basic arithmetic functions. The nice thing about the functional form is that you can get both the RHS alongside the LHS so that it's easier to read.
## Time for some experimentation. A data.table DT has been prepared for you in the sample code.
## Instructions
## Update B with B + 1, add a new column C with A + B, and add a new column D of just 2's.
## A variable my_cols has already been defined. Use it to delete these columns from DT.
## Finally, delete column D using the column number (2), not its name (D).
# A data.table DT has been created for you
DT <- data.table(A = c(1, 1, 1, 2, 2), B = 1:5)
# Update B, add C and D
DT[, `:=`(B = B + 1, C = A + B, D = 2)]
# Delete my_cols
# (the parentheses around my_cols make data.table use its contents, not the name "my_cols")
my_cols <- c("B", "C")
DT[, (my_cols) := NULL]
# Delete column 2 by number
DT[, 2 := NULL]
##--
## Ready, set(), go!
## The set() function is used to repeatedly update a data.table by reference. You can think of the set() function as a loopable, low overhead version of the := operator, except that set() cannot be used for grouping operations. The structure of the set() function looks like this:
## set(DT, index, column, value)
## The function takes four arguments:
## A data.table with the columns you wish to update
## The index used in a loop (e.g. the i in for(i in 1:5))
## The column or columns you wish to update in the loop
## How the column or columns should be updated
## In the next two exercises, you will focus on using set() and its siblings setnames() and setcolorder(). You are two exercises away from becoming a data.table yeoman!
## Instructions
## A data.table DT has been created for you in the workspace. Check it out!
## Loop through columns 2, 3, and 4, and for each one, select 3 rows at random and set the value of that column to NA.
## Change the column names to lower case using the tolower() function. When setnames() is passed a single input vector, that vector needs to contain all the new names.
## Print the resulting DT to the console to see what changed.
# Set the seed
set.seed(1)
# Check the DT that is made available to you
DT
# For loop with set
# (sample(10, 3) picks 3 of the 10 row numbers; set() updates column i in place)
for (i in 2:4) set(DT, sample(10, 3), i, NA)
# Change the column names to lowercase
setnames(DT, tolower(names(DT)))
# Print the resulting DT to the console
DT
##--
## The set() family
## A summary of the set() family:
## set() is a loopable, low overhead version of :=.
## You can use setnames() to set or change column names.
## setcolorder() lets you reorder the columns of a data.table.
## A data.table DT has been defined for you in the sample code.
## Instructions
## First, add a suffix "_2" to all column names of DT. Use paste0() here.
## Next, use setnames() to change a_2 to A2.
## Lastly, reverse the order of the columns with setcolorder().
# Define DT
DT <- data.table(a = letters[c(1, 1, 1, 2, 2)], b = 1)
# Add the suffix "_2" to all column names
setnames(DT, paste0(names(DT), "_2"))
# Change column name a_2 to A2
setnames(DT, "a_2", "A2")
# Reverse the order of the columns
setcolorder(DT, c("b_2", "A2"))
# Alternative solution using column numbers
# setcolorder(DT, 2:1)
##--
## Selecting rows the data.table way
## In the video, Matt showed you how to use column names in i to select certain rows. Since practice makes perfect, and since you will find yourself selecting rows over and over again, it'll be good to do a small exercise on this with the familiar iris dataset.
## Instructions
## Convert the iris dataset to a data.table and store the result as iris.
## Select all the rows where Species is "virginica".
## Select all the rows where Species is either "virginica" or "versicolor".
# The data.table package is pre-loaded
# Convert iris to a data.table
iris <- as.data.table(iris)
# Species is "virginica"
iris[Species == "virginica"]
# Species is either "virginica" or "versicolor"
iris[Species %in% c("virginica","versicolor")]
##--
## Removing columns and adapting your column names
## In the previous exercise, you selected certain rows from the iris data.table based on the column names. Now you have to take your understanding of the data.table package to the next level by using standard R functions and regular expressions to remove columns and change column names. To practice this, you'll do a little manipulation to prepare for the next exercise.
## Since regular expressions can be tricky, here is a quick refresher:
## Metacharacters allow you to match certain types of characters. For example, . means any single character, ^ means "begins with", and $ means "ends with".
## If you want to use any of the metacharacters as actual text, you need to use the \\ escape sequence.
## Instructions
## Simplify the names of the columns in iris that contain "Sepal." by removing the "Sepal." prefix. Use gsub() along with the appropriate regular expression inside a call to setnames().
## Remove the two columns that start with "Petal" from the iris data.table.
# iris as a data.table
iris <- as.data.table(iris)
# Remove the "Sepal." prefix
setnames(iris, gsub("^Sepal\\.", "", names(iris)))
# Remove the two columns starting with "Petal"
# (grep() returns the matching column numbers, which := NULL deletes by reference)
iris[, grep("^Petal", names(iris)) := NULL]
##--
## Understanding automatic indexing
## You've been introduced to the rule that "if i is a single variable name, it is evaluated in the calling scope, otherwise inside DT's scope". This is a very important rule if you want to conceptually understand what is going on when using column names in i. Only single columns on the left side of operators benefit from automatic indexing.
## The iris data.table with the variable names you updated in the previous exercise is available in your workspace.
## Instructions
## Select the rows where the area is greater than 20 square centimeters.
## Add a new boolean column containing Width * Length > 25 and call it is_large. Remember that := can be used to create new columns.
## Select the rows for which the value of is_large is TRUE.
# Cleaned up iris data.table
iris
# Area is greater than 20 square centimeters
iris[ Width * Length > 20 ]
# Add new boolean column
iris[, is_large := Width * Length > 25]
# Now large observations with is_large
iris[is_large == TRUE]
iris[(is_large)] # Also OK
##--
## Selecting groups or parts of groups
## The previous exercise illustrated how you can manually set a key via setkey(DT, A, B). setkey() sorts the data by the columns that you specify and changes the table by reference. Having set a key will allow you to use it, for example, as a super-charged row name when doing selections. Arguments like mult and nomatch then help you to select only particular parts of groups.
## Furthermore, two of the instructions will require you to make use of by = .EACHI. This allows you to run j for each group in which each item in i joins too. The last instruction will require you to produce a side effect inside the j argument in addition to selecting rows.
## Instructions
## A data.table DT has already been created for you with the keys set to A and B.
## Select the "b" group without using ==.
## Select the "b" and "c" groups, again without using ==.
## Select the first row of the "b" and "c" groups using mult.
## Use by = .EACHI and .SD to select the first and last row of the "b" and "c" groups.
## Extend the previous command to print out the group before returning the first and last row from it. You can use curly brackets to include two separate instructions inside the j argument.
# The 'keyed' data.table DT
DT <- data.table(A = letters[c(2, 1, 2, 3, 1, 2, 3)],
                 B = c(5, 4, 1, 9, 8, 8, 6),
                 C = 6:12)
setkey(DT, A, B)
# Select the "b" group
# (a bare character i is matched against the first key column, A)
DT["b"]
# "b" and "c" groups
DT[c("b", "c")]
# The first row of the "b" and "c" groups
DT[c("b", "c"), mult = "first"]
# First and last row of the "b" and "c" groups
# (.EACHI runs j once for each group that each row of i joins to)
DT[c("b", "c"), .SD[c(1, .N)], by = .EACHI]
# Copy and extend code for instruction 4: add printout
DT[c("b", "c"), { print(.SD); .SD[c(1, .N)] }, by = .EACHI]
##--
## Rolling joins - part one
## In the last video, you learned about rolling joins. The roll applies to the NA values in the last join column. In the next three exercises, you will learn how to work with rolling joins in a data.table setting.
## Instructions
## The same keyed data.table from before, DT, has been provided in the sample code.
## Get the key of DT through the key() function.
## Use the super-charged row names to look up the row where A == "b" and B == 6, without using ==! Verify here that column C is NA.
## Based on the query that was written in the previous instruction, return the prevailing row before this "gap". Specify the roll argument.
## Again, start with the code from the second instruction, but this time, find the nearest row. Specify the roll argument accordingly.
# Keyed data.table DT
DT <- data.table(A = letters[c(2, 1, 2, 3, 1, 2, 3)],
                 B = c(5, 4, 1, 9, 8, 8, 6),
                 C = 6:12,
                 key = "A,B")
# Get the key of DT
key(DT)
# Row where A == "b" and B == 6
# (.() builds the lookup against the key columns A, then B)
DT[.("b", 6)]
# Return the prevailing row
DT[.("b", 6), roll = TRUE]
# Return the nearest row
DT[.("b", 6), roll = "nearest"]
##--
## Rolling joins - part two
## It is time to move on to the rollends argument. The rollends argument is actually a vector of two logical values, but remember that you can always look this up via ?data.table. You were introduced to this argument via the control ends section. If you want to roll for a certain distance, you should continue to use the roll argument.
## Instructions
## For the group where column A is equal to "b", print out the sequence when column B is set equal to (-2):10. Remember, A and B are the keys for this data.table.
## Extend the code you wrote for the first instruction to roll the prevailing values forward to replace the NAs.
## Extend your code with the appropriate rollends value to roll the first observation backwards.
# Keyed data.table DT
DT <- data.table(A = letters[c(2, 1, 2, 3, 1, 2, 3)],
                 B = c(5, 4, 1, 9, 8, 8, 6),
                 C = 6:12,
                 key = "A,B")
# Print the sequence (-2):10 for the "b" group
# (B values with no key match come back with NA in C)
DT[.("b", (-2):10)]
# Add code: carry the prevailing values forwards
DT[.("b", (-2):10), roll = TRUE]
# Add code: carry the first observation backwards
DT[.("b", (-2):10), roll = TRUE, rollends = TRUE]
DT[.("b", (-2):10), roll = TRUE, rollends = c(TRUE, TRUE)] # also OK
|
df262d86135ffc659b386f4366b3ab83493c0223 | 26c0d024a84f6bcf461eb5f4ae97e7ca1fd9eaba | /R/trawlCast.R | 1d2d16005c18b235fe8bb36841b1313f5cbfec3a | [] | no_license | rBatt/trawlData | 11deca8341155dbd09afbdb0fcab046e4ff06c3f | 266c5cda94b78790474ed8a5b3e8a66b6bde04d8 | refs/heads/master | 2021-01-21T08:57:44.568307 | 2018-06-24T20:55:19 | 2018-06-24T20:55:19 | 44,205,244 | 8 | 3 | null | 2017-06-28T20:43:56 | 2015-10-13T21:14:11 | R | UTF-8 | R | false | false | 6,933 | r | trawlCast.R | #' Cast Trawl
#'
#' Cast a data.table of trawl data to an array
#'
#' @param x A data.table with column names to be used in \code{formula}, \code{valueName}, and potentially \code{allNA_noSamp}
#' @param formula Formula describing array dimensions, in order as would be given by \code{\link{dim}}. Passed to \code{formula} in \code{\link{acast}}.
#' @param valueName Column name whose elements will fill the array. Passed to \code{value.var} in \code{\link{acast}}.
#' @param valFill Value to use for filling in missing combinations; defaults to NA. Passed to \code{fill} in \code{\link{acast}}.
#' @param fixAbsent Logical (default TRUE) to indicate the need to fill one value for no sampling (\code{valFill}), and another for a true absence (\code{valAbsent}). See 'Details'.
#' @param allNA_noSamp A character indicator the column/ dimension, which, if all its elements are NA's, indicates a no-sampling event, as opposed to an absence. When \code{all(is.na(allNA_noSamp))} is FALSE for a combination of the other dimensions in \code{formula}, \code{valAbsent} will be used instead of \code{valFill}.
#' @param valAbsent value to be used in lieu of \code{valFill} to indicate an absence as opposed to not-sampled.
#' @param grandNamesOut Grand dimension names for output array (e.g., \code{names(dimnames(x))})
#' @param ... Other arguments to be passed to \code{\link{acast}}.
#'
#' @details
#' Many columns in bottom trawl data can be described as summarizing 3 aspects of metadata: when, where, and what. This same logic is expressed in the function \code{\link{trawlAgg}}, which prompts users to conceptualize aggregating trawl data as aggregating at different specificities for time, space, and biological dimensions. In this function's default for \code{formula}, the "where" is described by "stratum" (a sampling site), "when" by "year", and "what" by "spp" (species). The "K" value is a replicate, which could mean either "when" or "what" (and is similar to "haulid" in \code{\link{trawlAgg}}, which describes it as being indicative of both time and space). Given those identifying dimensions, we can then appropriately contextualize a measured value, e.g. "weight". Not all cases need these same dimensions to be in \code{formula} (e.g., if the measured value is bottom temperature ("btemp") the "what" dimension is not needed), which is why this function doesn't impose as much structure on what categories of columns should comprise \code{formula}.
#'
#' However, it can be useful to think of that structure for \code{formula} when trying to understand the distinction and between elements to be filled with \code{valFill} vs. \code{valAbsent}.
#'
#' For species data, there is an important distinction between a species not being present, and no sampling occurring. For example, entries for species data often do not include 0's, but 0's are implied for Species X when a site is sampled and no value is reported for Species X, even though a value is reported for other species in this instance and Species X is reported in other sampling events. In this case, the observation is 0, not NA.
#'
#' In the context just described, \code{valFill} would be NA (the default); if we wanted to change Species X (-esque) values from NA to 0 (under appropriate conditions), set \code{fixAbsent} to TRUE (default) and \code{valAbsent} to 0 (default). More generally, the \code{allNA_noSamp} argument defines the array dimension(s) for which, if all elements are NA while varying \code{allNA_noSamp} and holding other dimensions constant, the NA values are appropriate and should not be switched to \code{valAbsent} when \code{fixAbsent=TRUE}. For the species example given above, the default \code{allNA_noSamp="spp"} would be appropriate. In general, it may be fair to say that \code{allNA_noSamp} should be set to the "what" dimension(s) (as described above), and that \code{valAbsent} should be set to the value taken on by \code{valueName} when a measurement is attempted for a particular factor level of \code{valueName} that is absent.
#'
#' As implied the previous Details, casting data expands the number of explicit \code{valueName} elements in the data set. This function casts to an array because casting to a data.frame or data.table will take up far more RAM. The the difference in RAM increases with the number of identifying variables and how many unique levels they have (but also depends on whether those identifying variables are encoded as characters, factors, integers, doubles, etc).
#'
#' @return An array with dimensions equal to the number of unique values in each column in \code{formula}.
#'
#' @examples
#' mini.t <- trawlTrim(copy(clean.ebs), c.add="Picture")[Picture=="y"][pick(spp, 9)]
#' mini.a <- trawlAgg(
#' mini.t,
#' FUN=meanna,
#' bio_lvl="spp",
#' space_lvl="stratum",
#' time_lvl="year",
#' metaCols="common",
#' meta.action="unique1"
#' )
#'
#' mini.c <- trawlCast(mini.a, time_lvl~stratum~spp, grandNamesOut=c("t","j","i"))
#' (smry <- t(apply(mini.c, 3,
#' function(x){
#' c(
#' "NA"=sum(is.na(x)),
#' "0"=sum(!is.na(x)&x==0),
#' ">0"=sum(!is.na(x)&x>0)
#' )
#' }
#' )))
#'
#' \dontrun{
#' par(mfrow=c(3,3), mar=c(0.15,0.15,0.15,0), ps=8)
#' for(i in 1:nrow(smry)){
#' tspp <- rownames(smry)[i]
#' sppImg(tspp,
#' mini.a[spp==tspp,unique(common)],
#' side=3, line=-2, xpd=T
#'
#' )
#' }
#' }
#' @export
trawlCast <- function(x, formula=stratum~K~spp~year, valueName="wtcpue", valFill=NA, fixAbsent=TRUE, allNA_noSamp="spp", valAbsent=0, grandNamesOut=c("j","k","i","t"), ...){
	# Cast long-format trawl data to an array whose dimensions follow `formula`;
	# combinations absent from `x` are filled with `valFill` (drop=FALSE keeps
	# every level of every dimension so the array is fully crossed)
	xa <- reshape2::acast(x, formula=formula, value.var=valueName, fill=valFill, drop=FALSE, ...)
	if(fixAbsent){
		# Recover the dimension names from the casting formula. paste/collapse
		# guards against deparse() splitting a long formula across lines.
		if(inherits(formula, "formula")){
			formulaNames <- unlist(strsplit(paste(deparse(formula), collapse=""), "\\s*~\\s*"))
		}else if(is.character(formula)){
			formulaNames <- unlist(strsplit(formula, "\\s*~\\s*"))
		}else{
			stop("'formula' must be a formula or a character string")
		}
		# Split dimensions into those retained (apermDim) and the
		# "no sampling" dimension(s) that get collapsed over (apermDimNot)
		apermDimLogic <- !formulaNames%in%allNA_noSamp
		apermDim <- which(apermDimLogic)
		apermDimNot <- which(!apermDimLogic)
		do_aperm <- c(apermDim,apermDimNot)
		# do_aperm is a permutation of the dims, so order() inverts it directly
		undo_aperm <- order(do_aperm)
		# TRUE where, holding the retained dims fixed, every element along the
		# allNA_noSamp dim(s) is NA -- interpreted as "not sampled"
		unsamp.JK0 <- apply(xa, apermDim, function(v)all(is.na(v)))
		# Expand the collapsed logical back to the full array shape, then
		# permute back into xa's original dimension order
		unsamp.JK <- array(unsamp.JK0, dim=dim(xa)[do_aperm])
		unsamp.JK <- aperm(unsamp.JK, perm=undo_aperm)
		# apply(unsamp.JK, 2, sum) # how often each K had an NA
		xa.fix <- xa
		fix0 <- (is.na(xa) & !unsamp.JK) # had an NA, but actually sampled (some species non-NA)
		fixNA <- (is.na(xa) & unsamp.JK) # had an NA, probably not sampled (no species non-NA)
		stopifnot(all(is.na(xa[fixNA]))) # making sure unsamp.JK got it right
		# Use valAbsent (not a hard-coded 0) so the documented argument is honored
		xa.fix[fix0] <- valAbsent # switch NA-but-sampled cells to valAbsent
		stopifnot(all(is.na(xa.fix[fixNA]))) # not-sampled cells untouched
		stopifnot(all((xa.fix[fix0]==valAbsent))) # absences now hold valAbsent
		xa <- xa.fix
	}
	names(dimnames(xa)) <- grandNamesOut
	return(xa)
}
|
67975ed6da0f42c817194fdbac86b785bc96db13 | e5e29b60f2111cf6998b46a80cb5e034962b2cc5 | /rstuff/rmongodb/rmongodb/man/mongo.gridfile.destroy.Rd | 87e2b1ac12294bc309d1930e037135785d4a78da | [
"Apache-2.0"
] | permissive | BigBlueBox/GoodAndBad | 24e326862a5456b673b1928ffbb14bc1c47a4d4b | 4d2b8d3de523c3595fc21aa062eddf739f6c3a20 | refs/heads/master | 2021-01-01T20:35:55.494474 | 2014-08-24T17:56:29 | 2014-08-24T17:56:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 917 | rd | mongo.gridfile.destroy.Rd | % File rmongodb/man/mongo.gridfs.destroy.Rd
\name{mongo.gridfile.destroy}
\alias{mongo.gridfile.destroy}
\title{Destroy a mongo.gridfile object}
\description{
Releases the resources associated with a \link{mongo.gridfile} object.\cr
These are created by \code{\link{mongo.gridfs.find}()}.
It is not absolutely necessary to call this function since R's garbage collection will eventually
get around to doing it for you.
}
\usage{
mongo.gridfile.destroy(gridfile)
}
\arguments{
\item{gridfile}{A (\link{mongo.gridfile}) object.}
}
\value{
NULL
}
\examples{
mongo <- mongo.create()
if (mongo.is.connected(mongo)) {
gridfs <- mongo.gridfs.create(mongo, "grid")
gf <- mongo.gridfs.find(gridfs, "test.R")
print(mongo.gridfile.get.upload.date(gf))
mongo.gridfile.destroy(gf)
mongo.gridfs.destroy(gridfs)
}
}
\seealso{
\code{\link{mongo.gridfs.find}},\cr
\link{mongo.gridfile},\cr
\link{mongo.gridfs}.
}
|
9f7aa741278b8874ed132bc60d8dedb923ee3e5a | 12087b38abc7ab2a111d141eb32249f385d7a77f | /src/bike-load.R | 88390788da42f95bd71337da655326747ee12df7 | [
"MIT"
] | permissive | philparadis/final-report-bike-sharing | 41a3c79f0b47a7041379a32726d3b44aeaa644b4 | 2fd857e4da124d3f62390cd30ed757beca8b9995 | refs/heads/master | 2021-06-07T04:32:54.799207 | 2016-11-16T22:54:19 | 2016-11-16T22:54:19 | 67,766,766 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,652 | r | bike-load.R | # default.work.dir <- "D:/stat5703w/bike-sharing"
# hedos.work.dir <- "/proj/stat5703w/bike-sharing"
# yue.work.dir <- "C:/Users/Yue/Desktop/master's/2015 Winter/Data mining/bike-sharing"
#
# switch(Sys.info()[['user']],
# # Working directory for user "hedos"
# hedos = { work.dir <- hedos.work.dir },
# # Working directory for user "yue"
# Yue = { work.dir <- yue.work.dir },
# # If no matching username was found, use default working directory
# { work.dir <- default.work.dir })
# setwd(work.dir)
#
# #Create 'figures' and 'objects' subdirectories if they don't exist
# setwd(work.dir)
dir.create("figures", showWarnings = FALSE)
dir.create("objects", showWarnings = FALSE)
# =========================================
# Dataset characteristics
# =========================================
# Both hour.csv and day.csv have the following fields, except hr which is not available in day.csv
#
# - instant: record index
# - dteday : date
# - season : season (1:spring, 2:summer, 3:fall, 4:winter)
# - yr : year (0: 2011, 1:2012)
# - mnth : month ( 1 to 12)
# - hr : hour (0 to 23)
# - holiday : whether the day is a holiday or not (extracted from http://dchr.dc.gov/page/holiday-schedule)
# - weekday : day of the week
# - workingday : if day is neither weekend nor holiday is 1, otherwise is 0.
# + weathersit :
# - 1: Clear, Few clouds, Partly cloudy, Partly cloudy
# - 2: Mist + Cloudy, Mist + Broken clouds, Mist + Few clouds, Mist
# - 3: Light Snow, Light Rain + Thunderstorm + Scattered clouds, Light Rain + Scattered clouds
# - 4: Heavy Rain + Ice Pallets + Thunderstorm + Mist, Snow + Fog
# - temp : Normalized temperature in Celsius. The values are divided to 41 (max)
# - atemp: Normalized feeling temperature in Celsius. The values are divided to 50 (max)
# - hum: Normalized humidity. The values are divided to 100 (max)
# - windspeed: Normalized wind speed. The values are divided to 67 (max)
# - casual: count of casual users
# - registered: count of registered users
# - cnt: count of total rental bikes including both casual and registered
# Helper function to read and convert a date to POSIXlt type
# Parse a character date in "%m/%d/%y" form (e.g. "03/05/21") into a
# POSIXlt timestamp in UTC.
#
# Fixes a bug in the original, which called paste(date, time) with `time`
# not a parameter of the function: the name resolved to the stats::time
# closure, so every call errored ("cannot coerce type 'closure'"). The
# format string contains no time fields, so `date` alone is parsed.
#
# Args:
#   date: character vector of dates formatted month/day/2-digit-year.
# Returns: POSIXlt vector in UTC; NA where parsing fails.
as.myDate <- function(date)
{
  strptime(date, format = "%m/%d/%y", tz = "UTC")
}
# Register a virtual S4 class "myDate" so that read.table(colClasses=...)
# can coerce character columns straight to POSIXlt dates on import.
setClass("myDate")
# Coercion used by colClasses = "myDate": parse "YYYY-MM-DD" strings in UTC.
setAs("character", "myDate", function (from) strptime(from, "%Y-%m-%d", tz="UTC"))
### Read the Bike Sharing Dataset
hourly.filename <- "data/hour.csv"
bike.hourly.raw <-read.table(hourly.filename, header=TRUE, sep=",",
colClasses = c("integer",
"myDate",
"integer",
"integer",
"integer",
"integer",
"integer",
"integer",
"integer",
"integer",
"numeric",
"numeric",
"numeric",
"numeric",
"integer",
"integer",
"integer"))
### Validate that data was loaded correctly
dim(bike.hourly.raw) # Print dimensions
bike.hourly.raw[1:5,] # Print first 5 rows...
tail(bike.hourly.raw, 5) # Print last 5 rows...
### Read the Bike Sharing Dataset
daily.filename <- "data/day.csv"
bike.daily.raw <-read.table(daily.filename, header=TRUE, sep=",",
colClasses = c("integer",
"myDate",
"integer",
"integer",
"integer",
"integer",
"integer",
"integer",
"integer",
"numeric",
"numeric",
"numeric",
"numeric",
"integer",
"integer",
"integer"))
### Validate that data was loaded correctly
dim(bike.daily.raw) # Print dimensions
bike.daily.raw[1:5,] # Print first 5 rows...
tail(bike.daily.raw, 5) # Print last 5 rows...
#############################################
# Data pre-processing
#############################################
levels.binary <- as.factor(c(0, 1))
bike.hourly <- with(bike.hourly.raw,
data.frame(instant=instant,
date=dteday,
datetime=dteday + hr*3600,
season=factor(season, levels=c(1,2,3,4),
labels=c("spring","summer","fall","winter")),
yr=factor(yr, levels=c(0,1), labels=c("2011","2012")),
mnth=factor(mnth, levels=1:12,
labels=c("Jan","Feb","Mar","Apr",
"May","Jun","Jul","Aug",
"Sep","Oct","Nov","Dec")),
hr=factor(hr, levels=0:23, labels=as.character(0:23)),
holiday=factor(holiday, levels=levels.binary),
weekday=factor(weekday, levels=0:6,
labels=c("Sun","Mon","Tue","Wed","Thur","Fri","Sat")),
workingday=factor(workingday, levels=levels.binary),
weathersit=factor(weathersit, levels=c(1,2,3,4),
labels=c("clear","misty","rainy","stormy")),
atemp=atemp,
temp=temp,
hum=hum,
windspeed=windspeed,
casual=casual,
registered=registered,
cnt=cnt))
bike.daily <- with(bike.daily.raw,
data.frame(instant=instant,
date=dteday,
season=factor(season, levels=c(1,2,3,4),
labels=c("spring","summer","fall","winter")),
yr=factor(yr, levels=c(0,1), labels=c("2011","2012")),
mnth=factor(mnth, levels=1:12,
labels=c("Jan","Feb","Mar","Apr",
"May","Jun","Jul","Aug",
"Sep","Oct","Nov","Dec")),
holiday=factor(holiday, levels=levels.binary),
weekday=factor(weekday, levels=0:6,
labels=c("Sun","Mon","Tue","Wed","Thur","Fri","Sat")),
workingday=factor(workingday, levels=levels.binary),
weathersit=factor(weathersit, levels=c(1,2,3,4),
labels=c("clear","misty","rainy","stormy")),
atemp=atemp,
temp=temp,
hum=hum,
windspeed=windspeed,
casual=casual,
registered=registered,
cnt=cnt))
###############################################################
# Data-preprocessing II
# - Convert categorical variables into binary variables
# (Split any categorical variable with more than 2 classes
# into multiple binary variables)
# - Also, we transform "hr" into a categorical variable first.
###############################################################
bike.hourly.tmp <- bike.hourly
bike.hourly.binarized <- with(bike.hourly.tmp,
cbind(data.frame(instant=instant,
date=date,
datetime=datetime),
model.matrix(~ season + 0),
model.matrix(~ yr + 0),
model.matrix(~ mnth + 0),
model.matrix(~ hr + 0),
data.frame(holiday=as.numeric(levels(holiday))[holiday]),
model.matrix(~ weekday + 0),
data.frame(workingday=as.numeric(levels(workingday))[workingday]),
model.matrix(~ weathersit + 0),
data.frame(temp=temp,
atemp=atemp,
hum=hum,
windspeed=windspeed,
casual=casual,
registered=registered,
cnt=cnt)))
|
03beb18daa8bb85189a8b9b87caf318d9defe980 | fdab4f0f411b09444cbc727d7116997a21233de2 | /man/tsort.Rd | 25716ad76a5a7e8fc58b28fefc7f57510504a15e | [
"BSL-1.0"
] | permissive | Bioconductor/RBGL | 548f9313243faa7e9e3fc433ddb57cb84b62e89b | 9d7bde9cbc9db5651cdd018d26abf9acddfddb04 | refs/heads/devel | 2023-05-28T08:40:00.413700 | 2023-05-15T15:22:58 | 2023-05-15T15:22:58 | 102,150,272 | 1 | 6 | NOASSERTION | 2023-04-13T19:34:29 | 2017-09-01T20:23:47 | C++ | UTF-8 | R | false | false | 1,312 | rd | tsort.Rd | \name{tsort}
\alias{tsort}
%- Also NEED an `\alias' for EACH other topic documented here.
\title{ topological sort of vertices of a digraph }
\description{ returns the vertices of a directed acyclic graph (DAG)
in topological sort order }
\usage{
tsort(x) # now x assumed to be Bioconductor graph graphNEL
}
%- maybe also `usage' for other objects documented here.
\arguments{
\item{x}{ instance of class graphNEL from Bioconductor graph class}
}
\details{
calls to the topological\_sort algorithm of BGL. will check in BGL
whether the input is a DAG and return a vector of zeroes (of length
length(nodes(x))) if it is not. Thus this function can be used to check for
cycles in a digraph.
}
\value{
A character vector of vertices in the topological sort sequence.
}
\references{
Boost Graph Library ( www.boost.org/libs/graph/doc/index.html )
The Boost Graph Library: User Guide and Reference Manual;
by Jeremy G. Siek, Lie-Quan Lee, and Andrew Lumsdaine;
(Addison-Wesley, Pearson Education Inc., 2002), xxiv+321pp.
ISBN 0-201-72914-8
}
\author{ VJ Carey <stvjc@channing.harvard.edu> }
\examples{
data(FileDep)
tsind <- tsort(FileDep)
tsind
FD2 <- FileDep
# now introduce a cycle
FD2 <- addEdge("bar_o", "dax_h", FD2, 1)
tsort(FD2)
}
\keyword{ graphs }% at least one, from doc/KEYWORDS
|
e3509d02cb4cac11c52a40e40e00fb098d7b6b3a | 447b1868dcf993df08a88ca7e09bc1abcec0739e | /kaggle/sberbank/code/columns.R | d9b9d37e36b9b92739777a8f8c66590cf0f1f4b8 | [] | no_license | rareitmeyer/data_science | 5469d51800115748e8c3e6573c05a43897dac47f | a986e7fa4f86d0981692ffb7c1c386431cd717ac | refs/heads/master | 2020-05-31T21:59:33.497279 | 2017-07-30T21:02:55 | 2017-07-30T21:02:55 | 94,050,327 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,651 | r | columns.R | # Column groupings
# Name of the response column (sale price) predicted by the models.
predict_col = 'price_doc'
# Name of the unique row-identifier column.
id_col = 'id'
# Features describing the property itself: floor areas, layout,
# construction material and year, condition, and sale type.
raw_property_features <- c(
    'full_sq',
    'life_sq',
    'floor',
    'max_floor',
    'material',
    'build_year',
    'num_room',
    'kitch_sq',
    'state',
    'product_type'
)
raw_distance_features <- c(
'ID_metro',
'metro_min_avto',
'metro_km_avto',
'metro_min_walk',
'metro_km_walk',
'kindergarten_km',
'school_km',
'park_km',
'green_zone_km',
'industrial_km',
'water_treatment_km',
'cemetery_km',
'incineration_km',
'railroad_station_walk_km',
'railroad_station_walk_min',
'ID_railroad_station_walk',
'railroad_station_avto_km',
'railroad_station_avto_min',
'ID_railroad_station_avto',
'public_transport_station_km',
'public_transport_station_min_walk',
'water_km',
'water_1line',
'mkad_km',
'ttk_km',
'sadovoe_km',
'bulvar_ring_km',
'kremlin_km',
'big_road1_km',
'ID_big_road1',
'big_road1_1line',
'big_road2_km',
'ID_big_road2',
'railroad_km',
'railroad_1line',
'zd_vokzaly_avto_km',
'ID_railroad_terminal',
'bus_terminal_avto_km',
'ID_bus_terminal',
'oil_chemistry_km',
'nuclear_reactor_km',
'radiation_km',
'power_transmission_line_km',
'thermal_power_plant_km',
'ts_km',
'big_market_km',
'market_shop_km',
'fitness_km',
'swim_pool_km',
'ice_rink_km',
'stadium_km',
'basketball_km',
'hospice_morgue_km',
'detention_facility_km',
'public_healthcare_km',
'university_km',
'workplaces_km',
'shopping_centers_km',
'office_km',
'additional_education_km',
'preschool_km',
'big_church_km',
'church_synagogue_km',
'mosque_km',
'theater_km',
'museum_km',
'exhibition_km',
'catering_km'
)
location_id_features <- c('sub_area', raw_distance_features)
raw_raion_features <- c(
'area_m',
'raion_popul',
'green_zone_part',
'indust_part',
'children_preschool',
'preschool_quota',
'preschool_education_centers_raion',
'children_school',
'school_quota',
'school_education_centers_raion',
'school_education_centers_top_20_raion',
'hospital_beds_raion',
'healthcare_centers_raion',
'university_top_20_raion',
'sport_objects_raion',
'additional_education_raion',
'culture_objects_top_25',
'culture_objects_top_25_raion',
'shopping_centers_raion',
'office_raion',
'thermal_power_plant_raion',
'incineration_raion',
'oil_chemistry_raion',
'radiation_raion',
'railroad_terminal_raion',
'big_market_raion',
'nuclear_reactor_raion',
'detention_facility_raion',
'full_all',
'male_f',
'female_f',
'young_all',
'young_male',
'young_female',
'work_all',
'work_male',
'work_female',
'ekder_all',
'ekder_male',
'ekder_female',
'X0_6_all',
'X0_6_male',
'X0_6_female',
'X7_14_all',
'X7_14_male',
'X7_14_female',
'X0_17_all',
'X0_17_male',
'X0_17_female',
'X16_29_all',
'X16_29_male',
'X16_29_female',
'X0_13_all',
'X0_13_male',
'X0_13_female',
'raion_build_count_with_material_info',
'build_count_block',
'build_count_wood',
'build_count_frame',
'build_count_brick',
'build_count_monolith',
'build_count_panel',
'build_count_foam',
'build_count_slag',
'build_count_mix',
'raion_build_count_with_builddate_info',
'build_count_before_1920',
'build_count_1921.1945',
'build_count_1946.1970',
'build_count_1971.1995',
'build_count_after_1995'
)
raw_raion_id_features <- c('sub_area', raw_raion_features)
raw_neighborhood_features <- c(
'ecology',
'green_part_500',
'prom_part_500',
'office_count_500',
'office_sqm_500',
'trc_count_500',
'trc_sqm_500',
'cafe_count_500',
'cafe_sum_500_min_price_avg',
'cafe_sum_500_max_price_avg',
'cafe_avg_price_500',
'cafe_count_500_na_price',
'cafe_count_500_price_500',
'cafe_count_500_price_1000',
'cafe_count_500_price_1500',
'cafe_count_500_price_2500',
'cafe_count_500_price_4000',
'cafe_count_500_price_high',
'big_church_count_500',
'church_count_500',
'mosque_count_500',
'leisure_count_500',
'sport_count_500',
'market_count_500',
'green_part_1000',
'prom_part_1000',
'office_count_1000',
'office_sqm_1000',
'trc_count_1000',
'trc_sqm_1000',
'cafe_count_1000',
'cafe_sum_1000_min_price_avg',
'cafe_sum_1000_max_price_avg',
'cafe_avg_price_1000',
'cafe_count_1000_na_price',
'cafe_count_1000_price_500',
'cafe_count_1000_price_1000',
'cafe_count_1000_price_1500',
'cafe_count_1000_price_2500',
'cafe_count_1000_price_4000',
'cafe_count_1000_price_high',
'big_church_count_1000',
'church_count_1000',
'mosque_count_1000',
'leisure_count_1000',
'sport_count_1000',
'market_count_1000',
'green_part_1500',
'prom_part_1500',
'office_count_1500',
'office_sqm_1500',
'trc_count_1500',
'trc_sqm_1500',
'cafe_count_1500',
'cafe_sum_1500_min_price_avg',
'cafe_sum_1500_max_price_avg',
'cafe_avg_price_1500',
'cafe_count_1500_na_price',
'cafe_count_1500_price_500',
'cafe_count_1500_price_1000',
'cafe_count_1500_price_1500',
'cafe_count_1500_price_2500',
'cafe_count_1500_price_4000',
'cafe_count_1500_price_high',
'big_church_count_1500',
'church_count_1500',
'mosque_count_1500',
'leisure_count_1500',
'sport_count_1500',
'market_count_1500',
'green_part_2000',
'prom_part_2000',
'office_count_2000',
'office_sqm_2000',
'trc_count_2000',
'trc_sqm_2000',
'cafe_count_2000',
'cafe_sum_2000_min_price_avg',
'cafe_sum_2000_max_price_avg',
'cafe_avg_price_2000',
'cafe_count_2000_na_price',
'cafe_count_2000_price_500',
'cafe_count_2000_price_1000',
'cafe_count_2000_price_1500',
'cafe_count_2000_price_2500',
'cafe_count_2000_price_4000',
'cafe_count_2000_price_high',
'big_church_count_2000',
'church_count_2000',
'mosque_count_2000',
'leisure_count_2000',
'sport_count_2000',
'market_count_2000',
'green_part_3000',
'prom_part_3000',
'office_count_3000',
'office_sqm_3000',
'trc_count_3000',
'trc_sqm_3000',
'cafe_count_3000',
'cafe_sum_3000_min_price_avg',
'cafe_sum_3000_max_price_avg',
'cafe_avg_price_3000',
'cafe_count_3000_na_price',
'cafe_count_3000_price_500',
'cafe_count_3000_price_1000',
'cafe_count_3000_price_1500',
'cafe_count_3000_price_2500',
'cafe_count_3000_price_4000',
'cafe_count_3000_price_high',
'big_church_count_3000',
'church_count_3000',
'mosque_count_3000',
'leisure_count_3000',
'sport_count_3000',
'market_count_3000',
'green_part_5000',
'prom_part_5000',
'office_count_5000',
'office_sqm_5000',
'trc_count_5000',
'trc_sqm_5000',
'cafe_count_5000',
'cafe_sum_5000_min_price_avg',
'cafe_sum_5000_max_price_avg',
'cafe_avg_price_5000',
'cafe_count_5000_na_price',
'cafe_count_5000_price_500',
'cafe_count_5000_price_1000',
'cafe_count_5000_price_1500',
'cafe_count_5000_price_2500',
'cafe_count_5000_price_4000',
'cafe_count_5000_price_high',
'big_church_count_5000',
'church_count_5000',
'mosque_count_5000',
'leisure_count_5000',
'sport_count_5000',
'market_count_5000'
)
neighborhood_id_features <- raw_raion_id_features
|
a4fda22696394df1bd340f3f85680e59c0c9c5fc | 933bd62bb736226b5d8bc29d93ca6b9be2ffe4eb | /man/PlotSaturationEstimates.Rd | 610516eb30decbd8e0f947e542efe3fe4454e03f | [] | no_license | kharchenkolab/dropestr | c2c983089961cc00bb8fa7c62e4a0f9d889e9d1c | 23d6caa8f927493bcfea2e1ac98d13fa3b21b5dc | refs/heads/master | 2022-12-13T19:50:26.715802 | 2020-09-17T02:42:02 | 2020-09-17T02:42:12 | 292,871,624 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 430 | rd | PlotSaturationEstimates.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quality_control.R
\name{PlotSaturationEstimates}
\alias{PlotSaturationEstimates}
\title{Plot estimated library saturation}
\usage{
PlotSaturationEstimates(preseq.estimates)
}
\arguments{
\item{preseq.estimates}{named list of results of EstimateSaturation calls}
}
\value{
ggplot object with the plot
}
\description{
Plot estimated library saturation
}
|
55918c2527b477c63bea843e66d9e59ff374abcc | 20b1b50f86dd29003c560c2e375086946e23f19a | /gwas/hwe_replace_filters.R | f49aaf65881d959c563aee14139c2c46f0cdf6a9 | [] | no_license | ericksonp/diapause-scripts-clean | cfc3ed0433114ee756019105b364f40e03b2419d | c93a052e9d63b9f7c60f7c18c1ad004385b59351 | refs/heads/master | 2021-07-09T07:16:03.326322 | 2020-07-31T17:00:37 | 2020-07-31T17:00:37 | 170,798,493 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,526 | r | hwe_replace_filters.R | library(GWASTools)
library(data.table)
library(SNPRelate)
library(gdsfmt)
library(doMC)
registerDoMC(20)
library(foreach)
##this script generates a master data table with informtion about each snp that is used for filtering and for permutation analysis, including:
# of times the snp was heterozygous in a founder
# of times the snp was replaced due to being missing in parent or in a "bad path" (haplotype path < 1 Mb)
#hardy weinberg equilibrium of each snp in cage A, cage B, and combined
#fst between cage A and cage B
# population allele frequency
#calculate the number of heterozygous parental sites in each individual
bad.paths=fread("/scratch/pae3g/final_reconstruction2/bad_paths.csv")
# Read one founder's phased genotype file and return the sites at which
# either inherited haplotype is heterozygous ("0/1").
#
# Side effects: prints the sample name as a progress indicator, and reads
# the global `bad.paths` table (loaded above) to mask haplotype segments
# flagged as unreliable: within each bad segment the affected haplotype is
# set to "." so it cannot contribute heterozygous calls.
#
# Args:
#   fn: gzipped genotype file name, e.g. "PAE123.14.vcf.gz"; the sample id
#       is everything before the first ".". Read via an external `zcat`
#       pipe, so this assumes a Unix-like system.
# Returns: data.table with columns chr, position, sample.id — one row per
#   heterozygous site per haplotype; NULL (implicitly) for empty files.
import_data_het<-function(fn){
    sample=strsplit(fn,split="[.]")[[1]][1]
    if (!file.size(fn) == 0) {
        #read in data and rename columns
        dat<-fread(paste('zcat ', fn, sep=""))
        names(dat)=c("chr", "position", "h1", "h2")
        dat[,sample.id:=sample]
        print(sample)
        setkey(dat, chr, position)
        # restrict the bad-path mask to segments belonging to this sample
        bad.paths.dat=bad.paths[sample.id==sample]
        #for each bad path segment, mask the appropriate haplotype
        foreach(bad.chr=bad.paths.dat$chr, bad.start=bad.paths.dat$cons.start, bad.stop=bad.paths.dat$cons.stop, bad.hap=bad.paths.dat$haplotype)%do%{
            if(bad.hap=="par1"){
                dat[chr==bad.chr & position>=bad.start & position<=bad.stop, h1:="."]
            }
            if(bad.hap=="par2") {
                dat[chr==bad.chr & position>=bad.start & position<=bad.stop, h2:="."]
            }
        }
        # one output row per haplotype that is heterozygous at a site
        return(rbind(dat[h1=="0/1", .(chr, position, sample.id)], dat[h2=="0/1", .(chr, position, sample.id)]))
    }
}
# Collect parental heterozygosity for both experimental cages ("A" and "B"):
# for each cage, read every individual's reconstruction output in parallel
# (%dopar%, workers registered via registerDoMC above) and stack the
# per-sample tables together.
swarm.het<-foreach(swarm=c("A", "B"))%do%{
    wd=paste("/scratch/pae3g/genome-reconstruction/", swarm, sep="")
    setwd(wd)
    file_list <- list.files(pattern = "PAE.*.14.vcf.gz$")
    obj <- foreach(fn=file_list)%dopar%{
        import_data_het(fn)
    }
    obj=rbindlist(obj)
    return(obj)
}
swarm.het=rbindlist(swarm.het)
# per-SNP count of founder haplotypes heterozygous at each site...
swarm.het.sum=swarm.het[,.(n=.N), .(chr, position)][order(chr, position)]
# ...and the same count broken down per founder individual
swarm.het.samp=swarm.het[,.(n=.N), .(chr, position, sample.id)][order(chr, position)]
# persist the per-SNP summary for the downstream filtering step
write.table(swarm.het.sum, "/scratch/pae3g/final_reconstruction2/parent_het_per_snp.txt", sep="\t", quote=F, row.names=F)
#calculate HWE for every SNP in swarm A, swarm B, and both
geno <- snpgdsOpen("/scratch/pae3g/genome-reconstruction/final2_draw1_replaced.vcf.gds", allow.fork=TRUE, readonly = FALSE)
phenos <- fread("/scratch/pae3g/phenos/phenos_062018.txt")
setkey(phenos, id)
genotyped_samples<- read.gdsn(index.gdsn(geno, "sample.id"))
#add in metadata to gds to file
metadata=fread("/scratch/pae3g/hybrid/PAE_AOB_library_metadata_all.txt", header=FALSE)
names(metadata)=c("sample.id", "generation", "library", "swarm")
metadata=metadata[sample.id%in%genotyped_samples]
metadata=metadata[match(genotyped_samples, metadata$sample.id),]
samp.annot <- metadata[,.(generation, swarm)]
add.gdsn(geno, "sample.annot", samp.annot)
#make list of A and B samples
A=intersect(phenos[swarm=="A", id],genotyped_samples)
B=intersect(phenos[swarm=="B", id],genotyped_samples)
#calculate HWE for each swarm and combine with snp information
# Hardy-Weinberg equilibrium p-values and allele frequencies per SNP,
# computed three ways: within cage A, within cage B, and across all
# samples combined. (TRUE is spelled out rather than the reassignable T,
# and <- replaces = for assignment, per R convention.)
a.hwe <- snpgdsHWE(geno, sample.id = A, snp.id = NULL, with.id = TRUE)
a.info <- snpgdsSNPList(geno, sample.id = A)
a.dat <- data.table(snp.id = a.hwe$snp.id, id = a.info$rs.id, chr = a.info$chromosome, pos = a.info$position, a.hwe = a.hwe$pvalue, a.freq = a.info$afreq)

b.hwe <- snpgdsHWE(geno, sample.id = B, snp.id = NULL, with.id = TRUE)
b.info <- snpgdsSNPList(geno, sample.id = B)
b.dat <- data.table(snp.id = b.hwe$snp.id, id = b.info$rs.id, chr = b.info$chromosome, pos = b.info$position, b.hwe = b.hwe$pvalue, b.freq = b.info$afreq)

# all samples combined (both cages)
hwe <- snpgdsHWE(geno, snp.id = NULL, with.id = TRUE)
info <- snpgdsSNPList(geno)
dat <- data.table(snp.id = hwe$snp.id, id = info$rs.id, chr = info$chromosome, pos = info$position, hwe = hwe$pvalue, freq = info$afreq)
#merge all together
hwe.filter=merge(a.dat, merge(dat, b.dat, by=c("snp.id", "id", "chr", "pos")), by=c("snp.id", "id", "chr", "pos"))
#load in genotype replacement data from the logs produced by replace_missing_data_vcf.sh
a.replace=fread("/scratch/pae3g/genome-reconstruction/replace_A_1_better.log")
a.replace=a.replace[2:.N] #removes #CHROM row
names(a.replace)=c("chr", "pos", "nRef.a", "nAlt.a", "mostLikelyGeno.a", "n.a", "total.a")
b.replace=fread("/scratch/pae3g/genome-reconstruction/replace_B_1_better.log")
b.replace=b.replace[2:.N, 3:7] #removes #CHROM row
names(b.replace)=c("nRef.b", "nAlt.b", "mostLikelyGeno.b", "n.b", "total.b")
#merge with other data
replace=cbind(a.replace, b.replace)
replace[,pos:=as.integer(pos)]
hwe.filter=merge(hwe.filter, replace, by=c("chr", "pos"))
hwe.filter=hwe.filter[chr%in%(c("2L", "2R", "3L", "3R", "X"))]
# Fst estimation between cages A and B
group <- as.factor(read.gdsn(index.gdsn(geno, "sample.annot/swarm")))
v <- snpgdsFst(geno, population=group, method="W&C84",autosome.only=FALSE, with.id=T, remove.monosnp=F)
fst=data.table(snp.id=v$snp.id, fst=v$FstSNP)
#merge fst with other data
hwe.filter=merge(hwe.filter, fst, by="snp.id")
#merge in heterozygosity data calculated aboave
het=fread("/scratch/pae3g/final_reconstruction2/parent_het_per_snp.txt")
setnames(het, "position", "pos")
setnames(het, "n", "n.het")
hwe.filter=merge(hwe.filter, het, by=c("chr", "pos"), all=T)
hwe.filter[is.na(n.het), n.het:=0]
#calculate the total number of individuals that had imputed (heterozygous parents)
hwe.filter[,n.imputed:=n.a+n.b]
#calculate the total number of repacedsites
hwe.filter[,total.replaced:=n.a+n.b+n.het]
#rename as "dat"
dat<-hwe.filter
#make a filter column
dat[,qc_filter:="PASS"]
dat[,map_filter:="PASS"]
#filter snps with more than 10% of genotypes replaced
dat[total.replaced>566, qc_filter:="FAIL"]
#filter things out of hwe in one or both cages
dat[a.hwe<10^-20, qc_filter:="FAIL"] #12800
dat[b.hwe<10^-20, qc_filter:="FAIL"] #13000
dat[hwe<10^-20, qc_filter:="FAIL"] #16000
#filter things with high fst between pops
dat[fst>.2, qc_filter:="FAIL"] #13000
#filter things that are monomorphic within a cage
dat[a.freq==0|a.freq==1|b.freq==0|b.freq==1, qc_filter:="FAIL"] #~1 million sites
#leaves ~1.2 million snps
#for mapping don't have to be as stringent; can keep things that are monomorphic in a cage
dat[total.replaced>566, map_filter:="FAIL"]
#filter things out of hwe in one or both cages
dat[a.hwe<10^-20, map_filter:="FAIL"] #12800
dat[b.hwe<10^-20, map_filter:="FAIL"] #13000
dat[hwe<10^-20, map_filter:="FAIL"] #16000
dat[fst>.2, map_filter:="FAIL"]
dat[freq==0, map_filter:="FAIL"]
#this should result in a good snpset for PCA and gwas to save computational time
write.table(dat, "/scratch/pae3g/final_reconstruction2/hwe_missing_maf_filters.txt", sep="\t", quote=F, row.names=F)
#add in bin information and create new filter for adaptive permutations
library(data.table)
library(Hmisc)
dat<-fread("/scratch/pae3g/final_reconstruction2/hwe_missing_maf_filters.txt")
dat[,snpID:=snp.id]
dat[,MAF:=ifelse(freq>.5, 1-freq, freq)]
dat[,mis.bin:=cut2(total.replaced, cuts=seq(0,600, by=100))]
dat[,maf.bin:=cut2(MAF, cuts=seq(0, .5, by=.05))]
bins<-dat[map_filter=="PASS", .(n.in.bin=.N), .(maf.bin, mis.bin)]
dat<-merge(dat, bins, all=T, by=c("maf.bin", "mis.bin"))
dat[, adaptive_perm_filter:=map_filter]
dat[n.in.bin>10000, adaptive_perm_filter:="FAIL"]
write.table(dat, "/scratch/pae3g/final_reconstruction2/hwe_missing_maf_filters.txt", sep="\t", quote=F, row.names=F)
|
fa2a5e13e52f2bb2f5b87152dfc5514e28ac3dbc | 160622f50fc2fe9a6aaa3095849f7a8bd2caa496 | /R/bhit_map.R | 4b0ff6406ffc227c444c48e3ca8bcc48e1370c56 | [] | no_license | jayhesselberth/platetools | 53fd0d16deca84ec6efd0c1f838d7a9fed835a1b | 617a33fc4a3482b85fc1fd8b38dcc82a53a10176 | refs/heads/master | 2020-03-22T01:45:56.557163 | 2018-06-25T13:36:13 | 2018-06-25T13:36:13 | 139,328,253 | 1 | 0 | null | 2018-07-01T12:42:49 | 2018-07-01T12:42:48 | null | UTF-8 | R | false | false | 4,595 | r | bhit_map.R | #' Platemap to identify 'hits' following a B-score normalisation
#'
#' Produces a platemap with colours indicating wells above or below selected threshold
#' after normalising for systematic plate effects via B-score smooth. The threshold is
#' definined calculated from a z-score, i.e plus or minus standard deviations from the
#' plate mean.
#'
#' @param data Vector of numerical values
#' @param well Vector of well identifiers, e.g "A01"
#' @param plate Number of wells in whole plate (96, 384 or 1536)
#' @param threshold Standard deviations from the plate average to indicate a hit.
#' default is set to +/- 2 SD.
#' @param palette RColorBrewer palette
#'
#' @return ggplot plot
#'
#' @import ggplot2
#' @importFrom RColorBrewer brewer.pal
#'
#' @export
#'
#' @examples
#' df <- data.frame(vals = rnorm(384),
#' well = num_to_well(1:384, plate = 384))
#'
#' bhit_map(data = df$vals,
#' well = df$well,
#' plate = 384,
#' threshold = 3)
bhit_map <- function(data, well,
plate = 96,
threshold = 2,
palette = "Spectral"){
# need to transform columns of wellID and data into
# matrix corresponding to well positions:
platemap <- plate_map(data, well)
# ensure data is ordered properly before passing to matrix()
platemap <- platemap[order(platemap$Row, platemap$Column), ]
check_plate_input(well, plate)
if (plate == 96L) {
# transform into 12*8 matrix (96-well plate)
# fills matrix in a row-wise fashion i.e, A01, A02 ...
mat_plate_map <- matrix(data,
nrow = 8,
ncol = 12,
byrow = TRUE)
} else if (plate == 384L) {
# transform into 24*16 matrix (384-well plate)
# fills matrix in a row-wise fashion, i.e A01, A02 ...
mat_plate_map <- matrix(data,
nrow = 16,
ncol = 24,
byrow = TRUE)
} else if (plate == 1536L) {
mat_plate_map <- matrix(data,
nrow = 32,
ncol = 24,
byrow = TRUE)
} else {
stop("Not a plate format.\nArgument 'plate' should be 96, 384 or 1536.",
call. = FALSE)
}
# median polish of the data
data_pol <- medpolish(mat_plate_map,
na.rm = TRUE,
trace.iter=FALSE)
# transpose of residual matrix (as counts in column-wise fashion)
# now well numbers correspond i.e t_out[12] = A12, t_out[13] = B01
t_out <- t(data_pol$residuals)
# 1:96 elements of residuals corresponding to 1:96 of num_to_well
# produce dataframe of two columns
df <- NULL
for (num in 1:length(t_out)) {
df$residual[num] <- t_out[num]
df$well[num] <- num_to_well(num, plate = plate)
}
df <- as.data.frame(cbind("well" = df$well, "residual" = df$residual))
# change residuals from factor to numeric
df$residual <- as.numeric(as.character(df$residual))
platemap$values <- scale(df$residual)[, ]
platemap$hit <- NA
# calculate whether values are beyond the threshold; defined as hit or null
for (row in 1:nrow(platemap)) {
if (platemap[row, 'values'] > threshold) {
platemap$hit[row] <- "hit"
} else if (platemap[row, 'values'] < (-1*threshold)) {
platemap$hit[row] <- "neg_hit"
} else {
platemap$hit[row] <- "null"
}
}
# change name of hit to values
# plt96 and plt384 are colouring points by value, in this case needs to be hit
platemap$actual_vales <- platemap$values
platemap$values <- platemap$hit
# RColorBrewerPallette
my_cols <- brewer.pal(3, palette)
my_colours <- c(hit = my_cols[1], neg_hit = my_cols[3], null = my_cols[2])
if (plate == 96){
# produce a 96-well plate map layout in ggplot
plt <- plt96(platemap) +
scale_fill_manual("hit", values = my_colours) +
theme_bw()
} else if (plate == 384) {
# produce a 384-well plate map layout in ggplot
plt <- plt384(platemap) +
scale_fill_manual("hit", values = my_colours) +
theme_bw()
} else if (plate == 1536L) {
plt <- plt1536(platemap) +
scale_fill_manual("hit", values = my_colours) +
theme_bw()
} else {
stop("Not a valid plate format. Either 96 or 384.", call. = FALSE)
}
return(plt)
}
|
72a66301bde4b1ea42c03a29bc2d8db60a4b8df3 | 6e4616b3a160a88e67eb53ad7d61c7f743c07da0 | /Commodity code update.R | 88bd62ce0c83139991fbfdc0231359b452b60b42 | [] | no_license | FoodStandardsAgency/TradeDataVis | df3d3da0e12b174fd7b50f0fd335b34c0b2307ce | e9b13ca3113ee6213403b71611eda77c6c823a4c | refs/heads/master | 2022-10-13T08:19:29.061012 | 2022-09-28T17:46:21 | 2022-09-28T17:46:21 | 156,387,739 | 2 | 1 | null | 2022-09-28T17:46:22 | 2018-11-06T13:27:23 | R | UTF-8 | R | false | false | 6,264 | r | Commodity code update.R | # SCRIPT START ################################################################
# LIBRARY IMPORT AND CONSTANTS ================================================
# Install any missing dependencies, then attach them.
# requireNamespace() tests availability without attaching (and without the
# start-up noise of require()); library() then errors loudly if an install
# failed, instead of the original require()/library() pattern that attached
# each package twice and could continue silently after a failed install.
for (pkg in c("readr", "dplyr", "stringr", "RPostgreSQL", "readxl")) {
    if (!requireNamespace(pkg, quietly = TRUE)) {
        install.packages(pkg)
    }
    library(pkg, character.only = TRUE)
}
# make names db safe: no '.' or other illegal characters,
# all lower case and unique
# Convert arbitrary column names into database-safe identifiers:
# lower-cased, with runs of non-alphanumeric characters collapsed to "_",
# de-duplicated (make.names appends ".1", ".2", ... suffixes), and any
# remaining "." (introduced by make.names) replaced by "_".
#
# Args:
#   names: character vector of candidate column names.
# Returns: character vector of lower-case, unique, underscore-only names.
dbSafeNames <- function(names) {
  names <- gsub('[^a-z0-9]+', '_', tolower(names))
  names <- make.names(names, unique = TRUE, allow_ = TRUE)
  gsub('.', '_', names, fixed = TRUE)
}
library("RPostgreSQL")
library("readr")
library("rvest")
library("dplyr")
library("data.table")
library("tidyr")
library("tibble")
# Set the working directory.
setwd(
paste0(
"C:/Users/901538/OneDrive - Food Standards Agency/Documents/",
"Trade database update/Credentials Write"
)
)
# Connect to the database.
pg <- dbDriver("PostgreSQL")
db_env <- read_delim(".env", delim = "=", col_names = FALSE, trim_ws = TRUE)
trade_data <- dbConnect(pg, user = db_env[1, 2], password = db_env[2, 2],
host = db_env[3, 2], port = db_env[4, 2], dbname = db_env[5, 2])
# COMCODE TABLE BUILD =========================================================
# Read Data using readr and tidy as appropriate -------------------------------
CNURL <- "http://ec.europa.eu/eurostat/ramon/nomenclatures/index.cfm?TargetUrl=ACT_OTH_CLS_DLD&StrNom=CN_2022&StrFormat=CSV&StrLanguageCode=EN&IntKey=&IntLevel=&bExport="
CN <- read_csv2(CNURL)
CN <- tibble(CommodityCode = CN$Code...5,Parent = CN$Parent...6,Description = CN[[8]])
colnames(CN) <- c("CommodityCode", "Parent", "Description")
CN$CommodityCode <- gsub(" ", "", CN$CommodityCode)
CN$Parent <- gsub(" ", "", CN$Parent)
CN <- CN[is.na(CN$CommodityCode) == FALSE,]
# store indices for Section Characters (roman numerals)
Sections <- tibble(Section = CN$CommodityCode[grep("^.*(I|V|X).*$",CN$CommodityCode)],
Code = CN$CommodityCode[grep("^.*(I|V|X).*$",CN$CommodityCode)+1],
Description = CN$Description[grep("^.*(I|V|X).*$",CN$CommodityCode)])
control <- dbGetQuery(trade_data, "SELECT mk_comcode,mk_commodity_alpha_1 FROM public.control")
control <- tibble(commoditycode = control$mk_comcode, description = control$mk_commodity_alpha_1)
control <- arrange(control,commoditycode)
# Create a complete Codes and Parents column ----------------------------------
# Extracts numeric comcodes (IE: not section level).
codesonly <- CN$CommodityCode[grepl("^.*(I|V|X).*$",CN$CommodityCode) == FALSE]
codesonly <- unique(c(codesonly, control$commoditycode))
# Commodity Nomenclature codes contain a ragged hierarchy. This means that
# a child-parent relationship can span levels in the hierarchy. Therefore it
# isn't simply a case of chopping off the last two digits of a code gives the
# parent's code - it may not exist! The code below generates a parent through
# recursive removal of last two digits in the commodity code.
# Once all the parents can be found within the commodity code vector, create tibble.
# Derive each commodity code's parent within the ragged CN hierarchy.
# A naive parent is the code with its last two digits removed, but because
# the hierarchy skips levels that candidate may not exist in `codesonly`.
# Keep stripping two digits at a time until every non-empty candidate is a
# real code. (The original comment promised this recursion, but the code
# applied only a single vapply pass, so codes whose true parent sat more
# than one level up were left pointing at non-existent codes.)
# Top-level (2-digit) codes end up with parent "" -- same as before.
parents <- substr(codesonly, 1, nchar(codesonly) - 2)
needs_fix <- !(parents %in% codesonly) & nchar(parents) > 0
while (any(needs_fix)) {
    parents[needs_fix] <- substr(parents[needs_fix], 1, nchar(parents[needs_fix]) - 2)
    needs_fix <- !(parents %in% codesonly) & nchar(parents) > 0
}
codeandparent <- tibble(commoditycode = codesonly, parent = parents)
# Joining data ----------------------------------------------------------------
# Make all column names database-safe before joining/writing.
# NOTE(review): dbSafeNames() is defined elsewhere; the code below assumes it
# lower-cases names (e.g. CommodityCode -> commoditycode) -- confirm.
colnames(CN) <- dbSafeNames(colnames(CN))
colnames(codeandparent) <- dbSafeNames(colnames(codeandparent))
colnames(control) <- dbSafeNames(colnames(control))
# joins new parent vector to tibble, merges cols.
# Prefer the parent already present in CN (parent.x); fall back to the
# generated one (parent.y).
CN <- full_join(CN, codeandparent, by = "commoditycode")
CN$parent.z <- ifelse(is.na(CN$parent.x), CN$parent.y, CN$parent.x)
CN <- tibble(commoditycode = CN$commoditycode, parent = CN$parent.z, description = CN$description)
# joins descriptions from control file to tibble, merges cols
CN <- full_join(CN, control, by = "commoditycode")
CN <- arrange(CN, commoditycode)
CN$description.z <- ifelse(is.na(CN$description.x), CN$description.y, CN$description.x)
CN <- tibble(commoditycode = CN$commoditycode, parent = CN$parent, description = CN$description.z)
# remember the indices object? we can now put section numbers back in the right place
# using the data stored in that tibble! In reverse order so add_row doesn't get confused...
# (inserting from the bottom up keeps the not-yet-processed row indices valid)
for (i in length(Sections$Section):1){
  CN <- add_row(CN, commoditycode = Sections$Section[i],
                parent = "",
                description = Sections$Description[i],
                .before = grep(paste("^", Sections$Code[i], "$", sep=""),CN$commoditycode))
}
CN <- CN[!duplicated(CN$commoditycode),]
#Join new list of commodity codes (CN) to old list of commodity codes (comcode)
comcode <- dbGetQuery(trade_data, "SELECT * FROM comcode")
newcomcode <- full_join(CN, comcode, by = "commoditycode")
# Again prefer the freshly built values (.x) and fall back to the existing
# database values (.y).
newcomcode$parent.z <- ifelse(is.na(newcomcode$parent.x), newcomcode$parent.y, newcomcode$parent.x)
newcomcode$description.z <- ifelse(is.na(newcomcode$description.x), newcomcode$description.y, newcomcode$description.x)
newcomcode <- tibble(commoditycode = newcomcode$commoditycode, parent = newcomcode$parent.z, description = newcomcode$description.z)
# creates new table, adds to db -----------------------------------------------
# NOTE(review): the sequence below writes the table, empties it, sets the
# client encoding, tries to add a primary key (ignoring failure, e.g. when the
# constraint already exists), then appends the final rows. The first
# dbWriteTable will error if the table already exists -- confirm this
# bootstrap dance is intended.
dbWriteTable(trade_data, 'comcode', newcomcode, row.names=FALSE)
dbSendQuery(trade_data, "delete from comcode")
dbSendQuery(trade_data, "SET client_encoding = 'LATIN1'")
try(dbSendQuery(trade_data, "alter table comcode add constraint control_pkey PRIMARY KEY (CommodityCode)"))
dbWriteTable(trade_data,'comcode', newcomcode, row.names=FALSE, append = TRUE) |
d5f89257daa168238a96e92ef05e79406f0ffe9c | 3d08da73ff2cee676f5087eb86dc46ddc34a78d0 | /IGV_gistic.R | f17af8926904694d4616e5455592b0e4c088278d | [] | no_license | Jkang-alien/GISTIC2IGV | 748ec07b479b4ee177f436d81123cedcfcba9cbd | bcd681289dbc05e6e7942ab4d982c408ad4b7935 | refs/heads/master | 2021-01-10T16:33:28.331068 | 2015-11-27T04:20:07 | 2015-11-27T04:20:07 | 46,955,058 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,617 | r | IGV_gistic.R | #setwd('/home/jun/Programs/GISTIC2.0/melanoma/score.gistic/')
# Convert every GISTIC score file in the working directory into an IGV track
# file: append two synthetic chr23 "Amp" rows (a fixed pair of reference
# scores -- presumably to pin IGV's colour scale; confirm), swap the G-score
# and -log10(q) columns, and back-transform the G-score from log2 space.
score_files <- list.files(path = ".", pattern = '^score_*', all.files = FALSE,
                          full.names = FALSE, recursive = FALSE,
                          ignore.case = FALSE, include.dirs = FALSE, no.. = FALSE)
# Two reference rows on chromosome 23 carrying G-scores of 2 and 4; identical
# for every file, so built once outside the loop.
scale_rows <- data.frame(Type = rep('Amp', 2),
                         Chromosome = rep(23, 2),
                         Start = c(10000001, 30000001),
                         End = c(30000001, 40000001),
                         X.log10.q.value. = rep(0, 2),
                         G.score = c(2, 4),
                         average.amplitude = rep(0, 2),
                         frequency = rep(0, 2))
for (score_file in score_files) {
  scores <- read.delim(score_file)
  combined <- rbind.data.frame(scores, scale_rows)
  # Reorder columns 5 and 6 so G-score precedes -log10(q-value).
  reordered <- combined[, c(1:4, 6, 5, 7, 8)]
  # Undo the log2(x + 1) transform: round to 1 dp first, 2 dp after.
  reordered$G.score <- round(2^round(reordered$G.score, digits = 1) - 1, 2)
  write.table(reordered, paste0('igv', score_file),
              quote = FALSE, sep = "\t",
              row.names = FALSE, append = FALSE,
              col.names = c('Type', 'Chromosome', 'Start',
                            'End', 'G-score', '-log10(q-value)', 'Average-amplitude', 'frequency'))
}
# Same conversion for the BRAF score file, done separately from the loop
# because it uses a different pair of chr23 reference rows.
braf <- read.delim('scores_braf.gistic')
# NOTE(review): Start/End here are on a 1e6 scale with G-scores 1 and 2,
# unlike the 1e7-scale / 2-and-4 markers used in the loop above -- confirm
# this difference is intentional.
index = data.frame(Type = rep('Amp',2), Chromosome = rep(23,2), Start = c(1000000, 3000000),
                   End = c(2000000, 4000000), X.log10.q.value. = rep(0,2), G.score = c(1,2),
                   average.amplitude = rep(0,2), frequency = rep(0,2))
braf <- rbind.data.frame(braf, index)
# Exploratory prints kept from the original analysis session.
summary(index)
summary(braf)
colnames(braf)
# Reorder so G-score precedes -log10(q-value), matching the igv* files
# written by the loop above.
braf <- braf[,c(1:4,6,5,7,8)]
head(braf)
tail(braf)
# Undo the log2(x + 1) transform, as in the loop above.
braf$G.score <- round(2^round(braf$G.score, digits = 1)-1,2)
# BUG FIX: the separator used to be "\t " (tab plus a space), producing a
# malformed tab-delimited file, and col.names = TRUE wrote R's mangled header
# names (e.g. X.log10.q.value.). Use a plain tab and the same explicit header
# as the igv* files written above.
write.table(braf, 'braf.gistic', quote = FALSE, sep = "\t",
            row.names = FALSE,
            col.names = c('Type', 'Chromosome', 'Start',
                          'End', 'G-score', '-log10(q-value)', 'Average-amplitude', 'frequency'))
aca513c0f0fd1e402aee09de2a01b52eb67eb43f | 8a6cbc8bc19c939e63ceefebe3a2b2e64d690967 | /catch/catch.R | e8f90e3b5f4d5541fffc12e599f836b81a5fc847 | [] | no_license | mburgass/noba | 2cb065838009ff401822b31f022948a04922c1c5 | b6844f42cafba69ee7a943664b08f52762c47a04 | refs/heads/master | 2021-01-19T00:33:01.210045 | 2019-04-10T13:58:56 | 2019-04-10T13:58:56 | 87,182,378 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,363 | r | catch.R | closeAllConnections()
library(tidyverse)
#Read base case
# Catch files are wide (one column per stock); keep Year plus the seven focal
# stocks, reshape to long (species, catch) rows, tag each file with its
# scenario label and truncate to the hindcast period (< 2016).
# NOTE(review): the base case is tagged scenario="fmsy1", the same label as
# fmsy1_catch below; in fmsy_catch it stands in as the 1x-Fmsy series, but
# all_catch later binds BOTH, giving two series under one label -- confirm.
read.csv("catch/bc_catch.csv") %>% select(Year, REO, GRH, HAD, SAI, RED, NCO, CAP) %>%
  gather("species", "catch", 2:8) %>% mutate(scenario="fmsy1") %>% filter(Year<2016) -> bc_catch
#Read fisheries scenarios
read.csv("catch/fmsy2_catch.csv")%>% select(Year, REO, GRH, HAD, SAI, RED, NCO, CAP) %>%
  gather("species", "catch", 2:8) %>% mutate(scenario="fmsy2") %>% filter(Year<2016) -> fmsy2_catch
read.csv("catch/fmsy1_catch.csv")%>% select(Year, REO, GRH, HAD, SAI, RED, NCO, CAP) %>%
  gather("species", "catch", 2:8) %>% mutate(scenario="fmsy1")%>% filter(Year<2016) -> fmsy1_catch
read.csv("catch/fmsy05_catch.csv")%>% select(Year, REO, GRH, HAD, SAI, RED, NCO, CAP) %>%
  gather("species", "catch", 2:8) %>% mutate(scenario="fmsy05") %>% filter(Year<2016) -> fmsy05_catch
# Combine the three Fmsy levels (0.5x, 1x via the base case, 2x).
fmsy_catch<- rbind(fmsy05_catch, bc_catch, fmsy2_catch)
# Catch by species within each scenario, then by scenario within each species.
ggplot(fmsy_catch, aes(Year, catch)) +geom_line(aes(colour=species))+
  facet_wrap(~scenario)
ggplot(fmsy_catch, aes(Year, catch)) + geom_line(aes(colour=scenario)) +
  facet_wrap(~species, scales="free")
###Commercial fish biomass (filter hashed out for all)
# Biomass files are wide with one column per year (columns 3:153); gather to
# long, keep 1981-2015 and the seven focal stocks, drop the ID column and tag
# the scenario.
# NOTE(review): Year is still a character column when filtered, so Year<2016
# compares lexicographically against "2016" -- fine for 4-digit years but
# worth converting first. Also `check.names = F` should be spelled FALSE.
read.csv("lpi_final/biomass_new/adjusted_species/fmsy2_biomass_adjusted2.csv", check.names = F)%>% gather("Year", "biomass", 3:153) %>%
  filter(Year<2016, Year>1980) %>% filter(Binomial %in% c('REO', 'GRH', 'HAD', 'SAI', 'RED', 'NCO', 'CAP'))%>%
  select(-ID) %>% mutate(scenario="fmsy2")-> fmsy2_biomass
read.csv("lpi_final/biomass_new/adjusted_species/fmsy1_biomass_adjusted2.csv", check.names = F)%>% gather("Year", "biomass", 3:153) %>%
  filter(Year<2016, Year>1980) %>% filter(Binomial %in% c('REO', 'GRH', 'HAD', 'SAI', 'RED', 'NCO', 'CAP'))%>%
  select(-ID)%>% mutate(scenario="fmsy1")-> fmsy1_biomass
read.csv("lpi_final/biomass_new/adjusted_species/fmsy0_biomass_adjusted2.csv", check.names = F)%>% gather("Year", "biomass", 3:153) %>%
  filter(Year<2016, Year>1980) %>% filter(Binomial %in% c('REO', 'GRH', 'HAD', 'SAI', 'RED', 'NCO', 'CAP'))%>%
  select(-ID)%>% mutate(scenario="fmsy0")-> fmsy0_biomass
# Stack the scenarios and rename the species-code column.
rbind(fmsy0_biomass, fmsy1_biomass, fmsy2_biomass) %>% rename(species=Binomial) ->fmsy_biomass
fmsy_biomass$Year<- as.integer(fmsy_biomass$Year)
# Round-trip through wide form purely to swap the species codes for their
# common names, then back to long for plotting.
fmsy_biomass %>% spread(species, biomass) %>% rename("Capelin"="CAP", "Greenland Halibut"="GRH", "Haddock"="HAD",
                                                     "North-East Atlantic Cod"="NCO", "Redfish" = "RED", "Golden Redfish"="REO",
                                                     "Saithe"="SAI") %>%
  gather(species, biomass, 3:9)->fmsy_biomass_new
# Biomass trajectories per species, coloured by scenario; enlarged text sizes
# for presentation output.
ggplot(fmsy_biomass_new, aes(Year, biomass)) + geom_line(aes(colour=scenario), lwd=2) +
  facet_wrap(~species, scales="free")+
  xlab("Year") +
  ylab("Biomass (tonnes)")+
  scale_color_brewer(palette="Dark2")+
  theme(axis.text=element_text(size=15))+theme(axis.title.x=element_text(size=25))+
  theme(axis.title.y=element_text(size=25))+ theme(legend.text=element_text(size=25))+
  theme(legend.title=element_text(size=25))+
  theme(strip.text.x = element_text(size = 15))
##Read MPA Scenarios
# All scenario reads below repeat the same recipe (select the seven focal
# stocks, pivot long, tag a scenario label, truncate to Year < cutoff), so the
# pipeline is factored into one helper instead of being copy-pasted per file.
read_scenario_catch <- function(path, scenario_label, year_max) {
  # Read one scenario's catch file and return long-format rows of
  # (Year, species, catch, scenario) restricted to Year < year_max.
  read.csv(path) %>%
    select(Year, REO, GRH, HAD, SAI, RED, NCO, CAP) %>%
    gather("species", "catch", 2:8) %>%
    mutate(scenario = scenario_label) %>%
    filter(Year < year_max)
}

mpa10_catch <- read_scenario_catch("catch/mpa10_catch.csv", "mpa10", 2101)
mpa25_catch <- read_scenario_catch("catch/mpa25_catch.csv", "mpa25", 2101)
mpa50_catch <- read_scenario_catch("catch/mpa50_catch.csv", "mpa50", 2101)
mpa_catch <- rbind(bc_catch, mpa10_catch, mpa25_catch, mpa50_catch)

# Catch by species within each MPA scenario, then by scenario within species.
ggplot(mpa_catch, aes(Year, catch)) + geom_line(aes(colour=species)) +
  facet_wrap(~scenario)
ggplot(mpa_catch, aes(Year, catch)) + geom_line(aes(colour=scenario)) +
  facet_wrap(~species, scales="free")

##Read Climate Change Scenarios
cc2_catch <- read_scenario_catch("catch/cc2_catch.csv", "cc2", 2101)
cc3_catch <- read_scenario_catch("catch/cc3_catch.csv", "cc3", 2101)
oa005_catch <- read_scenario_catch("catch/oa005_catch.csv", "oa005", 2101)
oa01_catch <- read_scenario_catch("catch/oa01_catch.csv", "oa01", 2101)
cc_catch <- rbind(bc_catch, cc2_catch, cc3_catch, oa005_catch, oa01_catch)

ggplot(cc_catch, aes(Year, catch)) + geom_line(aes(colour=species)) +
  facet_wrap(~scenario)
ggplot(cc_catch, aes(Year, catch)) + geom_line(aes(colour=scenario)) +
  facet_wrap(~species, scales="free")

# All scenarios combined for the overview plots.
# NOTE(review): bc_catch is labelled "fmsy1" and fmsy1_catch is also bound in,
# so the "fmsy1" facet/colour carries two overlaid series -- confirm intended.
all_catch <- rbind(bc_catch, fmsy05_catch, fmsy1_catch, fmsy2_catch, mpa10_catch,
                   mpa25_catch, mpa50_catch, cc2_catch, cc3_catch, oa005_catch, oa01_catch)

ggplot(all_catch, aes(Year, catch)) + geom_line(aes(colour=species)) +
  facet_wrap(~scenario)
ggplot(all_catch, aes(Year, catch)) + geom_line(aes(colour=scenario)) +
  facet_wrap(~species, scales="free")
|
ad693af0eeb74dcb4ae2bbcf0f35cd1d5f6598d6 | 6663bc0d1a8729761bc42f797db498b042b18599 | /inst/scripts/roc.R | f5a5a78a5f86ea0a022a3acd1ec6d22304f2bf35 | [] | no_license | niuneo/gbcode | 61cfb5c15f0e5aeb2958de58e65ce469f32d3898 | b69be8d3033c23e423a04f81aa2c85bff75f09f1 | refs/heads/master | 2020-05-24T11:18:23.401583 | 2019-03-19T10:02:01 | 2019-03-19T10:02:01 | 187,245,627 | 1 | 0 | null | 2019-05-17T16:03:27 | 2019-05-17T16:03:27 | null | UTF-8 | R | false | false | 498 | r | roc.R | ## "INFOF422 Statistical foundations of machine learning" course
## R package gbcode
## Author: G. Bontempi

# Monte Carlo ROC curve for a two-class problem: positive scores are drawn
# from N(mu.p, sd.p), negative scores from N(mu.n, sd.n), and the classifier
# predicts "positive" when the score exceeds a threshold.
mu.p <- 2
sd.p <- 1
mu.n <- -1
sd.n <- 1

TT <- seq(-10, 10, by = .05)   # thresholds to sweep
FPR <- numeric(length(TT))     # false positive rate per threshold
SE <- numeric(length(TT))      # sensitivity (true positive rate) per threshold
R <- 100000                    # Monte Carlo sample size per class

# BUG/PERF FIX: the samples were previously re-drawn INSIDE the threshold
# loop, which cost 2*R fresh draws per threshold and evaluated every threshold
# on different data, adding sampling noise to the curve. Draw once and sweep
# all thresholds over the same sample.
DNp <- rnorm(R, mu.p, sd.p)
DNn <- rnorm(R, mu.n, sd.n)

for (tt in seq_along(TT)) {
  thr <- TT[tt]
  FN <- sum(DNp < thr)   # positives predicted negative
  FP <- sum(DNn > thr)   # negatives predicted positive
  TN <- sum(DNn < thr)
  TP <- sum(DNp > thr)
  FPR[tt] <- FP / (FP + TN)
  SE[tt] <- TP / (TP + FN)
}

plot(FPR, SE)
|
47c2c67703463e1de21c240b422b0f1d5898c56f | d1ae91e3d5dcce5056b0e3d2ca76bfb50e05de2e | /auctioneda.R | 5200dba89c4f0b5580c25884e71ed6b50977ecca | [] | no_license | noahleavi11/dontgetkicked_kaggle | ec3af8509d17fb108a536c69da4a438e35d2e11b | ab4e02c264431a88b15e1b173df126487ee8e499 | refs/heads/main | 2023-01-22T08:14:19.910260 | 2020-12-08T05:16:16 | 2020-12-08T05:16:16 | 319,533,477 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,623 | r | auctioneda.R | library(tidyverse)
library(stringi)
library(mltools)
library(caret)
library(data.table)
# Kaggle "Don't Get Kicked" data: one row per auctioned vehicle; IsBadBuy is
# the training target.
cars <- read_csv("training.csv")
cars.test <- read_csv("test.csv")
#####################################################################################################
# Exploring data frame
#####################################################################################################
# Frequency tables for the candidate categorical predictors, most common
# level first; the trailing notes record which columns look usable.
cars %>% group_by(Make) %>% summarize(count=n()) %>% arrange(desc(count))
cars %>% group_by(Model) %>% summarize(count=n()) %>% arrange(desc(count))
cars %>% group_by(Size) %>% summarize(count=n()) %>% arrange(desc(count))
cars %>% group_by(Transmission) %>% summarize(count=n()) %>% arrange(desc(count))
cars %>% group_by(WheelTypeID) %>% summarize(count=n()) %>% arrange(desc(count))
cars %>% group_by(WheelType) %>% summarize(count=n()) %>% arrange(desc(count))
cars %>% group_by(PRIMEUNIT) %>% summarize(count=n()) %>% arrange(desc(count)) #drop; so many null values
cars %>% group_by(AUCGUART) %>% summarize(count=n()) %>% arrange(desc(count)) #also tons of null values
cars %>% group_by(BYRNO) %>% summarize(count=n()) %>% arrange(desc(count))
cars %>% group_by(IsOnlineSale) %>% summarize(count=n()) %>% arrange(desc(count)) # really skewed to 0
cars %>% group_by(Nationality) %>% summarize(count=n()) %>%
  arrange(desc(count)) #maybe combine make and nationality for the smaller brands of the cars
#know I want to keep:
# (vehYear, Purchase date, auction, make, model?, transmission, wheel type, Odometer
# size, byrno, vehbcost, warranty, cleanprice*2)
####################################################################################################
#extract year and month from purchase date
# NOTE(review): only the 4-digit year is actually extracted further down
# (stri_extract_last_regex "\\d{4}"); no month feature is built -- confirm the
# comment above still reflects the intent.
topmakes <- cars %>% group_by(Make) %>% summarize(count=n()) %>%
  arrange(desc(count)) %>% do(head(., n=10)) %>% pull(Make)
# Standalone copy of the column; the mutate below refers to the data column of
# the same name, so this vector appears unused -- kept as-is.
MMRAcquisitionAuctionAveragePrice <- cars$MMRAcquisitionAuctionAveragePrice
# Feature engineering on the training set:
#  * generalMake: top-10 makes kept, everything else collapsed to Nationality;
#  * MMR* columns: parsed to numeric; values with no digit at all become 0.
#    NOTE(review): strings that contain a digit but fail as.numeric() become
#    NA (with a warning), not 0 -- confirm that is acceptable;
#  * Transmission: normalised to MANUAL/AUTO, anything unexpected -> MANUAL.
cars <- cars %>%
  mutate(
    IsBadBuy = if_else(IsBadBuy == 0, "yes","no"),
    # NOTE(review): this maps IsBadBuy == 0 (NOT a bad buy) to "yes", which
    # reads inverted -- confirm which label the downstream model expects.
    generalMake = if_else(Make %in% topmakes, Make, Nationality), # decreases number of makes by half
    MMRAcqAucAvg = if_else(grepl("[[:digit:]]",
                                 MMRAcquisitionAuctionAveragePrice),
                           as.numeric(MMRAcquisitionAuctionAveragePrice), 0), # these lines clean up avg mmr prices
    MMRAcqRetAvg = if_else(grepl("[[:digit:]]",
                                 MMRAcquisitionRetailAveragePrice),
                           as.numeric(MMRAcquisitionRetailAveragePrice), 0),
    MMRCurrAucAvg = if_else(grepl("[[:digit:]]",
                                  MMRCurrentAuctionAveragePrice),
                            as.numeric(MMRCurrentAuctionAveragePrice), 0),
    MMRCurrRetAvg = if_else(grepl("[[:digit:]]",
                                  MMRCurrentRetailAveragePrice),
                            as.numeric(MMRCurrentRetailAveragePrice), 0),
    Transmission = if_else(Transmission %in% c("MANUAL","AUTO", "Manual"), toupper(Transmission), "MANUAL")
  )
# Transmission is still a character column at this point, so this prints NULL;
# it only becomes a factor in the mutate below.
levels(cars$Transmission)
# Final modelling frame: purchase year extracted, categoricals converted to
# factors, selected columns kept, then factors one-hot encoded.
cars.cleaned <- cars %>%
  mutate(
    PurchDate = as.numeric(stri_extract_last_regex(PurchDate, "\\d{4}")),
    IsBadBuy = as.factor(IsBadBuy),
    Auction = as.factor(Auction),
    Size = as.factor(Size),
    generalMake = as.factor(generalMake),
    Transmission = as.factor(Transmission)
  ) %>%
  select(RefId, IsBadBuy, VehYear, PurchDate, Auction, generalMake, Transmission, VehOdo,
         VehBCost, MMRAcqAucAvg, MMRAcqRetAvg, MMRCurrAucAvg, MMRCurrRetAvg,
         WarrantyCost, BYRNO) #left out size because needs to be cleaned
cars.cleaned <- one_hot(as.data.table(cars.cleaned))
# Apply the same feature engineering to the Kaggle test set. topmakes comes
# from the training data, so train and test share one generalMake grouping.
# NOTE(review): this mutate block duplicates the training-set one above;
# factoring them into a shared helper would keep the two from drifting apart.
cars.test <- cars.test %>%
  mutate(
    generalMake = if_else(Make %in% topmakes, Make, Nationality), # decreases number of makes by half
    MMRAcqAucAvg = if_else(grepl("[[:digit:]]",
                                 MMRAcquisitionAuctionAveragePrice),
                           as.numeric(MMRAcquisitionAuctionAveragePrice), 0), # these lines clean up avg mmr prices
    MMRAcqRetAvg = if_else(grepl("[[:digit:]]",
                                 MMRAcquisitionRetailAveragePrice),
                           as.numeric(MMRAcquisitionRetailAveragePrice), 0),
    MMRCurrAucAvg = if_else(grepl("[[:digit:]]",
                                  MMRCurrentAuctionAveragePrice),
                            as.numeric(MMRCurrentAuctionAveragePrice), 0),
    MMRCurrRetAvg = if_else(grepl("[[:digit:]]",
                                  MMRCurrentRetailAveragePrice),
                            as.numeric(MMRCurrentRetailAveragePrice), 0),
    Transmission = if_else(Transmission %in% c("MANUAL","AUTO", "Manual"), toupper(Transmission), "MANUAL")
  )
# Same column selection as the training frame, minus IsBadBuy (no labels in
# the test set).
cars.test.cleaned <- cars.test %>%
  mutate(
    PurchDate = as.numeric(stri_extract_last_regex(PurchDate, "\\d{4}")),
    Auction = as.factor(Auction),
    Size = as.factor(Size),
    generalMake = as.factor(generalMake),
    Transmission = as.factor(Transmission)
  ) %>%
  select(RefId, VehYear, PurchDate, Auction, generalMake, Transmission, VehOdo,
         VehBCost, MMRAcqAucAvg, MMRAcqRetAvg, MMRCurrAucAvg, MMRCurrRetAvg,
         WarrantyCost, BYRNO) #left out size because needs to be cleaned
# NOTE(review): one_hot() derives dummy columns from the factor levels present
# in each data set separately; if a level is missing from the test set,
# pytest.csv and pytrain.csv will have different columns -- confirm the
# downstream Python step handles this.
cars.test.cleaned <- one_hot(as.data.table(cars.test.cleaned))
# Export both cleaned frames for the Python modelling step.
write_csv(cars.test.cleaned, "pytest.csv")
write_csv(cars.cleaned, "pytrain.csv")
##################################################
############ Done Here ###########################
################################################## |
5259df1cf195072d4e8bbac4e0c2982fc11a24d5 | 2e9faf2f73476db35ee7a72359a8979b79a86605 | /man/permuteTrees.Rd | 380d961ba77c171e83e14a62e1ec856007fd320f | [] | no_license | Hackout3/saphy | 0450efe1014b5e1d850850709ce77adb6347e923 | d588c1788a29cd9f18441f2c9ffa095b2b099d4d | refs/heads/master | 2021-01-19T07:09:56.358549 | 2016-06-23T22:42:08 | 2016-06-23T22:42:08 | 61,653,942 | 4 | 6 | null | 2016-06-24T18:52:00 | 2016-06-21T17:36:19 | R | UTF-8 | R | false | true | 675 | rd | permuteTrees.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/permuteTrees.R
\name{permuteTrees}
\alias{permuteTrees}
\title{Produces a list of permuted trees with the same tip and internal node times for bootstrapping purposes}
\usage{
permuteTrees(tree, reps = 100)
}
\arguments{
\item{tree}{a phylogenetic tree (as a \code{phylo} object)}
\item{reps}{the number of permuted trees}
}
\value{
A list of permuted trees
}
\description{
\code{permuteTrees} generates \code{reps} permuted versions of the input tree,
each preserving the original tip and internal node times, for use in bootstrapping.
}
\author{
OJ Watson (\email{o.watson15@imperial.ac.uk})
}
|
3cbba51ba6578907aba3b816e4ce5c958f6b9057 | c838cd05540859304880878dec4577095be84744 | /universal-scripts/03_load-data.R | 44890a018d0d4f878305145797b9e53c1d434ec0 | [
"MIT"
] | permissive | sbw78/etc | c0ac1e5fa831cded567ee50e2c0075654b776fa7 | caa3436caed558c9be53ec27e31663fbe14ecc2f | refs/heads/master | 2023-03-21T05:48:22.273344 | 2021-03-11T18:34:28 | 2021-03-11T18:42:01 | 261,046,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 444 | r | 03_load-data.R | # ____________________________________________________________________________
# Load Data ####
### . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . ..
### 1. Install packages and standard custom functions ####
# Run the project setup scripts in order: package installation first, then the
# shared helper functions.
for (setup_script in c("command_files/01_install-packages.R",
                       "command_files/02_custom-functions.R")) {
  source(here::here(setup_script))
}
|
b5808fdbafae21f02756389be0025b9a965de25a | b81d6e1f27954ce188000b3e051d5b3f9de2fd24 | /TITANIC_2/first_preiction.R | c37743bae64fa31a3d89c978861fa9aa887c9cb8 | [] | no_license | tanviredu/R_project | 2ee5a14e83403528653eedb9806bd5c3fdf2b41d | 604a86102d75f0e6801c3c5090510c745097d965 | refs/heads/master | 2020-05-25T22:46:24.789005 | 2019-05-22T12:09:52 | 2019-05-22T12:09:52 | 188,021,200 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,001 | r | first_preiction.R | ##first set the directory
setwd('/home/vagrant/tensorflow_machine_learning/R_TITANIC_KAGGLE_COMP/')
# Load the Kaggle Titanic splits; keep strings as character so they can be
# cleaned before any factor conversion.
Titanic.train <-read.csv('data/train.csv',stringsAsFactors = FALSE,header=TRUE )
Titanic.test <- read.csv('data/test.csv',stringsAsFactors = FALSE,header = TRUE)
## adding the train flag to it
# Flag each row's origin so the combined frame can be split again after the
# shared cleaning steps.
Titanic.train$IsTrainset <-TRUE
Titanic.test$IsTrainset <-FALSE
View(Titanic.train)
View(Titanic.test)
# The test set has no outcome column; add it as NA so rbind() lines up.
Titanic.test$Survived <-NA
View(Titanic.test)
Titanic.full <-rbind(Titanic.train,Titanic.test)
View(Titanic.full)
table(Titanic.full$IsTrainset)
table(Titanic.full$Embarked)
which(Titanic.full$Embarked=='')
## 62 830 is not full
# Impute the two blank embarkation ports with 'S'.
# NOTE(review): rows 62 and 830 are hard-coded from the which() output above;
# this breaks silently if the input files ever change.
Titanic.full$Embarked[c(62,830)] <-'S'
table(is.na(Titanic.full$Age))
table(is.na(Titanic.full$Age))
age.median <- median(Titanic.full$Age,na.rm = TRUE)
## calculated the median not taking NA
age.median
# Median-impute the missing ages and fares across the combined data.
Titanic.full$Age[is.na(Titanic.full$Age)] <-age.median
Titanic.full$Fare[is.na(Titanic.full$Fare)]
fare.median <- median(Titanic.full$Fare,na.rm = TRUE)
Titanic.full$Fare[is.na(Titanic.full$Fare)] <-fare.median
# Split back into train/test and fit a random forest ---------------------------
# BUG FIXES relative to the original version:
#  * categorical columns are converted to factors BEFORE splitting -- the
#    original converted Titanic.full only after train/test had been copied
#    out, so the split frames never received the factor conversions;
#  * the formula was built from "survived.equation" while the string was
#    stored as "Survived.equation" (object-not-found), and the feature list
#    misspelled SibSp as "Sibsp";
#  * randomForest() was given a one-column data.frame as y instead of a
#    factor vector, and predict() ran on the raw Kaggle test file rather than
#    the cleaned split.
Titanic.full$Pclass <- as.factor(Titanic.full$Pclass)
Titanic.full$Sex <- as.factor(Titanic.full$Sex)
Titanic.full$Embarked <- as.factor(Titanic.full$Embarked)
Titanic.full$Survived <- as.factor(Titanic.full$Survived)

train <- Titanic.full[Titanic.full$IsTrainset == TRUE, ]
test <- Titanic.full[Titanic.full$IsTrainset == FALSE, ]

# Sanity checks: remaining missing values and the (all-NA) test outcome.
table(is.na(train))
table(is.na(test))
table(test$Survived)

library(randomForest)

# Model features, spelled as in the Kaggle data (SibSp, not Sibsp).
feature.cols <- c("Pclass", "Sex", "Age", "SibSp", "Fare", "Embarked")
survived.formula <- as.formula(paste("Survived ~", paste(feature.cols, collapse = " + ")))
survived.formula

train_x <- train[, feature.cols]
train_y <- train$Survived   # factor vector, as randomForest expects for classification
fit1 <- randomForest(x = train_x, y = train_y, importance = TRUE, ntree = 1000)
fit1

# Predict on the cleaned test split (same columns and factor levels as train).
test_x <- test[, feature.cols]
final_predict <- predict(fit1, test_x)
print(final_predict)
## thats wired the level is 2 and 4 its never done that before
|
1b49f2e86ca21246e9e626861157ee92c06f62cf | 889cb74fb30712b17516e34439e773eb880e18a2 | /plot3.r | 4e2994dd3aacfc34f3c64c7fc2426b54bc06c611 | [] | no_license | yetingxu/ExData_Plotting1 | 8ac677f75ef3e9e7a0aa4a60414dc10ac0dcce9f | 1bc63d14009d4e7e760f793c8ba570a4e574047c | refs/heads/master | 2020-04-27T10:01:15.086982 | 2019-03-09T00:53:21 | 2019-03-09T00:53:21 | 174,236,971 | 0 | 0 | null | 2019-03-06T23:30:55 | 2019-03-06T23:30:55 | null | UTF-8 | R | false | false | 759 | r | plot3.r | library(sqldf)
setwd("/Users/xuy089/Downloads")

# Plot 3: the three sub-metering series for 2007-02-01 and 2007-02-02, with a
# legend, copied to plot3.png (480x480).
# Read only the two target days straight from the raw file via an SQL filter.
power <- read.csv.sql(
  "household_power_consumption.txt",
  "select * from file where Date = '1/2/2007' or Date = '2/2/2007'",
  sep = ";"
)

# Build a full date-time column from the separate Date and Time fields.
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
power$date_time <- strptime(
  paste(power$Date, power$Time),
  format = "%Y-%m-%d %H:%M:%S",
  tz = ""
)

plot(power$date_time, power$Sub_metering_1,
     type = "l", col = "black", ylab = "Energy sub metering", xlab = "")
lines(power$date_time, power$Sub_metering_2, col = "red")
lines(power$date_time, power$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1)

# Copy the screen device to a PNG file, then close the PNG device.
dev.copy(png, file = "plot3.png", width = 480, height = 480, units = "px")
dev.off()
|
45d975ed89fec30ca29ed62d867dd1ad0f43866d | bd1f3cbc0ccdd145be1cc43119e728f180b85fd6 | /man/fit-distr.Rd | aa857c675299c6978e9a37e65390de2fb49dd5a9 | [] | no_license | fbreitwieser/isobar | 277118c929098690490d0e9f857dea90a42a0924 | 0779e37bf2760c3355c443749d4e7b8464fecb8a | refs/heads/master | 2020-12-29T02:20:51.661485 | 2016-05-17T17:20:00 | 2016-05-17T17:20:00 | 1,152,836 | 7 | 6 | null | 2017-03-17T15:27:24 | 2010-12-09T10:45:01 | R | UTF-8 | R | false | false | 1,283 | rd | fit-distr.Rd | \name{fit distributions}
\alias{fitCauchy}
\alias{fitTlsd}
\alias{fitNorm}
\alias{fitWeightedNorm}
\alias{fitNormalCauchyMixture}
\alias{fitGaussianMixture}
\title{Fit weighted and unweighted Cauchy and Normal distributions}
\description{
  Functions to fit probability density functions
  to a ratio distribution.
}
\usage{
fitCauchy(x)
fitNorm(x, portion = 0.75)
fitWeightedNorm(x, weights)
fitNormalCauchyMixture(x)
fitGaussianMixture(x, n = 500)
fitTlsd(x)
}
\arguments{
\item{x}{Ratios}
\item{weights}{Weights}
\item{portion}{Central portion of data to take for computation}
\item{n}{number of sampling steps}
}
\value{
\code{\link[distr]{Cauchy}},\code{\link[distr]{Norm}}
}
\author{
Florian P Breitwieser, Jacques Colinge.
}
\seealso{
\link{proteinRatios}
}
\examples{
library(distr)
data(ibspiked_set1)
data(noise.model.hcd)
# calculate protein ratios of Trypsin and CERU_HUMAN. Note: this is only
# for illustration purposes. For estimation of sample variability, data
# from all protein should be used
pr <- proteinRatios(ibspiked_set1,noise.model=noise.model.hcd,
cl=as.character(c(1,1,2,2)),combn.method="intraclass",protein=c("136429","P00450"))
# fit a Cauchy distribution
ratiodistr <- fitCauchy(pr$lratio)
plot(ratiodistr)
}
|
381f4d2cd0b2f129f92a3ef4fd6e9658b508343a | 2b51637e78bce532d8051b1bceb6ef133fabfcf8 | /L1 Normalization/Assignment2.R | ba981cc668b099eb29506cb4720be41d3c23d56b | [] | no_license | deepaksisodia/ML-Algorithms | c4c19f7b82a91252227073b1c2c4ab03265ea8dd | 5f12eb607891526b020ab2ca0e8616325dfa8330 | refs/heads/master | 2021-01-11T22:00:27.541134 | 2017-02-23T00:51:26 | 2017-02-23T00:51:26 | 78,896,415 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,347 | r | Assignment2.R | setwd("C:\\Users\\Deepak Kumar Sisodia\\Desktop\\ML Assignments\\Lasso Regression\\Assignment2")
# Build a design matrix from the requested feature columns of a data frame,
# prepending an intercept column of ones as the first column.
getfeaturematrix <- function(inp.data, feature.names) {
  feat <- as.matrix(inp.data[, feature.names])
  cbind(rep(1, nrow(feat)), feat)
}
# Scale every column of a data frame to unit Euclidean (L2) norm; returns a
# matrix with one unit-norm column per input column.
normalize_features <- function(inp.data.frame) {
  unit_scale <- function(column) column / sqrt(sum(column^2))
  sapply(inp.data.frame, unit_scale)
}
# Euclidean (L2) norm of every column of a data frame, returned as a named
# numeric vector.
norms <- function(inp.data.frame) {
  vapply(inp.data.frame, function(column) sqrt(sum(column^2)), numeric(1))
}
# Cyclical coordinate descent for lasso regression.
#
# norm.feature.matrix: design matrix whose columns have unit L2 norm, with
#   column 1 being the (unpenalized) intercept.
# output.matrix: column matrix of target values.
# new.weights: starting weight vector (one entry per column).
# L1penalty: lasso penalty lambda; each non-intercept weight is soft-
#   thresholded at L1penalty / 2.
# Tolerance: iteration stops once the largest single-sweep weight change
#   drops below this value.
# Returns the converged weight vector.
lasso_weights <- function(norm.feature.matrix, output.matrix, new.weights, L1penalty, Tolerance) {
  half_penalty <- L1penalty / 2
  repeat {
    prev_weights <- new.weights
    for (j in seq_along(new.weights)) {
      # Partial residual: what the target looks like with feature j's current
      # contribution added back in.
      fitted <- norm.feature.matrix %*% as.matrix(new.weights)
      partial_resid <- output.matrix - fitted + new.weights[j] * norm.feature.matrix[, j]
      rho <- t(norm.feature.matrix[, j]) %*% partial_resid
      if (j == 1) {
        # Intercept column is never penalized.
        new.weights[j] <- rho
      } else if (rho > half_penalty) {
        new.weights[j] <- rho - half_penalty
      } else if (rho < -half_penalty) {
        new.weights[j] <- rho + half_penalty
      } else {
        new.weights[j] <- 0
      }
    }
    if (max(abs(new.weights - prev_weights)) < Tolerance) {
      break
    }
  }
  new.weights
}
# Load the King County house-sales data and build a two-feature design matrix
# (intercept, sqft_living, bedrooms) with unit-norm columns.
sales=read.csv("kc_house_data.csv")
str(sales)
columns=c("sqft_living","bedrooms")
output.sales.matrix=as.matrix(sales$price)
tail(output.sales.matrix)
input.sales.feature.matrix=getfeaturematrix(sales,columns)
input.sales.feature.df=data.frame(input.sales.feature.matrix)
#Normalize the features
normalized.sales.feature.matrix=normalize_features(input.sales.feature.df)
head(normalized.sales.feature.matrix)
# Column norms are kept so fitted weights can later be mapped back to the
# original feature scale.
norm.sales.vector=norms(input.sales.feature.df)
###################################################################
# Set initial weights
# Compute rho_i = x_i' (y - yhat + w_i * x_i) for each feature at the fixed
# starting weights (1, 4, 1). predicted.values is deliberately NOT refreshed
# after each assignment, so every rho is measured at the same initial weights
# -- confirm this matches the intended exercise.
initial.weights=c(1,4,1)
loopcount=length(initial.weights)
initial.weights.matrix=as.matrix(initial.weights)
predicted.values=normalized.sales.feature.matrix %*% initial.weights.matrix
for (i in 1:loopcount){
  errors.matrix=output.sales.matrix - predicted.values + initial.weights[i]*normalized.sales.feature.matrix[,i]
  rho=t(normalized.sales.feature.matrix[,i]) %*% errors.matrix
  initial.weights[i]=rho
}
# Given the soft-threshold at L1penalty/2 in lasso_weights(), 2*rho is the
# smallest penalty that would zero out the corresponding feature.
2*initial.weights[2]
2*initial.weights[3]
##########################################################
# Fit the lasso from zero weights with an L1 penalty of 1e7 and report the
# residual sum of squares of the fitted model.
initial.weights=c(0,0,0)
L1penalty = 1e7
Tolerance = 1.0
weights= lasso_weights(normalized.sales.feature.matrix,output.sales.matrix,initial.weights,L1penalty,Tolerance)
cat("final weights : ",weights,"\n")
weights.matrix=as.matrix(weights)
Residuals= output.sales.matrix - (normalized.sales.feature.matrix %*% weights.matrix)
RSS=sum(Residuals^2)
print(paste("RSS : ",RSS))
#####################################################################
# Repeat on the full train/test split with 13 features, at three penalties.
train=read.csv("kc_house_train_data.csv")
str(train)
columns=c("bedrooms","bathrooms","sqft_living","sqft_lot","floors","waterfront","view","condition","grade",
          "sqft_above","sqft_basement","yr_built","yr_renovated")
str(train)
output.train.matrix=as.matrix(train$price)
tail(output.train.matrix)
input.feature.matrix=getfeaturematrix(train,columns)
input.feature.df=data.frame(input.feature.matrix)
#Normalize the features
normalized.feature.matrix=normalize_features(input.feature.df)
head(normalized.feature.matrix)
norm.vector=norms(input.feature.df)
# 14 weights: intercept + 13 features.
initial.weights=rep(0,14)
Tolerance = 1.0
L1penalty = 1e7
weights1e7 = lasso_weights(normalized.feature.matrix,output.train.matrix,initial.weights,L1penalty,Tolerance)
cat("final weights : ",weights1e7,"\n")
L1penalty = 1e8
weights1e8 = lasso_weights(normalized.feature.matrix,output.train.matrix,initial.weights,L1penalty,Tolerance)
cat("final weights : ",weights1e8,"\n")
# NOTE(review): Tolerance is loosened to 5e5 for the small 1e4 penalty --
# presumably to keep the run time reasonable; confirm.
L1penalty = 1e4
Tolerance = 5e5
weights1e4 = lasso_weights(normalized.feature.matrix,output.train.matrix,initial.weights,L1penalty,Tolerance)
cat("final weights : ",weights1e4,"\n")
# Dividing by the column norms converts weights fitted on the normalized
# matrix into weights applicable to raw (unnormalized) feature matrices.
weights1e7_normalized = weights1e7 / norm.vector
weights1e7_normalized_matrix=as.matrix(weights1e7_normalized)
weights1e8_normalized = weights1e8 / norm.vector
weights1e8_normalized_matrix=as.matrix(weights1e8_normalized)
weights1e4_normalized = weights1e4 / norm.vector
weights1e4_normalized_matrix=as.matrix(weights1e4_normalized)
# Evaluate each model on the held-out test set: predictions are the raw test
# design matrix times the rescaled weights; then residual sum of squares.
test=read.csv("kc_house_test_data.csv")
output.test.matrix=as.matrix(test$price)
test.feature.matrix=getfeaturematrix(test,columns)
head(test.feature.matrix)
Test.predict.weights1e7=test.feature.matrix %*% weights1e7_normalized_matrix
Test.RSS.weights1e7 = sum((output.test.matrix - Test.predict.weights1e7)^2)
Test.predict.weights1e8=test.feature.matrix %*% weights1e8_normalized_matrix
Test.RSS.weights1e8 = sum((output.test.matrix - Test.predict.weights1e8)^2)
Test.predict.weights1e4=test.feature.matrix %*% weights1e4_normalized_matrix
Test.RSS.weights1e4 = sum((output.test.matrix - Test.predict.weights1e4)^2)
|
703bb41e02563b4cf369288d6785a9df816eb7e1 | cfec6dd88f857a84a474e275240c0888fab6d266 | /R/SourceDLM.r | ab88e4e6d5c2803fddf0eb78f4e61b78ab95662f | [] | no_license | jongguri80/DLM | e50e0a8f9f561eaada8f202d6199afaec250ed59 | a1cf6095393df813285dff98517c27cda878995e | refs/heads/master | 2020-06-25T02:20:36.531284 | 2017-07-12T01:51:30 | 2017-07-12T01:51:30 | 96,951,797 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,823 | r | SourceDLM.r | # This code demonstrates how to fit the DLMs in the paper
# "Distributed Lag Models: Examining Associations between the Built environment and Health"
# by Baek et al.
# The code below is in two stages:
# (1) R functions used to fit the DLM which follows closely the notations used in the eAppendix for the paper.
# (2) an example of how to use these functions to fit and plot outputs from the DLM
############################ Part 1: R Functions to fit DLM ########################
####################################################################################
# Build the transformed natural-cubic radial basis used throughout the DLM
# fits (see the estimation section of the paper's eAppendix).
#
# Input:
#   Lag : numeric vector of distances r_1, r_2, ..., r_L defining the rings.
# Output: a list with
#   Lag         : the input lag grid, unchanged
#   C0          : L x 2 unpenalized (intercept + linear) part of the basis
#   K1          : L x (L-2) penalized smooth part
#   Omg         : cbind(C0, K1), the full basis evaluated at the lags
#   M1          : orthogonal complement of C0 used in the transformation
#   inv_M2_sqrt : inverse symmetric square root of t(M1) %*% C1 %*% M1
# NOTE(review): diag(1/sqrt(d)) assumes length(d) >= 2, i.e. L >= 4; with
# L == 3 diag() would receive a scalar and build the wrong matrix -- confirm
# callers always pass at least 4 lags.
LagFunc = function(Lag){
  L <- length(Lag)
  # Center the lags at their mean for numerical stability.
  centered <- Lag - mean(Lag)
  # Unpenalized polynomial part: intercept and (centered) linear term.
  C0 <- cbind(rep(1, L), centered)
  # Cubic radial part: |r_i - r_j|^3 for every pair of lags.
  C1 <- abs(outer(centered, centered, "-"))^3
  # M1 spans the orthogonal complement of C0 within [C0 | C1].
  basis_q <- qr.Q(qr(cbind(C0, C1)))
  M1 <- basis_q[, -(1:2)]
  M2 <- t(M1) %*% C1 %*% M1
  # Inverse symmetric square root of M2 via a single SVD.
  M2_svd <- svd(M2)
  inv_M2_sqrt <- M2_svd$v %*% diag(1/sqrt(M2_svd$d)) %*% t(M2_svd$u)
  K1 <- C1 %*% M1 %*% inv_M2_sqrt
  list(Lag = Lag, C0 = C0, K1 = K1, Omg = cbind(C0, K1),
       M1 = M1, inv_M2_sqrt = inv_M2_sqrt)
}
dlm = function(Y, X, Z, Lag){
# This function fits the DLM using the frequentist framework.
# Requires the nlme package to be loaded: lme() and pdIdent() come from nlme,
# and LagFunc() must be defined (earlier in this file).
#
# The inputs for this function are
# Y: the outcome, a vector of length n
# X: the matrix of size n x L of DL covariates (i.e., the covariate value within each ring-shaped area)
# Z: other covariates including intercept with size n x p
# Lag: a vector or distances r_1, r_2,..., r_L used to construct the DL covariates
#
# The outputs from this function are:
# thetas: parameter estimates for coefficients of Z, X*, and Z* in eAppendix
# cov_thetas: covariance matrix of estimates for coefficients of Z, X*, and Z* in eAppendix
# Table: summary table driven by "lme" function object.
# Lag: same as the input vector of Lag.
# betas_Z: parameter estimates for coefficients of other covariate Z
# cov_betas_Z: covariance matrix of betas_Z
# summary_betas_Z: summary table for other covariate Z (generated by "lme" function object).
# DL_coeff: estimated DL coefficients
# cov_DL_coeff: covariance matrix of DL_coeff
# summary_DL_coeff: estimated DL coefficients with their 95% confidence interval
L = length(Lag)
Const = LagFunc(Lag)
Y = as.matrix(Y); Z = as.matrix(Z); X = as.matrix(X);
# Project the ring counts onto the basis: X* is the unpenalized part,
# Z* the penalized smooth part (treated as random effects below).
X_star = X %*% Const$C0; Z_star = X %*% Const$K1;
D_star = cbind(Z, X_star, Z_star);
n_z = ncol(Z);n_fix = ncol(X_star) + n_z; n_random = ncol(Z_star); n = length(Y)
# Single pseudo-subject: all n observations share one grouping level, so the
# pdIdent "random effects" act as one global ridge/smoothing penalty on Z*.
sub = rep(1, n)
fit = lme(Y ~ -1 + Z + X_star, random=list(sub=pdIdent(~Z_star-1)))
b1 = unlist(fit$coef$random)
thetas = c(fit$coef$fixed, b1)
# Recover sigma_b^2 from the fitted pdIdent structure; modelStruct stores
# the log of sd(b)/sigma (relies on nlme internals -- verify on upgrade).
sigma_b_sq = (fit$sigma*exp(unlist(fit$modelStruct)))^2
tau_sq = fit$sigma^2
# G penalizes only the n_random smooth coefficients (zero for fixed part).
G = diag(c(rep(0, n_fix), rep(1/sigma_b_sq, n_random)))
# Joint covariance of (fixed, random) coefficients from the penalized
# (ridge-form) normal equations.
cov_thetas = solve(t(D_star) %*% D_star/tau_sq + G)
#### estimated coefficients of other covariates (confounders) and their cov ####
betas_Z = thetas[1:n_z]
cov_betas_Z = cov_thetas[1:n_z, 1:n_z]
summary_betas_Z = summary(fit)$tTable[1:n_z,]
#### estimated DL coefficients and their cov ####
# Map basis coefficients back to one DL coefficient per lag via Omg.
cov_DL_coeff = Const$Omg %*% cov_thetas[-(1:n_z),-(1:n_z)] %*% t(Const$Omg)
DL_coeff = Const$Omg %*% thetas[-(1:n_z)]
# Pointwise normal-approximation 95% confidence bands.
CI_DL_coeff = cbind(DL_coeff -1.96*sqrt(diag(cov_DL_coeff)), DL_coeff + 1.96*sqrt(diag(cov_DL_coeff)))
summary_DL_coeff = cbind(DL_coeff, CI_DL_coeff)
colnames(summary_DL_coeff) = c("est", "2.5%", "97.5%")
rownames(DL_coeff) = rownames(cov_DL_coeff) = colnames(cov_DL_coeff) = rownames(summary_DL_coeff) = Const$Lag
return(list(thetas = thetas, cov_thetas = cov_thetas, Table = summary(fit), Lag = Const$Lag,
            betas_Z = betas_Z, cov_betas_Z = cov_betas_Z, summary_betas_Z = summary_betas_Z,
            DL_coeff = DL_coeff, cov_DL_coeff = cov_DL_coeff, summary_DL_coeff = summary_DL_coeff))
}
Bayes_dlm = function(Y, X, Z, Lag, n_sim = 5000, n_save = 1000, burn_in = 2000, per_print=1000,
                     prior_list = list(a_tau = .1, b_tau = 1e-6, a_sigma_b = .1, b_sigma_b = 1e-6),
                     int_list = list(thetas = rep(0, length(Lag)+ncol(Z)), sigma_b_sq = .1)){
#
# This function fits the DLM using the Bayesian framework
# (a Gibbs sampler with conjugate normal / inverse-gamma full conditionals).
# Requires LagFunc() from earlier in this file.
#
# The inputs for this function are
# Y: the outcome, a vector of length n
# X: the matrix of size n x L of DL covariates (i.e., the covariate value within each ring-shaped area)
# Z: other covariates including intercept with size n x p
# Lag: a vector or distances r_1, r_2,..., r_L used to construct the DL covariates
# n_sim: number of iteration to sample posterior samples
# n_save: number of saved posterior samples after thinning
# burn_in: number of posterior samples in the burn-in period
# per_print: print the current number of simulation per input number.
# sigma_b_sq: variance of random effects for smoothing spline.
# prior_list: hyper-prior value for tau^2 (variance of residuals), and sigma_b^2 (variance of random effects for smoothing spline).
# int_list: initial values for thetas and sigma_b_sq
#
# The outputs from this function are:
# post_thetas: posterior samples for coefficients of Z, X*, and Z* in eAppendix
# summary_thetas: posterior summaries of post_thetas (median, 95% credible interval)
# post_betas_Z: posterior samples for coefficients of Z
# summary_betas_Z: posterior summaries of post_betas_Z (median, 95% credible interval)
# post_tau_sq: posterior samples for variance of residual errors
# post_sigma_b_sq: posterior samples for variance of random effects for smoothing spline.
# post_DL_coeff: posterior samples for DL coefficients.
# summary_DL_coeff: posterior summaries of post_DL_coeff (median, 95% credible interval)
# DIC: Deviance information criterion
# Lag: same as the input vector of Lag.
#
L = length(Lag)
Const = LagFunc(Lag)
Y = as.matrix(Y); Z = as.matrix(Z); X = as.matrix(X);
# Same design construction as the frequentist dlm(): unpenalized X* plus
# penalized smooth part Z*.
X_star = X %*% Const$C0; Z_star = X %*% Const$K1;
D_star = cbind(Z, X_star, Z_star);
n_z = ncol(Z); n_fix = ncol(X_star) + n_z; n_random = ncol(Z_star);
n_thetas = n_fix + n_random;n = length(Y);
idx_ran = (n_fix+1):n_thetas
# NOTE(review): if n_sim - burn_in < n_save then thinning == 0 and
# s %% thinning is NaN below, which errors; the defaults avoid this, but
# caller-supplied values are not checked.
thinning = floor((n_sim - burn_in)/n_save)
save = kk = 1
post_tau_sq = post_sigma_b_sq = post_lb = rep(0, n_save);
post_thetas = matrix(0, nrow = n_save, ncol = n_thetas)
thetas = int_list$thetas; sigma_b_sq = int_list$sigma_b_sq
# G selects the penalized coefficients (identity block on the random part).
G = diag(c(rep(0, n_fix), rep(1, n_random)))
# Precompute the cross-products reused at every Gibbs iteration.
XX = t(D_star) %*% D_star; XY = t(D_star) %*% Y
for(s in 1:n_sim){
res = Y - D_star %*% thetas
## Step 1: draw tau^2 | thetas from its inverse-gamma full conditional.
tau_sq = 1/rgamma(1, prior_list$a_tau + n/2, t(res) %*% res/2 + prior_list$b_tau)
## Step 2: draw all coefficients jointly from their multivariate-normal
## full conditional (Cholesky of the posterior covariance).
Sigma = solve(XX/tau_sq + G/sigma_b_sq)
mu = Sigma%*%(XY/tau_sq)
thetas = t(chol(Sigma)) %*% rnorm(n_thetas) + mu
## Step 3: draw sigma_b^2 | thetas; only the n_random penalized
## coefficients enter through t(thetas) %*% G %*% thetas.
sigma_b_sq = 1/rgamma(1, prior_list$a_sigma_b + (L-2)/2, prior_list$b_sigma_b + t(thetas)%*%G%*%thetas/2)
if(kk == s/per_print){
print(paste("iteration #:", s))
kk = kk+1
}
# Store every `thinning`-th draw after burn-in, up to n_save draws.
if(s > burn_in && s%%thinning == 0 && save <= n_save){
res = Y - D_star %*% thetas
post_thetas[save,] = thetas; post_tau_sq[save] = tau_sq;
post_sigma_b_sq[save] = sigma_b_sq;
post_lb[save] = n*log(tau_sq) + t(res) %*% res/tau_sq + n*log(2*pi) ## -2*log_likelihood
save = save + 1
}
}
#### DIC ####
# DIC = 2*Dbar - Dhat, with deviances computed from the saved -2*loglik.
thetas_bar = colMeans(post_thetas)
tau_sq_bar = mean(post_tau_sq)
res = Y - D_star %*% thetas_bar
D_bar = mean(post_lb)
D_hat = n*log(tau_sq_bar) + t(res) %*% res/tau_sq_bar + n*log(2*pi)
DIC = 2*D_bar - D_hat
#### summary thetas ####
# Posterior median and central 95% credible interval for every coefficient.
summary_thetas = matrix(nrow = n_thetas, ncol = 3)
for(i in 1:n_thetas) summary_thetas[i,] = quantile(post_thetas[,i], prob = c(0.5, 0.025, 0.975))
rownames(summary_thetas) = c(paste("Z", 1:n_z, sep=""), "X_star1", "X_star2", paste("b", 1:n_random, sep=""))
colnames(summary_thetas) = c("50%", "2.5%", "97.5%")
#### posterior samples and summary for coefficients of other covariates (confounders) ####
post_betas_Z = post_thetas[, 1:n_z]
summary_betas_Z = summary_thetas[1:n_z,]
#### Transforming for posterior DL coefficients ####
# Map each saved draw of the basis coefficients to one DL coefficient
# per lag via Omg.
post_DL_coeff = matrix(nrow = n_save, ncol = L)
for(s in 1:n_save) post_DL_coeff[s,] = Const$Omg %*% post_thetas[s,-(1:ncol(Z))]
summary_DL_coeff = matrix(nrow = L, ncol = 3)
for(i in 1:L) summary_DL_coeff[i,] = quantile(post_DL_coeff[,i], prob = c(0.5, 0.025, 0.975))
rownames(summary_DL_coeff) = c(Const$Lag)
colnames(summary_DL_coeff) = c("50%", "2.5%", "97.5%")
return(list(post_thetas = post_thetas, summary_thetas=summary_thetas,
            post_betas_Z = post_betas_Z, summary_betas_Z = summary_betas_Z,
            post_tau_sq = post_tau_sq, post_sigma_b_sq = post_sigma_b_sq,
            post_DL_coeff = post_DL_coeff, summary_DL_coeff = summary_DL_coeff,
            DIC = DIC, Lag = Const$Lag))
}
# Plot the estimated DL coefficients with their 95% interval against distance.
#
# Args:
#   dlm_object: a fitted object carrying $Lag and $summary_DL_coeff
#               (columns: estimate, lower, upper), as returned by dlm().
#   ...: further graphical parameters forwarded to plot().
# Side effect: draws on the current graphics device.
# Fix: the original computed a local `ylim` that was never passed to plot()
# (dead code); the empty plot already spans all three series, so the
# unused assignment is removed.
plot.dlm = function(dlm_object, ...){
  est <- dlm_object$summary_DL_coeff[, 1]
  lower <- dlm_object$summary_DL_coeff[, 2]
  upper <- dlm_object$summary_DL_coeff[, 3]
  ## Empty frame spanning estimate and both interval bounds, then the lines.
  plot(rep(dlm_object$Lag, 3), c(est, lower, upper),
       ylab = "DL coefficients", type = "n", lwd = 2, ...)
  lines(dlm_object$Lag, est, lwd = 2)
  lines(dlm_object$Lag, lower, lty = 2, col = "grey", lwd = 2)
  lines(dlm_object$Lag, upper, lty = 2, col = "grey", lwd = 2)
  ## Reference line at zero effect.
  abline(h = 0, lty = 2, col = 2)
}
#### Estimate DL coefficients at a certain distance ####
# Posterior draws of the DL coefficient evaluated at one distance.
#
# Args:
#   Bayes_dlm_object: a fitted object from Bayes_dlm().
#   dist: a scalar distance at which to evaluate the DL function.
# Returns a list with
#   post_DL_coeff_pred: posterior draws (n_save x 1 matrix) at `dist`
#   summary_DL_coeff_pred: their median and 95% credible interval
Bayes_DL_coeff_pred = function(Bayes_dlm_object, dist){
  Lag <- Bayes_dlm_object$Lag
  L <- length(Lag)
  Const <- LagFunc(Lag)
  ## Rebuild one basis row at `dist`, centered exactly as inside LagFunc().
  Lag_ct <- Lag - mean(Lag)
  dist_ct <- dist - mean(Lag)
  C0_pred <- cbind(1, dist_ct)
  C1_pred <- matrix(abs(Lag_ct - dist_ct)^3, nrow = 1)
  Omg_pred <- cbind(C0_pred, C1_pred %*% Const$M1 %*% Const$inv_M2_sqrt)
  ## Apply that row to every saved posterior draw of the lag-basis block.
  n_z <- ncol(Bayes_dlm_object$post_betas_Z)
  idx_lag <- (n_z + 1):(n_z + L)
  post_DL_coeff_pred <- Bayes_dlm_object$post_thetas[, idx_lag, drop = FALSE] %*% t(Omg_pred)
  summary_DL_coeff_pred <- quantile(post_DL_coeff_pred, prob = c(0.5, 0.025, 0.975))
  list(post_DL_coeff_pred = post_DL_coeff_pred,
       summary_DL_coeff_pred = summary_DL_coeff_pred)
}
#### Estimate Average effects up to a certain distance ####
Bayes_avg_eff = function(Bayes_dlm_object, dist){
# This function estimates an average buffer effect up to a specified distance
# by averaging DL coefficients weighted by the area of each ring.
# Depends on LagFunc() and Bayes_DL_coeff_pred() defined in this file.
#
# The inputs for this function are
# Bayes_dlm_object: an object from fitted "Bayes_dlm" function.
# dist: a scalar of a certain distance
#
# The outputs from this function are:
# post_avg_eff: posterior sample for an average buffer effect up to a specified distance
# summary_avg_eff: posterior summaries of post_avg_eff (median, 95% credible interval)
#
Lag = Bayes_dlm_object$Lag
# NOTE(review): Const and L below are computed but never used in this
# function (Bayes_DL_coeff_pred rebuilds the basis itself).
Const = LagFunc(Lag)
L = length(Lag)
n_post = nrow(Bayes_dlm_object$post_thetas)
if(min(Lag)<dist){
# idx_p: index of the last ring boundary strictly inside `dist`.
idx_p = tail(which(Lag < dist), n=1)
# Ring-area weights: area of each annulus (and the final partial ring up
# to `dist`) divided by the area of the full disc of radius `dist`.
# The pi factors cancel but are kept for readability.
wts = pi*(c(Lag[1:idx_p], dist)^2 - c(0, Lag[1:idx_p])^2)/(pi*dist^2)
# Posterior draws of the DL coefficient at the outer boundary `dist`.
tmp_beta = Bayes_DL_coeff_pred(Bayes_dlm_object,dist)$post_DL_coeff_pred
# Weighted average across rings, computed draw by draw.
avg_eff = rowSums(cbind(Bayes_dlm_object$post_DL_coeff[,1:idx_p], tmp_beta)*
                    matrix(wts, nrow = n_post, ncol=idx_p+1, byrow = T))
}
# If `dist` lies inside the first ring, the average equals the DL
# coefficient evaluated at `dist` itself.
if(min(Lag)>=dist) avg_eff = c(Bayes_DL_coeff_pred(Bayes_dlm_object,dist)$post_DL_coeff_pred)
summary_avg_eff = quantile(avg_eff, prob = c(0.5, 0.025, 0.975))
return(list(post_avg_eff = avg_eff, summary_avg_eff = summary_avg_eff))
}
#### Estimate DL coefficients at a certain distance ####
# Point estimate of the DL coefficient at one distance (frequentist fit).
#
# Args:
#   dlm_object: a fitted object from dlm().
#   dist: a scalar distance at which to evaluate the DL function.
# Returns a list with
#   DL_coeff_pred: the point estimate at `dist`
#   var_DL_coeff_pred: its variance
#   summary_DL_coeff_pred: named vector (est, 2.5%, 97.5%) using a
#                          normal-approximation 95% confidence interval
DL_coeff_pred = function(dlm_object, dist){
  Lag <- dlm_object$Lag
  L <- length(Lag)
  Const <- LagFunc(Lag)
  ## One basis row at `dist`, centered exactly as inside LagFunc().
  Lag_ct <- Lag - mean(Lag)
  dist_ct <- dist - mean(Lag)
  C0_pred <- cbind(1, dist_ct)
  C1_pred <- matrix(abs(Lag_ct - dist_ct)^3, nrow = 1)
  Omg_pred <- cbind(C0_pred, C1_pred %*% Const$M1 %*% Const$inv_M2_sqrt)
  ## Skip the confounder block; keep the lag-basis block of the fit.
  n_z <- length(dlm_object$betas_Z)
  idx_lag <- (n_z + 1):(n_z + L)
  est <- Omg_pred %*% dlm_object$thetas[idx_lag]
  est_var <- Omg_pred %*% dlm_object$cov_thetas[-(1:n_z), -(1:n_z)] %*% t(Omg_pred)
  half_width <- 1.96 * sqrt(est_var)
  summary_DL_coeff_pred <- c(est, est - half_width, est + half_width)
  names(summary_DL_coeff_pred) <- c("est", "2.5%", "97.5%")
  list(DL_coeff_pred = est, var_DL_coeff_pred = est_var,
       summary_DL_coeff_pred = summary_DL_coeff_pred)
}
#### Estimate Average effects up to a certain distance ####
Avg_eff = function(dlm_object, dist){
# This function estimates an average buffer effect up to a specified distance,
# weighting each ring's DL coefficient by the ring's share of the disc area.
# Depends on LagFunc() and DL_coeff_pred() defined in this file.
#
# The inputs for this function are
# dlm_object: an object from fitted "dlm" function.
# dist: a scalar of a certain distance.
#
# The outputs from this function are:
# avg_eff: point estimate for an average buffer effect up to a specified distance.
# var_avg_eff: variance of estimate for an average buffer effect up to a specified distance.
# summary_avg_eff: estimated average effect with its 95% confidence interval
#
Lag = dlm_object$Lag
L = length(Lag);
Const = LagFunc(Lag)
# Basis row evaluated at `dist`, using the same centering as LagFunc().
Lag_ct = Lag - mean(Lag)
Lag_ct_pred = dist - mean(Lag)
C0_pred = cbind(1, Lag_ct_pred)
C1_pred = matrix(nrow = 1, ncol = L)
n_z = length(dlm_object$betas_Z)
idx_lag = (n_z+1):(n_z+L)
for(j in 1:L) C1_pred[1,j] = abs(Lag_ct[j] - Lag_ct_pred)^3
Omg_pred = cbind(C0_pred, C1_pred %*% Const$M1 %*% Const$inv_M2_sqrt)
if(min(Lag)<dist){
# idx_p: index of the last ring boundary strictly inside `dist`.
idx_p = tail(which(Lag < dist), n=1)
# Stack the basis rows of the interior rings plus the row at `dist`,
# so the covariance of the weighted average can be formed in one step.
Omg = rbind(Const$Omg[1:idx_p,], Omg_pred)
DL_coeff = c(dlm_object$DL_coeff[1:idx_p], DL_coeff_pred(dlm_object,dist)$DL_coeff_pred)
cov_DL_coeff = Omg %*% dlm_object$cov_thetas[-(1:n_z),-(1:n_z)] %*% t(Omg)
# Ring-area weights: annulus areas (final partial ring up to `dist`)
# over the full disc area; the pi factors cancel.
wts = pi*(c(Lag[1:idx_p], dist)^2 - c(0, Lag[1:idx_p])^2)/(pi*dist^2)
avg_eff = wts %*% DL_coeff
# Variance of the weighted average: w' Cov w.
var_avg_eff = wts %*% cov_DL_coeff %*% wts
CI_avg_eff = c(avg_eff -1.96*sqrt(var_avg_eff), avg_eff + 1.96*sqrt(var_avg_eff))
summary_avg_eff = c(avg_eff, CI_avg_eff)
}
# If `dist` lies inside the first ring the average reduces to the DL
# coefficient evaluated at `dist` itself.
if(min(Lag)>=dist){
avg_eff = DL_coeff_pred(dlm_object,dist)$DL_coeff_pred
var_avg_eff = DL_coeff_pred(dlm_object,dist)$var_DL_coeff_pred
summary_avg_eff = DL_coeff_pred(dlm_object,dist)$summary_DL_coeff_pred
}
names(summary_avg_eff) = c("est", "2.5%", "97.5%")
return(list(avg_eff = avg_eff, var_avg_eff = var_avg_eff, summary_avg_eff = summary_avg_eff))
}
# Build the matrix of distributed-lag covariates: for each outcome location,
# count the points of interest falling inside each ring defined by Lag.
# Requires sp::spDistsN1 (the sp package must be loaded).
#
# Args:
#   pts_outcome: n x 2 matrix of outcome locations; (x, y) or (longitude, latitude).
#   pts_X: matrix of environmental-feature locations (e.g., fast food
#          restaurants, convenience stores) in the same coordinates.
#   Lag: increasing vector of ring radii r_1 < r_2 < ... < r_L.
#   unit: "ED" = Euclidean distance; "km" or "mile" treat the coordinates
#         as longitude/latitude (great-circle distance via longlat = TRUE).
# Returns: an n x L matrix X whose (i, j) entry counts the points of pts_X
#   with distance from outcome i in (Lag[j-1], Lag[j]]; column 1 counts
#   distances <= Lag[1].
#
# Fixes vs. the original: (1) `for(j in 2:L)` iterated j = 2, 1 when
# L == 1 and crashed with an out-of-bounds subscript -- the ring loop is
# now guarded via seq_len(L - 1); (2) an unrecognized `unit` used to leave
# `dist` undefined and fail with an opaque error -- it is now rejected
# up front with a clear message.
DL_X = function(pts_outcome, pts_X, Lag, unit="ED"){
  if (!unit %in% c("ED", "km", "mile"))
    stop("unit must be one of \"ED\", \"km\", or \"mile\"")
  pts_outcome <- as.matrix(pts_outcome)
  pts_X <- as.matrix(pts_X)
  n <- nrow(pts_outcome)
  L <- length(Lag)
  km_per_mile <- 1.609344
  X <- matrix(nrow = n, ncol = L)
  for (i in seq_len(n)) {
    ## Distance from outcome i to every point of interest.
    dist <- switch(unit,
                   ED = spDistsN1(pts_X, pts_outcome[i, ]),
                   km = spDistsN1(pts_X, pts_outcome[i, ], longlat = TRUE),
                   mile = spDistsN1(pts_X, pts_outcome[i, ], longlat = TRUE) / km_per_mile)
    ## Innermost disc, then each successive ring-shaped area.
    X[i, 1] <- sum(dist <= Lag[1])
    for (j in seq_len(L - 1)) X[i, j + 1] <- sum(dist > Lag[j] & dist <= Lag[j + 1])
  }
  X
}
|
dcff4e55be20028575264c0a49f887be465d2036 | 53f6608a8f31d2aa39fae0e899b144c98818ff54 | /man/FFDplot.Rd | aa08799e3004870cd1061f7d2976232f3c227044 | [] | no_license | ncss-tech/sharpshootR | 4b585bb1b1313d24b0c6428182a5b91095355a6c | 1e062e3a4cdf1ea0b37827f9b16279ddd06d4d4a | refs/heads/master | 2023-08-20T06:16:35.757711 | 2023-08-08T19:11:52 | 2023-08-08T19:11:52 | 54,595,545 | 18 | 6 | null | 2023-02-24T21:00:28 | 2016-03-23T21:52:31 | R | UTF-8 | R | false | true | 577 | rd | FFDplot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FFD.R
\name{FFDplot}
\alias{FFDplot}
\title{Plot output from FFD()}
\usage{
FFDplot(s, sub.title = NULL)
}
\arguments{
\item{s}{output from \code{\link{FFD}}, with \code{returnDailyPr = TRUE}}
\item{sub.title}{figure subtitle}
}
\value{
nothing, function is called to generate graphical output
}
\description{
Plot output from FFD()
}
\examples{
# 11 years of data from highland meadows
data('HHM', package = 'sharpshootR')
x.ffd <- FFD(HHM, returnDailyPr = TRUE, frostTemp=32)
FFDplot(x.ffd)
}
|
8a4b93313c8877a74e60a6a2d182f48ed8cb5aae | 1f926bb87c2ac41c7b77c09663023f7d2cc2aa7f | /LooPred.R | 183cb81a236ccd0bcc29701df15c4b55e4b289c7 | [
"MIT"
] | permissive | hly89/ComboPred | 8a065a70b40b3e4c271133b419c28c3699134b6a | 8e6a5bfb90b251f539b3782cb9c689702ffc559c | refs/heads/master | 2021-01-01T15:53:13.949159 | 2017-11-22T08:02:23 | 2017-11-22T08:02:23 | 97,724,212 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 959 | r | LooPred.R | # leave-one-out for predicting single compound/combination response
# rm.idx: the indexes for being left out
# Leave-one-out prediction of single compound / combination response.
# Requires the randomForest package (randomForest(), predict()).
#
# Args:
#   training.data: feature rows aligned with `response` (one row per sample).
#   response: data frame with at least columns id, compound, and dss.
#   rm.idx: integer indexes of the rows to hold out, one at a time.
# Returns: a data.frame with one row per held-out index and columns
#   id (numeric), compound (character), DSS.pred (numeric).
#
# Fixes vs. the original: (1) results were grown with rbind() inside the
# loop starting from an all-NA sentinel row that was deleted afterwards --
# O(n^2) and, when length(rm.idx) == 1, `loo.ctrl[-1, ]` dropped the matrix
# to a vector and corrupted the returned data.frame; (2) values were routed
# through a character matrix and re-parsed with as.numeric(). The result is
# now preallocated with the correct column types.
LooPred <- function(training.data, response, rm.idx) {
  n_out <- length(rm.idx)
  out <- data.frame(id = numeric(n_out),
                    compound = character(n_out),
                    DSS.pred = numeric(n_out),
                    stringsAsFactors = FALSE)
  for (k in seq_len(n_out)) {
    j <- rm.idx[k]
    # Drop the held-out row from both the response and the features.
    tmp.resp <- response[-j, ]
    tmp.training <- training.data[-j, ]
    tmp.model <- randomForest(tmp.training, tmp.resp$dss)
    out$id[k] <- response$id[j]
    out$compound[k] <- response$compound[j]
    # set.seed(1) kept from the original so the RNG stream consumed by each
    # randomForest() fit stays identical across iterations.
    set.seed(1)
    out$DSS.pred[k] <- predict(tmp.model, training.data[j, ])
  }
  return(out)
}
9be3a102ae5cb982ee972c45adb0ac7294252581 | 42767feddc1ebd7a799041247b408fc45729f56a | /Lab7.R | 1c102c4f63dd1626a5e767de93f8c597c5501e56 | [] | no_license | 5atish/R-code | 73047309a4ecd673acdca091fc8f4c2d57f5c87f | 955490b03abc0370ca3122270adfc0ec043a1b61 | refs/heads/master | 2020-03-24T00:06:40.382716 | 2018-08-09T10:26:41 | 2018-08-09T10:26:41 | 142,273,834 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,962 | r | Lab7.R | #####################################################################
#####################################################################
################## 1 Principal Component Analysis ###################
#####################################################################
# PCA walkthrough on the built-in USArrests data (datasets package),
# following the ISLR lab: fit prcomp(), inspect loadings/scores, and
# compute the proportion of variance explained (PVE).
states <- row.names(USArrests)
states
names(USArrests)
apply(USArrests,2,mean) ## apply() function allows us to apply a function(in this case, the mean() function) to each row or column of the data set.
##The second input here denotes whether we wish to compute the mean of the rows, 1, or the columns,2.
apply(USArrests,1,mean) ## apply() function to calculate mean of each row of the dataset.
apply(USArrests,2,var) ## apply() function to calculate variance of each column
pr.out <- prcomp(USArrests, scale= TRUE) ## prcomp() function is used to perform PCA which centres the variable to have mean zero.
## by using scale=TRUE, we scale the variables to have standard deviation 1
names(pr.out)
pr.out$center ## components correspond to the means of the variables that were used for scaling prior to implementing PCA
pr.out$scale ## components correspond to the Standard Deviation of the variables that were used for scaling prior to implementing PCA
pr.out$rotation ## The rotation matrix provides the principal component loadings i.e. each column of pr.out$rotation contains the corresponding principal component loading vector.
pr.out$x
dim(pr.out$x)
biplot(pr.out, scale = 0) ## The scale=0 argument to biplot() ensures that the arrows are scaled
# Flip the signs of loadings and scores (PCs are unique only up to sign)
# and redraw the biplot.
pr.out$rotation <- -pr.out$rotation
pr.out$x <- -pr.out$x
biplot(pr.out, scale = 0)
pr.out$sdev ## standard deviation of each principal component
pr.var <- pr.out$sdev^2
pr.var ## variance of each principal componen
pve <- pr.var/sum(pr.var)
pve ## To compute the proportion of variance explained by each principal component, we simply divide the variance explained by each principal component by the total variance explained by all four principal components
# NOTE(review): "Propertion" in the axis labels below is a typo for
# "Proportion"; left as-is because it is a runtime string.
plot(pve, xlab = "Principal Component", ylab = "Propertion of Variance", ylim = c(0,1), type = "b")
plot(cumsum(pve), xlab = "Principal Component", ylab = "Propertion of Variance", ylim = c(0,1), type = "b")
# Tiny cumsum() demo.
a <- c(1,2,8,-3)
cumsum(a)
#####################################################################
########################## 2 Exercise ###############################
#####################################################################
## 1.Generate a simulated data set with 20 observations in each of three classes (i.e., 60 observations total), and 50 variables.
#Hint: There are a number of functions in R that you can use to generate data. One example is the rnorm() function; runif() is another option. Be sure to add a mean shift to the observations in each class so that there are three distinct classes.
set.seed(383)
x <- matrix(rnorm(20*3*50),ncol = 50)
# NOTE(review): fix(x) opens an interactive editor and blocks (or errors)
# when this script is run non-interactively -- confirm it is intentional.
fix(x)
dim(x)
# Mean shifts on one column per class to create three separated classes.
x[1:20,10] <- x[1:20,10]+100
x[21:40,20] <- x[21:40,20]+400
x[41:60,30] <- x[41:60,30]-200
## 2.Perform PCA on the 60 observations and plot the first two principal component score vectors. Use a different colour to indicate the observations in each of the three classes. If the three classes appear separated in this plot, then continue on to part 3. If not, then return to part 1 and modify the simulation so that there is greater separation between the three classes. Do not continue to part 3 until the three classes show at least some separation in the first two principal component score vectors.
pr.out <- prcomp(x, scale = TRUE)
pr.out
pr.out$center
pr.out$scale
pr.out$sdev
pr.out$rotation
pr.out$x
dim(pr.out$x)
# NOTE(review): scale=0 is a biplot() argument, not a plot() argument; here
# it is silently forwarded through "..." and only triggers warnings.
plot(pr.out$x[,1], pr.out$x[,2], col = c(rep(1,20), rep(2,20), rep(3,20)), scale=0)
biplot(pr.out,scale = 0)
## 3.Perform K-means clustering of the observations with K = 3. How well do the clusters that you obtained in K-means clustering compare to the true class labels?
km.out <- kmeans(x,3, nstart = 20)
table(km.out$cluster,c(rep(1,20), rep(2,20), rep(3,20)))
# quality of cluster is good and we can see it coincide the classes
## 4.Now perform K-means clustering with K = 3 on the first two principal component score vectors, rather than on the raw data. That is, perform K-means clustering on the 60 x 2 matrix of which the first column is the first principal component score vector, and the second column is the second principal component score vector. Comment on your results.
km.out <- kmeans(pr.out$x[,1:2], 3, nstart = 20)
km.out$cluster
table(km.out$cluster,c(rep(1,20), rep(2,20), rep(3,20)))
## quality of cluster good but the perfect
#####################################################################
#####################################################################
# Small eigen/PCA scratch experiments on 3x3 matrices.
# NOTE(review): the first matrix is immediately overwritten by the second,
# and eigen(x) is printed twice -- presumably leftover exploration.
x <- matrix(c(1,1,1,2,2,1,3,3,1), ncol = 3)
x <- matrix(c(2,2,0,2,2,0,0,0,0), ncol = 3)
eigen(x)
eigen(x)
pr.out <- prcomp(x)
pr.out$center
pr.out$scale
pr.out$x
7fdbcf2fcf61530363f2fb0d444df8933d79606d | aba55c7ed6c36fa9e3058378758471219a9268ad | /R_Scripts/2_1_ces65_recode.R | a017fedd1f529a8bc2e7b2fa75094d7e55878485 | [] | no_license | sjkiss/CES_Analysis | b69165323d992808a9d231448bcc3fe507b26aee | 4c39d30f81cbe01b20b7c72d516051fc3c6ed788 | refs/heads/master | 2023-08-18T15:18:44.229776 | 2023-08-07T22:49:06 | 2023-08-07T22:49:06 | 237,296,513 | 0 | 1 | null | 2020-05-07T19:19:08 | 2020-01-30T20:12:01 | R | UTF-8 | R | false | false | 7,410 | r | 2_1_ces65_recode.R |
#load data
data("ces65")
#recode Gender (v337)
look_for(ces65, "sex")
ces65$male<-Recode(ces65$v337, "1=1; 2=0")
val_labels(ces65$male)<-c(Female=0, Male=1)
names(ces65)
#checks
val_labels(ces65$male)
table(ces65$male)
#recode Union Household (V327)
look_for(ces65, "union")
ces65$union<-Recode(ces65$v327, "1=1; 5=0")
val_labels(ces65$union)<-c(None=0, Union=1)
#checks
val_labels(ces65$union)
table(ces65$union)
#Union Combined variable (identical copy of union)
ces65$union_both<-ces65$union
#checks
val_labels(ces65$union_both)
table(ces65$union_both)
#recode Education (v307)
look_for(ces65, "school")
ces65$degree<-Recode(ces65$v307, "16:30=1; 0:15=0")
val_labels(ces65$degree)<-c(nodegree=0, degree=1)
#checks
val_labels(ces65$degree)
table(ces65$degree)
#recode Region (v5)
look_for(ces65, "province")
ces65$region<-Recode(ces65$v5, "0:3=1; 5=2; 6:9=3; 4=NA")
val_labels(ces65$region)<-c(Atlantic=1, Ontario=2, West=3)
#checks
val_labels(ces65$region)
table(ces65$region)
#recode Quebec (v5)
look_for(ces65, "province")
ces65$quebec<-Recode(ces65$v5, "0:3=0; 5:9=0; 4=1")
val_labels(ces65$quebec)<-c(Other=0, Quebec=1)
#checks
val_labels(ces65$quebec)
table(ces65$quebec)
#recode Age (v335)
look_for(ces65, "age")
ces65$age<-ces65$v335
#check
table(ces65$age)
#recode Religion (v309)
look_for(ces65, "church")
ces65$religion<-Recode(ces65$v309, "0=0; 10:19=2; 20=1; 70:71=1; else=3")
val_labels(ces65$religion)<-c(None=0, Catholic=1, Protestant=2, Other=3)
#checks
val_labels(ces65$religion)
table(ces65$religion)
#recode Language (v314)
look_for(ces65, "language")
ces65$language<-Recode(ces65$v314, "2=0; 1=1; else=NA")
val_labels(ces65$language)<-c(French=0, English=1)
#checks
val_labels(ces65$language)
table(ces65$language)
#recode Non-charter Language (v314)
look_for(ces65, "language")
ces65$non_charter_language<-Recode(ces65$v314, "1:2=0; 3:8=1; else=NA")
val_labels(ces65$non_charter_language)<-c(Charter=0, Non_Charter=1)
#checks
val_labels(ces65$non_charter_language)
table(ces65$non_charter_language)
#recode Employment (v54)
look_for(ces65, "employ")
ces65$employment<-Recode(ces65$v54, "2=1; 1=0; else=NA")
val_labels(ces65$employment)<-c(Unemployed=0, Employed=1)
#checks
val_labels(ces65$employment)
table(ces65$employment)
#No Sector variable
#recode Party ID (v221)
look_for(ces65, "identification")
ces65$party_id<-Recode(ces65$v221, "20=1; 10=2; 30=3; 40:60=0; else=NA")
val_labels(ces65$party_id)<-c(Other=0, Liberal=1, Conservative=2, NDP=3)
#checks
val_labels(ces65$party_id)
table(ces65$party_id)
#recode Vote (v262)
look_for(ces65, "vote")
ces65$vote<-Recode(ces65$v262, "12=1; 11=2; 13=3; 14:19=0; else=NA")
val_labels(ces65$vote)<-c(Other=0, Liberal=1, Conservative=2, NDP=3, Bloc=4, Green=5)
#checks
val_labels(ces65$vote)
table(ces65$vote)
#recode Occupation (v306)
look_for(ces65, "occupation")
ces65$occupation<-Recode(ces65$v306, "10=1; 20=2; 32:37=3; 69=3; 40=4; 70:80=5; else=NA")
val_labels(ces65$occupation)<-c(Professional=1, Managers=2, Routine_Nonmanual=3, Skilled=4, Unskilled=5)
#checks
val_labels(ces65$occupation)
table(ces65$occupation)
#recode Income (v336)
look_for(ces65, "income")
ces65$income<-Recode(ces65$v336, "1:3=1; 4:5=2; 6=3; 7:8=4; 9:11=5; else=NA")
val_labels(ces65$income)<-c(Lowest=1, Lower_Middle=2, Middle=3, Upper_Middle=4, Highest=5)
#Simon's Version
val_labels(ces65$income)
table(ces65$income)
look_for(ces65, "income")
ces65$v336
ces65$income2<-Recode(ces65$v336, "1:4=1; 5:7=2; 8:9=3; 10=4; 11=5; else=NA")
val_labels(ces65$income2)<-c(Lowest=1, Lower_Middle=2, Middle=3, Upper_Middle=4, Highest=5)
#checks
val_labels(ces65$income)
table(ces65$income)
val_labels(ces65$v336)
ces65$income_tertile<-Recode(ces65$v336, "1:5=1; 6:8=2;9:11=3; 97=NA")
prop.table(table(ces65$income_tertile))
val_labels(ces65$income_tertile)<-c(Lowest=1, Middle=2, Highest=3)
#recode Religiosity (v310)
look_for(ces65, "church")
ces65$religiosity<-Recode(ces65$v310, "5=1; 4=2; 3=3; 2=4; 1=5; else=NA")
val_labels(ces65$religiosity)<-c(Lowest=1, Lower_Middle=2, MIddle=3, Upper_Middle=4, Highest=5)
#checks
val_labels(ces65$religiosity)
table(ces65$religiosity)
#recode Personal Retrospective (v49)
look_for(ces65, "situation")
ces65$personal_retrospective<-Recode(ces65$v49, "1=1; 2=0; 3=0.5; else=NA", as.numeric=T)
val_labels(ces65$personal_retrospective)<-c(Worse=0, Same=0.5, Better=1)
#checks
val_labels(ces65$personal_retrospective)
table(ces65$personal_retrospective, ces65$v49 , useNA = "ifany" )
#recode turnout (v262)
look_for(ces65, "vote")
ces65$turnout<-Recode(ces65$v262, "11:19=1; 0=0;98=0; else=NA")
val_labels(ces65$turnout)<-c(No=0, Yes=1)
#checks
val_labels(ces65$turnout)
table(ces65$turnout)
table(ces65$turnout, ces65$vote)
#### recode political efficacy ####
#recode No Say (v45)
look_for(ces65, "political power")
ces65$efficacy_internal<-Recode(ces65$v45, "1=0; 2=1; else=NA", as.numeric=T)
val_labels(ces65$efficacy_internal)<-c(low=0, high=1)
#checks
val_labels(ces65$efficacy_internal)
table(ces65$efficacy_internal)
table(ces65$efficacy_internal, ces65$v45 , useNA = "ifany" )
#recode MPs lose touch (v46)
look_for(ces65, "lose touch")
ces65$efficacy_external<-Recode(ces65$v46, "1=0; 2=1; else=NA", as.numeric=T)
val_labels(ces65$efficacy_external)<-c(low=0, high=1)
#checks
val_labels(ces65$efficacy_external)
table(ces65$efficacy_external)
table(ces65$efficacy_external, ces65$v46 , useNA = "ifany" )
#recode Official Don't Care (v43)
look_for(ces65, "don't care")
ces65$efficacy_external2<-Recode(ces65$v43, "1=0; 2=1; else=NA", as.numeric=T)
val_labels(ces65$efficacy_external2)<-c(low=0, high=1)
#checks
val_labels(ces65$efficacy_external2)
table(ces65$efficacy_external2)
table(ces65$efficacy_external2, ces65$v43 , useNA = "ifany" )
ces65 %>%
mutate(political_efficacy=rowMeans(select(., c("efficacy_external", "efficacy_external2", "efficacy_internal")), na.rm=T))->ces65
ces65 %>%
select(starts_with("efficacy")) %>%
summary()
#Check distribution of political_efficacy
qplot(ces65$political_efficacy, geom="histogram")
table(ces65$political_efficacy, useNA="ifany")
#Calculate Cronbach's alpha
library(psych)
ces65 %>%
select(efficacy_external, efficacy_external2, efficacy_internal) %>%
psych::alpha(.)
#Check correlation
ces65 %>%
select(efficacy_external, efficacy_external2, efficacy_internal) %>%
cor(., use="complete.obs")
#recode foreign born (v312)
look_for(ces65, "birth")
ces65$foreign<-Recode(ces65$v312, "209=0; 199=1; 300:999=0; else=NA")
val_labels(ces65$foreign)<-c(No=0, Yes=1)
#checks
val_labels(ces65$foreign)
table(ces65$foreign, ces65$v312, useNA="ifany")
#recode Most Important Question (v8a)
look_for(ces65, "most")
ces65$mip<-Recode(ces65$v8a, "11=9; 12:13=7; 14=6; 15=0; 16:17=15; 18=8; 19=0; 20=4; 21:22=14; 23:26=12;
27:29=16; 30:32=11; 33:34=3; 35:36=0; 37=13; 38=2; 39=6; 40:81=0; else=NA")
val_labels(ces65$mip)<-c(Other=0, Environment=1, Crime=2, Ethics=3, Education=4, Energy=5, Jobs=6, Economy=7, Health=8, Taxes=9, Deficit_Debt=10,
Democracy=11, Foreign_Affairs=12, Immigration=13, Socio_Cultural=14, Social_Programs=15, Brokerage=16)
table(ces65$mip)
#Empty variables that are not available pre-88
ces65$redistribution<-rep(NA, nrow(ces65))
ces65$market_liberalism<-rep(NA, nrow(ces65))
ces65$traditionalism2<-rep(NA, nrow(ces65))
ces65$immigration_rates<-rep(NA, nrow(ces65))
|
7e9fb27b12a6c52126fe7e2971ed25f341a1e428 | 9adc8c6da1ed43422fe584a522c94a4433464a4c | /R/LUCS_KDD_CBA.R | ea0831cb025995a16232c627bc87a991ff976206 | [] | no_license | klainfo/arulesCBA | 314206c434d8d0986baaf7f3ef27ed86ea9add1e | 627a318caa984177b7faf02db37b498bcccbc036 | refs/heads/master | 2022-04-19T08:26:11.582798 | 2020-04-20T12:50:08 | 2020-04-20T12:50:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,827 | r | LUCS_KDD_CBA.R | ## FIXME: Default class???
### Find java and javac
# from http://stackoverflow.com/a/34031214/470769
## Locate an executable on the search path, portably.
##
## Adapted from http://stackoverflow.com/a/34031214/470769: on Windows,
## Sys.which() can miss commands that the shell's `where` finds, so the
## shell is consulted first; elsewhere Sys.which() is used directly.
##
## @param cmd single command name, e.g. "java".
## @return full path to `cmd` (shell-quoted on Windows, where the path may
##   contain spaces); stops with an error if the command cannot be found.
.Sys.which2 <- function(cmd) {
  stopifnot(length(cmd) == 1)
  if (.Platform$OS.type == "windows") {
    suppressWarnings({
      pathname <- shell(sprintf("where %s 2> NUL", cmd), intern = TRUE)[1]
    })
    # shQuote() (not dQuote()) so the result stays valid when pasted into a
    # system() command line: dQuote() emits directional Unicode quotes by
    # default (useFancyQuotes = TRUE), which the Windows shell rejects.
    if (!is.na(pathname)) return(shQuote(stats::setNames(pathname, cmd)))
  }
  found <- Sys.which(cmd)
  if (found == "") stop(cmd, " not found! Make sure it is installed correctly.")
  found
}
# Locate the java / javac executables (each stops with an error if missing).
.java <- function() .Sys.which2("java")
.javac <- function() .Sys.which2("javac")
## Install and compile
# Resolve the installation directory of a LUCS-KDD algorithm.
#
# @param what algorithm name ("CPAR", "FOIL", "PRM" share one directory;
#   "CMAR" has its own).
# @param stop error out when the directory is missing (TRUE) or just
#   report via the "exists" attribute (FALSE)?
# @return the path, with a logical attribute "exists".
.getLUCS_KDD <- function(what, stop = TRUE) {
  # CPAR ships in the combined FOIL/PRM/CPAR bundle.
  subdir <- if (what == "CPAR") "FOIL_PRM_CPAR" else what
  path <- file.path(system.file(package = "arulesCBA"), "LUCS_KDD", subdir)
  attr(path, "exists") <- file.exists(path)
  if (stop && !attr(path, "exists")) {
    stop("You need to install ", what, ". See ? ", what," for instructions.")
  }
  path
}
# Download, unpack and compile the LUCS-KDD FOIL/PRM/CPAR Java code into the
# package installation directory. Requires a working JDK (java + javac).
# The option LUCS_KDD_CPAR_FILE can point to a local archive instead of the
# default download URL. No-op if already installed unless force = TRUE.
install_LUCS_KDD_CPAR <- function(force = FALSE,
  source = "https://cgi.csc.liv.ac.uk/~frans/KDD/Software/FOIL_PRM_CPAR/foilPrmCpar.tgz") {
  path <- .getLUCS_KDD("CPAR", stop = FALSE)
  if(attr(path, "exists") && !force) {
    cat(paste0("LUCS-KDD CPAR is already installed.\nLocation: ", path, "\n"))
    return(invisible())
  }
  directory <- file.path(system.file(package = "arulesCBA"), "LUCS_KDD")
  src <- file.path(system.file("LUCS_KDD_java", package = "arulesCBA"))
  dir.create(directory, showWarnings = FALSE)
  message("You are about to download and compile the LUCS-KDD Software Library implementations of the CPAR algorithms.",
    "\nThis requires a working installation of the Java JDK including java and javac.",
    "\nNote: The algorithms are only free to use for **non-commercial purpose**!",
    "\nFor details see: https://cgi.csc.liv.ac.uk/~frans/KDD/Software/",
    "\n")
  # check for java/javac (stops if not found)
  .java()
  .javac()
  # Allow installation from a pre-downloaded local archive.
  if(!is.null(options()$LUCS_KDD_CPAR_FILE))
    source <- paste0("file://", normalizePath(options()$LUCS_KDD_CPAR_FILE))
  cat("Installing from:", source, "\n")
  utils::download.file(source, destfile = file.path(directory, "foilPrmCpar.tgz"))
  utils::untar(file.path(directory, "foilPrmCpar.tgz"), exdir = file.path(directory))
  cat("to:", path, "\n")
  # Copy the package-provided driver classes next to the LUCS-KDD sources.
  file.copy(file.path(src, "runCPAR.java"), path)
  file.copy(file.path(src, "runFOIL.java"), path)
  file.copy(file.path(src, "runPRM.java"), path)
  cat("Compiling.\n")
  # NOTE(review): compiler output/exit status is captured but not checked.
  exe <- paste(.javac(), "-cp", path, file.path(path, "runCPAR.java"))
  ret <- system(exe, intern = TRUE)
  exe <- paste(.javac(), "-cp", path, file.path(path, "runFOIL.java"))
  ret <- system(exe, intern = TRUE)
  exe <- paste(.javac(), "-cp", path, file.path(path, "runPRM.java"))
  ret <- system(exe, intern = TRUE)
}
# Download, unpack and compile the LUCS-KDD CMAR Java code. Mirrors
# install_LUCS_KDD_CPAR() above; the option LUCS_KDD_CMAR_FILE can point to
# a local archive. Requires a working JDK (java + javac).
install_LUCS_KDD_CMAR <- function(force = FALSE,
  source = "https://cgi.csc.liv.ac.uk/~frans/KDD/Software/CMAR/cmar.tgz") {
  path <- .getLUCS_KDD("CMAR", stop = FALSE)
  if(attr(path, "exists") && !force) {
    cat(paste0("LUCS-KDD CMAR is already installed.\nLocation: ", path, "\n"))
    return(invisible())
  }
  directory <- file.path(system.file(package = "arulesCBA"), "LUCS_KDD")
  src <- file.path(system.file("LUCS_KDD_java", package = "arulesCBA"))
  dir.create(directory, showWarnings = FALSE)
  message("You are about to download and compile the LUCS-KDD Software Library implementations of the CMAR algorithms.",
    "\nThis requires a working installation of the Java JDK including java and javac.",
    "\nNote: The algorithms are only free to use for **non-commercial purpose**!",
    "\nFor details see: https://cgi.csc.liv.ac.uk/~frans/KDD/Software/",
    "\n")
  # check for java/javac (stops if not found)
  .java()
  .javac()
  # Allow installation from a pre-downloaded local archive.
  if(!is.null(options()$LUCS_KDD_CMAR_FILE))
    source <- paste0("file://", normalizePath(options()$LUCS_KDD_CMAR_FILE))
  cat("Installing from:", source, "\n")
  utils::download.file(source,
    destfile = file.path(directory, "cmar.tgz"))
  cat("to:", path, "\n")
  # Unlike CPAR, the CMAR archive is unpacked directly into `path`.
  #utils::untar(file.path(directory, "cmar.tgz"), exdir = file.path(directory))
  utils::untar(file.path(directory, "cmar.tgz"), exdir = file.path(path))
  file.copy(file.path(src, "runCMAR.java"), path)
  cat("Compiling.\n")
  # NOTE(review): compiler output/exit status is captured but not checked.
  exe <- paste(.javac(), "-cp", path, file.path(path, "runCMAR.java"))
  ret <- system(exe, intern = TRUE)
}
### Write and read LUCS-KDD format files
# LUCS-KDD uses item ids and the highest item ids are the class labels.
# Write transactions in the LUCS-KDD ".num" format: one line per
# transaction, containing space-separated item ids, where the class items
# must carry the highest ids (i.e., be the last items).
#
# @param formula class ~ predictors formula.
# @param trans   arules transactions.
# @param file    output file name.
.write_trans_LUCS_KDD <- function(formula, trans, file = "data.num") {
  # Reorder items so the class items come last if they do not already.
  parsedFormula <- .parseformula(formula, trans)
  if(!all(parsedFormula$class_ids > nitems(trans) - length(parsedFormula$class_ids)))
    trans <- trans[, c(parsedFormula$var_ids, parsedFormula$class_ids)]
  l <- LIST(trans, decode = FALSE)
  # vapply() instead of sapply(): type-stable (always character) even for
  # empty transaction sets.
  lines <- vapply(l, paste, character(1L), collapse = ' ')
  writeLines(lines, con = file)
}
.parse_rules_LUCS_KDD <- function(ret, formula, trans) {
k <- grep("Num.*classes.*=", ret, fixed = FALSE, value = TRUE)
k <- as.numeric(sub('.*= (\\d+)', '\\1', k))
r <- grep("\\}\\s+->\\s+\\{", ret, fixed = FALSE, value = TRUE)
# we calulate laplace below
#laplace <- as.numeric(sapply(r, FUN = function(r) gsub('.*\\s(\\S+)%', '\\1', r)))
r <- strsplit(r, "->")
r <- lapply(r, FUN = function(r) gsub('.*\\{(.*)\\}.*', '\\1', r))
lhs <- lapply(r, FUN = function(rs) as.integer(strsplit(rs[1], " ")[[1]]))
rhs <- lapply(r, FUN = function(rs) as.integer(strsplit(rs[2], " ")[[1]]))
# fix item order if class items were not the last
parsedFormula <- .parseformula(formula, trans)
if(!all(parsedFormula$class_ids > nitems(trans)-length(parsedFormula$class_ids))) {
itemOrder <- c(parsedFormula$var_ids, parsedFormula$class_ids)
lhs <- lapply(lhs, FUN = function(i) itemOrder[i])
rhs <- lapply(rhs, FUN = function(i) itemOrder[i])
}
rules <- new("rules",
lhs = encode(lhs, itemLabels = itemLabels(trans)),
rhs = encode(rhs, itemLabels = itemLabels(trans))
)
#quality(rules) <- data.frame(laplace_FOIL = laplace)
quality(rules) <- interestMeasure(rules, trans,
measure = c("support", "confidence", "lift", "laplace"), k = k)
rules
}
### Run the algorithms
### FIXME: add CMAR
### FIXME: add more memory java -Xms600m -Xmx600m FILE_NAME
# Run one of the external LUCS-KDD Java implementations on `trans` and
# return the mined rules.
#
# @param formula   class ~ predictors formula.
# @param trans     arules transactions.
# @param method    which algorithm to run (Java class "run<method>").
# @param parameter extra command-line flags, e.g. "-S10 -C50" for CMAR.
# @param verbose   print the java call and its raw output?
# @return an arules "rules" object (see .parse_rules_LUCS_KDD()).
.LUCS_KDD <- function(formula, trans, method = c("FOIL", "PRM", "CPAR", "CMAR"), parameter = "", verbose = FALSE) {
  method <- match.arg(method)
  if(verbose) cat(paste("LUCS-KDD:", method, "\n"))
  # CMAR has its own install directory; FOIL/PRM/CPAR share the CPAR one.
  if(method == "CMAR") path <- .getLUCS_KDD("CMAR")
  else path <- .getLUCS_KDD("CPAR")
  parsedFormula <- .parseformula(formula, trans)
  # -N tells the Java code how many of the last items are class items.
  classParameter <- paste0("-N", length(parsedFormula$class_ids))
  # write transactions
  filename <- tempfile(fileext = ".num")
  .write_trans_LUCS_KDD(formula, trans, filename)
  # options()$java.parameters[1] lets the user pass JVM flags (e.g. -Xmx).
  exe <- paste(.java(), options()$java.parameters[1], "-cp", path, paste0("run", method),
               classParameter, paste0("-F", filename), parameter)
  if(verbose) cat(paste("Call:", exe, "\n\n"))
  ret <- system(exe, intern = TRUE)
  if(!is.null(attr(ret, "status")) && attr(ret, "status") != 0) stop("Error in call: ", exe, "\n",ret)
  if(verbose) print(ret)
  rules <- .parse_rules_LUCS_KDD(ret, formula, trans)
  if(verbose) cat(paste("\nRules used:", length(rules), "\n"))
  rules
}
### NOTE: MIN_GAIN parameter is not exposed by LUCS-KDD CPAR implmentation. It is set to 0.7
### NOTE: We use the most prevalent class if no rules match!
# FOIL-based classifier (Yin and Han, 2003), fitted via the external
# LUCS-KDD "FOIL" Java implementation.
#
# @param formula     class ~ predictors formula.
# @param data        a data.frame or transactions; discretized if needed.
# @param best_k      number of best rules used for weighted prediction.
# @param disc.method discretization method for prepareTransactions().
# @param verbose     print the java call and its output?
# @return a "CBA" object using laplace-weighted rules.
FOIL2 <- function(formula, data, best_k = 5, disc.method = "mdlp", verbose = FALSE) {
  formula <- as.formula(formula)
  trans <- prepareTransactions(formula, data, disc.method = disc.method)
  parsed_formula <- .parseformula(formula, trans)
  rules <- .LUCS_KDD(formula, trans, method = "FOIL", parameter = "", verbose = verbose)
  structure(list(
    formula = formula,
    # `class` was missing here although parsed_formula was already computed
    # and CPAR() stores it; added for a consistent CBA object structure.
    class = parsed_formula$class_name,
    discretization = attr(trans, "disc_info"),
    rules = rules,
    default = majorityClass(formula, trans),
    method = "weighted",
    weights = "laplace",
    best_k = best_k,
    description = "FOIL-based classifier (Yin and Han, 2003 - LUCS-KDD implementation)."
  ),
  class = "CBA"
  )
}
# CPAR classifier (Yin and Han, 2003), fitted via the external LUCS-KDD
# Java implementation.
#
# @param formula     class ~ predictors formula.
# @param data        a data.frame or transactions; discretized if needed.
# @param best_k      number of best rules used for weighted prediction.
# @param disc.method discretization method for prepareTransactions().
# @param verbose     print the java call and its output?
# @return a "CBA" object using laplace-weighted rules.
CPAR <- function(formula, data, best_k = 5, disc.method = "mdlp", verbose = FALSE) {
  formula <- as.formula(formula)
  trans <- prepareTransactions(formula, data, disc.method = disc.method)
  parsed_formula <- .parseformula(formula, trans)
  rules <- .LUCS_KDD(formula, trans, method = "CPAR", parameter = "", verbose = verbose)
  model <- list(
    formula = formula,
    class = parsed_formula$class_name,
    rules = rules,
    default = majorityClass(formula, trans),
    discretization = attr(trans, "disc_info"),
    method = "weighted",
    weights = "laplace",
    best_k = best_k,
    description = "CPAR (Yin and Han, 2003 - LUCS-KDD implementation)."
  )
  structure(model, class = "CBA")
}
# PRM classifier (Yin and Han, 2003), fitted via the external LUCS-KDD
# Java implementation.
#
# @param formula     class ~ predictors formula.
# @param data        a data.frame or transactions; discretized if needed.
# @param best_k      number of best rules used for weighted prediction.
# @param disc.method discretization method for prepareTransactions().
# @param verbose     print the java call and its output?
# @return a "CBA" object using laplace-weighted rules.
PRM <- function(formula, data, best_k = 5, disc.method = "mdlp", verbose = FALSE) {
  formula <- as.formula(formula)
  trans <- prepareTransactions(formula, data, disc.method = disc.method)
  parsed_formula <- .parseformula(formula, trans)
  rules <- .LUCS_KDD(formula, trans, method = "PRM", parameter = "", verbose = verbose)
  structure(list(
    formula = formula,
    # `class` was missing here although parsed_formula was already computed
    # and CPAR() stores it; added for a consistent CBA object structure.
    class = parsed_formula$class_name,
    discretization = attr(trans, "disc_info"),
    rules = rules,
    default = majorityClass(formula, trans),
    method = "weighted",
    weights = "laplace",
    best_k = best_k,
    description = "PRM (Yin and Han, 2003 - LUCS-KDD implementation)."
  ),
  class = "CBA"
  )
}
# CMAR classifier (Li, Han and Pei, 2001), fitted via the external
# LUCS-KDD Java implementation.
#
# @param formula     class ~ predictors formula.
# @param data        a data.frame or transactions; discretized if needed.
# @param support     minimum support (fraction; passed to Java as percent).
# @param confidence  minimum confidence (fraction; passed as percent).
# @param disc.method discretization method for prepareTransactions().
# @param verbose     print the java call and its output?
# @return a "CBA" object using weighted chi-squared rule weights.
CMAR <- function(formula, data, support = 0.1, confidence = 0.5, disc.method = "mdlp", verbose = FALSE) {
  formula <- as.formula(formula)
  trans <- prepareTransactions(formula, data, disc.method = disc.method)
  parsed_formula <- .parseformula(formula, trans)
  rules <- .LUCS_KDD(formula, trans, method = "CMAR",
    parameter = paste0("-S",floor(support*100)," -C", floor(confidence*100)),
    verbose = verbose)
  # add weighted Chi2 to the rules: chi2 of each rule normalized by the
  # maximal chi2 attainable given the lhs/rhs supports (presumably the
  # weighting scheme from the CMAR paper -- confirm against Li et al. 2001).
  quality(rules)$chiSquared <- interestMeasure(rules, "chiSquared", transactions = trans)
  supP <- support(lhs(rules), trans, type = "absolute")
  supC <- support(rhs(rules), trans, type = "absolute")
  n <- length(trans)
  e <- 1/(supP*supC) + 1/(supP*(n-supC)) + 1/((n-supP)*supC) + 1/((n-supP)*(n-supC))
  maxChiSquared <- (pmin(supP, supC) - supP*supC/n)^2 * n * e
  quality(rules)$weightedChiSquared <- quality(rules)$chiSquared^2/maxChiSquared
  structure(list(
    formula = formula,
    # `class` was missing here although parsed_formula was already computed
    # and CPAR() stores it; added for a consistent CBA object structure.
    class = parsed_formula$class_name,
    discretization = attr(trans, "disc_info"),
    parameter = list(support = support, confidence = confidence),
    rules = rules,
    default = majorityClass(formula, trans),
    weights = "weightedChiSquared",
    method = "weighted",
    description = "CMAR (Li, Han and Pei, 2001 - LUCS-KDD implementation)."
  ),
  class = "CBA"
  )
}
|
01544234b19dde79a0fbf1e0eb60f48b7733047e | 5520d916c652e6b1410ff869eebb2669df9d15a2 | /R/testmd.R | 2c90e358b8834561c8af34d14ae50b50309f2f01 | [] | no_license | Vin985/cruiseCompare | c2771139c55b20c4c7555214c3f0ccd34ab6b7c2 | a8603b31a164347993123982a6d54d7b93994f7b | refs/heads/master | 2021-01-13T16:29:28.895129 | 2018-11-30T20:36:00 | 2018-11-30T20:36:00 | 79,853,050 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,390 | r | testmd.R | library(ECSASconnect)
library(GeoAviR)
require(MASS)
require(knitr)
require(rgdal)
library(png)
library(grid)
require(maptools)
library(RColorBrewer)
library(dplyr)
# Build the summary tables for a cruise report from ECSAS seabird
# observations (one row per observed flock; uses columns English, WatchID,
# WatchLenKm, Count, Distance, ObserverID, PlatformText, StartDate,
# EndDate), then fit distance-sampling detection models via
# GeoAviR::distance.wrap().
#
# Fixes: removed two leftover browser() debugging calls that halted
# execution; replaced 1:length(x) with seq_along(x) (1:length(keep.sp) was
# a latent c(1, 0) bug when no species passed the >50 observations filter);
# T -> TRUE.
#
# NOTE(review): join() is plyr::join() -- only dplyr is attached at the top
# of this file, so plyr must be provided elsewhere (confirm).
# NOTE(review): most intermediates (observer, vessel, date1, table1.df, ...)
# are computed but never returned; this looks like an extracted knitr
# chunk. The last expression is an assignment, returned invisibly -- kept
# as-is to preserve the original behavior.
testmd <- function(Observation.df) {
  ##adapt species names
  Observation.df$English <- as.factor(Observation.df$English)
  # "Genus: Xxx" levels become "Xxx sp." labels.
  if (length(grep("Genus", levels(Observation.df$English))) >= 1) {
    index.genus <- grep("Genus", levels(Observation.df$English))
    new.genus.labs <- sapply(seq_along(index.genus), function(i) {
      paste(strsplit(levels(Observation.df$English)[index.genus[i]], ": ")[[1]][2], "sp.", "")
    })
    levels(Observation.df$English)[index.genus] <- new.genus.labs
    names(Observation.df$English)[index.genus] <- new.genus.labs
  }
  # Same treatment for "Family: Xxx" levels.
  if (length(grep("Family", levels(Observation.df$English))) >= 1) {
    index.family <- grep("Family", levels(Observation.df$English))
    new.family.labs <- sapply(seq_along(index.family), function(i) {
      paste(strsplit(levels(Observation.df$English)[index.family[i]], ": ")[[1]][2], "sp.", "")
    })
    levels(Observation.df$English)[index.family] <- new.family.labs
    names(Observation.df$English)[index.family] <- new.family.labs
  }
  # Number of flocks per species.
  freq.sp <- table(Observation.df$English)
  ###agregation Densities
  total_km_watch.df <-
    aggregate(WatchLenKm ~ WatchID,
              data = Observation.df,
              FUN = mean,
              na.rm = TRUE)
  total_birds_watch.df <-
    aggregate(Count ~ WatchID,
              data = Observation.df,
              FUN = sum,
              na.rm = TRUE)
  total.df <-
    join(total_km_watch.df, total_birds_watch.df, by = "WatchID")
  ###fill watch without observations with zeroes
  total.df$Count[is.na(total.df$Count)] <- 0
  # Birds per km of watch effort.
  total.df$Densities <- total.df$Count / total.df$WatchLenKm
  ###agregation species
  total_birds_sp.df <-
    aggregate(Count ~ English,
              data = Observation.df,
              FUN = sum,
              na.rm = TRUE)
  #add factor CR
  total_birds_sp.df$English <- as.factor(total_birds_sp.df$English)
  mean_birds_sp.df <-
    aggregate(Count ~ English,
              data = Observation.df,
              FUN = mean,
              na.rm = TRUE)
  #add factor CR
  mean_birds_sp.df$English <- as.factor(mean_birds_sp.df$English)
  sd_birds_sp.df <-
    aggregate(Count ~ English,
              data = Observation.df,
              FUN = sd,
              na.rm = TRUE)
  #add factor CR
  sd_birds_sp.df$English <- as.factor(sd_birds_sp.df$English)
  ##group size: mean, sd and coefficient of variation of flock size.
  birds.df <- join(mean_birds_sp.df, sd_birds_sp.df, by = "English")
  names(birds.df) <- c("English", "mean", "sd")
  birds.df$cv <- birds.df$sd / birds.df$mean
  ###change name for text
  total_birds_sp.df$English <- as.factor(total_birds_sp.df$English)
  #drop single observation species (sd, hence cv, is NA)
  birds.df <- birds.df[!is.na(birds.df$cv), ]
  #descriptives
  # NOTE(review): assumes ObserverID is a factor with a single
  # "LASTNAME_FIRSTNAME" level -- confirm.
  observer <-
    paste(strsplit(levels(Observation.df$ObserverID), "_")[[1]][2], strsplit(levels(Observation.df$ObserverID), "_")[[1]][1], sep =
            " ")
  vessel <- levels(as.factor(Observation.df$PlatformText))
  # Dates assumed in "YYYY-MM-DD" text form.
  date1 <-
    paste(substr(unique(Observation.df$StartDate), 9, 10), month.name[as.numeric(substr(unique(Observation.df$StartDate), 6, 7))], sep =
            " ")
  date2 <-
    paste(substr(unique(Observation.df$EndDate), 9, 10),
          month.name[as.numeric(substr(unique(Observation.df$EndDate), 6, 7))],
          substr(unique(Observation.df$StartDate), 1, 4),
          sep = " ")
  # Table 1: flocks, birds and mean flock size per species.
  table1.df <-
    join(data.frame(English = names(freq.sp), Flocks = as.numeric(freq.sp)),
         total_birds_sp.df,
         by = "English")
  table1.df$mean <- round(table1.df$Count / table1.df$Flocks, 1)
  names(table1.df)[names(table1.df) == "Count"] <- "Birds"
  names(table1.df)[names(table1.df) == "mean"] <- "Mean flock size"
  table1.df <- table1.df[order(-table1.df$Birds), ]
  # Birds per distance bin (assumes exactly 4 bins below).
  total_birds_bin.df <-
    aggregate(Count ~ Distance,
              data = Observation.df,
              FUN = sum,
              na.rm = TRUE)
  ###extract order to test
  # The two far bins are down-weighted by 0.5 before ranking; a detection
  # process is "good" when counts decline with distance (first bins rank
  # first).
  bin.order <-
    paste(order(total_birds_bin.df[, 2] * c(1, 1, 0.5, 0.5), decreasing = TRUE), collapse =
            "")
  possible.outcome <- c(
    paste(c(1, 2, 3, 4), collapse = ""),
    paste(c(2, 1, 3, 4), collapse = ""),
    paste(c(1, 2, 4, 3), collapse = ""),
    paste(c(2, 1, 4, 3), collapse = "")
  )
  qualifier <-
    c("excellent", "average", "good", "mediocre", "unsuitable")
  ###table observation
  Dist_sp.tbl <- table(Observation.df$English, Observation.df$Distance)
  #Id species with enough info (more than 50 observations)
  keep.sp <- which(apply(Dist_sp.tbl, 1, sum) > 50)
  ###qualify detection process per retained species
  bin.sp <-
    sapply(seq_along(keep.sp), function(i)
      paste(order(
        Dist_sp.tbl[keep.sp[i], ] * c(1, 1, 0.5, 0.5), decreasing = TRUE
      ), collapse = ""))
  qualify.sp <-
    sapply(seq_along(keep.sp), function(i)
      qualifier[match(bin.sp[i], possible.outcome)[1]])
  # Stratum area: 30% of the total surveyed km (transect strip assumption).
  Observation.df$STR_AREA <- sum(total_km_watch.df$WatchLenKm) * 0.3
  # Fit the candidate detection functions via the external MCDS engine.
  all.dist <- distance.wrap(
    Observation.df,
    SMP_EFFORT = "WatchLenKm",
    DISTANCE = "Distance",
    SIZE = "Count",
    units = list(
      Type = "Line",
      Distance = "Perp",
      Length_units = "Kilometers",
      Distance_units = "Meters",
      Area_units = "Square kilometers"
    ),
    breaks = c(0, 50, 100, 200, 300),
    STR_LABEL = "STR_LABEL",
    STR_AREA = "STR_AREA",
    SMP_LABEL = "WatchID",
    path = "analysis/temp",
    pathMCDS = "tools",
    verbose = FALSE
  )
  all.sp.best <- keep.best.model(all.dist)
  # Human-readable names for the 6 candidate key/adjustment combinations.
  model.names <-
    c("uniform",
      "uniform",
      "half-normal",
      "half-normal",
      "hazard rate",
      "hazard rate")
  adj.names <-
    c("cosinus",
      "polynomial",
      "cosinus",
      "hermite polynomial",
      "cosinus",
      "polynomial")
  # Index of the model with the smallest AIC.
  mod.selected <-
    which.min(sapply(1:6, function(i)
      all.dist[[i]]$AIC[3]))
  ####Extract the probability of detection
  table3.df <- all.sp.best$parameter_estimates$Global[, -1]
  table3.df[, -1] <- round(table3.df[, -1], 2)
  p.line <- which(table3.df$Parameters == "p")
  ###extract prediction (density D and abundance N)
  table4.df <- all.sp.best$density_estimate$Global
  table4.df[, -c(1, 2)] <- round(table4.df[, -c(1, 2)], 2)
  d.line <- which(table4.df$Parameters == "D")
  N.line <- which(table4.df$Parameters == "N")
}
|
42e2d8f359295bd0eecffe492e4cc295cf431925 | 524cb1fd419c5dcb533abffbae47061365794d1d | /percent.R | 702e89e59dd493fab54cd0b70e716ef3b8dbbd20 | [] | no_license | sarajohns19/predictive_analytics | fe24ff5e83a5496a2b99b70ccf20fba1a287e979 | 0e4d4fac793965e62cad1aceec657aa560bf0520 | refs/heads/master | 2021-01-01T18:49:59.371231 | 2017-07-26T15:18:32 | 2017-07-26T15:18:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 553 | r | percent.R | <!DOCTYPE HTML>
<html>
<!-- NOTE(review): despite the .R filename, this file is an HTML wrapper
     page that displays percent.R inside a full-window iframe. -->
<head>
<meta charset="utf-8">
<title>percent.R</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
</head>
<body>
<style type="text/css">
/* Make the iframe container fill the viewport with no scrollbars. */
html, body, #container {
height: 100%;
}
body, #container {
overflow: hidden;
margin: 0;
}
#iframe {
width: 100%;
height: 100%;
border: none;
}
</style>
<div id="container">
<!-- sandbox="allow-scripts": scripts may run but get no same-origin access -->
<iframe id="iframe" sandbox="allow-scripts" src="/user/halpo/files/percent.R"></iframe>
</div>
</body>
</html>
988daa40ca7179088c24c8cef7d696d48b9d47aa | a2471def418fc7d02d76a0d54ac14fe8ca094f7e | /Measles_case.R | 6f59189a18b8972400931631c6c96e5b9a6450b3 | [] | no_license | ptcuong199/Data-Visualization | 7aa96d8d5e7dc61349ce4f3d87cad0f579d4e580 | 7039d1c94948300cc0f51dabbba565de4562a495 | refs/heads/master | 2020-05-09T18:29:08.800905 | 2019-04-14T21:34:33 | 2019-04-14T21:34:33 | 181,343,540 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,942 | r | Measles_case.R | install.packages("readxl")
library(readxl)
graph_case <- function(y) {
# Number of children aged 0-4 died from measles
measles_data <- as.data.frame(read_excel(paste(y, "Measles Cases.xlsx", sep = "_")))
# Measles 1 Coverage - The first dose is generally given to children around 9 to 15 months of age
measles_1 <- read.csv(paste(y, "Measles 1 Coverage.csv", sep = "_"))
measles_1 <- measles_1[,-1]
colnames(measles_1) <- as.numeric(c(1:length(names(measles_1))))
measles_1 <- as.data.frame(t(measles_1))
colnames(measles_1) <- c("Year", "MCV1_Coverage")
measles_1 <- measles_1 %>% filter(Year %in% seq(2017,2000,-1))
# Measles 2 Coverage - The second dose is given to children from 4 to 6 years of age, with at least 4 weeks between the doses.
measles_2 <- read.csv(paste(y, "Measles 2 Coverage.csv", sep = "_"))
measles_2 <- measles_2[,-1]
colnames(measles_2) <- as.numeric(c(1:length(names(measles_2))))
measles_2 <- as.data.frame(t(measles_2))
colnames(measles_2) <- c("Year", "MCV2_Coverage")
# Graph
upper_lim <- max(measles_data$Total)+20
graph <- ggplot() + geom_bar(data = measles_data, aes(Year, Total, fill = "Number of cases"), stat = "identity", alpha = 1.0) +
geom_line(data = measles_1, aes(Year, MCV1_Coverage*(upper_lim/100), col = "First dose"), size = 1.0) +
geom_line(data = measles_2, aes(Year, MCV2_Coverage*(upper_lim/100), col = "Second dose"), size = 1.0) +
scale_y_continuous(limits = c(0, upper_lim), sec.axis = sec_axis(~./(upper_lim/100), name = "Coverage")) +
scale_color_manual(values = c("blue", "red")) +
scale_fill_manual(values = "black") +
labs(fill = "") +
labs(color = "Legends\n\nMeasles containing vaccine") +
ylab("Number of cases") +
ggtitle(paste("A Comparison between the Number of Measles Cases and Measles Immunization Coverage by year", y, sep = " in "))
return(graph)
}
|
684ebb36f56f6e5de65e467a72414b31e35c3e6a | a226f4b4cf54dd0e8164a727d24dca99e79e1354 | /tests/testthat/test_check4SPSS.R | b46165003dcea7acc51c425c87fba4059f566528 | [] | no_license | beckerbenj/eatGADS | 5ef0bdc3ce52b1895aaaf40349cbac4adcaa293a | e16b423bd085f703f5a548c5252da61703bfc9bb | refs/heads/master | 2023-09-04T07:06:12.720324 | 2023-08-25T11:08:48 | 2023-08-25T11:08:48 | 150,725,511 | 0 | 1 | null | 2023-09-12T06:44:54 | 2018-09-28T10:41:21 | R | UTF-8 | R | false | false | 2,348 | r | test_check4SPSS.R |
# load(file = "tests/testthat/helper_data.rda")
load(file = "helper_data.rda")
# Independent working copies of the helper GADSdat object for the tests.
df7 <- df6 <- df5 <- df4 <- df3 <- df1
# The limits exercised below (variable names > 64 chars, variable labels >
# 256 chars, long value labels, missing codes on string variables) are
# inferred from the expectations -- confirm against check4SPSS().
test_that("Check length of variable labels", {
  df6 <- changeVarNames(df6, oldNames = c("ID1", "V1"),
                        c(paste(rep("a", 64), collapse = ""),
                          paste(rep("a", 65), collapse = "")))
  out <- check4SPSS(df6)
  expect_equal(out$varNames_special, character())
  expect_equal(out$varNames_length, paste(rep("a", 65), collapse = ""))
})
test_that("Check length of variable labels", {
  df3$labels[1, "varLabel"] <- paste(rep("a", 256), collapse = "")
  df3$labels[2, "varLabel"] <- paste(rep("a", 257), collapse = "")
  out <- check4SPSS(df3)
  expect_equal(out$varLabels, c("V1"))
  expect_equal(out$valLabels, character())
  expect_equal(out$missings, character())
})
test_that("Check length of value labels", {
  df4 <- changeValLabels(df4, "ID1", value = 1, valLabel = paste(rep("a", 121), collapse = ""))
  df4 <- changeValLabels(df4, "V1", value = 99, valLabel = paste(rep("3", 125), collapse = ""))
  out <- check4SPSS(df4)
  expect_equal(out$valLabels, list(ID1 = 1, V1 = 99))
  expect_equal(out$varLabels, character())
  expect_equal(out$missings, character())
})
# Multi-byte characters count double, so 66 "ö" pairs still fit while 67
# exceed the limit.
test_that("Check length of variable labels with unicode", {
  df5$labels[1, "varLabel"] <- paste(paste0("ö", 1:66), collapse = "")
  df5$labels[2, "varLabel"] <- paste(paste0("a", 1:88), collapse = "")
  out <- check4SPSS(df5)
  expect_equal(out$varLabels, character())
  df5$labels[1, "varLabel"] <- paste(paste0("ö", 1:67), collapse = "")
  df5$labels[2, "varLabel"] <- paste(paste0("a", 1:89), collapse = "")
  out <- check4SPSS(df5)
  expect_equal(out$varLabels, c("ID1", "V1"))
})
test_that("Check many missing codes", {
  df7 <- changeValLabels(df7, "ID1", value = 1:5, valLabel = 1:5)
  df7 <- changeValLabels(df7, "V1", value = 1:5, valLabel = 1:5)
  df7 <- changeMissings(df7, "ID1", value = 1:5, c("miss", "miss", "valid", "miss", "miss"))
  df7 <- changeMissings(df7, "V1", value = 1:5, c("miss", "miss", "valid", "miss", "miss"))
  # V1 becomes a string variable, which triggers the missing-code check.
  df7$dat$V1 <- as.character(df7$dat$V1)
  out <- check4SPSS(df7)
  expect_equal(out$valLabels, character())
  expect_equal(out$varLabels, character())
  expect_equal(out$missings, "V1")
})
|
a06a3db103d9743eea2abce34186bbc3643134e2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sbpiper/examples/plot_repeated_tc.Rd.R | f6b4db9346cc96d3905edfaeede1c85cfa917c89 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 601 | r | plot_repeated_tc.Rd.R | library(sbpiper)
### Name: plot_repeated_tc
### Title: Plot repeated time courses in the same plot separately. First
### column is Time.
### Aliases: plot_repeated_tc
### ** Examples
data(insulin_receptor_1)
data(insulin_receptor_2)
data(insulin_receptor_3)
df <- data.frame(Time=insulin_receptor_1[,1],
X1=insulin_receptor_1[,2],
X2=insulin_receptor_2[,2],
X3=insulin_receptor_3[,2])
plot_repeated_tc(df=df,
xaxis_label="Time [m]", yaxis_label="Level [a.u.]",
alpha=1, yaxis.min=NULL, yaxis.max=NULL)
|
c4f74e4aee020ce1d2532a52f277c688b29a1ccf | 3ec39ea137d1aaa0c7106c1ae49ddf395b5fda20 | /man/standardsMetadata.Rd | 601a48c0e0bc5b83d57cdf2aa109e72295a36fac | [] | no_license | mli1/safetyGraphics | e48c5bee150926f008d6185c764a179d5a3e5a71 | 165651d98c6894646f84884d1c9f7a24336d25f7 | refs/heads/master | 2023-01-22T17:00:10.267847 | 2020-01-16T14:26:14 | 2020-01-16T14:26:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 785 | rd | standardsMetadata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standardsMetadata.R
\docType{data}
\name{standardsMetadata}
\alias{standardsMetadata}
\title{Standards Metadata}
\format{A data frame with 25 rows and 3 columns
\describe{
\item{text_key}{Text key indicating the setting name. \code{'--'} delimiter indicates a nested setting}
\item{adam}{Settings values for the ADaM standard}
\item{sdtm}{Settings values for the SDTM standard}
}}
\source{
Created for this package
}
\usage{
standardsMetadata
}
\description{
Metadata about the data standards used to configure safetyGraphics charts. One record per unique setting. Columns contain default setting values for clinical data standards, like the CDISC "adam" and "sdtm" standards.
}
\keyword{datasets}
|
30e8f5678a2c8fdeb87fcc6d8b4cb22dc08986f0 | c330fafcccacc36fb40986d81773dd642d29bf1a | /sandbox/test_plot2.R | 15d1d65807a5e73be3930ef916d8855ca6f8c201 | [] | no_license | joshuaulrich/xtsExtra | 6b3153970cb466090661f41501250b184fa70c83 | e6588dd7d7cd77e14117078251f55797e51d4052 | refs/heads/master | 2021-01-19T17:41:38.504608 | 2014-12-26T18:01:34 | 2014-12-26T18:01:34 | 30,535,442 | 14 | 2 | null | null | null | null | UTF-8 | R | false | false | 4,413 | r | test_plot2.R | library(xtsExtra)
library(PerformanceAnalytics)
# library(quantmod)
source("sandbox/paFUN.R")
data(edhec)
R <- edhec[,1:4]
# basic plot with defaults
plot(R)
# assign to a variable and then print it results in a plot
x <- plot(R)
y <- addReturns()
x
class(x)
y
# small multiples, line plot of each column
plot(R, multi.panel=TRUE)
plot(R, multi.panel=TRUE, yaxis.same=FALSE)
layout(matrix(1:2))
plot(R, multi.panel=2, type="h")
layout(matrix(1))
plot(R[,1])
# bar chart of returns
plot(R[,1], type="h")
# bar chart of returns
# NOTE: only plots the first column of returns data
plot(R, type="h")
# small multiples, bar chart of each column
plot(R, multi.panel=TRUE, type="h")
# Replicate charts.PerformanceSummary
plot(R, FUN=CumReturns)
addReturns(type="h")
addDrawdowns()
xtsExtra::addLines(c("1999-01-01", "2000-01-01", "2005-01-01"), c("foo", "bar", "pizza"), on=1:3)
xtsExtra::addLines(c("1999-01-01", "2000-01-01", "2005-01-01"))
plot(R, FUN="CumReturns",
panels=c("addReturns(type='h')", "addDrawdowns()"))
R <- edhec[,1:8]
layout(matrix(1:4, 2, 2))
plot(R, multi.panel=2, FUN="CumReturns",
panels=c("addReturns(type='h')", "addDrawdowns()"))
layout(matrix(1))
# Replicate charts.Performance Summary in a 2x2 layout
# y-axis range here can be deceiving
layout(matrix(1:4, 2, 2))
for(i in 1:ncol(R)){
p <- plot(R[,i], FUN="CumReturns",
panels=c("addReturns(type='h')", "addDrawdowns()"),
name=colnames(R)[i])
print(p)
}
layout(matrix(1))
# layout safe: loop over returns
layout(matrix(1:4, 2, 2))
for(i in 1:4) {plot(plot(R[,i], type="h"))}
layout(matrix(1))
# layout safe: easier to specify multi.panel=1
# NOTE: y-axis matches even with multiple pages (i.e. graphics devices)
layout(matrix(1:4, 2, 2))
plot(R, multi.panel=1, type="h")
layout(matrix(1))
# Rolling performance
plot(R, FUN="CumReturns", geometric=FALSE)
plot(R, FUN="CumReturns", geometric=TRUE, wealth.index=TRUE)
addRollingPerformance()
addRollingPerformance(FUN="StdDev.annualized")
addRollingPerformance(FUN="SharpeRatio.annualized")
x <- xtsExtra:::current.xts_chob()
x$Env$call_list
x$Env$call_list[[1]]
R <- edhec[,1:4]
plot(R, FUN="CumReturns")
plot(R, FUN="CumReturns", lty=1:4)
plot(R, FUN="CumReturns", lty=1:4, lwd=c(3, 1, 1, 1))
plot(R, FUN="CumReturns", lwd=c(3, 2, 2, 2), col=c(1, rep("gray", 3)))
plot(R, yaxis.left=TRUE, yaxis.right=FALSE)
plot(R, grid.ticks.lwd=1, grid.ticks.lty="solid", grid.col="black")
# examples with legend functionality
R <- edhec[,1:10]
foo <- function(x){
CumReturns(R = x)
}
plot(R, FUN=foo)
addLegend(ncol = 4)
addLegend(legend.names = c("foo", "bar"), col = c(1,2), ncol=2)
plot(R, FUN=foo, legend.loc="topleft")
plot(R, FUN=foo, legend.loc="left")
plot(R, FUN=foo, legend.loc="bottomleft")
plot(R, FUN=foo, legend.loc="top")
plot(R, FUN=foo, legend.loc="center")
plot(R, FUN=foo, legend.loc="bottom")
plot(R, FUN=foo, legend.loc="topright")
plot(R, FUN=foo, legend.loc="right")
plot(R, FUN=foo, legend.loc="bottomright")
plot(R, FUN=foo)
addSeries(R[,1])
plot(R, FUN="CumReturns")
addSeries(R[,1], type="h")
plot(R, FUN="CumReturns")
tmp1 <- tmp2 <- R[,1]
tmp1[,1] <- 1.5
tmp2[,1] <- 1
tmp <- CumReturns(R[,1])
tmp3 <- tmp[seq(from=1, to=NROW(R), by=10),]
addSeries(tmp1, on=1)
addSeries(tmp2, on=1, type="p", pch=5)
addSeries(tmp3, on=1, type="p", pch=2)
# stock.str='AAPL'
# initDate="2011-01-01"
# endDate="2012-12-31"
# quantmod::getSymbols(stock.str,from=initDate,to=endDate, src="yahoo")
# plot(Ad(AAPL))
# addSeries(Ad(AAPL)["2012-05-28/"]-10, on=1, col = "red")
# xtsExtra::addLines(c("2011-11-04", "2012-11-10", "2012-05-28"), on=1)
# xtsExtra::addLines(c("2011-03-04", "2012-01-10", "2012-07-28"), on=1)
# xtsExtra::addLines(c("2011-11-04", "2012-11-10", "2012-05-28"))
#
# aapl <- Ad(AAPL)
# plot(aapl)
# aapl["2011-07/2012-07"] <- NA
# plot(aapl)
# png("~/Documents/foo.png")
# plot(R, FUN="CumReturns")
# addDrawdowns()
# dev.off()
##### scratch area #####
# Should we have a theme object, as in quantmod, that sets all of the basic
# parameters such as lty, lwd, las, cex, colorset, element.color, etc?
# chart specification (i.e. the xts chob)
# behaviors
# default (similar to chart.TimeSeries)
# small multiples
# panels
# chart specifications
# - specifications for common charts (e.g. charts.PerformanceSummary)
# http://www.lemnica.com/esotericR/Introducing-Closures/
|
7f2780d96f141d83f5a13f1ecbb1768ada2e2a59 | 7513790d15902b75b9ff193e1c341d13362fdf70 | /SuperFarmerDA/R/gra.R | 87eff88933321f6e1d2b8324bf8c265bced4fa03 | [] | no_license | ambroziakd/KursRprojekt2 | 2ae5f046a16b0c710beb3233ba66ebf41a2230a0 | ca96ab8d9292bdd52c90d2deb16e89ed60225400 | refs/heads/master | 2020-06-21T18:50:26.422000 | 2017-03-17T18:02:39 | 2017-03-17T18:02:39 | 74,774,696 | 0 | 1 | null | 2017-03-17T18:02:39 | 2016-11-25T16:49:44 | HTML | UTF-8 | R | false | false | 901 | r | gra.R | #' Tabela gry SuperFarmer
#'
#' The dataset on which the SuperFarmer game strategies are executed.
#' Information about each animal is stored in the rows of the table.
#'
#' Assumed order of the animals in the `game` table:
#' \itemize{
#' \item rabbit
#' \item small_dog
#' \item sheep
#' \item pig
#' \item big_dog
#' \item cow
#' \item horse
#' }
#'
#' The individual columns contain the following information:
#' \itemize{
#' \item value. The animal's value expressed in rabbits.
#' \item max.count. The maximum permitted number of animals of the given species.
#' \item win.condition. The minimum animal count required to finish the game.
#' \item count. The number of animals currently owned.
#' }
#'
#' @docType data
#' @keywords datasets
#' @name game
#' @examples
#' library("SuperFarmerDA")
#' game
#' @format a matrix with 7 rows and 4 columns
#'
NULL
|
6c4e47e0a5b70a17add9cbb1d0616337ea571ccf | f80773d94002ece92c87f8205b24be53ce749ba7 | /man/mono.range.Rd | e491d8327f588a049a3bee73eb985ca63dbc2982 | [] | no_license | cran/MonoInc | 4f87e810662f45b837de85052a01462329b2cfc2 | 7bd94ae52f11583924ed85eb47e5ba369f0a2779 | refs/heads/master | 2020-12-24T19:37:01.496073 | 2016-05-20T22:36:53 | 2016-05-20T22:36:53 | 59,325,113 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,155 | rd | mono.range.Rd | \name{mono.range}
\alias{mono.range}
\title{
Proportion in Range
}
\description{
This function reports the proportion of entries that fall within the prespecified range.
}
\usage{
mono.range(data, data.r, tol, xr.col, x.col, y.col)
}
\arguments{
\item{data}{
a data.frame or matrix of measurement data
}
\item{data.r}{
range for y values; must have three columns: 1 - must match values in x.col, 2 - lower range values, 3 - upper range values
}
\item{tol}{
tolerance; how much outside of the range (data.r) is acceptable; same units as data in y.col
}
\item{xr.col}{
column where x values, or time variable is stored in data.r
}
\item{x.col}{
column where x values, or time variable is stored in data
}
\item{y.col}{
column where y values, or measurements are stored in data
}
}
\value{
Returns the proportion of y values that fall inside the prespecified range
}
\author{Michele Josey \href{mailto:mjosey@nccu.edu}{mjosey@nccu.edu}
Melyssa Minto \href{mailto:mminto@nccu.edu}{mminto@nccu.edu}
}
\examples{
data(simulated_data)
data(data.r)
mono.range(simulated_data, data.r, tol=4, xr.col=1, x.col=2, y.col=3)
}
\keyword{range}
|
bc05bb6c017e386f69ad89fe8fd53fcf71287a2c | bb220b454f852bc42d12c1a0c42cdf530a524060 | /bin/assoc_supp.R | 6c7d4db1234e6f5c98e893ded769fdede567fd76 | [] | no_license | RyosukeChaya/ExomeChip | 6888bdb215e8d22f2a0984cb9b6bd456e2d6d4c6 | 01b458d30238b741bc8f8e86fcc27988f6ae5c77 | refs/heads/master | 2021-06-04T12:17:37.133205 | 2016-09-03T07:36:06 | 2016-09-03T07:36:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,422 | r | assoc_supp.R | ####################################################
# By Guillermo Torres PhD.c                        #
# Institute of Clinical Molecular Biology (IKMB)   #
# Christian-Albrechts-Universitat zu Kiel (CAU)    #
####################################################
# Last update: 1 March 2015
# Created: 23 February 2015
#
# Supplementary functions.
# This is written as part of ExomeChip analysis,
# but could be split to serve different purposes.
####################################################
# Hard-coded absolute path to this script's directory; it is not referenced
# anywhere else in this file -- NOTE(review): derive it or drop it.
script.dir <- "/home/torres/Documents/Projects/ExomeChip/bin"
## Defining Covariants gender and stratification PCA eigenvec.##
cov_file <- function(famf, pcaf) {
  ## Build a covariate table for association testing: sex (COV1) taken from a
  ## PLINK-style .fam file, plus the first four PCA eigenvectors (COV2-COV5).
  ##
  ## famf - path to a .fam file: FamilyID IndividualID PaternalID MaternalID
  ##        Sex(1=M, 2=F, other) Phenotype
  ## pcaf - path to an eigenvector file whose first column matches the
  ##        individual IDs in `famf` and whose next four columns are PCs
  ##
  ## Returns a data frame with columns FID, IID, COV1..COV5, one row per
  ## individual present in both inputs.
  fam_raw <- read.table(famf, header = FALSE, as.is = TRUE)
  pca_raw <- read.table(pcaf, header = FALSE, as.is = TRUE)

  sex_cov <- fam_raw[, c(1, 2, 5)]
  names(sex_cov) <- c("FID", "IID", "COV1")

  pca_cov <- pca_raw[, 1:5]
  names(pca_cov) <- c("IID", "COV2", "COV3", "COV4", "COV5")

  ## Inner join on individual ID, then put FID back in front.
  merged <- merge(sex_cov, pca_cov, by = "IID")
  merged[, c("FID", "IID", "COV1", "COV2", "COV3", "COV4", "COV5")]
}
##
## Annotate a SNP-id table with chip coordinates and gene assignments.
##
snpinfo <- function(bim, snpid, snpgene) {
  ## bim     - PLINK .bim-style file (no header): chr, RsID, unk, Pos, Maj, Min
  ## snpid   - header-ed table with an "RsID" column ("." read as NA)
  ## snpgene - header-ed table with a "refsnp_id" column (e.g. biomaRt output)
  ##
  ## Returns the snpid rows joined with the chip coordinates and then with the
  ## gene annotation, keyed on the rs identifier.
  chip <- read.table(bim, header = FALSE, as.is = TRUE)
  names(chip) <- c("chr", "RsID", "unk", "Pos", "Maj", "Min")

  ids <- read.table(snpid, header = TRUE, as.is = TRUE, na.strings = ".")
  with_pos <- merge(ids, chip, by = "RsID")

  genes <- read.table(snpgene, header = TRUE, as.is = TRUE)
  merge(with_pos, genes, by.x = "RsID", by.y = "refsnp_id")
}
|
814848669796281c78fd4deee410cd2671416eb8 | 87472ccc1d880eca3808acdfb3dd34ab6631f53d | /R/writeVCF.R | afed2bd353a781c633b5aabd689821c7bcebe5b8 | [] | no_license | cran/SRMA | 30a99c997f129a0d502d8b78af6db953ebc3d4f2 | 76d894f98b2eccae81e5bb368fac3765d046084a | refs/heads/master | 2016-09-06T22:43:04.099820 | 2012-04-09T00:00:00 | 2012-04-09T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,080 | r | writeVCF.R | writeVCF<-function(srma.out, reference=NULL, target.file=''){
writeVCF <- function(srma.out, reference = NULL, target.file = '') {
  ## Export the SNP calls held in an SRMAResults object as a VCF v4.0 file.
  ##
  ## srma.out    - object of class SRMAResults; slots used here are
  ##               @srmaSnps (data frame of calls, one row per site) and
  ##               @sampleNames (column names of the genotype columns).
  ## reference   - optional reference-genome label for a '##reference' header line.
  ## target.file - path handed to cat()/write.table(); '' writes to stdout.
  if (!inherits(srma.out, 'SRMAResults')) stop('An object of SRMAResults class is required.')

  ## -- header -------------------------------------------------------------
  ## BUG FIX: these cat() calls previously passed target.file as part of
  ## '...' instead of as the file= argument, so the header went to stdout
  ## with the file name pasted onto it.  'file =' is now explicit.
  cat('##fileformat=VCFv4.0\n', file = target.file, append = FALSE, sep = '')
  cat(paste('##fileDate=', as.character(as.Date(Sys.time()), format = '%Y%m%d'), '\n', sep = ''),
      file = target.file, append = TRUE, sep = '')
  cat('##source=SRMA\n', file = target.file, append = TRUE, sep = '')
  if (!is.null(reference))
    ## BUG FIX: the reference line was also missing its trailing newline.
    cat(paste('##reference=', reference, '\n', sep = ''), file = target.file, append = TRUE, sep = '')
  cat('##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">\n',
      file = target.file, append = TRUE, sep = '')

  ## -- body ---------------------------------------------------------------
  snps <- srma.out@srmaSnps
  stopifnot(all(c('Chromosome', 'Position') %in% colnames(snps)))
  ## BUG FIX: was `break`, which is an error outside of a loop.  Returning
  ## leaves a valid header-only VCF behind.
  if (nrow(snps) == 0) return(invisible(NULL))
  snps <- snps[!is.na(snps$Chromosome), ]
  if (nrow(snps) == 0) return(invisible(NULL))  # every row may have NA Chromosome

  b1 <- snps$Chromosome
  b2 <- snps$Position
  b3 <- paste(snps$Fragment, snps$FragPos, sep = '_')   # ID = fragment_position
  Ref <- toupper(as.character(snps$Ref))
  sn <- srma.out@sampleNames
  gt.temp <- as.matrix(snps[, sn])                      # 2-character genotype strings

  ## Per-site genotypes: allele index 0 = reference, 1 = alternate, '.' = missing.
  ## Preallocated (the old code grew gt/alt with rbind()/c() inside the loop).
  gt <- matrix('.', nrow = nrow(gt.temp), ncol = ncol(gt.temp),
               dimnames = list(NULL, colnames(gt.temp)))
  alt <- character(nrow(gt.temp))
  for (i in seq_len(nrow(gt.temp))) {
    pos.i <- gt.temp[i, ]
    ref <- Ref[i]
    allele1 <- ifelse(substr(pos.i, 1, 1) == ref, 0, 1)
    allele1[is.na(allele1)] <- '.'
    allele2 <- ifelse(substr(pos.i, 2, 2) == ref, 0, 1)
    allele2[is.na(allele2)] <- '.'
    gt[i, ] <- paste(allele1, allele2, sep = '/')
    allele.all <- na.exclude(unique(c(substr(pos.i, 1, 1), substr(pos.i, 2, 2))))
    alt.i <- allele.all[allele.all != ref]
    ## BUG FIX: alt.i was concatenated as-is, so a site with zero or several
    ## alternate alleles made the ALT vector drift out of line with the rows.
    ## Per the VCF spec, ALT is '.' when absent and comma-separated when multiple.
    alt[i] <- if (length(alt.i) == 0) '.' else paste(alt.i, collapse = ',')
  }

  b5 <- alt
  b6 <- b7 <- b8 <- rep('.', nrow(snps))   # QUAL / FILTER / INFO are not populated
  b9 <- rep('GT', nrow(snps))
  mand <- cbind(b1, b2, b3, Ref, b5, b6, b7, b8, b9)
  colnames(mand) <- c('#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT')
  vcfbody <- cbind(mand, gt)
  cat(colnames(vcfbody), file = target.file, append = TRUE, sep = '\t')
  cat('\n', file = target.file, append = TRUE, sep = '')
  write.table(vcfbody, file = target.file, append = TRUE, quote = FALSE, sep = '\t',
              na = '.', row.names = FALSE, col.names = FALSE)
  invisible(NULL)
}
|
f098d55c6b09aa08986e37aa18b10343f6343bf3 | 7a5cfdd8f48b6359e4a358e80e30fdec3ea78d23 | /190924 Chapter 9 Lab - Application to Gene Expression Data.r | 2735e64b4ed9bb5ea8baece3b1a5f5a8ac6c449c | [] | no_license | mare-astrorum/introduction-to-statistical-learning-practicals | 9cfa042a45eb3cf34ebb572df0285f6c9cd2d607 | ce3752f9cc8eb678d435c47dee37c4e39281a801 | refs/heads/master | 2020-09-06T19:07:37.185188 | 2019-11-08T17:42:05 | 2019-11-08T17:42:05 | 220,518,210 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 431 | r | 190924 Chapter 9 Lab - Application to Gene Expression Data.r | library(ISLR)
## ISLR Chapter 9 lab: a linear-kernel SVM on the Khan gene-expression data
## (four tumour classes; p >> n, so a linear kernel is the usual choice).
## BUG FIX: svm() and predict.svm() live in e1071, which this script never
## attached -- it stopped with "could not find function \"svm\"".
library(e1071)

## Inspect the structure of the Khan data (train/test expression matrices
## plus class-label vectors).
names(Khan)
?Khan
dim(Khan$xtrain)
dim(Khan$xtest)
length(Khan$ytrain)
length(Khan$ytest)
table(Khan$ytrain)
table(Khan$ytest)

## Fit a support-vector classifier on the training set.
dat <- data.frame(x = Khan$xtrain, y = as.factor(Khan$ytrain))
out <- svm(y ~ ., data = dat, kernel = "linear", cost = 10)
summary(out)
table(out$fitted, dat$y)          # training confusion matrix

## Evaluate on the held-out test observations.
dat.te <- data.frame(x = Khan$xtest, y = as.factor(Khan$ytest))
pred.te <- predict(out, newdata = dat.te)
table(pred.te, dat.te$y)          # test confusion matrix
|
2667703390da0bae87d92f20982c4deedd255884 | 1fbed3a7d71030c9b02b497b8214f5e90a551444 | /tests/testthat/test_GL.R | 77dd0e71dc138d7e922e083e31f5475abd431a93 | [
"MIT"
] | permissive | honglioslo/fitdistrib | f48530b031b6ecd34c1ea13b1559e1e5b8b8c33d | 77013d26857fa2990a3090bfd1315b10ff3c296d | refs/heads/master | 2021-01-12T13:18:19.314334 | 2016-10-03T10:47:59 | 2016-10-03T10:47:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,677 | r | test_GL.R | # Testing the GL functions
## Simulate generalised-logistic data with known parameters and check that
## each fitting routine recovers them to within `tolerance`.
param <- c(100, 10, 0.1)
tolerance <- 0.5 # 50% tolerance on parameter estimation accuracy
expected_min <- param * (1 - tolerance)
expected_max <- param * (1 + tolerance)

## Maximum-likelihood estimator.
test <- gl_mle(nsRFA::rand.genlogis(1000, param[1],param[2],param[3]))

test_that("gl_mle returns reasonable estimates", {
  expect_length(test, 2)
  expect_length(test$estimate, 3)
  expect_length(test$se, 3)

  expect_true(test$estimate[1] < expected_max[1] && test$estimate[1] > expected_min[1])
  expect_true(test$estimate[2] < expected_max[2] && test$estimate[2] > expected_min[2])
  expect_true(test$estimate[3] < expected_max[3] && test$estimate[3] > expected_min[3])
})

## L-moments estimator.
test <- gl_Lmom(nsRFA::rand.genlogis(1000, param[1],param[2],param[3]))

test_that("gl_Lmom returns reasonable estimates", {
  expect_length(test, 2)
  expect_length(test$estimate, 3)
  expect_length(test$se, 3)

  expect_true(test$estimate[1] < expected_max[1] && test$estimate[1] > expected_min[1])
  expect_true(test$estimate[2] < expected_max[2] && test$estimate[2] > expected_min[2])
  expect_true(test$estimate[3] < expected_max[3] && test$estimate[3] > expected_min[3])
})

## Method-of-moments estimator -- currently disabled.
# test <- gl_mom(nsRFA::rand.genlogis(1000, param[1],param[2],param[3]))
#
# test_that("gl_mom returns reasonable estimates", {
#   expect_length(test, 2)
#   expect_length(test$estimate, 3)
#   expect_length(test$se, 3)
#
#   expect_true(test$estimate[1] < expected_max[1] && test$estimate[1] > expected_min[1])
#   expect_true(test$estimate[2] < expected_max[2] && test$estimate[2] > expected_min[2])
#   expect_true(test$estimate[3] < expected_max[3] && test$estimate[3] > expected_min[3])
# })
|
acd0b8ca8acaaa31ee875ce0385421d47de4b134 | e1b485461c40d593bb22d5d2c0e1b88308f494ec | /R/magrittr.R | 6bec086eb257518075f1287fb0646dfa34a2886d | [
"MIT"
] | permissive | tidyverse/magrittr | 59eee2624f7d59a23d7473504a87ebf250d694b6 | 21093d06e43649a72b865811188e77fc8ad87780 | refs/heads/main | 2023-08-25T01:20:58.243298 | 2023-03-08T13:30:31 | 2023-03-08T13:30:31 | 15,564,525 | 625 | 138 | NOASSERTION | 2022-08-12T07:58:20 | 2014-01-01T13:30:01 | R | UTF-8 | R | false | false | 1,785 | r | magrittr.R | #' magrittr - Ceci n'est pas un pipe
#'
#' The magrittr package offers a set of operators which promote semantics
#' that will improve your code by
#' \itemize{
#'   \item structuring sequences of data operations left-to-right
#'     (as opposed to from the inside and out),
#'   \item avoiding nested function calls,
#'   \item minimizing the need for local variables and function definitions, and
#'   \item making it easy to add steps anywhere in the sequence of operations.
#' }
#' The operators pipe their left-hand side values forward into expressions that
#' appear on the right-hand side, i.e. one can replace `f(x)` with
#' \code{x \%>\% f}, where \code{\%>\%} is the (main) pipe-operator.
#'
#' Consider the example below. Four operations are performed to
#' arrive at the desired data set, and they are written in a natural order:
#' the same as the order of execution. Also, no temporary variables are needed.
#' If yet another operation is required, it is straight-forward to add to the
#' sequence of operations whereever it may be needed.
#'
#' For a more detailed introduction see the vignette
#' (`vignette("magrittr")`) or the documentation pages for the
#' available operators:\cr
#' \tabular{ll}{
#'   \code{\link{\%>\%}} \tab pipe.\cr
#'   \code{\link{\%T>\%}} \tab tee pipe.\cr
#'   \code{\link{\%<>\%}} \tab assignment pipe.\cr
#'   \code{\link{\%$\%}} \tab exposition pipe.\cr
#' }
#'
#' @useDynLib magrittr, .registration = TRUE
#' @examples
#' \dontrun{
#'
#' the_data <-
#'   read.csv('/path/to/data/file.csv') %>%
#'   subset(variable_a > x) %>%
#'   transform(variable_c = variable_a/variable_b) %>%
#'   head(100)
#' }
#' @keywords internal
"_PACKAGE"

# Namespace-load hook: initialise the compiled side of magrittr by handing it
# the package namespace.  `magrittr_init` is the native routine registered via
# the @useDynLib directive above.
.onLoad <- function(lib, pkg) {
  .Call(magrittr_init, asNamespace("magrittr"))
}
|
5ff62a6603f52706727b6dabdc7e36ef71caf7ec | f2643256c6611d7de0db96d162f594388c2c2c50 | /analyses/TamaraAnalyses.R | c3b3b9af7b9d67672e612b878ab0f96ae82e14d1 | [] | no_license | raubreywhite/trial_dofiles | e06a5b3b39e9195eda79dd33856d67c918ec4053 | eface3b83b107cf7e621b3c654e65b5cbd45b711 | refs/heads/master | 2022-06-14T03:26:17.492945 | 2022-06-02T07:27:04 | 2022-06-02T07:27:04 | 114,857,557 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,675 | r | TamaraAnalyses.R | #RColorBrewer::display.brewer.all()
###### SETUP STARTS
###################
###################
###################

## Weekly numbers report for eRegQual "trial 1": counts of booked women
## overall, by trial arm, and by age category, diverted into a text file
## under the shared Dropbox results folder.

# define our dates
CLINIC_INTERVENTION_DATE <- "2018-09-27"
CLINIC_CONTROL_DATE <- "2018-09-27"

# define the folders (the network drive letter differs between machines,
# so fall back from X: to Z:)
tryCatch({
  setwd("X:/data processing/trial_dofiles")
}, error=function(err){
  setwd("Z:/data processing/trial_dofiles")
})

FOLDER_DATA_RAW <- file.path(getwd(),"../data_raw")
FOLDER_DATA_CLEAN <- file.path(getwd(),"../data_clean")
FOLDER_DATA_RESULTS <- file.path(getwd(),"../results/")
FOLDER_DATA_MBO <- file.path(getwd(),"../results/mbo_r/")
# results land in a subfolder named after today's date
FOLDER_DROPBOX_RESULTS <- file.path(
  "~",
  "..",
  "eRegistry CRCT Dropbox",
  "Data management eRegQual",
  "Results_From_PNIPH",
  "Results",
  lubridate::today())

# say which packages we want, and install if necessary
# (note, this should really only happen once)
# NOTE(review): "bit64" and "lubridate" each appear twice in this list.
desiredPackages <- c("stringr",
                     "lubridate",
                     "data.table",
                     "bit64",
                     "readxl",
                     "openxlsx",
                     "bit64",
                     "haven",
                     "lubridate",
                     "ggplot2",
                     "irr",
                     "rel",
                     "gridExtra",
                     "openssl",
                     "fmsb"
                     )
for(i in desiredPackages) if(!i %in% rownames(installed.packages())) install.packages(i)

# from net but the above already in R
library(data.table)
library(ggplot2)

# this loads in all the code in the "r_code" folder
# this is the same as going "library(r_code)" (except we cant do that
# because r_code isn't a package)...WE CAN deal it as library but it doesnot
fileSources = file.path("r_code", list.files("r_code", pattern = "*.[rR]$"))
# make sure that all of the file sources go DIRECTLY
# into the **global** environment
sapply(fileSources, source, .GlobalEnv)

# date stuff
MAX_YEAR <- stringr::str_sub(CLINIC_CONTROL_DATE,1,4)
MAX_MONTH <- substr(CLINIC_CONTROL_DATE,6,7)
DATE <- lubridate::today()
DATA_DATE <- min(CLINIC_INTERVENTION_DATE,CLINIC_CONTROL_DATE)
weekyear <- sprintf("%s-%s",lubridate::isoyear(lubridate::today()),lubridate::isoweek(lubridate::today()))
yearmonth <- sprintf("%s-%s",
                     lubridate::year(lubridate::today()),
                     lubridate::month(lubridate::today()))
### SETUP ENDS

# Load in datafile
# LoadDataFileFromNetwork() comes from one of the sourced r_code/ files;
# judging by the `.N`/`keyby` syntax below, `d` is a data.table -- confirm.
d <- LoadDataFileFromNetwork()

# NOTE(review): this bare sink() closes a diversion that was never opened
# here (it warns on a fresh session); confirm it is intentional.
sink()
# Divert everything printed below into the trial-1 numbers file.
sink(file.path(FOLDER_DROPBOX_RESULTS,
               "mahima",
               "trial_1",
               "TRIAL_1_Numbers.txt"))

cat("\nNumber of Women in TRIAL\n")
nrow(d[bookdate>="2017-01-15"&
         bookdate<="2017-09-15"&
         ident_TRIAL_1==T])

cat("\nNumber of Women in ARM A\n")
nrow(d[bookdate>="2017-01-15"&
         bookdate<="2017-09-15"&
         ident_TRIAL_1==T &
         ident_dhis2_control==T])

cat("\nNumber of Women in ARM B\n")
nrow(d[bookdate>="2017-01-15"&
         bookdate<="2017-09-15"&
         ident_TRIAL_1==T &
         ident_dhis2_control==F])

#Mahima's stuff
#Numbers by age categories
cat("\nAge Cat_ALL women in Trial_1\n")
Mabstract <- d[ident_TRIAL_1==TRUE,
               .(numWomen=.N),
               keyby= agecat]
print(Mabstract)

cat("\nAge Cat_ALL women in Trial_1_ARM A\n")
Mabstract <- d[ident_TRIAL_1==TRUE&
                 ident_dhis2_control==T,
               .(numWomen=.N),
               keyby= agecat]
print(Mabstract)

cat("\nAge Cat_ALL women in Trial_1_ARM B\n")
Mabstract <- d[ident_TRIAL_1==TRUE&
                 ident_dhis2_control==F,
               .(numWomen=.N),
               keyby= agecat]
print(Mabstract)
# NOTE(review): the sink opened above is never closed; later console output
# will keep going to TRIAL_1_Numbers.txt until sink() is called again.
|
50ff19bc37d29da62cfe9aa1a6f42107da848173 | 1c8af3d5cfb731fbdb6b14cbb51d337b291520ef | /scr/0.0 Load data and clean.R | a3516c7807d2701b24882f4a6b6bd9f2caa5b9ec | [
"LicenseRef-scancode-public-domain"
] | permissive | rjcommand/FISH_6002_major_assignment | d117dc49806c0d96f268320f4f865871b59af98e | d6cebbe8a65dd0ff4a239d8a17da2b59a9e8afdf | refs/heads/master | 2020-07-24T08:06:16.419817 | 2019-12-13T00:07:58 | 2019-12-13T00:07:58 | 207,858,318 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,016 | r | 0.0 Load data and clean.R | #### Tuna catch data
#### Rylan J Command
#### Created Sept 24 2019

## 0.0 Load in data -- ICCAT nominal catch data by species/flag/gear/location.
catch <- read.csv("data/cdis5016-all9sp.csv", header = TRUE)
# NOTE(review): View() is interactive-only; it is harmless in RStudio but
# not useful when the script is run non-interactively -- confirm.
View(catch)

## Variables list
# SpeciesCode: Three letter code corresponding to each species
# YearC: Calendar year
# Decade: Years collated into decades (1950, 1960... etc.)
# FlagName: ICCAT Flag name
# FleetCode: ICCAT Fleet code
# Stock: Species related stock or management unit
# GearGrp: Gear types used to capture tunas
# SchoolType: Type of fishing operation
# Trimester: Time strata (trimester 1, 2, 3, 4)
# QuadID: ICCAT Quadrants
# Lat5: Latitude of a 5x5 square
# Lon5: Longitude of a 5x5 square
# yLat5ctoid: Latitude (decimal degrees) centroid (Cartesian) of a 5x5 square
# xLon5ctoid: Longitude (decimal degrees) centroid (Cartesian) of a 5x5 square
# Catch_t: Nominal catches (tonnes)

## Load in packages
library(ggplot2)
library(tidyr)
library(dplyr)
library(tidyselect)
#### 0.1 Check to see if the data are clean ####

#### 0.1.1 Did the data load correctly? ####
head(catch)
tail(catch)
str(catch)
## Looks good

#### 0.1.2 Are the data types correct? ####
sapply(catch, class)
## Looks good

#### 0.1.3 Check for impossible values ####
## Go through all of the numerical values to check if they are impossible
sapply(catch[, sapply(catch, is.numeric)], range) # Ranges look good!

# I am going to rename "Catch_t" as "Catch", because we know it's in tons, and having an underscore in the name might be problematic later
catch <- catch %>%
  rename(Catch = Catch_t)

## Plot each numerical value to visually assess
## These take a while to load so I commented them out - less painful if you run the whole thing at once
# plot(catch$Year)
# plot(catch$Decade)
# plot(catch$Trimester)
# plot(catch$QuadID)
# plot(catch$Lat5)
# plot(catch$Lon5)
# plot(catch$yLat5ctoid)
# plot(catch$xLon5ctoid)
# plot(catch$Catch)
## All look good!

#### 0.1.4 Are factor levels correct? ####
## Go through all factors and make sure levels are correct
sapply(catch[,sapply(catch, is.factor)], levels)
# 1. In the FlagName factor, there are some special characters - let's recode these so they print properly.
# 2. In the GearGrp factor, let's rename "oth" to "OT" so it looks a bit nicer.
# 3. In the SchoolType factor, there is a defined "n/a" value. This caught my attention, and may cause problems if variable names have "/" in it, so let's re-define it to "None"
# Let's fix it up
# NOTE(review): the trailing comma after the "S. Tomé..." entry leaves an
# empty argument inside recode() -- confirm this evaluates cleanly.
catch <- catch %>%
  mutate(FlagName = recode(FlagName, # First address the special characters in FlagName
                           "C\xf4te d'Ivoire" = "Côte d'Ivoire",
                           "Cura\xe7ao" = "Curaçao",
                           "EU.Espa\xf1a" = "EU.España",
                           "Guin\xe9e Rep." = "Guinée Rep.",
                           "S. Tom\xe9 e Pr\xedncipe" = "São Tomé and Príncipe",
                           ),
         GearGrp = recode(GearGrp, # Then rename the "oth" level in GearGrp
                          "oth" = "OT"),
         SchoolType = recode(SchoolType,
                             "n/a" = "None")
  )
# Check it out:
sapply(catch[, sapply(catch, is.factor)], levels)
# Much better!
# Everything else looks good, so let's move on
#### 0.2 Subset North Atlantic swordfish ####
## I want to look specifically at North Atlantic swordfish, let's clean the dataset
catch_swo <- catch %>%
  filter(SpeciesCode == "SWO", # Select only those rows that contain swordfish observations
         Stock == "ATN") %>% # Select only those rows that concern the North Atlantic stock
  select(-SpeciesCode,
         -Stock) # %>% # Remove SpeciesCode and Stock as a factor, since we're only looking at North Atlantic swordfish now
# group_by(YearC, FlagName, FleetCode, GearGrp) # Re-order the rows
## Check it out
View(catch_swo)

#### 0.3 Create a wide-format dataset ####
## I want to spread by FlagName, FleetCode, GearGrp, and SchoolType to look at how much swordfish was caught by each fleet/country using each gear type on what school type
## NOTE(review): unite() joins the four keys with "_" and the long-format step
## later splits on "_" again -- any key value containing an underscore would
## be split incorrectly; confirm none do.
w <- catch_swo %>%
  unite(temp, FlagName, FleetCode, GearGrp, SchoolType) %>% # Unite the FlagName, FleetCode, GearGrp, and SchoolType columns into one column "temp", separated by "_"
  mutate(grouped_id = row_number()) %>% # Add a column to give each row a unique identifier; need to do this because multiple rows have the same "keys" (e.g. there are several Canadian Longline catches, just at different locations)
  spread(temp, Catch) %>% # Spread the united column "temp" by the values of "Catch" to get a column of catch in tons for each Country_Fleet_Gear_School combination
  select(-grouped_id) # Remove the unique identifier column
## Check it out
View(w)
## Save the .csv file to the outputs/ directory
write.csv(w, "./outputs/catch_swo_wideformat.csv")

#### 0.4 Create a long-format dataset ####
## Basically, get it back to how it was
l <- w %>%
  mutate(grouped_id = row_number()) %>% # Add a unique identifier for each row
  gather(temp, Catch, Barbados_BRB_LL_None:Venezuela_VEN.ARTISANAL_GN_None) %>% # Gather all of the Country_Fleet_Gear_School into one column, "temp", and their catches into another "Catch" column
  group_by(temp) %>%
  separate(col = temp, into = c("FlagName", "FleetCode", "GearGrp", "SchoolType"), sep = "_", extra = "drop", fill = "right") %>% # Separate the single "temp" column into 4 columns, one for each factor, indicating they were separated by underscores
  filter(!is.na(Catch)) %>% # Remove all rows that produce NA for "Catch" (e.g. Barbados with CAN fleet code doesn't exist), as there weren't any in the original dataset
  select(YearC, Decade, FlagName, FleetCode, GearGrp, SchoolType, Trimester, QuadID, Lat5, Lon5, yLat5ctoid, xLon5ctoid, Catch) %>% # Re-order the columns
  mutate_if(is.character, as.factor) # Convert the factors back into factors
## Check it out
View(l)
## Save the .csv file to the outputs/ directory
write.csv(l, "./outputs/catch_swo_longformat.csv")
|
d241fc2a96343cb04408524530db8a28c7ce3fc6 | 571960b1277327568b646b594d872e883c5d4cca | /man/pow.curve.Rd | 8f7148afb32eec924b7f4c5811eef86cda8c0ff7 | [] | no_license | CClingain/powermlm | 72071ee75ffdd28419842b0bd67e95ac672d2b08 | 2b0ccb2e236ce0852fa7df7daa2496f2f1902f03 | refs/heads/master | 2020-04-21T14:40:35.272329 | 2019-02-07T21:11:07 | 2019-02-07T21:11:07 | 169,643,025 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,008 | rd | pow.curve.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pow.curve.R
\name{pow.curve}
\alias{pow.curve}
\title{Power Curve with Varying Alphas}
\usage{
pow.curve(model, n, id, group1, group2, data)
}
\arguments{
\item{model}{vector containing the lmer formula}
\item{n}{number of bootstrap samples from which to run the model}
\item{id}{character of level 1 grouping variable/identifier}
\item{group1}{character of level 2 grouping variable (ex: classrooms)}
\item{group2}{character of level 3 grouping variable (ex: schools), default is FALSE in case of only 2 levels of nesting.}
\item{data}{dataset from which to bootstrap}
}
\value{
Returns power curves for all fixed parameters specified in the model.
}
\description{
Obtain a Power Curve based on bootstrapping with various alpha cut-offs. Displays the relation between power and Type I error rate.
}
\examples{
pow.curve(model = Y ~ X + (1|schoolid), n = 1000, id = "ID", group1 = "schoolid", group2 = FALSE, data = dat)
}
|
c6efac2e94d6f6b28696d7c16b9e365415494b70 | 2d00505c7940f1bd1dcff122aa1e8bbd5d2edea2 | /R/signature.R | 6c37ee2fd0c564d072da512da4b78b28f3dfb7be | [] | no_license | kashenfelter/RGCCTranslationUnit | cb647e6c57e78656bb196e68834d60fd664e66cd | 1bd45f5589516334afa57a75e936d2a36ff943b6 | refs/heads/master | 2020-03-23T00:32:22.815755 | 2013-04-21T21:38:45 | 2013-04-21T21:38:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,310 | r | signature.R | #
# The intent is to avoid an enormous amount of resolveType() calls to get the entire information
# about the types involved method dispatch. Instead, we just get the type names for use in the S4
# signatures. We will assume that the classes are going to be defined separately and relatively cheaply.
#

# Generic: compute the cheap, name-only signature of a native method.
# `nodes` defaults to the translation-unit parser for the method's node;
# `typeMap` optionally remaps type names.
setGeneric("getSignature",
           function(method, nodes= getTUParser(method$node), typeMap = list(), ...)
             standardGeneric("getSignature"))

# Register the S3 classes used below with the S4 system so they may appear
# in method signatures.
setOldClass("NativeClassMethod")
setOldClass(c("NativeOperatorClassMethod", "NativeClassMethod"))
setOldClass(c("NativeClassConstructor", "NativeClassMethod"))
setOldClass("ShortNativeSignature")
setOldClass(c("NativeSignature", "ShortNativeSignature"))
setOldClass(c("NativeMethodSignature", "NativeSignature"))

# Render a NativeSignature as "( <type> <name>, ... )".
setAs("NativeSignature", "character",
      function(from) {
        paste("(", paste(from$types, names(from$types), collapse = ", "), ")")
      })

# Single native class method: resolve each parameter's type name.  With
# paramNamesOnly=TRUE only the named type vector is returned (class
# ShortNativeSignature); otherwise the types are bundled with a per-parameter
# flag saying whether the parameter carries a default value.
# NOTE(review): the inner anonymous function accepts `typeMap` but never uses
# it -- quickResolveType() is called without it; confirm intent.
setMethod("getSignature", "NativeClassMethod",
function(method, nodes = getTUParser(method$node), typeMap = list(), paramNamesOnly = FALSE, ...)
{
  types = sapply(method$parameters,
                 function(x, typeMap) {
                   type = x[["type"]]
                   quickResolveType(type, nodes)
                 }, typeMap)
  names(types) = names(method$parameters)

  if(paramNamesOnly)
    return(structure(types, class = "ShortNativeSignature"))

  # Need to know if a parameter has a default value when determining the R method signatures.
  defaults = !sapply(method$parameters, function(x) is.na(x$defaultValue))
  structure(list(types = types, hasDefaults = defaults), class = c("NativeMethodSignature", "NativeSignature"))
})

# Collection of unresolved methods: one signature per element, NULLs skipped.
setMethod("getSignature", "UnresolvedClassMethods",
          function(method, nodes, typeMap = list(), ...) {
            lapply(method, function(x) if(!is.null(x)) getSignature(x, nodes, typeMap, ...))
          })

# NULL method object -> empty signature list.
setMethod("getSignature", "NULL",
          function(method, nodes, typeMap = list(), ...) {
            return(list())
          })
############################

# Generic: fast type-name resolution.  Unlike resolveType(), this returns
# just a name (for building S4 signatures) rather than a full type
# description.  target = "R" asks for the R-facing type name.
setGeneric("quickResolveType",
           function(node, nodes = getTUParser(node), classDefs = DefinitionContainer(nodes), target = "R", ...) {
             standardGeneric("quickResolveType")
             # ans = standardGeneric("quickResolveType")
             # if(target == "R") # && !is.character(ans))#XXX fix this.
             #    getRTypeName(ans)
             # else
             #    ans
           })

# Fallback for node types not handled below: defer to the full resolveType().
setMethod("quickResolveType",
          c("ANY"),
          function(node, nodes = getTUParser(node), classDefs = DefinitionContainer(nodes), target = "R", ...) {
            resolveType(node, nodes, classDefs, ...)
          })

# struct/class node: use the record's name.  For a C++ parse where the record
# was not declared with C language linkage, the name is wrapped in I() so the
# pointer method below can tell it apart from a plain C struct name.
setMethod("quickResolveType",
          c("GCC::Node::record_type"),
          function(node, nodes = getTUParser(node), classDefs = DefinitionContainer(nodes), target = "R", ...) {
            `c++class` = isCPlusPlus(nodes) && !("C" %in% names(node[["name"]][["lang"]]))
            val = getNodeName(node, raw = FALSE)
            if(`c++class`)
              I(val)
            else
              val
          })

# C++ reference (T&): resolve the referenced type instead.
setMethod("quickResolveType",
          c("GCC::Node::reference_type"),
          function(node, nodes = getTUParser(node), classDefs = DefinitionContainer(nodes), target = "R", ...) {
            quickResolveType(node[["refd"]], nodes, classDefs, ...)
          })

# enum: just its name.
setMethod("quickResolveType",
          "GCC::Node::enumeral_type",
          function(node, nodes = getTUParser(node), classDefs = DefinitionContainer(nodes), target = "R", ...) {
            getNodeName(node)
          })

# integer types: map the node name to the R type name when target = "R".
setMethod("quickResolveType",
          "GCC::Node::integer_type",
          function(node, nodes = getTUParser(node), classDefs = DefinitionContainer(nodes), target = "R", ...) {
            #XXX Deal with the different types/names unsigned, char, ... Should be done in getNodeName - yes!
            ans = getNodeName(node)
            if(target == "R")
              getRTypeName(ans)
            else
              ans
          })

# floating-point types: same treatment as the integer types.
setMethod("quickResolveType",
          "GCC::Node::real_type",
          function(node, nodes = getTUParser(node), classDefs = DefinitionContainer(nodes), target = "R", ...) {
            ans = getNodeName(node)
            if(target == "R")
              getRTypeName(ans)
            else
              ans
          })

# pointer types: unwrap nested pointers counting the depth, resolve the
# ultimate pointee, then append one "Ptr" suffix per level.  Special cases:
# a depth-1 pointer whose pointee already resolves to "character" stays
# "character", and AsIs-wrapped names (C++ classes, see record_type above)
# are returned unsuffixed after unclass().
setMethod("quickResolveType",
          c("GCC::Node::pointer_type"),
          function(node, nodes = getTUParser(node), classDefs = DefinitionContainer(nodes), target = "R", ...) {
            type = node[["ptd"]]
            depth = 1
            while(inherits(type, "GCC::Node::pointer_type")) {
              depth <- depth + 1
              type = type[["ptd"]]
            }
            typeName = quickResolveType(type, nodes, classDefs, target, ...)
            if(typeName == "character" && depth == 1)
              return("character")
            if(!inherits(typeName, "AsIs"))
              paste(typeName, paste(rep("Ptr", depth), collapse = ""), sep = "")
            else
              unclass(typeName)
          })
#setAs("NativeRoutineDescription", "character",
# Build a human-readable signature string, e.g. "( integer, numericPtr )",
# from a routine description's parameter list by resolving each parameter's
# GCC type node to its R-level type name.
#
# @param from a routine description with a $parameters list; each element
#   is expected to carry a $type node understood by quickResolveType().
# @param tu   the translation-unit parser/nodes object forwarded to
#   quickResolveType().
# @return a single string of comma-separated type names in parentheses.
makeMethodsSig.un =
function(from, tu)
{
    # vapply (rather than sapply) guarantees exactly one character value per
    # parameter and fails loudly if a resolver returns anything else.
    types = vapply(from$parameters,
                   function(x) quickResolveType(x$type, tu, target = "R"),
                   character(1))
    paste("(", paste(types, collapse = ", "), ")")
}
|
ca455e0c592ca9b427129d25d5636158c3c4b585 | 529cb1a83abc345517388cdd9d3db893ca159a17 | /run_analysis.R | 16f4b5d5e614c5ded1ae43d92b4947a9c3a4258e | [] | no_license | chucareer/DataCleaning | 1e479042a8a8d651643d934794459dc2c5e45b4e | a086d1dc75e71979321969350e09578d0c4e2894 | refs/heads/master | 2021-01-10T19:55:16.349076 | 2014-07-11T19:13:49 | 2014-07-11T19:13:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,963 | r | run_analysis.R | ## Create one R script called run_analysis.R that does the following:
## run_analysis.R
## 1. Merges the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive activity names.
## 5. Creates a second, independent tidy data set with the average of each
##    variable for each activity and each subject.
##
## Expects the UCI HAR files (features.txt, activity_labels.txt, test/, train/)
## under the current working directory; writes tidyData/tidyData.txt.

## Feature and activity metadata.
## Argument spelled out in full ("stringsAsFactors", not the partially
## matched "stringsAsFactor") and FALSE instead of the reassignable alias F.
feature <- read.table("features.txt", stringsAsFactors = FALSE)
activityLabels <- read.table("activity_labels.txt", stringsAsFactors = FALSE)

## Read one split ("test" or "train"): subject ids, measurements, activity
## codes. file.path() replaces the fragile setwd()/setwd("..") hopping.
readSplit <- function(split) {
    cbind(read.table(file.path(split, paste0("subject_", split, ".txt")),
                     col.names = "Subject"),
          read.table(file.path(split, paste0("x_", split, ".txt")),
                     col.names = feature[, 2]),
          read.table(file.path(split, paste0("y_", split, ".txt")),
                     col.names = "Activity"))
}

## Stack testing dataset and train dataset
mergedFile <- rbind(readSplit("test"), readSplit("train"))

## Select subject/activity plus the variables with mean and std
finalData <- mergedFile[, grep("mean|std|subject|activity", tolower(names(mergedFile)))]

## Clean the variable names
nameList <- gsub("\\.{2}|\\.{3}", ".", tolower(names(finalData)))
nameList <- sub("\\.$", "", nameList)
names(finalData) <- nameList

## Average every measurement column per subject/activity. Selecting the
## measurement columns by name (rather than the hard-coded 2:87) keeps this
## robust to changes in the number of selected features.
measureCols <- setdiff(names(finalData), c("subject", "activity"))
tidyData <- aggregate(finalData[, measureCols],
                      by = list(finalData$subject, finalData$activity), FUN = "mean")
tidyData <- merge(tidyData, activityLabels, by.x = "Group.2", by.y = "V1")
## The merged activity label is always the last column.
names(tidyData)[c(1, 2, ncol(tidyData))] <- c("activity", "subject", "activityLabel")

## Write tidy data
if (!dir.exists("tidyData")) dir.create("tidyData")
write.csv(tidyData, file = file.path("tidyData", "tidyData.txt"))
|
18ef8e24362d59cf34752387f684f2e3c87ce7ab | baaf7d6c4636acce3b675be5384753afaf12cebc | /functions/visualization_functions.R | cb0a66c772072f05b1e77bd0565bf50d237a1c53 | [] | no_license | r3fang/am_geneBasis | ce44a77cc1f2efced8f52d82f8da4e11856a1155 | 362e2c54229ba04d28fd0e7025eaa37acfa0895c | refs/heads/main | 2023-08-25T10:33:46.356344 | 2021-10-18T14:20:07 | 2021-10-18T14:20:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,248 | r | visualization_functions.R | celltype_atlas_E8.5_colors = c("Epiblast" = "#635547", "Primitive Streak" = "#DABE99", "Caudal epiblast" = "#9e6762", "PGC" = "#FACB12",
"Anterior Primitive Streak" = "#c19f70", "Notochord" = "#0F4A9C", "Def. endoderm" = "#F397C0", "Gut" = "#EF5A9D",
"Nascent mesoderm" = "#C594BF", "Mixed mesoderm" = "#DFCDE4", "Intermediate mesoderm" = "#139992", "Caudal Mesoderm" = "#3F84AA",
"Paraxial mesoderm" = "#8DB5CE", "Somitic mesoderm" = "#005579", "Pharyngeal mesoderm" = "#C9EBFB", "Cardiomyocytes" = "#B51D8D",
"Allantois" = "#532C8A", "ExE mesoderm" = "#8870ad", "Mesenchyme" = "#cc7818", "Haematoendothelial progenitors" = "#FBBE92",
"Endothelium" = "#ff891c", "Blood progenitors 1" = "#f9decf", "Blood progenitors 2" = "#c9a997", "Erythroid1" = "#C72228",
"Erythroid2" = "#f79083", "Erythroid3" = "#EF4E22", "NMP" = "#8EC792", "Rostral neurectoderm" = "#65A83E",
"Caudal neurectoderm" = "#354E23", "Neural crest" = "#C3C388", "Forebrain/Midbrain/Hindbrain" = "#647a4f", "Spinal cord" = "#CDE088",
"Surface ectoderm" = "#f7f79e", "Visceral endoderm" = "#F6BFCB", "ExE endoderm" = "#7F6874", "ExE ectoderm" = "#989898",
"Parietal endoderm" = "#1A1A1A", "Doublet" = "black", "Stripped" = "black"
)
# Colour palette (hex codes / named R colours) keyed by cell type for the
# spleen dataset; used consistently across all spleen plots.
celltype_spleen_colors = c(
  "Monocyte"            = "#635547",
  "Plasmablast"         = "#DABE99",
  "Follicular B"        = "#532C8A",
  "MZ B"                = "#FACB12",
  "DC2"                 = "#0F4A9C",
  "Plasma cell"         = "#005579",
  "HSC"                 = "#EF5A9D",
  "DC1"                 = "#8870ad",
  "Ery"                 = "#EF4E22",
  "memory CD4+ ab T"    = "#CDE088",
  "naive CD4+ ab T"     = "gray84",
  "CD4+ T"              = "#CDE088",
  "memory CD8+ ab T"    = "plum1",
  "CD8+ T"              = "plum1",
  "FCGR3A+ NK"          = "#1A1A1A",
  "FCGR3A- NK"          = "chartreuse4",
  "NK"                  = "chartreuse4",
  "ILC"                 = "aquamarine",
  "gd T"                = "#ff891c",
  "EC"                  = "#B51D8D",
  "Mac"                 = "#FBBE92",
  "cytotoxic CD8+ ab T" = "#F397C0",
  "Fibroblast"          = "#139992",
  "Unknown"             = "#1A1A1A",
  "Dividing T"          = "azure4"
)
# Colour palette keyed by cell type for the melanoma dataset.
celltype_melanoma_colors = c(
  "other"  = "#635547",
  "Endo."  = "#ff891c",
  "Macro." = "#005579",
  "B"      = "#647a4f",
  "T"      = "aquamarine",
  "CAF"    = "#EF4E22",
  "NK"     = "#C9EBFB"
)
# Lookup table mapping dataset identifier -> its cell-type colour palette.
celltype_colors = list(
  "atlas_E8.5" = celltype_atlas_E8.5_colors,
  "spleen"     = celltype_spleen_colors,
  "melanoma"   = celltype_melanoma_colors
)
# Colour palette keyed by cell type for the kidney dataset.
celltype_kidney_colors = c(
  "PT"       = "#635547",
  "tIC-CNT"  = "#DABE99",
  "AVR"      = "#9e6762",
  "PC"       = "#FACB12",
  "DVR"      = "#c19f70",
  "mDC"      = "#0F4A9C",
  "Cycling"  = "#F397C0",
  "mTAL"     = "#EF5A9D",
  "aIC"      = "#C594BF",
  "CD8 T"    = "#DFCDE4",
  "NKT"      = "#139992",
  "CD4 T"    = "#3F84AA",
  "PT_VCAM1" = "#8DB5CE",
  "TAL_unk"  = "#005579",
  "Podo"     = "#C9EBFB",
  "NK"       = "#B51D8D",
  "gEC"      = "#532C8A",
  "Mac/Mono" = "#8870ad",
  "CNT"      = "#cc7818",
  "DTL"      = "#FBBE92",
  "B cell"   = "#ff891c",
  "cTAL"     = "#f9decf",
  "Fib"      = "#c9a997",
  "bIC"      = "#C72228",
  "DCT"      = "#f79083",
  "ATL"      = "#EF4E22",
  "Myofib"   = "#8EC792",
  "Mmrn1 EC" = "#65A83E",
  "Msg"      = "#354E23"
)
# Boxplot of one gene's expression per cell type.
#
# sce   - SingleCellExperiment with 'cell' and 'celltype' colData fields
# gene  - gene/feature name; must be a rowname of sce
# assay - name of the assay to plot; generalized to accept ANY assay stored
#         in the object (e.g. "cpm"), not just "counts"/"logcounts" --
#         existence in assays(sce) is still verified below
# title - plot title, defaults to the gene name
#
# Returns a ggplot object.
plot_expr_distribution = function(sce , gene , assay = "logcounts" , title = gene){
  if (!gene %in% rownames(sce)){
    stop("Can not find gene in the counts matrix. Ensure that given entry exists.")
  }
  if (!assay %in% names(assays(sce))){
    stop("Chosen assay option does not exist in counts matrix.")
  }
  # One row per cell: identifier, cell type and the chosen expression value.
  counts = data.frame(cell = sce$cell ,
                      celltype = sce$celltype ,
                      counts = as.numeric( assay(sce[gene, ], assay)) )
  p <- ggplot(data=counts , aes(x = celltype , y = counts , fill = celltype)) +
    geom_boxplot() +
    theme_classic() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
    theme(legend.position = "none") +
    ggtitle(title)
  return(p)
}
# Tile heatmap of the per-celltype mean expression for a set of genes.
#
# sce   - SingleCellExperiment annotated with a 'celltype' colData field
# genes - genes/features to display (intersected with rownames(sce))
# assay - which assay to average; must exist in assays(sce)
# title - plot title, defaults to the number of requested genes
#
# Returns a ggplot object (gene on y, celltype on x, mean value as fill).
plot_expression_heatmap = function(sce , genes , assay = "logcounts" , title = length(genes)){
  if (!assay %in% names(assays(sce))){
    stop("Chosen assay option does not exist in counts matrix.")
  }
  sce = sce[rownames(sce) %in% genes , ]
  # For each cell type, compute the mean of every gene across its cells.
  per_type_stats = lapply(unique(sce$celltype) , function(ct){
    type_sce = sce[, sce$celltype == ct]
    type_counts = as.matrix( assay(type_sce, assay))
    type_stat = data.frame(gene = rownames(sce) ,
                           mean.counts = apply(type_counts , 1 , mean))
    type_stat$celltype = ct
    type_stat
  })
  stat = do.call(rbind , per_type_stats)
  p <- ggplot(data=stat , aes(x = celltype , y = gene , fill = mean.counts)) +
    geom_tile() +
    scale_fill_viridis(discrete = FALSE) +
    theme_classic() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
    theme(legend.position = "none") +
    ggtitle(title)
  return(p)
}
# Heatmap of the row-normalised confusion table between annotated and
# mapped cell types.
#
# mapping - data.frame with 'celltype' and 'celltype_mapped' columns
#           (validated by .valid_mapping_df, which is expected to signal
#           its own error message on failure)
# title   - optional plot title; must be NULL or a character value
#
# Returns a ggplot object. Each row (true cell type) sums to 1.
plot_mapping_heatmap = function(mapping , title = NULL){
  # Guard clauses instead of the previous nested else-pyramid.
  if (!.valid_mapping_df(mapping)) {
    stop()
  }
  # Fixed: '&&' (not elementwise '&') -- this is a scalar condition.
  if (!is.null(title) && !is(title, "character")){
    stop("Option 'title' should be either NULL or a string.")
  }
  mapping$celltype = as.character(mapping$celltype)
  mapping$celltype_mapped = as.character(mapping$celltype_mapped)
  # Confusion counts, normalised so each true-celltype row sums to 1.
  tab = table(mapping$celltype , mapping$celltype_mapped)
  tab = sweep(tab, 1, rowSums(tab), "/")
  tab = as.data.frame( tab )
  colnames(tab) = c("celltype", "celltype_mapped", "n")
  # Order both axes by the observed (true) cell types; mapped categories
  # outside that set become NA and are dropped via the celltype filter.
  tab$celltype = factor(tab$celltype , levels = unique(mapping$celltype))
  tab$celltype_mapped = factor(tab$celltype_mapped , levels = unique(mapping$celltype))
  tab = tab[!is.na(tab$celltype) , ]
  p <- ggplot(tab, aes(x = celltype , y = celltype_mapped, fill = n)) +
    geom_tile() + viridis::scale_fill_viridis(discrete = FALSE) +
    theme_classic() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
    ggtitle(title)
  return(p)
}
# Per-celltype mapping sensitivity across a series of mapping results.
#
# mappings - list of mapping data.frames (each validated by
#            .valid_mapping_df); optional names become the x-axis ids
# title    - optional plot title
#
# Returns a ggplot object faceted by cell type, with the fraction mapped
# correctly per dataset and a dashed reference line at 1.
plot_mapping_sensitivity_trend = function(mappings , title = NULL){
  # Guard clause instead of wrapping the whole body in an else branch.
  if (!is(mappings , "list")){
    stop("Input should be a list")
  }
  # Fixed: seq_along (not 1:length) -- 1:length(mappings) would iterate
  # over c(1, 0) when the list is empty.
  stat = lapply(seq_along(mappings), function(i){
    mapping = mappings[[i]]
    if (!.valid_mapping_df(mapping)) {
      stop()
    }
    current_stat = get_sensitivity_mapping(mapping)
    # Label each dataset by its list name when available, by index otherwise.
    if (!is.null(names(mappings))){
      current_stat$id = names(mappings)[i]
    } else {
      current_stat$id = i
    }
    return(current_stat)
  })
  stat = do.call(rbind, stat)
  # Preserve the user-supplied ordering of the datasets on the x-axis.
  if (!is.null(names(mappings))){
    stat$id = factor(stat$id , levels = names(mappings))
  }
  pal = wes_palette("Zissou1" , length(mappings) , type = "continuous")
  p = ggplot(stat, aes( x = id, y = frac_mapped_correctly , col = id)) +
    geom_point(size=1) +
    facet_wrap(~celltype) +
    scale_color_manual(values = pal) +
    geom_hline(yintercept = 1, linetype = "dashed") +
    theme_classic() + theme( axis.text.x = element_blank() ) +
    ggtitle(title)
  return(p)
}
# Panel of scatter plots (one per gene) of embedding coordinates coloured
# by that gene's logcounts.
#
# sce   - SingleCellExperiment; colnames(sce) are the cell identifiers
# genes - character vector of gene names (rows of sce)
#
# NOTE(review): this function reads a data.frame `meta` (merged on `cell`,
# plotted via columns `x` and `y`) from the enclosing/global environment
# rather than taking it as an argument -- confirm `meta` is defined before
# calling.
get_umaps = function(sce , genes){
  plots = lapply(genes , function(gene){
    # Per-cell expression of this gene, merged onto the embedding coords.
    counts = data.frame(cell = colnames(sce) , counts = as.numeric(logcounts(sce)[gene , ]))
    current.meta = merge(meta , counts)
    # Sort ascending so high-expression cells are drawn on top.
    current.meta = current.meta[order(current.meta$counts) , ]
    p <- ggplot(current.meta , aes(x = x , y = y , col = counts)) +
      geom_point() +
      scale_color_gradient(low = "azure3" , high = "darkgreen") +
      theme(legend.position="none") +
      ggtitle(gene)
    return(p)
  })
  # Arrange the individual gene panels into one combined figure.
  p <- ggarrange(plotlist = plots)
  return(p)
}
# Load the filtered, unlabelled E8.5 mouse embryo SingleCellExperiment
# from disk and prepare it for plotting.
#
# dir - "cluster" loads from the cluster filesystem; any other value
#       (default "local") loads from the local development path.
#       NOTE(review): both paths are user/host specific -- TODO confirm
#       they are valid in the environment where this runs.
#
# Returns the object subset to embryo1, z-slice 2, with two extra
# library-size-normalised assays ("cpm", "cpm_wo_xist") attached and the
# gene Cavin3 renamed to Prkcdbp.
load_embryo_8.5 = function(dir = "local"){
  require(SingleCellExperiment)
  require(scater)
  if (dir == "cluster") {
    data.dir = "/nfs/research1/marioni/alsu/spatial/mouse_embryo/data/8_5/source/"
  } else {
    data.dir = "/Users/alsu/Develop/spatial/mouse_embryo/data/8_5/source/"
  }
  sce = readRDS( paste0( data.dir , "E8.5_sce_filt_unlabelled.Rds"))
  # add normalization by libsize
  assay(sce, "cpm") <- logcounts(scater::logNormCounts(sce, size_factors = sce$total))
  # same normalization, but excluding Xist counts from the per-cell library size
  assay(sce, "cpm_wo_xist") <- logcounts(scater::logNormCounts(sce, size_factors = as.numeric( sce$total - counts( sce["Xist"] )) ))
  # NOTE(review): `meta` is computed but no longer used or exported (see the
  # commented-out assign() calls below) -- candidate for removal.
  meta = colData(sce)
  meta = data.frame(meta)
  # rename Cavin3 --> Prkcdbp
  rownames.sce = rownames(sce)
  rownames.sce[rownames.sce == "Cavin3"] = "Prkcdbp"
  rownames(sce) = rownames.sce
  # keep a single z-slice of a single embryo
  sce = sce[, sce$embryo == "embryo1" & sce$z == 2]
  return(sce)
  #assign("sce", sce, envir = .GlobalEnv)
  #assign("meta", meta, envir = .GlobalEnv)
  #invisible(0)
}
# Expand per-cell segmentation-vertex list-columns into one long data.frame
# with a row per vertex (polygon outlines for plotting).
#
# DF        - a DataFrame (S4Vectors) whose xname/yname columns are lists of
#             numeric vertex coordinate vectors, one element per cell
# xname     - name of the list-column holding the x coordinates
# yname     - name of the list-column holding the y coordinates
# othercols - identifier columns to repeat once per vertex
#             NOTE(review): relies on rep() on a DataFrame recycling rows --
#             confirm this behaves as intended for the columns used.
#
# Returns a data.frame with the othercols followed by the xname/yname
# coordinate columns, vertices in original per-cell order.
getSegmentationVerticesDF = function(DF,
                                     xname = "segmentation_vertices_x_global",
                                     yname = "segmentation_vertices_y_global",
                                     othercols = c("uniqueID","z")) {
  # DF is a DataFrame object
  # othercols is the others to keep
  long_x = unlist(DF[,xname])
  long_y = unlist(DF[,yname])
  # Each cell must contribute the same number of x and y vertices.
  if (length(long_x) != length(long_y)) stop("x and y need to be same length")
  long_xy = data.frame(
    long_x,
    long_y
  )
  colnames(long_xy) <- c(xname, yname)
  # Repeat the identifier columns once per vertex of the owning cell.
  long_DF = cbind(
    rep(DF[,othercols], times = unlist(lapply(DF[,xname], length))),
    long_xy
  )
  return(as.data.frame(long_DF))
}
|
bb35ff5015bf4b1903fc1d67efed0f5a7a31aa1c | 67de61805dd839979d8226e17d1316c821f9b1b4 | /R/MxCommunication.R | 60c367597926c94d8c3fb80883c6618a735cd639 | [
"Apache-2.0"
] | permissive | falkcarl/OpenMx | f22ac3e387f6e024eae77b73341e222d532d0794 | ee2940012403fd94258de3ec8bfc8718d3312c20 | refs/heads/master | 2021-01-14T13:39:31.630260 | 2016-01-17T03:08:46 | 2016-01-17T03:08:46 | 49,652,924 | 1 | 0 | null | 2016-01-14T14:41:06 | 2016-01-14T14:41:05 | null | UTF-8 | R | false | false | 2,556 | r | MxCommunication.R | #
# Copyright 2007-2016 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Build the list of checkpoint descriptions handed to the OpenMx backend.
#
# model      - an MxModel whose mxOptions control checkpointing
# checkpoint - logical; forced to TRUE when the model's
#              'Always Checkpoint' option is "Yes"
# useSocket  - accepted for backward compatibility; currently unused
# options    - accepted for backward compatibility; currently unused
#
# Returns list() when checkpointing is disabled, otherwise a list holding a
# single description of the form list(0L, fullpath, units, count).
generateCommunicationList <- function(model, checkpoint, useSocket, options) {
	if (mxOption(model,'Always Checkpoint') == "Yes") {
		checkpoint <- TRUE
	}
	if (!checkpoint) return(list())
	# Past the early return checkpoint is TRUE, so the former
	# `if (checkpoint) { ... }` wrapper was redundant and has been removed,
	# along with a duplicated fetch of the 'Checkpoint Count' option.
	chkpt.directory <- mxOption(model, 'Checkpoint Directory')
	chkpt.directory <- removeTrailingSeparator(chkpt.directory)
	chkpt.prefix <- mxOption(model, 'Checkpoint Prefix')
	chkpt.units <- mxOption(model, 'Checkpoint Units')
	chkpt.count <- mxOption(model, 'Checkpoint Count')
	# The count may be supplied per unit as a length-2 list keyed by unit.
	if (length(chkpt.count) == 2) {
		chkpt.count <- chkpt.count[[chkpt.units]]
	}
	# Absent count means "checkpoint as rarely as possible".
	if (is.null(chkpt.count)) chkpt.count <- .Machine$integer.max
	if (!is.numeric(chkpt.count) || chkpt.count < 0) {
		stop(paste("'Checkpoint Count' model option",
			"must be a non-negative value in",
			deparse(width.cutoff = 400L, sys.call(-1))), call. = FALSE)
	}
	if (!(is.character(chkpt.prefix) && length(chkpt.prefix) == 1)) {
		stop(paste("'Checkpoint Prefix' model option",
			"must be a string in",
			deparse(width.cutoff = 400L, sys.call(-1))), call. = FALSE)
	}
	if (!(is.character(chkpt.directory) && length(chkpt.directory) == 1)) {
		stop(paste("'Checkpoint Directory' model option",
			"must be a string in",
			deparse(width.cutoff = 400L, sys.call(-1))), call. = FALSE)
	}
	if (!(is.character(chkpt.units) && length(chkpt.units) == 1)) {
		stop(paste("'Checkpoint Units' model option",
			"must be a string in",
			deparse(width.cutoff = 400L, sys.call(-1))), call. = FALSE)
	}
	# Default path: <directory>/<prefix><modelname>.omx, unless overridden
	# by the 'Checkpoint Fullpath' option.
	filename <- paste0(chkpt.prefix, model$name, '.omx')
	fullpath <- paste(chkpt.directory, filename, sep="/")
	override <- mxOption(model, "Checkpoint Fullpath")
	if (nchar(override)) {
		fullpath <- override
	}
	description <- list(0L, fullpath, chkpt.units, chkpt.count)
	return(list(description))
}
|
fb9ce6a16b90bf8da4becd0b80cd51b840d19ce0 | 02f24f0d8eaed72b4cb8b4ceb98d731eef1ee5c8 | /man/expr.Rd | 14248298de00e7d401788f46cde3387d1d81b3d0 | [
"MIT"
] | permissive | bradleycolquitt/phyloRNA | 78c24ea7d0e6f2df6f6a6e7a31702694518a837a | d7dc3bf1daace8fe884558b14ae735051d5e6ff0 | refs/heads/master | 2023-07-23T06:55:16.094078 | 2021-08-31T23:14:46 | 2021-08-31T23:14:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,956 | rd | expr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/expression.r
\name{expr}
\alias{expr}
\alias{expr_read10x}
\alias{expr_read10xh5}
\alias{expr_normalize}
\alias{expr_scale}
\alias{expr_zero_to_na}
\alias{expr_quality_filter}
\alias{expr_merge}
\alias{expr_discretize}
\title{Functions for manipulation with the expression data}
\usage{
expr_read10x(
dir,
gene_column = 2,
unique_features = TRUE,
strip_suffix = FALSE
)
expr_read10xh5(input, use_names = TRUE, unique_features = TRUE)
expr_normalize(data, scale_factor = 10000)
expr_scale(data)
expr_zero_to_na(data)
expr_quality_filter(data, minUMI = 500, minGene = 250, trim = TRUE)
expr_merge(datasets, names = NULL)
expr_discretize(data, intervals, unknown = "N")
}
\arguments{
\item{dir}{a directory with barcodes, features and sparse matrix}
\item{gene_column}{\strong{optional} the position of column with gene/feature names}
\item{unique_features}{\strong{optional} gene/feature names will be made unique to prevent possible
name conflict}
\item{strip_suffix}{\strong{optional} the \code{-1} suffix which is common for 10X barcodes}
\item{input}{an input data in the \code{.h5} format}
\item{use_names}{\strong{optional} use gene names instead of gene IDs}
\item{data}{an expression matrix}
\item{scale_factor}{\strong{optional} a scaling factor}
\item{minUMI}{minimum of UMI (unique molecules) per cell}
\item{minGene}{minimum represented genes/features per cell}
\item{trim}{\strong{optional} trim empty genes after filtering}
\item{datasets}{list of datasets to be merged}
\item{names}{\strong{optional} list of suffixes used to distinguish individual datasets}
\item{intervals}{an interval vector describing interval borders, i.e., interval c(-1, 1)
would describe half-open intervals: [-Inf, -1), [-1, 1) and [1, Inf).}
\item{unknown}{\strong{optional} a character that represents unknown character}
}
\value{
sparse matrix
a list of sparse matrices
log-normalized matrix
rescaled and centered data
a dense matrix with \code{NA} instead of zeros
filtered matrix
merged datasets
a discretized matrix
}
\description{
A group of functions, often lifted and modified from the Seurat package for manipulation with the 10X scRNAseq data.
}
\details{
The Seurat package is a great tool for manipulation with the 10X scRNAseq expression data.
However, it has two major issues. The first one is that it assumes that the zero expression
is true zero. While this is a reasonable assumption at high coverage, low-coverage scRNAseq
can suffer from drop out due to the nature of a small amount of starting product and certain
randomness coming from used methodology. This means that the measured zero level of expression
is more accurately described as missing data. Unfortunately, the sparse matrix implementation
used by Seurat does not allow this change of context.
The second issue is the huge amount of dependencies that the Seurat brings. Due to the limited
scope in which Seurat functionality is used and given that the utilized functionality had to be
already rewritten due to the above reasons, it seems more convenient to just lift up remaining
Seurat functionality.
}
\section{Functions}{
\itemize{
\item \code{expr_read10x}: Read 10X data
\item \code{expr_read10xh5}: Read 10X data in the \code{.h5} format.
\item \code{expr_normalize}: Log-normalize data. Feature counts for each cell are divided by the total count
for that cell multiplied by a scale factor. This is then natural log transformed using log1p.
\item \code{expr_scale}: Scale and center genes/features
\item \code{expr_zero_to_na}: Transform a sparse matrix into a dense matrix where zeros are represented
as \code{NA}.
\item \code{expr_quality_filter}: Filter the expression matrix according to quality metrics
\item \code{expr_merge}: Merge multiple datasets
\item \code{expr_discretize}: Discretize expression matrix according to interval vector.
}}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.