blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b06003ecf8eede73d8cdec51e5b1b19c887ec6fe | 2140b0f4a3556b5dc76a4e1749cbcb7ea1d0a61b | /scripts/plot.R | 1eeb89b466b3ca8f94a274c84661062ed1f36ace | [] | no_license | sashahafner/NH3MTC | aa53173461029f3c4446a69aa2bc1d1d50f48f60 | ae913b6036b9956205bda6fa3b93c0ed32b67e7a | refs/heads/master | 2023-04-08T10:20:44.339685 | 2021-04-08T11:40:24 | 2021-04-08T11:40:24 | 300,247,350 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,236 | r | plot.R |
# Exploratory plots of mass transfer coefficients (MTC).
# Assumes `dl` (long-format MTC data with columns tc, tcn, stor.type1, source)
# and `d` (flux data) already exist in the session -- created upstream; confirm.

# Boxplots of log10-transformed MTC values, one panel per storage type x source.
ggplot(dl, aes(tc, log10(tcn), fill = tc)) +
  geom_boxplot() +
  facet_wrap(~ interaction(stor.type1, source), scale = 'free') +
  labs(x = 'MTC type', y = 'MTC') +
  theme(legend.position = 'none')
# ggsave() saves the most recently displayed plot.
ggsave('../plots/log_box.png', height = 4, width = 6)
# Same boxplots on the original (untransformed) scale.
ggplot(dl, aes(tc, tcn, fill = tc)) +
  geom_boxplot() +
  facet_wrap(~ interaction(stor.type1, source), scale = 'free') +
  labs(x = 'MTC type', y = 'MTC') +
  theme(legend.position = 'none')
ggsave('../plots/box.png', height = 4, width = 6)
# Drop ammonium-based sources before plotting fluxes.
dd <- subset(d, !grepl('Ammonium', source))
# Flux divided by 24 with a g/m2-h label -- presumably flux is stored per
# day; confirm against the data-preparation script.
ggplot(dd, aes(source, flux/24, fill = source)) +
  geom_boxplot() +
  scale_y_continuous(trans = 'log10') +
  facet_wrap(~ stor.type1, scale = 'free') +
  labs(x = 'Source', y = 'Flux (g/m2-h)') +
  theme(legend.position = 'none')
ggsave('../plots/flux_box.png', height = 4, width = 6)
# Normal Q-Q plots of the raw MTC values (normality check).
ggplot(dl, aes(sample = tcn, colour = tc)) +
  geom_qq() +
  stat_qq_line() +
  facet_wrap(~ interaction(stor.type1, source), scale = 'free')
ggsave('../plots/qqnorm.png', height = 4, width = 6)
# Q-Q plots after log10 transform (log-normality check).
ggplot(dl, aes(sample = log10(tcn), colour = tc)) +
  geom_qq() +
  stat_qq_line() +
  facet_wrap(~ interaction(stor.type1, source), scale = 'free')
ggsave('../plots/qqlognorm.png', height = 4, width = 6)
|
4bdf84d8a1d25fce4603a9402539e47dc5eb5093 | 7357817e8a37471f8605dbbe78915efc0eaa9299 | /First competitions/Santa's Workshop Tour 2019/showdata.R | dbfd48ac154e53be69e1cf782fedb8ff1f2b7f52 | [] | no_license | PasaOpasen/Competitions | 8f15ac3c1a9cc744eac266b9deea5fa25d2420c1 | f701d9bb492f63b934a5b490800df171ee052817 | refs/heads/master | 2020-12-27T06:52:50.987987 | 2020-09-04T16:19:27 | 2020-09-04T16:19:27 | 237,802,501 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,813 | r | showdata.R | bes=read_csv("res.csv")
res= bes$assigned_day
choise=-res
choise[res==choises[1]]=0
choise[res==choises[2]]=1
choise[res==choises[3]]=2
choise[res==choises[4]]=3
choise[res==choises[5]]=4
choise[res==choises[6]]=5
choise[res==choises[7]]=6
choise[res==choises[8]]=7
choise[res==choises[9]]=8
choise[res==choises[10]]=9
choise[choise<0]=-1
which(res==choises[6])
df=data.frame(fam=bes$family_id,ch=factor(choise,levels = c("0","1","2","3","4","5","6","7","8","9","no")),np=factor(peop)) %>% tbl_df()
counts=df %>% group_by(ch,np) %>% summarise(ct=n())
ggplot(counts,aes(x=ch,y=ct))+
facet_wrap(vars(np))+
geom_col()+
theme_light()
counts
qplot(x=factor(res))
ds=data.frame(p=peop,dc=factor(res)) %>% group_by(dc) %>% summarise(val=sum(p))
ggplot(ds,aes(x=dc,y=val))+
geom_col()+
labs(title=paste("acc =",accounting.penalty(res)," pre =",preference.cost(res)," sum =",score(res)))+
geom_hline(yintercept =c( 125,300),col="red",size=1.5)+
theme_bw()
ds=data.frame(p=peop,dc=factor(res),np=factor(peop)) %>% group_by(dc,np) %>% summarise(val=sum(p))
ggplot(ds,aes(x=dc,y=val))+
geom_col()+
facet_wrap(vars(np))+
labs(title=paste("acc =",accounting.penalty(res)," pre =",preference.cost(res)))+
# geom_hline(yintercept =c( 125,300),col="red",size=1.5)+
theme_bw()
N=ds$val[1:99]
N2=c(N,N[99])[2:100]
rs=( (N-125)/400*N^(0.5+0.02*abs(N-N2)))
########################################
resold= read_csv("resold.csv")$assigned_day
tt=cbind(i=1:5000,resold,res,peop,choises[,1:6]) %>% tbl_df()
pp=tt %>% filter(resold!=res)
###
alien=data.table::fread("alien3.txt",header = T,sep='\t')
best.res=alien$assigned_day
res=read_csv("res.csv")
res$assigned_day=best.res
#best.res=res$assigned_day
write_csv(res,"res.csv")
#####################
library(factoextra) #графика по главным компонентам
print(fviz_cluster(list(data = choises[, 1:5], cluster = res), ellipse.type = "norm"))
#######
ifelse(res==choises[1]&res==1,peop,0) %>% sum()
ifelse(choises[1]==1,peop,0) %>% factor()%>% summary()
bes=read_csv("res.csv")
res= bes$assigned_day
ds=data.frame(p=peop,dc=factor(res)) %>% group_by(dc) %>% summarise(val=sum(p))
ggplot(ds,aes(x=dc,y=val))+
geom_col()+
labs(title=paste("acc =",accounting.penalty(res)," pre =",preference.cost(res)))+
geom_hline(yintercept =c( 125,300),col="red",size=1.5)+
theme_bw()
res[choises[1]==1&peop==8]=1
res[(res==1&peop==2)]=choises[(res==1&peop==2),2]%>% unclass() %$%choice_1
day=97
chs=4
inds=ifelse(choises[chs]==day,1,0)
sum((res==1)*inds)
res[(res==1)*inds]=day
res[res==1&peop==4]=choises[res==1&peop==4,3] %>% unclass() %$%choice_2
res[res==1&peop<=3]=choises[res==1&peop<=3,4] %>% unclass() %$%choice_3
tt=cbind(i=1:5000,res,peop,choises[,1:6]) %>% tbl_df()
res[3249]=2
|
6b68bb05f5e92f79b4965befcc8c61cd30cd04d6 | a2a7872c86f4a5cd1f109c91f11bf9cd3f4464ca | /man/vboot.coxnet.Rd | d0d2ab356ff48be561bf771f6c9e469c0abefa5e | [] | no_license | guhjy/BootValidation | d614cee9842d190e787e17ae14edf4a95fb1f985 | 8c76591b3553921080d4b68c314f86329f9ae741 | refs/heads/master | 2020-09-11T10:40:28.223939 | 2018-07-24T06:27:40 | 2018-07-24T06:27:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 964 | rd | vboot.coxnet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parallel_vboot.R
\name{vboot.coxnet}
\alias{vboot.coxnet}
\title{Internal bootstrapping validation of a Cox glmnet model}
\usage{
\method{vboot}{coxnet}(fit, x, y, s, nfolds = 5, B = 200,
cv_replicates = 100, n_cores = max(1, parallel::detectCores() - 1))
}
\arguments{
\item{fit}{Object from glmnet fit}
\item{x}{A matrix of the predictors, each row is an observation vector.}
\item{y}{Should be a two-column matrix with columns named 'time' and 'status' as in 'glmnet'}
\item{s}{Value of the penalty parameter "lambda" selected from the original 'cv.glmnet'}
\item{nfolds}{Number of folds for cross validation as in 'cv.glmnet'}
\item{B}{Number of bootstrap samples}
\item{cv_replicates}{Number of replicates for the cross-validation step}
\item{n_cores}{Number of cores to use in parallel. Default is detectCores() - 1.}
}
\description{
Validate glmnet cox regression using bootstrap.
}
|
739d5583918829ccc3c63bd9281d1e9510024598 | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /B_analysts_sources_github/road2stat/ShinyDSCR/server-core.R | 167ffe46e10756daf90cc4b1c8ed93d00dcabc11 | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 528 | r | server-core.R | library('xlsx')
# A rewrite of write.xlsx2() since it has logical problems and fails the write
# Write the data frame `x` to a new .xlsx workbook at `file`.
#
# Drop-in replacement for write.xlsx2(), which has logical problems and
# fails the write. Builds a single-sheet workbook, places `x` at cell A1
# and saves it. Called for its side effect; returns NULL invisibly.
xn.write.xlsx <- function (x, file, sheetName = "Sheet1",
                          col.names = TRUE, row.names = TRUE) {
  workbook <- createWorkbook(type = 'xlsx')
  target_sheet <- createSheet(workbook, sheetName)
  # All style arguments deliberately NULL: no cell formatting is applied.
  addDataFrame(x, target_sheet,
               col.names = col.names, row.names = row.names,
               startRow = 1, startColumn = 1,
               colStyle = NULL, colnamesStyle = NULL, rownamesStyle = NULL)
  saveWorkbook(workbook, file)
  invisible()
}
|
c262468a21f5e832b31d08ccc87eb1fd815ea680 | 850313938cfab881133a7d9caaacf790b56feca8 | /banco_candidaturas_resultados.R | 374ec76be4e604881d752bc4526585fb97e82ab3 | [] | no_license | marinamerlo/dissertacao | 2a6a7f13151186714b028d78bf503c5ebca8648b | 19b76a86c923f6912c7fe57f5f8f32fb6ee54a1e | refs/heads/master | 2020-12-30T10:50:38.147621 | 2018-07-25T14:44:36 | 2018-07-25T14:44:36 | 98,828,270 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,347 | r | banco_candidaturas_resultados.R | #configurando a pasta em que os arquivos serão salvos
setwd("C:\\Users\\d841255\\Desktop\\dados")
#abrindo os pacotes que vou usar usar. Eles já estavam instalados.
#install.packages("readr")
#install.packages("dplyr")
library(readr)
library(dplyr)
library(data.table)
########################################
###### Parte 1 - abrindo os dados ######
########################################
#baixando o arquivo com todos os resultados eleitorais de 2016
url_result <- "http://agencia.tse.jus.br/estatistica/sead/odsele/votacao_candidato_munzona/votacao_candidato_munzona_2016.zip"
download.file(url_result, "temp.zip", quiet = F)
#descompactando o arquivo e removendo o .zip da pasta
unzip("temp.zip")
file.remove("temp.zip")
#baixando o arquivo com os dados de candidaturas
url_cand <- "http://agencia.tse.jus.br/estatistica/sead/odsele/consulta_cand/consulta_cand_2016.zip"
download.file(url_cand, "temp.zip", quiet = F)
#descompactando o arquivo e removendo o .zip da pasta
unzip("temp.zip")
file.remove("temp.zip")
##selecionando os arquivos do Estado de São Paulo
#criando uma lista de todos os arquivos contidos na pasta
lista.arquivos <-list.files(file.path(getwd()))
print(lista.arquivos)
#criando uma lista para pegar somente os documentos de votação
lista.resultados <- grep(pattern="votacao_candidato_munzona_2016_", lista.arquivos, value=TRUE)
print(lista.resultados)
#pegando somente o arquivo de São Paulo
lista.resultados <- lista.resultados[c(25)]
print(lista.resultados)
# Column names per the TSE "LEIAME" documentation for
# votacao_candidato_munzona (the raw files ship without a header row).
colunas_resultados <- c("DATA_GERACAO",
                        "HORA_GERACAO",
                        "ANO_ELEICAO",
                        "NUM_TURNO",
                        "DESCRICAO_ELEICAO",
                        "SIGLA_UF",
                        "SIGLA_UE",
                        "CODIGO_MUNICIPIO",
                        "NOME_MUNICIPIO",
                        "NUMERO_ZONA",
                        "CODIGO_CARGO",
                        "NUMERO_CAND",
                        "SEQUENCIAL_CANDIDATO",
                        "NOME_CANDIDATO",
                        "NOME_URNA_CANDIDATO",
                        "DESCRICAO_CARGO",
                        "COD_SIT_CAND_SUPERIOR",
                        "DESC_SIT_CAND_SUPERIOR",
                        "CODIGO_SIT_CANDIDATO",
                        "DESC_SIT_CANDIDATO",
                        "CODIGO_SIT_CAND_TOT",
                        "DESC_SIT_CAND_TOT",
                        "NUMERO_PARTIDO",
                        "SIGLA_PARTIDO",
                        "NOME_PARTIDO",
                        "SEQUENCIAL_LEGENDA",
                        "NOME_COLIGACAO",
                        "COMPOSICAO_LEGENDA",
                        "TOTAL_VOTOS",
                        "TRANSITO")
# Read every selected results file (Latin-1, no header), rename its columns,
# and stack the rows into a single table.
#
# BUG FIX: the original loop did `resultados <- fread(...)` inside the for,
# OVERWRITING the accumulator on every pass instead of appending to the empty
# data frame it created. It only worked because lista.resultados held a
# single file (Sao Paulo); with several files all but the last would be
# silently dropped. rbindlist() stacks them correctly.
resultados <- rbindlist(lapply(lista.resultados, function(arquivo) {
  um_arquivo <- fread(file.path(getwd(), arquivo), stringsAsFactors = F,
                      encoding = "Latin-1", header = F)
  names(um_arquivo) <- colunas_resultados
  um_arquivo
}))
# Sanity check: row counts per state actually stacked.
table(resultados$SIGLA_UF)
##repetindo os passos para criar o arquivo das candidaturas##
#criando uma lista de todos os arquivos contidos na pasta
lista.arquivos <-list.files(file.path(getwd()))
print(lista.arquivos)
#criando uma lista para pegar somente os documentos de votação
lista.candidatos <- grep(pattern="consulta_cand_2016_", lista.arquivos, value=TRUE)
print(lista.candidatos)
#pegando somente o arquivo de São Paulo
lista.candidatos <- lista.candidatos[c(27)]
print(lista.candidatos)
# Column names per the TSE "LEIAME" documentation for consulta_cand
# (the raw candidate files also ship without a header row).
colunas_candidatos <- c("DATA_GERACAO",
                        "HORA_GERACAO",
                        "ANO_ELEICAO",
                        "NUM_TURNO",
                        "DESCRICAO_ELEICAO",
                        "SIGLA_UF",
                        "SIGLA_UE",
                        "DESCRICAO_UE",
                        "CODIGO_CARGO",
                        "DESC_CARGO",
                        "NOME_CANDIDATO",
                        "SEQUENCIAL_CANDIDATO",
                        "NUMERO_CANDIDATO",
                        "CPF_CAND",
                        "NOME_URNA_CANDIDATO",
                        "COD_SITUACAO_CANDIDATURA",
                        "DES_SITUACAO_CANDIDATURA",
                        "NUMERO_PARTIDO",
                        "SIGLA_PARTIDO",
                        "NOME_PARTIDO",
                        "CODIGO_LEGENDA",
                        "SIGLA_LEGENDA",
                        "COMPOSICAO_LEGENDA",
                        "NOME_LEGENDA",
                        "CODIGO_OCUPACAO",
                        "DESCRICAO_OCUPACAO",
                        "DATA_NASCIMENTO",
                        "NUM_TITULO_ELEITORAL_CANDIDATO",
                        "IDADE_DATA_ELEICAO",
                        "CODIGO_SEXO",
                        "DESCRICAO_SEXO",
                        "COD_GRAU_INSTRUCAO",
                        "DESCRICAO_GRAU_INSTRUCAO",
                        "CODIGO_ESTADO_CIVIL",
                        "DESCRICAO_ESTADO_CIVIL",
                        "COD_COR_RACA",
                        "DESC_COR_RACA",
                        "CODIGO_NACIONALIDADE",
                        "DESCRICAO_NACIONALIDADE",
                        "SIGLA_UF_NASCIMENTO",
                        "CODIGO_MUNICIPIO_NASCIMENTO",
                        "NOME_MUNICIPIO_NASCIMENTO",
                        "DESPESA_MAX_CAMPANHA",
                        "COD_SIT_TOT_TURNO",
                        "DESC_SIT_TOT_TURNO",
                        "EMAIL")
# BUG FIX: the original loop assigned fread()'s output straight to
# `candidatos` on every iteration, overwriting instead of appending; it only
# worked because lista.candidatos held a single file (Sao Paulo).
# Read each file, rename its columns, and stack all rows.
candidatos <- rbindlist(lapply(lista.candidatos, function(arquivo) {
  um_arquivo <- fread(file.path(getwd(), arquivo), stringsAsFactors = F,
                      encoding = "Latin-1", header = F)
  names(um_arquivo) <- colunas_candidatos
  um_arquivo
}))
# Sanity check: row counts per state actually stacked.
table(candidatos$SIGLA_UF)
############################################
###### Parte 2 - data frame resultados #####
############################################
names(resultados)
#selecionando as linhas que contem resultados para vereador, deixando o banco com as variáveis e renomeando-as
resultados <- resultados %>%
filter(DESCRICAO_CARGO == "VEREADOR") %>%
select(SIGLA_UF,
SIGLA_UE,
CODIGO_MUNICIPIO,
NUMERO_ZONA,
SIGLA_PARTIDO,
NUMERO_CAND,
SEQUENCIAL_CANDIDATO,
NOME_CANDIDATO,
NOME_URNA_CANDIDATO,
COMPOSICAO_LEGENDA,
TOTAL_VOTOS,
NOME_MUNICIPIO) %>%
rename(uf = SIGLA_UF,
ue = SIGLA_UE,
cod_mun = CODIGO_MUNICIPIO,
num_zona = NUMERO_ZONA,
sigla = SIGLA_PARTIDO,
num_cand =NUMERO_CAND,
seq = SEQUENCIAL_CANDIDATO,
nome = NOME_CANDIDATO,
nome_urna = NOME_URNA_CANDIDATO,
colig = COMPOSICAO_LEGENDA,
votos_total = TOTAL_VOTOS,
nome_mun = NOME_MUNICIPIO)
############################################
###### Parte 3 - data frame candidatos #####
############################################
names(candidatos)
#selecionando as linhas que contem candiadtos para vereador, deixando o banco com as variáveis e renomeando-as
candidatos <- candidatos %>%
filter(DESC_CARGO == "VEREADOR") %>%
select(SEQUENCIAL_CANDIDATO,
CPF_CAND,
DES_SITUACAO_CANDIDATURA,
NOME_LEGENDA,
NUM_TITULO_ELEITORAL_CANDIDATO,
IDADE_DATA_ELEICAO,
DESCRICAO_SEXO,
DESCRICAO_OCUPACAO,
DESCRICAO_GRAU_INSTRUCAO,
DESCRICAO_ESTADO_CIVIL,
DESC_COR_RACA,
DESPESA_MAX_CAMPANHA,
DESC_SIT_TOT_TURNO) %>%
rename(
seq = SEQUENCIAL_CANDIDATO,
cpf = CPF_CAND,
situ = DES_SITUACAO_CANDIDATURA,
colig_nome = NOME_LEGENDA,
titulo = NUM_TITULO_ELEITORAL_CANDIDATO,
idade = IDADE_DATA_ELEICAO,
genero = DESCRICAO_SEXO,
ocup = DESCRICAO_OCUPACAO,
instru = DESCRICAO_GRAU_INSTRUCAO,
estcivil = DESCRICAO_ESTADO_CIVIL,
cor = DESC_COR_RACA,
despmax = DESPESA_MAX_CAMPANHA,
result = DESC_SIT_TOT_TURNO)
##########################################################
###### Part 4 - aggregating and joining by municipality ##
##########################################################
# Make the vote counts numeric and the candidate key `seq` a string in both
# tables, so the joins below match on identical types.
resultados$votos_total <- as.numeric(resultados$votos_total)
resultados$seq <- as.character(resultados$seq)
candidatos$seq <- as.character(candidatos$seq)
# Total votes cast per municipality (electoral unit `ue`).
resultado_mun <- resultados %>%
  group_by(ue) %>%
  summarise(votos_total_mun = sum(votos_total))
# Attach each municipality's total back onto the per-zone results.
resultados<- right_join(resultados, resultado_mun, by = "ue")
# Total votes per candidate (summed over electoral zones).
resultado_cand <- resultados %>%
  group_by(seq) %>%
  summarise(votos_total_cand = sum(votos_total))
# Attach the per-candidate total and compute the candidate's share of the
# municipality's votes.
resultados <- resultados %>%
  right_join(resultado_cand, by = "seq") %>%
  mutate(prop_mun_cand = votos_total_cand / votos_total_mun)
##########################################################
###### Part 6 - joining by candidate #####################
##########################################################
# (NOTE(review): two sections are labelled "Parte 6" in the original; this
# first one was probably meant to be Parte 5.)
# Join candidate attributes onto the per-zone results by the `seq` key.
resultados_2 <- inner_join(candidatos, resultados, by = c("seq"))
# Inspect the resulting columns to check that everything came through.
glimpse(resultados_2)
###############################################################
###### Part 6 - keeping only the city of Sao Paulo ############
###############################################################
# Find Sao Paulo's electoral-unit code for the filter: being the largest
# city, it tops the table once sorted by total municipal votes.
resultados_2 <- resultados_2 %>% arrange(desc(votos_total_mun))
head(resultados_2)
# Sao Paulo's electoral-unit code is 71072.
dados_SP <- resultados_2 %>%
  filter(ue == "71072")
# One row per candidate: drop the per-zone vote columns...
dados_cand <- dados_SP %>%
  select(-votos_total, -num_zona)
# ...then deduplicate the remaining rows.
dados_cand <- distinct(dados_cand)
dados <- dados_cand
# Cross-check the counts of elected candidates by gender.
table(dados_cand$result, dados_cand$genero)
# Save both tables:
# one observation per candidate,
write.table(dados_cand, "result_cand_SP.csv", sep = ";", fileEncoding ="latin1", row.names = F)
# and one observation per candidate x electoral zone.
write.table(dados_SP, "result_candzona_SP.csv", sep = ";", fileEncoding ="latin1", row.names = F)
|
bf105ae7d77b3a2f884e83fb38ce3df98aebdd69 | 919c4a7c851054cf60111d183e47881c3e0967e9 | /cachematrix.R | a0292f99d7f3af381e8e8486eec4bc92370bd15d | [] | no_license | syablokova/ProgrammingAssignment2 | d5dcedbad76f7c12d5f85203208f98589bc34793 | 7af799d4a1a7ba8e33122f9362069d8ea1aaa32a | refs/heads/master | 2021-07-15T18:35:57.746134 | 2017-10-21T19:23:37 | 2017-10-21T19:23:37 | 107,748,046 | 0 | 0 | null | 2017-10-21T03:23:52 | 2017-10-21T03:23:51 | null | UTF-8 | R | false | false | 1,542 | r | cachematrix.R | ## This script has functioins for creating a matrix object, calculating inverse matrix and storing
## results in the parent enviroment. When the cacheSolve function is called
## it first checks out the cache before running expensive calculations,
## making the process a lot more efficient.
## example:
## myData = matrix(c(2, 4, 3, 1), nrow=2, ncol=2)
## myCache <- makeCacheMatrix()
## myCache$set(myData)
## cacheSolve(myCache)
## makeCacheMatrix | creates an empty matrix object in the current enviroment
## that exposes set, get, and inverse functions.
## On initialization it clears current and parent x and s variables
# Build a cache-aware container around the matrix `x`.
#
# Returns a list of four closures sharing the same environment:
#   set(y)        -- replace the stored matrix and invalidate the cache
#   get()         -- return the stored matrix
#   setinverse(s) -- store a computed inverse in the cache
#   getinverse()  -- return the cached inverse, or NULL if none
# Used together with cacheSolve() to avoid recomputing solve(x).
makeCacheMatrix <- function(x = matrix()) {
  message("creating makeCacheMatrix")
  cached_inverse <- NULL
  list(
    set = function(y) {
      # New data invalidates any previously cached inverse.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(solve) cached_inverse <<- solve,
    getinverse = function() cached_inverse
  )
}
## cacheSolve | stores the value of solve(x) function in the cached matrix created
## in the makeCacheMatrix
## if the cache vale exists it returned immidiatly, otherwise solve is run
## on x and the value stored in cache for future use
# Return the inverse of the matrix held in the cache object `x`
# (as produced by makeCacheMatrix). On the first call the inverse is
# computed with solve() and stored back into the cache; later calls
# return the stored value without recomputing. `...` is forwarded to
# solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    # Cache miss: compute, then remember for next time.
    inverse <- solve(x$get(), ...)
    message("setting cached data in makeCacheMatrix")
    x$setinverse(inverse)
  } else {
    message("getting cached data from makeCacheMatrix")
  }
  inverse
}
|
f26129b5f80108f60ced9bc6c1027f06293c85f9 | f7746a5ccce8d8413f63ee03163501be63b22904 | /tp2-correction.R | 9f54c25ad40fde21f4284f9b7139387ee482b710 | [] | no_license | katossky/panorama-bigdata | 6392c28fb3197f959a5be61ec7feb1e8f80783d8 | 2dbd9f03b7ecc9dbde1359934eff0090459812e4 | refs/heads/master | 2020-07-04T18:15:33.281748 | 2020-03-26T09:20:51 | 2020-03-26T09:20:51 | 202,369,631 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 7,546 | r | tp2-correction.R | # Ex 3.1
library(dplyr)
library(RPostgreSQL)
pw <- "YojCqLr3Cnlw6onuzHU3" # Do not change the password !
drv <- dbDriver("PostgreSQL") # loads the PostgreSQL driver.
# (Needed to query the data)
# This is the proper connection.
# The same object will be used each time we want to connect to the database.
con <- dbConnect(drv, dbname = "postgres",
host = "database-1.c5bqsgv9tnea.eu-west-3.rds.amazonaws.com", port = 5432,
user = "postgres", password = pw)
rm(pw) # We don't need the password anymore.
dbExistsTable(con, "flight")
# Fetch the first `n` rows of the `flight` table over the PostgreSQL
# connection `con` created above. Returns the result as a data.frame.
# NOTE: `n` is pasted into the SQL text; callers pass trusted integers only.
import <- function(n) {
  sql <- paste("SELECT * FROM flight LIMIT", n)
  dbGetQuery(con, sql)
}
# Count flights per (year, month) among the first N rows of the global
# `FLIGHTS` data frame.
#
# Returns a data.frame with columns year, month and count -- one row per
# distinct (year, month) pair, in order of first appearance, exactly as the
# original incremental implementation produced.
#
# PERFORMANCE FIX: the original grew the result one row at a time and
# re-scanned it for every input row (quadratic in N). This version is a
# single vectorized aggregation, with identical output.
count <- function(N) { # <----------- DO NOT MIX N AND n !
  months_seen <- FLIGHTS[seq_len(N), c("year", "month"), drop = FALSE]
  # Key each row by its (year, month) pair.
  key <- paste(months_seen$year, months_seen$month, sep = "-")
  # Rows of first appearance, preserving the original's output order.
  first_rows <- months_seen[!duplicated(key), , drop = FALSE]
  # Tallies in first-appearance order (factor levels fix the ordering).
  tallies <- table(factor(key, levels = unique(key)))
  data.frame(
    year = first_rows$year,
    month = first_rows$month,
    count = as.numeric(tallies),
    row.names = NULL
  )
}
# Daily passenger totals from the first N rows of the global FLIGHTS table,
# with 5-day and 10-day trailing moving averages.
#
# Returns a tibble with one row per day starting at day 5 (the first complete
# 5-day window): year, month, day, passengers, passengers_w5, passengers_w10.
#
# NOTE(review): day i of PASSENGERS lands at row i-4 of the result (row 1 =
# day 5), but the 10-day average ending at day i is written at row i-5, i.e.
# the previous day's row -- looks like an off-by-one; confirm intent before
# reuse. Also, bind_rows() inside the loop grows the tibble one row at a
# time (quadratic); kept as-is since this exercise benchmarks exactly that.
rolling_avg <- function(N){
  PASSENGERS_adjusted <- tibble()
  # Passengers per day
  PASSENGERS <- FLIGHTS[1:N,] %>%  # <------------ CHANGED!
    group_by (year, month, day) %>%
    summarise (passengers = sum(passengers)) %>%
    arrange(year, month, day)
  # 5-days rolling average. Begins at day 5.
  for (i in 5:nrow(PASSENGERS)) {
    # Sum of the 5-day window ending at day i.
    n <- 0
    for(j in (i-4):i) n <- n + PASSENGERS$passengers[j]
    new_row <- tibble(
      year = PASSENGERS$year[i],
      month = PASSENGERS$month[i],
      day = PASSENGERS$day[i],
      passengers = PASSENGERS$passengers[i],
      passengers_w5 = n/5
    )
    PASSENGERS_adjusted <- bind_rows(PASSENGERS_adjusted, new_row)
  }
  # 10-days rolling average. Begins at day 10.
  for (i in 10:nrow(PASSENGERS)) {
    # Sum of the 10-day window ending at day i.
    n <- 0
    for(j in (i-9):i) n <- n + PASSENGERS$passengers[j]
    PASSENGERS_adjusted[i-5, "passengers_w10"] <- n/10
  }
  return(PASSENGERS_adjusted)
}
# Rolling OLS of payload on seats/passengers/freight/mail/distance over a
# sliding window of 1000 consecutive flights (ordered by date), using the
# first N rows of the global FLIGHTS table.
#
# Returns the first 10 columns of the coefficient matrix (one column per
# window; rows = intercept + the five regressors).
# NOTE(review): `betas[,1:10]` errors when N < 1009 (fewer than 10 windows),
# and `betas <- cbind(betas, ...)` grows the matrix inside the loop -- both
# appear to be pedagogical shortcuts; confirm before reusing elsewhere.
rolling_reg <- function(N){
  # Regressors used to build each window's design matrix.
  variables <- c("seats", "passengers", "freight", "mail", "distance")
  betas <- numeric()
  DATA <- FLIGHTS[1:N,] %>% arrange(year, month, day) # <------ CHANGED
  for (i in 1000:nrow(DATA)) { # <------ CHANGED
    # Design matrix of the 1000 flights ending at row i, plus an intercept.
    X <- data.matrix(DATA[(i-999):i, variables]) # <------ CHANGED
    X <- cbind(intercept=1, X)
    Y <- matrix(DATA$payload[(i-999):i], nrow = 1000, ncol = 1) # <------ CHANGED
    # Closed-form OLS: (X'X)^-1 X'Y, appended as a new column.
    betas <- cbind(betas, solve(t(X)%*% X) %*% t(X) %*% Y)
  }
  rownames(betas) <- c("intercept", "seats", "passengers", "freight", "mail", "distance")
  betas[,1:10]
}
FLIGHTS <- import(2000)
count(2000)
rolling_avg(2000)
rolling_reg(2000)
library(microbenchmark)
timing_import <- microbenchmark(
times = 10,
FLIGHTS <- import(n=250),
FLIGHTS <- import(n=500),
FLIGHTS <- import(n=1000),
FLIGHTS <- import(n=2500),
FLIGHTS <- import(n=5000),
FLIGHTS <- import(n=10000),
FLIGHTS <- import(n=15000)
)
library(ggplot2)
autoplot(timing_import) # install.package("ggplot2")
table_timing_import <- print(timing_import)
plot(
x = c(250,500,1000,2500,5000,10000,15000),
y = table_timing_import$median,
ylim=c(0,8),
xlab = "Nombre de lignes",
ylab = "Temps (s)",
main = "Complexité empirique en temps"
)
timing_count <- microbenchmark(
times = 10,
count(N=250),
count(N=500),
count(N=1000),
count(N=2500),
count(N=5000),
count(N=10000),
count(N=15000)
)
autoplot(timing_count) # install.package("ggplot2")
table_timing_count <- print(timing_count)
plot(
x = c(250,500,1000,2500,5000,10000,15000),
y = table_timing_count$median,
#ylim=c(0,8),
xlab = "Nombre de lignes",
ylab = "Temps (ms)",
main = "Fonction count",
sub = "Complexité empirique en temps"
)
library(profmem)
mem_import <- list()
mem_import[[1]] <- profmem(FLIGHTS <- import(n=250))
mem_import[[2]] <- profmem(FLIGHTS <- import(n=500))
mem_import[[3]] <- profmem(FLIGHTS <- import(n=1000))
mem_import[[4]] <- profmem(FLIGHTS <- import(n=2500))
mem_import[[5]] <- profmem(FLIGHTS <- import(n=5000))
mem_import[[6]] <- profmem(FLIGHTS <- import(n=10000))
mem_import[[7]] <- profmem(FLIGHTS <- import(n=15000))
plot(
x = c(250,500,1000,2500,5000,10000,15000),
y = unlist(lapply(mem_import, function(e) sum(e$bytes)))
)
# 4.1
# Like import(), but fetch only the columns used downstream instead of
# SELECT * -- less data transferred per row. Uses the global connection
# `con`; returns a data.frame of the first `n` rows of `flight`.
import2 <- function(n) {
  columns <- c("passengers", "year", "month", "day,",
               "payload", "distance", "freight", "seats")
  # paste() with default sep = " " rebuilds exactly the original SQL text.
  query <- paste("SELECT passengers, year, month, day,",
                 "payload, distance, freight, seats",
                 "FROM flight LIMIT", n)
  dbGetQuery(con, query)
}
library(microbenchmark)
timing_import2 <- microbenchmark(
times = 10,
FLIGHTS <- import2(n=250),
FLIGHTS <- import2(n=500),
FLIGHTS <- import2(n=1000),
FLIGHTS <- import2(n=2500),
FLIGHTS <- import2(n=5000),
FLIGHTS <- import2(n=10000),
FLIGHTS <- import2(n=15000)
)
autoplot(timing_import2) # install.package("ggplot2")
table_timing_import2 <- print(timing_import2)
plot(
type="l",
x = c(250,500,1000,2500,5000,10000,15000),
y = table_timing_import$median,
ylim=c(0,8)
)
lines(
x = c(250,500,1000,2500,5000,10000,15000),
y = table_timing_import2$median/1000,
col="red"
)
# 4.2
# On peut exécuter toute la section COUNTS
# dans la base de données.
# SELECT COUNT(*), year, month FROM flight GROUP BY year, month
dbGetQuery(con, "SELECT COUNT(*), year, month FROM flight GROUP BY year, month")
# 4.3
table(FLIGHTS$year, FLIGHTS$month)
with(FLIGHTS, table(year, month))
FLIGHTS %>% # version dplyr
group_by(year, month) %>%
summarize(n=n())
library(dbplyr)
tbl(con, "flight") %>%
group_by(year, month) %>%
summarize(n=n()) %>%
collect()
# 5.1
n <- 100
bench::mark(
vect = {v <- cumprod(1:n)},
non_vect = {v<-1; for(i in 2:n) v[i] <- v[i-1]*i ; v}
)
#
my_cumsum <- sum(PASSENGERS$passengers[1:10])
for (i in 11:nrow(PASSENGERS)) {
my_cumsum <- my_cumsum + PASSENGERS$passengers[i] - PASSENGERS$passengers[i-10]
PASSENGERS_adjusted[i-5, "passengers_w10"] <- my_cumsum/10
}
#
bench::mark(
vect ={v <- rep(1,n)},
preallocate ={v <- numeric(n) ; for(i in 1:n) v[i] <- 1 ; v},
no_preallocate ={v <- numeric() ; for(i in 1:n) v[i] <- 1 ; v},
concatenate ={v <- numeric() ; for(i in 1:n) v <- c(1,v); v}
)
library(foreach) # Parallel for loops
library(parallel) # Interface between R and multiple cores
library(doParallel) # Interface between foreach and parallel
detectCores() # How many cores are available ?
registerDoParallel(cores=2) # Number of cores you want to work with
library(tictoc)
tic()
foreach(i=1:10) %dopar% Sys.sleep(1)
toc()
# Parallel version of rolling_reg(): each 1000-row window's OLS fit runs in
# a foreach worker and the resulting coefficient columns are cbind-ed by
# .combine. Requires registerDoParallel() to have been called beforehand.
# Unlike rolling_reg(), this returns ALL window coefficients, not just the
# first 10 columns.
rolling_reg2 <- function(N){
  variables <- c("seats", "passengers", "freight", "mail", "distance")
  # (unused leftover from the serial version)
  betas <- numeric()
  DATA <- FLIGHTS[1:N,] %>% arrange(year, month, day) # <------ CHANGED
  beta = foreach(i=1000:nrow(DATA), .combine = cbind) %dopar% { # <------ CHANGED
    # Each iteration returns one column of coefficients: (X'X)^-1 X'Y.
    X <- data.matrix(DATA[(i-999):i, variables])
    X <- cbind(intercept=1, X)
    Y <- matrix(DATA$payload[(i-999):i], nrow = 1000, ncol = 1) # <------ CHANGED
    solve(t(X)%*% X) %*% t(X) %*% Y
  }
  return(beta)
}
# Compare serial vs parallel rolling regressions on the smallest valid size.
microbenchmark(
  rolling_reg(N=1010),
  rolling_reg2(N=1010)
)
# NOTE(review): `betas` is never assigned at top level in this script --
# rolling_reg()/rolling_reg2() return their result without creating it.
# These two lines look like leftovers and will error unless `betas` exists
# from an earlier interactive run; confirm before executing end-to-end.
rownames(betas) <- c("intercept", "seats", "passengers", "freight", "mail", "distance")
betas[,1:10]
a6c7192a11699a576d90b1e23163fd2cf3818387 | 2386da2a8a68f29bfb78d806ad6252d6e9bf3a1a | /R/.svn/text-base/perform.all.inference.R.svn-base | 8c016853c8fe4c81cdefb6f77f62e6f09efedf41 | [] | no_license | vitalinakomashko/mGenomics | 4b4c601198ed1a10c07ee5f1b0422e35b84950cf | 140b79c94361e8a814d35e6456c8c178c0e78c83 | refs/heads/master | 2021-05-27T13:50:09.713370 | 2013-03-20T00:24:07 | 2013-03-20T00:24:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,502 | perform.all.inference.R.svn-base | perform.all.inference <- function(dat, adj.var, meta.data, i=1, sva=FALSE) {
# Move through each unique covariate
results <- list();
for( cov.num in 1:ncol(meta.data)) {
covariate <- meta.data[[cov.num]]
# cat("variable",cov.num," class",class(covariate),"\n")
which.nas <- is.na(covariate)
if(class(covariate) == "factor" | class(covariate) == "character") {
# Its a factor, remove unused levels
if(class(covariate) == "factor") {
covariate <- as.character(unlist(covariate))
}
if(class(covariate) == "character") {
covariate <- factor(covariate)
}
# Handle missing values. This adjusts the covariate and data matrix to remove samples with missing data
if(sum(is.na(covariate)) > 0) {
valid.samples <- which(!is.na(covariate))
covariate <- factor(covariate[valid.samples])
dat.c <- dat[,valid.samples]
}else{
dat.c <- dat
}
# Move to the next covariate if there is only one level
if(length(unique(covariate)) == 1){
results[[cov.num]] <- NA
next
}
# If one unique level for each sample then stop
if(ncol(dat.c) == nlevels(covariate)){
results[[cov.num]] <- NA
next;
}
X <- model.matrix(~ covariate)
res <- dat.c - t(X %*% solve(t(X) %*% X) %*% t(X) %*% t(dat.c))
lf <- lm(dat.c[i,] ~ X)
results[[cov.num]] <- list(
var.num = cov.num,
lf.fit = lf,
no.samples = ncol(dat.c),
which.nas = which.nas,
n.levels = unique(covariate))
}
}
return(results)
}
| |
a20fe08f426271cb347403c26d710403f70879c3 | 81b443dbe06b02c7ce9c36878a34d79c5863db7b | /chapter-1-chapter-2-augmented.R | 6fef4fa4cf36d0f963d94aeff94b68d4bcf69614 | [] | no_license | aejb22122/Analyse-de-donnees-these-paris-saclay | 7367be49a56938a9fa5a2f3f3113744aa3a45d01 | 53e278ff71e5de0050cce269606162e65ac760a2 | refs/heads/master | 2020-12-02T16:28:50.690770 | 2020-01-14T22:25:23 | 2020-01-14T22:25:23 | 96,557,982 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,039 | r | chapter-1-chapter-2-augmented.R | # ---- Thèse de doctorat Annick Eudes JEAN-BAPTISTE ----
# Codes de réplication des calculs du Chapitre # 1 - augmentés d'autres calculs
# ---- Préliminaires ----
# Cleaning the session form other the objects in the environment
remove(list=ls())
ls()
# setting the working directory
setwd("~/OneDrive/Documents/2_Data_analysis_research/GitHub/Analyse-de-donnees-these-paris-saclay/datasets")
# Adding the packages used in this analysis
install.packages("xlsx") # Lire les fichers excel
install.packages("ggplot2") # Installer le packet ggplot2
install.packages("calibrate") # Pour ajouter les noms des points dans un scatter plot
install.packages("reshape2") # Load le packet qui permet de faire le reshaping et le melt:
install.packages("ggpubr") # ggpubr: 'ggplot2' Based Publication Ready Plots -
# stat() for the Pearson correlation in the plot
# Loading the required packages :
library("xlsx")
library("ggplot2")
library(calibrate)
library(reshape2)
library(ggpubr)
# Removinng the scientific notations
options(scipen=999)
# En francais :
# Sys.setenv(LANG = "fr")
# Ou en anglais Sys.setenv(LANG = "en")
# ---- Graphique # 3 ----
# Importer le ficher excel contenu dans le repertoire :"dataset"
df <- read_excel("Taux_de_croissance_PD PVD_PMA.xlsx")
str(df)
View(df)
# Figure 3. Taux de croissance du PIB per capita par
# catégories de revenus de 1971 à 2015 en USD constant en 2010
ggplot(df, aes(df$Date)) + geom_line(aes(y = df$`Pays Developpes`,
color = "Pays développés")) +
geom_line(aes(y = df$PVD, color = "PVD")) +
geom_line(aes(y = df$`PVS sans les PMA`, color = "PVD sans les PMA")) +
geom_line(aes(y = df$`PMA : Afrique et Haiti`, color = "PMA Afrique et Haiti")) +
geom_line(aes(y = df$`PMA : Asie`, color = "PMA d'Asie")) +
xlab("Années") +
ylab("Croissance du PIB par habitant (% annuel)")
# ---- Graphique # 4 ----
# Figure 4. Contribution du secteur agricole à la valeur ajoutée (en USD constant 2010)
library(readxl)
df <- read_excel("Contribution_VA_PIB.xlsx")
View(df)
# 1er chose à faire, elever la colonne "date" et "reshape" les variables pour les
# mettre dans le format adéquat :
df.pma = df
df.pma$Date <- NULL
# Load le packet qui permet de faire le reshaping et le melt:
library(reshape2)
# Shaping the data
melted_data = melt(df.pma)
View(melted_data)
# Change the names to make it less confusing if necessary
colnames(melted_data) = c("Pays", "va.agr.PIB")
# ploting with ggplot, x = variable and y = value (one can add cathegories)
ggplot(melted_data) + geom_boxplot(aes(x = Pays, y = va.agr.PIB))
# Ajouter des valeurs sur les axes x et y
ggplot(melted_data) +
geom_boxplot(aes(x = Pays, y = va.agr.PIB)) +
xlab("Groupes économiques") +
ylab("Agriculture, valeur ajoutée (% du PIB)")
# ---- Graphique # 8 ----
# Importer le ficher excel contenu dans le repertoire :"dataset"
library(readxl)
df <- read_excel("Commodity_prices.xlsx",
sheet = "Data")
View(df)
# Figure 8. Évolution du prix de quelques produits de base à forte demande dans les PMA
ggplot(df, aes(df$Annees)) +
geom_line(aes(y = df$`Moyenne en pétrole brut ($ / bbl)`, color = "Moyenne en pétrole brut (USD/baril)")) +
geom_line(aes(y = df$`Maïs - ($ / mt)`, color = "Maïs (USD/tonnes métriques)")) +
geom_line(aes(y = df$`Sorgho - ($ / mt)`, color = "Sorgo (USD/tonnes métriques)")) +
geom_line(aes(y = df$`Riz Thai 5% ($ / mt)`, color = "Riz Thai 5% (USD/tonnes métriques)")) +
geom_line(aes(y = df$`Blé US HRW ($ / mt)`, color = "Blé US (USD/tonnes métriques)")) +
scale_fill_discrete(name="Experimental\nCondition") +
xlab("Années") +
ylab("Prix en USD")
# ---- Graphique # 9 ----
# Figure 9. Solde du commerce de marchandises dans les PMA (1971 à 2016)
#### Figure 9 Situation du commerce externe des PMA
library(readxl)
df <- read_excel("balance_commerciale3.xlsx",
col_types = c("numeric",
"numeric",
"numeric",
"numeric"))
str(df)
ggplot(df, aes(df$Annee)) +
geom_line(aes(y = df$Afrique_et_Haiti, color = "PMA Afrique + Haiti")) +
geom_line(aes(y = df$PMA_Asie, color = "PMA d'Asie")) +
geom_line(aes(y = df$PMA_iles, color = "PMA îles")) +
ylab(label="En millions de USD") +
xlab("Années")
# ---- Graphique # 11 ----
# Figure 11. Perception de la corruption dans les économies par niveau de revenue
library(readxl)
df <- read_excel("CorruptionIndex_GDP_per_capita.xlsx",
                 col_types = c("text", "text", "numeric",
                               "numeric"))
View(df)
str(df)
# Modèle dans le graph avec intervalle de confiances
# Linear model of the 2014 corruption index on GDP per capita and income
# category (same fit as the original df$-based formula, expressed with data=).
model <- lm(CPI_2014 ~ `GDP per capita (constant 2005 US$)_2014` + Categorie,
            data = df)
# Bug fix: the original called abline(model, lwd = 2) here, which errors
# ("plot.new has not been called yet") because no base-graphics plot is
# open; the fitted line is drawn by geom_smooth() in the ggplot below.
df.m <- df
# Basic scatter plot with ggplot2; columns are mapped bare inside aes()
# (df.m$col inside aes() is a ggplot2 anti-pattern).
p1 <- ggplot(df.m, aes(x = `GDP per capita (constant 2005 US$)_2014`,
                       y = CPI_2014, label = Pays)) +
  geom_point(color = "blue") +
  geom_smooth(method = "lm", se = TRUE) +
  geom_text(check_overlap = TRUE) +
  geom_point(aes(color = factor(Categorie))) +
  xlab("PIB per Capita 2014 (Prix en USD constant 2005)") +
  ylab("Indice de perception de la corruption 2014") +
  scale_color_discrete(name = "Niveau de revenu") +
  # stat_cor() comes from ggpubr — assumes library(ggpubr) was attached
  # earlier in the script (TODO confirm). label.x/label.y place the label.
  stat_cor(method = "pearson", label.x = 7500, label.y = 75)
  #theme(legend.position="none") # If you wish to remove the legend
p1
# ---- Graphique # 12 ----
# Figure 12. Investissements Directs Etrangers rentrants
# dans quelques zones économiques (1971 à 2016)
library(readxl)
df <- read_excel("IDE_1971_2016.xlsx",
                 col_types = c("numeric", "numeric", "numeric",
                               "numeric", "numeric"))
View(df)
# Drop the date column so that melt() only stacks the per-group FDI series.
df$Date <- NULL
df.m <- df
# melt() is presumably reshape2::melt — attached earlier in the script
# (TODO confirm); it yields a (variable, value) pair per observation.
melted_data <- melt(df.m)
View(melted_data)
# Change the names to make it less confusing if necessary
colnames(melted_data) <- c("Pays", "IDE")
# One boxplot of FDI per country group.
ggplot(melted_data, aes(x = Pays, y = IDE)) +
  geom_boxplot() +
  xlab("Groupe de pays") +
  ylab("IDE (en milliards de USD courants")
# ---- Graphique # 13 ----
# Figure 13. Incidences de la violence sur les coûts des entreprises
# par niveaux de développement économique (2015)
# ---- Graphique # 14 ----
# Figure 14. Crédit intérieur au secteur privé par niveaux de revenus en 2015
library(readxl)
df <- read_excel("Credit_PIB.xlsx",
                 col_types = c("text", "numeric", "numeric",
                               "numeric", "text"))
str(df)
# Columns are mapped bare inside aes(); df$col inside aes() is a ggplot2
# anti-pattern.
ggplot(df, aes(x = `PIB par habitant, ($ PPA internationaux courants)_2015`,
               y = `Crédit intérieur fourni au secteur privé (% du PIB)_2015`)) +
  geom_point(aes(color = factor(Categories))) +
  #geom_smooth(method = "lm", se = TRUE) +
  scale_color_discrete(name = "Groupe de pays") +
  # stat_cor() comes from ggpubr — assumes the package is attached (TODO confirm).
  stat_cor(method = "pearson", label.x = 150000, label.y = 225) +
  xlab("PIB par habitant, (USD internationaux courants en PPA)") +
  ylab("Crédit intérieur fourni au secteur privé (en pourcentage du PIB)")
# ---- Graphique # 17 ----
# Figure 17. Ressources internes par groupes de pays
# ---- Graphique # 18 ----
# Figure 18. Indicateurs de gouvernance des économies en 2015, par niveaux de revenus
library(readxl)
df <- read_excel("WGI_new.xlsx", col_types = c("text",
                                               "text", "text", "numeric"))
str(df)
colnames(df) <- c("Pays", "Niveau de revenu", "WGI", "Valeur")
View(df) # To verify that all is ok
# One boxplot panel per income level; governance indicator on the x axis.
# Columns are mapped bare inside aes()/facet_grid() instead of df$col.
ggplot(df, aes(x = WGI, y = Valeur)) +
  geom_boxplot() +
  facet_grid(. ~ `Niveau de revenu`) +
  xlab("Indicateurs de gouvernance") +
  ylab("Valeur") +
  # Rotate the x-axis labels for readability. Bug fix: the original passed a
  # stray positional nrow(4) (i.e. NULL) into element_text(); removed.
  theme(axis.text.x = element_text(angle = 90, hjust = 1))
# ---- Graphique # 19 ----
# Figure 19. Dépenses publiques en fonction des niveaux de revenus en 2011
# Figure 21 Dépenses publiques en fonction des niveaux de revenus
library(readxl)
df <- read_excel("Dep_en % PIB_PIB_per_capita2011.xlsx",
                 col_types = c("text", "text", "numeric",
                               "numeric", "numeric", "numeric"))
View(df)
str(df)
# log-log scatter of public spending vs GDP per capita with a fitted line.
# Columns are mapped bare inside aes(); df$col inside aes() is a ggplot2
# anti-pattern.
ggplot(df, aes(x = log_PIB_per_capita, y = `log_dep_pub_en%PIB`)) +
  #geom_point(aes(color = `Niveau de revenus`)) +
  scale_color_discrete(name = "Groupe de pays") +
  geom_smooth(method = "lm", se = T) +
  # stat_cor() comes from ggpubr — assumes the package is attached (TODO confirm).
  stat_cor(method = "pearson", label.x = 3.6, label.y = 1.7) +
  xlab("PIB per capita (USD constant 2005) (log)") +
  ylab("Dépenses publiques en pourcentage du PIB (log)") +
  # Jittered points coloured by income level (small horizontal jitter only).
  geom_jitter(aes(color = `Niveau de revenus`),
              position = position_jitter(width = .1, height = 0))
# ---- Graphique # 26 ----
# Figure 26. L’aide publique au développement et l’envoie de fonds des migrants dans les Pays en Voie
# de Développement et dans les Pays les Moins Avancés (en USD courants, 2014)
library(readxl)
df <- read_excel("APD_Transferts_migrants.xlsx",
                 col_types = c("text", "text", "numeric",
                               "numeric"))
str(df)
df.pma <- df
# melt() (presumably reshape2::melt — TODO confirm it is attached) stacks
# the ODA and remittance columns, keeping the two text id columns.
melted_data <- melt(df.pma)
View(melted_data)
colnames(melted_data) <- c("Pays", "Niveau de revenu", "Sources", "Montant")
View(melted_data)
# Boxplot of amounts per income level, one panel per funding source.
# Columns are mapped bare inside aes() instead of melted_data$col.
ggplot(melted_data, aes(x = `Niveau de revenu`, y = Montant)) +
  geom_boxplot() +
  facet_grid(. ~ Sources) +
  xlab("Niveaux de revenus") +
  ylab("En millions de USD à prix courants et à PPA")
|
66afa4c01ad8203a8b69ba44c9b4b563fe81e924 | 313732a7fbbf99c82c305f6aaa59e9002494fd51 | /Proses Investasi Investor/Analisis_3.r | b575bad6432b03303851207d62ab54f4835ef0c1 | [] | no_license | mzfuadi97/RstudioQ | 9cae6d46e2d7d370c4f4c59e7351a02b9c735d36 | 06d48dfd8ece935b93ef06dbf94a3bd9f380c5a8 | refs/heads/master | 2023-08-24T05:19:26.672127 | 2021-09-22T16:17:39 | 2021-09-22T16:17:39 | 408,685,241 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,286 | r | Analisis_3.r | library(dplyr)
library(lubridate)
library(ggplot2)
# Trend Investor Register
# Weekly count of distinct investors firing the 'investor_register' event.
# Assumes df_event carries nama_event, created_at and investor_id columns
# (TODO confirm against the upstream data-load step).
df_investor_register <- df_event %>%
  filter(nama_event == 'investor_register') %>%
  # floor_date() (lubridate) buckets each timestamp into its calendar week.
  mutate(tanggal = floor_date(created_at, 'week')) %>%
  group_by(tanggal) %>%
  summarise(investor = n_distinct(investor_id))
# Line chart of weekly registrations; the title states the observed trend.
ggplot(df_investor_register) +
  geom_line(aes(x = tanggal, y = investor)) +
  theme_bw() +
  labs(title = "Investor register sempat naik di awal 2020 namun sudah turun lagi",
       x = "Tanggal",
       y = "Investor Register")
# Trend Investor Investasi Pertama
# For each investor, find the timestamp of their first 'investor_pay_loan'
# event, then count first-time investors per calendar week.
df_investor_pertama_invest <- df_event %>%
  filter(nama_event == 'investor_pay_loan') %>%
  group_by(investor_id) %>%
  # min(created_at) = the investor's first investment.
  summarise(pertama_invest = min(created_at)) %>%
  mutate(tanggal = floor_date(pertama_invest, 'week')) %>%
  group_by(tanggal) %>%
  summarise(investor = n_distinct(investor_id))
# Line chart of weekly first-time investors.
ggplot(df_investor_pertama_invest) +
  geom_line(aes(x = tanggal, y = investor)) +
  theme_bw() +
  labs(title = "Ada tren kenaikan jumlah investor invest, namun turun drastis mulai Maret 2020",
       x = "Tanggal",
       y = "Investor Pertama Invest")
# Cohort Pertama Invest berdasarkan Bulan Register
# Cohort table: for each registration month, the share of investors who made
# a first investment, broken down by months elapsed since registration.
df_register_per_investor <- df_event %>%
  filter(nama_event == 'investor_register') %>%
  rename(tanggal_register = created_at) %>%
  mutate(bulan_register = floor_date(tanggal_register, 'month')) %>%
  select(investor_id, tanggal_register, bulan_register)
# One row per investor: timestamp of their first investment.
df_pertama_invest_per_investor <- df_event %>%
  filter(nama_event == 'investor_pay_loan') %>%
  group_by(investor_id) %>%
  summarise(pertama_invest = min(created_at))
df_register_per_investor %>%
  # left join: investors who never invested keep NA pertama_invest.
  left_join(df_pertama_invest_per_investor, by = 'investor_id') %>%
  # Whole 30-day "months" between registration and first investment.
  mutate(lama_invest = as.numeric(difftime(pertama_invest, tanggal_register, units = "day")) %/% 30) %>%
  group_by(bulan_register, lama_invest) %>%
  summarise(investor_per_bulan = n_distinct(investor_id)) %>%
  group_by(bulan_register) %>%
  mutate(register = sum(investor_per_bulan)) %>%
  # Drop the never-invested bucket before computing conversion shares.
  filter(!is.na(lama_invest)) %>%
  mutate(invest = sum(investor_per_bulan)) %>%
  mutate(persen_invest = scales::percent(invest/register)) %>%
  mutate(breakdown_persen_invest = scales::percent(investor_per_bulan/invest)) %>%
  select(-investor_per_bulan) %>%
  # NOTE(review): spread() is from tidyr, which is not attached by the
  # visible library() calls (dplyr/lubridate/ggplot2) — confirm library(tidyr).
  spread(lama_invest, breakdown_persen_invest)
# Cohort Retention Invest
# Retention cohort: for each first-investment month, the share of that
# cohort investing again, by 30-day periods since the first investment.
df_investasi_per_investor <- df_event %>%
  filter(nama_event == 'investor_pay_loan') %>%
  rename(tanggal_invest = created_at) %>%
  select(investor_id, tanggal_invest)
# df_pertama_invest_per_investor is built in the cohort section above.
df_pertama_invest_per_investor %>%
  mutate(bulan_pertama_invest = floor_date(pertama_invest, 'month')) %>%
  # inner join: one row per (investor, investment event).
  inner_join(df_investasi_per_investor, by = 'investor_id') %>%
  # Whole 30-day periods between the first and each subsequent investment.
  mutate(jarak_invest = as.numeric(difftime(tanggal_invest, pertama_invest, units = "day")) %/% 30) %>%
  group_by(bulan_pertama_invest, jarak_invest) %>%
  summarise(investor_per_bulan = n_distinct(investor_id)) %>%
  group_by(bulan_pertama_invest) %>%
  # Cohort size = the largest bucket (period 0 contains every investor).
  mutate(investor = max(investor_per_bulan)) %>%
  mutate(breakdown_persen_invest = scales::percent(investor_per_bulan/investor)) %>%
  select(-investor_per_bulan) %>%
  # NOTE(review): spread() requires tidyr to be attached — confirm.
  spread(jarak_invest, breakdown_persen_invest) %>%
  # Drop the trivial period-0 column (always 100%).
  select(-`0`)
f909681167ad4e1d9ec4ae2c22f231ba60215a08 | 2c38fc71287efd16e70eb69cf44127a5f5604a81 | /tests/testthat/test-utils_functional.R | cfe8c692009f3250387175ca7639d31525beaf07 | [
"MIT",
"Apache-2.0"
] | permissive | ropensci/targets | 4ceef4b2a3cf7305972c171227852338dd4f7a09 | a906886874bc891cfb71700397eb9c29a2e1859c | refs/heads/main | 2023-09-04T02:27:37.366455 | 2023-09-01T15:18:21 | 2023-09-01T15:18:21 | 200,093,430 | 612 | 57 | NOASSERTION | 2023-08-28T16:24:07 | 2019-08-01T17:33:25 | R | UTF-8 | R | false | false | 592 | r | test-utils_functional.R | tar_test("map_chr()", {
expect_equal(unname(map_chr(letters, identity)), letters)
})
# map_dbl() returns a double vector the same length as its input.
tar_test("map_dbl()", {
  x <- as.numeric(seq_len(4))
  expect_equal(map_dbl(x, identity), x)
})
# map_int() returns an integer vector.
tar_test("map_int()", {
  expect_equal(map_int(seq_len(4), identity), seq_len(4))
})
# map_lgl() returns a logical vector.
tar_test("map_lgl()", {
  expect_equal(map_lgl(c(TRUE, FALSE), identity), c(TRUE, FALSE))
})
# map_rows() applies the function to each row; here each row sums to i + 3.
tar_test("map_rows()", {
  x <- data_frame(x = seq_len(3), y = rep(1, 3), z = rep(2, 3))
  expect_equal(map_rows(x, ~sum(.x)), seq_len(3) + 3)
})
# fltr() keeps the elements for which the predicate formula is TRUE.
tar_test("fltr()", {
  expect_equal(fltr(seq_len(10), ~.x < 5), seq_len(4))
})
|
64052acd46759bd70cf09136d00b121e3d753de8 | 0749efd772a2daf6215d4377c42803a73bf860e3 | /R/EdgingtonMethod.r | af4f45cda4640f1bddba907da333bc4c5b5ddc21 | [] | no_license | cran/combinationpvalues | 1c17b8f03e767c274ef1642a91f20ffe8dc1df00 | 4b3daaa44a47aa1a7a9a1f6e8107535bcffe32f5 | refs/heads/master | 2023-07-18T04:31:47.436486 | 2021-09-03T18:00:02 | 2021-09-03T18:00:02 | 368,922,105 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 642 | r | EdgingtonMethod.r | #' @title Edgington Method
#'
#' @description Combination p-value method that uses the Edgington
#' statistic: the summation over i = 1..n of p_i, where p_i is a p-value.
#'
#' @param x A numeric vector (or list) of p-values to combine, e.g. as
#'   produced by \code{SumOfPs}.
#'
#' @return The combined statistic (the sum of the elements of \code{x}).
#' @examples
#' Output <- SumOfPs(0.1,0.3,.7)
#' Final <- EdMethod(Output)
#' @export
#'
EdMethod <- function(x) {
  # The original copied x element-by-element into a list with an explicit
  # counter and then folded it with Reduce("+"); folding the elements
  # directly is equivalent (and still supports list input / empty input,
  # where NULL is returned exactly as before).
  # Note: the unused @importFrom dplyr "%>%" tag was dropped — the pipe
  # never appears in the body.
  Reduce(`+`, as.list(x))
}
|
31aabec7385b837f4c3f2fce0e1fbaa4cfe7e5a5 | 7917fc0a7108a994bf39359385fb5728d189c182 | /cran/paws.compute/man/ec2_allocate_address.Rd | 0cdbf06bcbb859180200a50c6968c763b248fe45 | [
"Apache-2.0"
] | permissive | TWarczak/paws | b59300a5c41e374542a80aba223f84e1e2538bec | e70532e3e245286452e97e3286b5decce5c4eb90 | refs/heads/main | 2023-07-06T21:51:31.572720 | 2021-08-06T02:08:53 | 2021-08-06T02:08:53 | 396,131,582 | 1 | 0 | NOASSERTION | 2021-08-14T21:11:04 | 2021-08-14T21:11:04 | null | UTF-8 | R | false | true | 5,743 | rd | ec2_allocate_address.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_allocate_address}
\alias{ec2_allocate_address}
\title{Allocates an Elastic IP address to your AWS account}
\usage{
ec2_allocate_address(Domain, Address, PublicIpv4Pool,
NetworkBorderGroup, CustomerOwnedIpv4Pool, DryRun, TagSpecifications)
}
\arguments{
\item{Domain}{Indicates whether the Elastic IP address is for use with instances in a
VPC or instances in EC2-Classic.
Default: If the Region supports EC2-Classic, the default is \code{standard}.
Otherwise, the default is \code{vpc}.}
\item{Address}{[EC2-VPC] The Elastic IP address to recover or an IPv4 address from an
address pool.}
\item{PublicIpv4Pool}{The ID of an address pool that you own. Use this parameter to let Amazon
EC2 select an address from the address pool. To specify a specific
address from the address pool, use the \code{Address} parameter instead.}
\item{NetworkBorderGroup}{A unique set of Availability Zones, Local Zones, or Wavelength Zones
from which AWS advertises IP addresses. Use this parameter to limit the
IP address to this location. IP addresses cannot move between network
border groups.
Use \code{\link[=ec2_describe_availability_zones]{describe_availability_zones}} to
view the network border groups.
You cannot use a network border group with EC2 Classic. If you attempt
this operation on EC2 classic, you will receive an
\code{InvalidParameterCombination} error. For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html}{Error Codes}.}
\item{CustomerOwnedIpv4Pool}{The ID of a customer-owned address pool. Use this parameter to let
Amazon EC2 select an address from the address pool. Alternatively,
specify a specific address from the address pool.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{TagSpecifications}{The tags to assign to the Elastic IP address.}
}
\value{
A list with the following syntax:\preformatted{list(
PublicIp = "string",
AllocationId = "string",
PublicIpv4Pool = "string",
NetworkBorderGroup = "string",
Domain = "vpc"|"standard",
CustomerOwnedIp = "string",
CustomerOwnedIpv4Pool = "string",
CarrierIp = "string"
)
}
}
\description{
Allocates an Elastic IP address to your AWS account. After you allocate
the Elastic IP address you can associate it with an instance or network
interface. After you release an Elastic IP address, it is released to
the IP address pool and can be allocated to a different AWS account.
You can allocate an Elastic IP address from an address pool owned by AWS
or from an address pool created from a public IPv4 address range that
you have brought to AWS for use with your AWS resources using bring your
own IP addresses (BYOIP). For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-byoip.html}{Bring Your Own IP Addresses (BYOIP)}
in the \emph{Amazon Elastic Compute Cloud User Guide}.
[EC2-VPC] If you release an Elastic IP address, you might be able to
recover it. You cannot recover an Elastic IP address that you released
after it is allocated to another AWS account. You cannot recover an
Elastic IP address for EC2-Classic. To attempt to recover an Elastic IP
address that you released, specify it in this operation.
An Elastic IP address is for use either in the EC2-Classic platform or
in a VPC. By default, you can allocate 5 Elastic IP addresses for
EC2-Classic per Region and 5 Elastic IP addresses for EC2-VPC per
Region.
For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html}{Elastic IP Addresses}
in the \emph{Amazon Elastic Compute Cloud User Guide}.
You can allocate a carrier IP address which is a public IP address from
a telecommunication carrier, to a network interface which resides in a
subnet in a Wavelength Zone (for example an EC2 instance).
}
\section{Request syntax}{
\preformatted{svc$allocate_address(
Domain = "vpc"|"standard",
Address = "string",
PublicIpv4Pool = "string",
NetworkBorderGroup = "string",
CustomerOwnedIpv4Pool = "string",
DryRun = TRUE|FALSE,
TagSpecifications = list(
list(
ResourceType = "client-vpn-endpoint"|"customer-gateway"|"dedicated-host"|"dhcp-options"|"egress-only-internet-gateway"|"elastic-ip"|"elastic-gpu"|"export-image-task"|"export-instance-task"|"fleet"|"fpga-image"|"host-reservation"|"image"|"import-image-task"|"import-snapshot-task"|"instance"|"internet-gateway"|"key-pair"|"launch-template"|"local-gateway-route-table-vpc-association"|"natgateway"|"network-acl"|"network-interface"|"network-insights-analysis"|"network-insights-path"|"placement-group"|"reserved-instances"|"route-table"|"security-group"|"snapshot"|"spot-fleet-request"|"spot-instances-request"|"subnet"|"traffic-mirror-filter"|"traffic-mirror-session"|"traffic-mirror-target"|"transit-gateway"|"transit-gateway-attachment"|"transit-gateway-connect-peer"|"transit-gateway-multicast-domain"|"transit-gateway-route-table"|"volume"|"vpc"|"vpc-peering-connection"|"vpn-connection"|"vpn-gateway"|"vpc-flow-log",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
)
)
}
}
\examples{
\dontrun{
# This example allocates an Elastic IP address to use with an instance in
# a VPC.
svc$allocate_address(
Domain = "vpc"
)
# This example allocates an Elastic IP address to use with an instance in
# EC2-Classic.
svc$allocate_address()
}
}
\keyword{internal}
|
03f813b374f09810f0c6184c43a4968c4f507483 | 8576630b68703160c81fc06b95a450b5db017cb0 | /ui.R | 9511e2c23e88f2a84351e07caa5e60560cc43e83 | [] | no_license | CWesthoven/demi | 6fcda3c86498a0517cf2c9977967d5ebf094bc84 | a11f8b8b64e8bb99e1d32a21b77cedfbd7ae300c | refs/heads/master | 2020-08-07T17:28:56.361717 | 2019-10-08T04:27:18 | 2019-10-08T04:27:18 | 213,538,043 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,501 | r | ui.R | library(shinydashboard)
# Shiny dashboard UI (shinydashboard): a sidebar of player-preference
# sliders and availability filters, and a body listing the top five
# matching games (an HTML details box plus a radar-plot image for each),
# followed by a full results table. Output/input ids here must match the
# corresponding server-side render*/input$ names.
dashboardPage(
  # NOTE(review): "Psudoboat" looks like a typo for "Pseudoboat", but it is
  # a user-visible string so it is left unchanged here.
  dashboardHeader(title = "Psudoboat"),
  dashboardSidebar(
    # Preference weights, 0-100, all defaulting to the midpoint.
    sliderInput("comb", "Combat:",
                min = 0, max = 100, value = 50
    ),
    sliderInput("puzz", "Puzzle:",
                min = 0, max = 100, value = 50
    ),
    sliderInput("soci", "Social:",
                min = 0, max = 100, value = 50
    ),
    sliderInput("stry", "Player Driven Story:",
                min = 0, max = 100, value = 50
    ),
    sliderInput("role", "Roleplay:",
                min = 0, max = 100, value = 50
    ),
    sliderInput("rule", "Rules:",
                min = 0, max = 100, value = 50
    ),
    # Days of week encoded 1 = Sunday .. 7 = Saturday.
    checkboxGroupInput("dow", "Days Looking for Games",
                       c("Sunday" = 1,
                         "Monday" = 2,
                         "Tuesday" = 3,
                         "Wednesday" = 4,
                         "Thursday" = 5,
                         "Friday" = 6,
                         "Saturday" = 7), selected = 1),
    # Desired play-time window, hours 1-24.
    numericInput("hod_start", "Desired Game Time Between:", 12, min = 1, max = 24, step = 1),
    numericInput("hod_end", "And:", 17, min = 1, max = 24, step = 1),
    # NOTE(review): choice values are strings ("free", ...) but `selected`
    # uses c(1,2,3,4) — confirm the intended default selection.
    checkboxGroupInput("types", "Game Types",
                       c("Free" = "free",
                         "Paid" = "paid",
                         "One Shot" = "oneshot",
                         "Recurring" = "recurring"),
                       selected = c(1,2,3,4))
  ),
  dashboardBody(
    # Boxes need to be put in a row (or column)
    # fluidRow(
    #   box(
    #     tableOutput("player_scores")
    #   )
    # ),
    # fluidRow(
    #   tableOutput()
    # ),
    # Top match: header plus details/radar pair.
    fluidRow(
      HTML("<H2>Top Matching Games <H2>"),
      box(
        htmlOutput("game_1_details")
      ),
      box(
        imageOutput("radar_1"),
        height = 350
      )
    ),
    # Matches 2-5 repeat the same details/radar layout.
    fluidRow(
      box(
        htmlOutput("game_2_details")
      ),
      box(
        imageOutput("radar_2"),
        height = 350
      )
    ),
    fluidRow(
      box(
        htmlOutput("game_3_details")
      ),
      box(
        imageOutput("radar_3"),
        height = 350
      )
    ),
    fluidRow(
      box(
        htmlOutput("game_4_details")
      ),
      box(
        imageOutput("radar_4"),
        height = 350
      )
    ),
    fluidRow(
      box(
        htmlOutput("game_5_details")
      ),
      box(
        imageOutput("radar_5"),
        height = 350
      )
    ),
    # Full ranked results table.
    fluidRow(
      tableOutput("results_table")
    )
  )
)
6fcf0315e1103f70fe916c65dd00516d49124699 | 1dbc5e6d521d86e4eb1e6c6d8bd5ed471d77c254 | /Codigo/Descritiva.R | 6b2306eba81b6870060581ea13043a4a07a364cb | [] | no_license | JoaoVideira/IAP-Receita | cb7ecb30b10246eaed6029dc57fe9aaeaa15834d | 74080ea8ab8004e2ebe70d678e17d63a98eafea3 | refs/heads/main | 2023-06-17T01:26:56.081107 | 2021-07-19T15:29:22 | 2021-07-19T15:29:22 | 301,699,273 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 634 | r | Descritiva.R | #Análise descritiva dos dados da base: "basepainel"
# Exploratory/descriptive analysis of the 'basepainel' data set using the
# xray, visdat, dlookr and DataExplorer packages.
# NOTE(review): install.packages() calls should normally be run once,
# interactively, rather than on every execution of the script.
install.packages("xray")
library(xray)
anomalies(basepainel)
distributions(basepainel)
install.packages("visdat")
library(visdat)
vis_dat(basepainel)
vis_guess(basepainel)
vis_miss(basepainel)
# Correlation heatmap restricted to columns 5:15.
vis_cor(basepainel[,5:15])
install.packages("dlookr")
library(dlookr)
eda_report(basepainel, output_format = "html")
install.packages("DataExplorer")
library(DataExplorer)
introduce(basepainel)
# Bug fix: the original called plot_intro(iris); every other call in this
# script targets basepainel, so the iris argument was a leftover example.
plot_intro(basepainel)
plot_missing(basepainel)
profile_missing(basepainel)
plot_histogram(basepainel)
plot_bar(basepainel)
plot_qq(basepainel)
plot_correlation(basepainel)
plot_prcomp(basepainel)
|
2561d3e0a803829d348536bfc1f99f785b9e4682 | 398a50731ba81a8fc9b05ed0307b41966dfe78b4 | /cachematrix.R | 981a20afd05c8126ec654634eeea8447f64079a5 | [] | no_license | plringler/ProgrammingAssignment2 | cb273c1fcc0ca00eeedeb831e71f52f44b931413 | c5ee43ed5cb1abf11880488324f524edccdd053f | refs/heads/master | 2020-12-25T00:55:28.916314 | 2014-05-24T04:31:21 | 2014-05-24T04:31:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,564 | r | cachematrix.R | ## The functions makeCacheMatrix and cacheSolve take a matrix, make a list of functions
## to set the matrix, get the matrix, set the inverse of the matrix, and get the inverse
## of the matrix. Once the list is created, it can be called from the cacheSolve function, which
## will take this special list, calculate and cache the inverse of the matrix.
## makeCacheMatrix makes a special variable that sets functions to set and get the cached
## matrix and inverse of the matrix. This cacheMatrix variable can be called by the function
## cacheSolve to calculate the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse of x; NULL until cacheSolve() computes and stores it.
  cached <- NULL
  # Replace the wrapped matrix, invalidating any previously cached inverse.
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }
  # Accessors for the matrix and its cached inverse.
  get <- function() {
    x
  }
  setinverse <- function(solve) {
    cached <<- solve
  }
  getinverse <- function() {
    cached
  }
  # Expose the four closures by name, as expected by cacheSolve().
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve takes as an argument the resulting cacheMatrix from makeCacheMatrix
## and determines the inverse matrix using solve(), and caches it by calling the
## function setinv from makeCacheMatrix
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix wrapped in 'x'
  ## (a makeCacheMatrix object), reusing the cached inverse when present.
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    # Cache miss: compute with solve(), store, and return.
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
    return(inverse)
  }
  message("getting cached data")
  inverse
}
|
79999ba732013710c6d92f358225a1f1f9d8f6b1 | c278cd820003d18052f32f37409cfbb11ccc6d1b | /man/R_bar_ceiling.Rd | 8eda6e2fb91e864307cd27b65b14cf4e34aacb2e | [
"MIT"
] | permissive | ashander/phenoecosim | d4433c86fa96d1873d02cab0cb6d119d31412a01 | 1fe278475b6189dd675ca97688d3a2fa86476c42 | refs/heads/master | 2020-12-26T04:38:28.976657 | 2016-06-27T02:40:50 | 2016-06-27T02:40:50 | 14,127,841 | 1 | 0 | null | 2016-05-08T01:08:46 | 2013-11-05T00:32:15 | R | UTF-8 | R | false | true | 657 | rd | R_bar_ceiling.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{R_bar_ceiling}
\alias{R_bar_ceiling}
\title{Compute population growth rate under stabilizing selection and ceiling regulation}
\usage{
R_bar_ceiling(R0, Wbar, N, K)
}
\arguments{
\item{R0}{basic reproductive number}
\item{Wbar}{average fitness}
\item{N}{number of individuals in this generation}
\item{K}{carrying capacity}
}
\description{
Compute population growth rate under stabilizing selection and ceiling regulation
}
\details{
Assumes ceiling population regulation
would be good to have separate DD function specified after
chevin and lande 2010
}
|
6f99972c327c961d29d2401e907a2170c6775424 | 1e7af84e367e61bca72bd85e8cce15ccf40a5f1d | /R/tune_grid.R | 60f05fe5ba1d47ab33b7a1d0cf819b64cc22dba1 | [] | no_license | milosvil/caretTuneGrid | 2e7cac5cf70a5ac64689af154874d7daeb420bf4 | 6045e782650ffb4133b35dfee508c6620a942b79 | refs/heads/master | 2021-04-27T21:48:51.758247 | 2018-05-10T20:11:11 | 2018-05-10T20:11:11 | 122,407,108 | 4 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,687 | r | tune_grid.R | tune_grid <- function() {
model_exist <- function(c7, method) {
for (i in 1:length(c7)) {
for (j in 1:length(c7_data[[i]])) {
if (method == c7_data[[i]][[j]]$method) {
return(TRUE)
}
}
}
return(FALSE)
}
get_params <- function(c7, method) {
for (i in 1:length(c7)) {
for (j in 1:length(c7_data[[i]])) {
if (method == c7_data[[i]][[j]]$method) {
return(c7_data[[i]][[j]]$tuning_parameters)
}
}
}
return(NULL)
}
create_tune_grid <- function(params, method) {
if (is.na(params[1])) { return("# No tuning parameters for this model") }
pr <- paste0(params, " = NA", collapse = ",\n ")
tGrid <- paste0("# Tuning parameters for method '",method,"'\ntuneGrid <- expand.grid(", pr, ")\n")
return(tGrid)
}
get_doc <- rstudioapi::getActiveDocumentContext()
method <- get_doc$selection[[1]]$text
if (model_exist(c7_data, method)) {
params <- get_params(c7_data, method)
tGrid <- create_tune_grid(params, method)
start <- get_doc$selection[[1]]$range$start[1]
if (start - 2 == 0) {
ind <- 0
} else {
ind <- 1:(start-2)
}
cont <- get_doc$contents
cont_new <- c(cont[ind], tGrid, cont[(start-1):(length(cont))])
cont_txt <- paste(cont_new, collapse = "\n")
rstudioapi::insertText(rstudioapi::document_range(c(1,1), end = c(length(cont)+1,1)), text = " ", id = get_doc$id)
rstudioapi::insertText(rstudioapi::document_range(c(1,1), c(1,1)), text = cont_txt, id = get_doc$id)
rstudioapi::setCursorPosition(rstudioapi::document_range(c(start,1), c(start,1)), id = get_doc$id)
}
}
|
90328fa8c4a6cebde86fc974ca3b6e30737aa0f6 | e6cb6c696e671d9fea22a33dc3b06a990d9eac27 | /createtextfilesfromhtml (1).R | 6443282166489f233ee08c2847d0e074fef858bb | [] | no_license | ankit51/indix_data_monster | a139df8fe10ab99be63f88b81e0d3a37940f0a0e | 660c2f14ea66c57150b5dbad96e2ce6a0bef59fa | refs/heads/master | 2020-07-31T16:22:53.905824 | 2016-11-13T05:52:45 | 2016-11-13T05:52:45 | 73,596,290 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,985 | r | createtextfilesfromhtml (1).R | # This is the second part of the code (2 of 3) submitted to Indix datamonster hackathon and three parts combined handle classification of
# product pages from others.
# The code reads each of the HTML files from the trainingdatafolder directory and converts it into a text file,
# then writes the text files to the trainingdatafoldertxtv1 directory.
# The text file name is the same as the HTML file, except that the extension of the file is now .txt.
# Please ensure to review the paths and set them appropriately, pointing to a folder on your local machine, in order to execute the code.
# Convert one or more HTML inputs (file path, raw HTML text, or URL) into
# a character vector of plain text, one element per input, with scripts,
# styles, noscript and form content stripped. Relies on RCurl (getURL) and
# XML (htmlParse/xpathSApply).
htmlToText <- function(input, ...) {
  ###---PACKAGES ---###
  # NOTE(review): require() returns FALSE instead of erroring when a
  # package is missing — library() would fail fast here.
  require(RCurl)
  require(XML)
  ###--- LOCAL FUNCTIONS ---###
  # Determine how to grab html for a single input element:
  # existing file -> read it; contains </html> -> use as-is;
  # no spaces -> treat as a URL and download; otherwise NULL.
  evaluate_input <- function(input) {
    # if input is a .html file
    if(file.exists(input)) {
      char.vec <- readLines(input, warn = FALSE)
      return(paste(char.vec, collapse = ""))
    }
    # if input is html text
    if(grepl("</html>", input, fixed = TRUE)) return(input)
    # if input is a URL, probably should use a regex here instead?
    if(!grepl(" ", input)) {
      # download the SSL certificate bundle once, in case of https problems
      if(!file.exists("cacert.perm")) download.file(url="http://curl.haxx.se/ca/cacert.pem", destfile="cacert.perm")
      return(getURL(input, followlocation = TRUE, cainfo = "cacert.perm"))
    }
    # return NULL if none of the conditions above apply
    return(NULL)
  }
  # convert HTML to plain text: extract every text node that is not inside
  # script/style/noscript/form elements.
  convert_html_to_text <- function(html) {
    doc <- htmlParse(html, asText = TRUE)
    text <- xpathSApply(doc, "//text()[not(ancestor::script)][not(ancestor::style)][not(ancestor::noscript)][not(ancestor::form)]", xmlValue)
    return(text)
  }
  # format text vector into one character string
  collapse_text <- function(txt) {
    return(paste(txt, collapse = " "))
  }
  ###--- MAIN ---###
  # STEP 1: Evaluate input
  html.list <- lapply(input, evaluate_input)
  # STEP 2: Extract text from HTML
  text.list <- lapply(html.list, convert_html_to_text)
  # STEP 3: Return text
  # NOTE(review): sapply() may not simplify to a character vector for
  # unusual inputs; vapply(..., character(1)) would be type-stable.
  text.vector <- sapply(text.list, collapse_text)
  return(text.vector)
}
# read each html / xml file from trainingdatafolder directory on the file system
# and write a plain-text version (same basename + ".txt") to
# trainingdatafoldertxtv1 via the htmlToText() helper defined above.
# NOTE(review): setwd() is called inside the loop with hard-coded absolute
# paths; building paths with file.path() and absolute filenames
# (full.names = TRUE) would avoid mutating the working directory.
library(RCurl)
library(XML)
filenames <- list.files("/home/sunil/Desktop/indixhack/trainingdatafolder", full.names=F)
for (i in filenames) {
  setwd("/home/sunil/Desktop/indixhack/trainingdatafolder")
  # assign input (could be a html file, a URL, html text, or some combination of all three is the form of a vector)
  input <- i
  # calling the htmlToText custom function with file name as the parameter
  txt <- htmlToText(input)
  # post-processing the output returned by htmlToText custom function:
  # join the per-input results into one newline-separated string.
  txt<-paste(txt, collapse = "\n")
  outfilename<-paste0(input,".txt")
  # writing the created text file to trainingdatafoldertxtv1 directory
  setwd("/home/sunil/Desktop/indixhack/trainingdatafoldertxtv1")
  writeLines(txt,outfilename)
}
|
ffec40e611e968142ea8943b8bea4af1b603648b | afbc278644d03a69a62fb3f602697151d4b083ab | /R/evaluate.R | 2bd4cef310c48f7a85039f6fbd772554b268796c | [] | no_license | FJValverde/entropies | 6f0b54b1d289d052763f929987f026b8425d5dbb | d9bbbbc5d9759e5d0d84955d8dc0f012ea8de0b6 | refs/heads/master | 2023-05-02T04:46:32.020156 | 2023-04-24T18:46:58 | 2023-04-24T18:46:58 | 44,973,393 | 1 | 4 | null | null | null | null | UTF-8 | R | false | false | 2,843 | r | evaluate.R | #' A function to evaluate confusion matrices and contingency tables.
#'
#' Provides also the EMA and NIT over that provided by perplexities.
#'
#' @param data Either a \code{\link[base]{table}} working as an n-way contingency
#' table or a \code{\link[caret]{confusionMatrix}}
#' @param ... Further arguments passed on to the class-specific methods.
#' @return At least, the concatenation of the entropies (whether split or not), the \code{EMA}
#' and \code{NIT} rate as explained in \cite{val:pel:14a}.
#' @export
# S3 generic: dispatches on class(data) to the evaluate.* methods below.
evaluate <- function(data, ...) UseMethod("evaluate")
#' A function to evaluate the entropic coordinates of a distribution
#'
#' @param data A data frame of entropic coordinates, as tested by
#'   \code{hasCmetEntropicCoords}.
#' @param ... Further arguments (currently unused).
#' @return The perplexities of \code{data} augmented with the \code{EMA}
#'   (1/kx_y) and \code{NIT} rate (muxy/k) columns.
#' @importFrom dplyr %>%
#' @export
evaluate.data.frame <- function(data, ...){
    # Only proceed when the required entropy fields are present; EMA and
    # NIT are derived from the perplexity columns kx_y, muxy and k.
    if(hasCmetEntropicCoords(data)){
        return(perplexities(data) %>% ##filter(type!="XY") %>%
                   mutate(EMA=1/kx_y, NIT=muxy/k))
    } else
        stop("Missing fields in entropy data to work out performance indicators.")
}
#' A function to evaluate a confusion matrix
#'
#' @description The criteria evaluated at present are:
#' \itemize{
#' \item entropic quantities, as issued from \code{jentropies}
# \item accuracies and kappa (and their intervals), e.g. as issued from
# \code{\link[caret]{confusionMatrix}}.
#' \item perplexities, as issued from \code{\link{perplexities}}
#' \item Expected Modified Accuracy (EMA) and Normalized Information Transfer (NIT) rate
#' }
#' @param data A confusion matrix as per \code{\link[caret]{confusionMatrix}}
##' @param split=FALSE Whether to evaluat the split entropies or not.
#' @return The concatenation of the entropies (whether split or not), the
#' \code{data$overall} information from the confusion matrix, and the \code{EMA}
#' and \code{NIT} rate as explained in \cite{val:pel:14a}.
#' @importFrom caret confusionMatrix
#' @export
evaluate.confusionMatrix <- function(data, ...){
    # Delegate to the data.frame method via the joint entropies of the matrix.
    return(evaluate(jentropies(data)))
}
#' A primitive to evaluate a contingency table
#'
#' @description The criteria evaluated at present are:
#' \itemize{
#' \item entropic quantities, as issued from \code{entropies} and \code{entropicCoords}.
#' \item perplexities, as issued from \code{\link{perplexities}}
#' }
#' @param data A contingency table as per \code{\link[base]{table}}
#' @param ... Further arguments (currently unused; kept for S3 consistency).
#' @return The entropy-based evaluation, i.e. \code{evaluate} applied to the
#'   joint entropies of \code{data}.
#' @examples evaluate.table(UCBAdmissions) # from package datasets
#' @export
evaluate.table <- function(data, ...){
    # Delegate to evaluate.data.frame via the joint entropies of the table;
    # the legacy, commented-out computation that duplicated this pipeline
    # (and the doc mismatch naming the argument `cm`) has been cleaned up.
    return(evaluate(jentropies(data)))
}
d9bfba35899e2459acb90574e14cc3b1108be345 | 8f795c3dd3d10105f636854d1abaf58e5ad49d8c | /cachematrix.R | 346d41789a3b1e81d8874dc415ead3d727c94fe1 | [] | no_license | liamvt/ProgrammingAssignment2 | a391da0a686b288015372160722fd9eabb66bc49 | 90fd1a2888ccb32696cf25213cc47fdcb590b432 | refs/heads/master | 2021-01-18T00:30:24.504623 | 2015-04-27T03:49:37 | 2015-04-27T03:49:37 | 34,555,660 | 0 | 0 | null | 2015-04-25T04:45:54 | 2015-04-25T04:45:54 | null | UTF-8 | R | false | false | 1,513 | r | cachematrix.R | ## makeCacheMatrix is a function that stores a list of functions that operate
## on the matrix given as its argument x.
## Cachesolve stores the inverse of the matrix x in the cache
## Takes the argument x, outputs a list of four functions (set, get, setinverse
## and getinverse)
# Wrap a matrix together with a cache for its inverse. Returns a list of
# four closures: set/get for the matrix and setinverse/getinverse for the
# cached inverse (filled in by cacheSolve()).
makeCacheMatrix <- function(x = matrix()) {
    # m caches the inverse of x; NULL means "not computed yet".
    m <- NULL
    # Replace the stored matrix and invalidate the cached inverse.
    set <- function(y) {
        x <<- y
        m <<- NULL
    }
    get <- function() x
    setinverse <- function(solve) m <<- solve
    getinverse <- function() m
    list(set = set, get = get,
         setinverse = setinverse,
         getinverse = getinverse)
}
## Takes the output of makeCacheMatrix as its input.
## Returns the inverse of matrix x, by either using the solve() function or
## retrieving the stored inverse calculation
# Compute (or retrieve) the inverse of the matrix wrapped by
# makeCacheMatrix(); extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
    ## Return a matrix that is the inverse of 'x'
    # Cache hit: reuse the stored inverse without recomputing.
    m<-x$getinverse()
    if(!is.null(m)){
        message("getting cached data")
        return(m)
    }
    # Cache miss: compute the inverse with solve() and store it.
    matrix<-x$get()
    m<-solve(matrix, ...)
    x$setinverse(m)
    m
}
## Test code to confirm that the functions work
# Smoke test: invert a 2x2 matrix; the second cacheSolve() call should
# print "getting cached data" instead of recomputing.
m <- matrix(c(-1, -2, 1, 1), 2,2)
x <- makeCacheMatrix(m)
x$get()
inv <- cacheSolve(x) ## Calculates the inverse using solve()
inv
inv <- cacheSolve(x) ## Retrieves the cached solution
inv
|
918ff900501dc337a20c1c195f783229cf80c8b5 | a6e37171b21874eabd8031fbb965b9927b643664 | /R/telomere_genes.R | 62776844032e301051cd458f8c7f1af89884ac94 | [
"Apache-2.0"
] | permissive | ibn-salem/telomere_genes | 530dfc3ffaef8f185a7080e40eac482687703286 | c555386dca0919ec08faacb28b2842c59e0c1960 | refs/heads/master | 2021-08-18T19:30:50.893556 | 2017-11-23T17:08:42 | 2017-11-23T17:08:42 | 108,840,653 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,239 | r | telomere_genes.R | #'
#'
#' A script to get all human and mouse genes with their distance to telomeres
#'
#' For each species: download gene/transcript annotation from Ensembl via
#' biomaRt, keep the longest transcript per gene, build 1-bp GRanges at each
#' transcription start site, annotate each TSS with its telomere distance
#' (add_telomere_dist, sourced below), and write the result to
#' <outPrefix>.*.tsv. Finally, human-mouse ortholog pairs are downloaded and
#' written as well.

# load required packages
library(GenomicRanges)
library(TxDb.Hsapiens.UCSC.hg38.knownGene)
library(TxDb.Mmusculus.UCSC.mm10.knownGene)
library(biomaRt)
library(tidyverse)

source("R/get_telomere_dist.R")

#-------------------------------------------------------------------
# A few parameters
#-------------------------------------------------------------------
outPrefix <- "results/telomere_genes_v02"

dir.create("results", showWarnings = FALSE)

#-------------------------------------------------------------------
# get human genes with annotation:
#-------------------------------------------------------------------
# define attributes and download genes from Ensembl
ensembl_human <- useMart(biomart = "ENSEMBL_MART_ENSEMBL",
                   dataset = "hsapiens_gene_ensembl")

geneAttributes = c("ensembl_gene_id", "ensembl_transcript_id",
                   "external_gene_name", "external_gene_source",
                   "chromosome_name", "transcript_start","transcript_end",
                   "transcription_start_site", "strand", "gene_biotype")

human_genes = getBM(attributes = geneAttributes, mart = ensembl_human)

# extract seqinfo object (chromosome names/lengths of the UCSC hg38 annotation)
seqInfo <- seqinfo(TxDb.Hsapiens.UCSC.hg38.knownGene)

# select longest transcript per gene
genesDF_human <- human_genes %>%
  as_tibble() %>%
  mutate(transcript_size = transcript_end - transcript_start) %>%
  filter(!is.na(chromosome_name)) %>%
  group_by(ensembl_gene_id) %>%
  filter(min_rank(desc(transcript_size)) == 1) %>%
  # filter for regular chromosomes contained in Bioc object
  filter(paste0("chr", chromosome_name) %in% seqlevels(seqInfo)) %>%
  ungroup()

# convert into GRanges (1-bp ranges located at each TSS)
tssGR_human <- GRanges(
  paste0("chr", genesDF_human$chromosome_name),
  IRanges(genesDF_human$transcription_start_site, genesDF_human$transcription_start_site),
  strand = ifelse(genesDF_human$strand == 1, "+", "-"),
  seqinfo = seqInfo
)
mcols(tssGR_human) <- genesDF_human %>%
  select(-chromosome_name, -transcription_start_site, -strand) %>%
  as.data.frame()

# add distance to telomere
tssGR_human <- add_telomere_dist(tssGR_human)

humanDF <- mcols(tssGR_human) %>%
  as.data.frame() %>%
  as_tibble() %>%   # fixed: as.tibble() is deprecated in favour of as_tibble()
  select(ensembl_gene_id, external_gene_name, external_gene_source,
         telomere_dist, telomere, everything())

write_tsv(humanDF, paste0(outPrefix, ".human_genes_with_telomere_distance.tsv"))

#-------------------------------------------------------------------
# get mouse genes with annotation (same procedure as for human):
#-------------------------------------------------------------------
seqInfo_mouse <- seqinfo(TxDb.Mmusculus.UCSC.mm10.knownGene)

# define attributes and download genes from Ensembl
ensembl_mouse <- useMart(biomart = "ENSEMBL_MART_ENSEMBL",
                         dataset = "mmusculus_gene_ensembl")

geneAttributes = c("ensembl_gene_id", "ensembl_transcript_id",
                   "external_gene_name", "external_gene_source",
                   "chromosome_name", "transcript_start","transcript_end",
                   "transcription_start_site", "strand", "gene_biotype")

mouse_genes = getBM(attributes = geneAttributes, mart = ensembl_mouse)

# select longest transcript per gene
genesDF_mouse <- mouse_genes %>%
  as_tibble() %>%
  mutate(
    transcript_size = transcript_end - transcript_start,
    chr = paste0("chr", chromosome_name)
  ) %>%
  filter(!is.na(chromosome_name)) %>%
  group_by(ensembl_gene_id) %>%
  filter(min_rank(desc(transcript_size)) == 1) %>%
  # filter for regular chromosomes contained in Bioc object
  filter(chr %in% seqlevels(seqInfo_mouse)) %>%
  ungroup()  # added for consistency with the human pipeline above

# convert into GRanges
tssGR_mouse <- GRanges(
  genesDF_mouse$chr,
  IRanges(genesDF_mouse$transcription_start_site, genesDF_mouse$transcription_start_site),
  strand = ifelse(genesDF_mouse$strand == 1, "+", "-"),
  seqinfo = seqInfo_mouse)

mcols(tssGR_mouse) <- genesDF_mouse %>%
  select(-chromosome_name, -transcription_start_site, -strand) %>%
  as.data.frame()

# add distance to telomere
tssGR_mouse <- add_telomere_dist(tssGR_mouse)

mouseDF <- mcols(tssGR_mouse) %>%
  as.data.frame() %>%
  as_tibble() %>%   # fixed: as.tibble() is deprecated in favour of as_tibble()
  select(ensembl_gene_id, external_gene_name, external_gene_source,
         telomere_dist, telomere, everything())

write_tsv(mouseDF, paste0(outPrefix, ".mouse_genes_with_telomere_distance.tsv"))

#-------------------------------------------------------------------
# get human-mouse ortholog pairs
#-------------------------------------------------------------------
# attributes for orthologs (queried against the human mart)
orthologAttr = c("ensembl_gene_id",
                 paste0("mmusculus",
                        c("_homolog_ensembl_gene", "_homolog_orthology_type",
                          "_homolog_subtype", "_homolog_orthology_confidence",
                          "_homolog_perc_id", "_homolog_perc_id_r1",
                          "_homolog_dn", "_homolog_ds")))

orthologs = getBM(attributes = orthologAttr, mart = ensembl_human)

# keep only human genes that actually have a mouse homolog
orthologsDF <- orthologs %>%
  as_tibble() %>%   # fixed: as.tibble() is deprecated in favour of as_tibble()
  filter(mmusculus_homolog_ensembl_gene != "")

write_tsv(orthologsDF, paste0(outPrefix, ".human_mouse_orthologs.tsv"))
2b12158a31695cfca15abcb06887d62622975338 | 42ff40e63ba5c362f8067a7503aa7d1aa2263109 | /analyses/R/Fstats_Ho_Significance.R | 7ab71f9e918b370d01512d048cd68c8886aff53f | [] | no_license | mfisher5/PCod-Korea-repo | 686820f87625e5a7ea68c97c9286b783199c267e | 90324e4431292fda757abb364bcc002dd4117e7e | refs/heads/master | 2021-01-01T16:37:52.932264 | 2018-08-20T23:12:16 | 2018-08-20T23:12:16 | 81,503,317 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,025 | r | Fstats_Ho_Significance.R | # Purpose: testing for significant changes in Fis, Ho per population
#
#From: MF 6/10/2018
#
#
#
# ---------------------------------------------------------------------------------
# Import these libraries
library(readr)
library(reshape2)
library(dplyr)
# Set working directory
setwd("D:/Pacific cod/DataAnalysis/PCod-Korea-repo/analyses")
# Western Population ------------------------------------------------------
# Import data
mydata <- read_delim("D:/Pacific cod/DataAnalysis/PCod-Korea-repo/results/verif/Ho_Fis_perLocus_perSite.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
head(mydata)
# Adjust data frame
mydata_het <- subset(x = mydata, select = c(locus, pop, Ho))
head(mydata_het)
west_het <- filter(mydata_het, pop %in% c("YS_121316_07", "BOR07_21_2"))
head(west_het)
mydata_fis <- subset(x = mydata, select=c(locus,pop,Fis_wc))
head(mydata_fis)
west_fis <- filter(mydata_fis, pop %in% c("YS_121316_07", "BOR07_21_2"))
head(west_fis)
# Run Wilcox Test
wilcox.test(Ho ~ pop, data = west_het)
wilcox.test(Fis_wc ~ pop, data = west_fis)
# Southern Population ------------------------------------------------------
# Import data
mydata <- read_delim("D:/Pacific cod/DataAnalysis/PCod-Korea-repo/results/verif/Ho_Fis_perLocus_perYear_South.txt", "\t", escape_double = FALSE, trim_ws = TRUE)
head(mydata)
# Adjust data frame
mydata_het <- subset(x = mydata, select = c(locus, pop, Ho))
head(mydata_het)
mydata_het_1 <- filter(mydata_het, pop %in% c("JB021108_23","GEO020414_30_2"))
mydata_het_2 <- filter(mydata_het, pop %in% c("NA021015_26","GEO020414_30_2"))
mydata_fis <- subset(x = mydata, select=c(locus,pop,Fis_wc))
head(mydata_fis)
mydata_fis_1 <- filter(mydata_fis, pop %in% c("JB021108_23","GEO020414_30_2"))
mydata_fis_2 <- filter(mydata_fis, pop %in% c("NA021015_26","GEO020414_30_2"))
# Run Wilcox Test
wilcox.test(Ho ~ pop, data = mydata_het_1)
wilcox.test(Ho ~ pop, data = mydata_het_2)
wilcox.test(Fis_wc ~ pop, data = mydata_fis_1)
wilcox.test(Fis_wc ~ pop, data = mydata_fis_2)
|
7f0598100c316b1a9f43d154c069bb7b7bd3ec5d | 58bbdaff8ead843c3d6f1464bb9369551d453c3c | /static/download/minicursos/R/2019/aula1/Aula1.R | 645a813000f51ab4e186ead8516b5aba880e9086 | [] | no_license | pet-estatistica/site | 3e30e37cc26a8e4f38a0b94f2bbb568538cf7820 | 36ac97ed0f7c7e7e3a9663d9d012a7b3e075aec0 | refs/heads/master | 2022-10-27T21:32:45.170179 | 2022-10-17T02:02:59 | 2022-10-17T02:02:59 | 144,061,167 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 16,792 | r | Aula1.R |
##################
# #
# FUNÇÕES #
# #
##################
## Funções Iniciais
## ------------------------------------------------------------------------
c() # Cria um Vetor
getwd() # Mostra o Diretório de Trabalho Atual
setwd('C:/Diretório') # Muda o Diretório de Trabalho
dir() # Lista os Arquivos do Diretório de Trabalho Atual
sessionInfo() # Mostra algumas informações da sessão instalada
install.packages('nome_do_pacote') # Instala um pacote
library(nome_do_pacote) # Carrega um pacote
require(nome_do_pacote) # Carrega um pacote
help.start() # Mostra um tutorial para novos usuários de R
help('função'); ?função # Mostra a documentação de um pacote ou função
help.search('mean'); ??mean # Faz uma pesquisa mais geral para o pacote ou função
example('plot') # Mostra exemplos de alguma função
print() # Imprime o resultado de uma variável
q() # Fecha a Sessão
ls(); objects() # Exibe os objetos que foram armazenados
rm(x,y) # Remove o objeto ‘x’ e ‘y’
rm(list=ls()); rm(list=objects()) # Remove todos os objetos que foram armazenados
str() # Mostra a estrutura de um objeto
class() # Verifica a classe de um objeto
## Operadores Básicos (R utilizado como Calculadora)
## ------------------------------------------------------------------------
4 + 4 # Adição
4 - 4 # Subtração
4 * 4 # Multiplicação
4 / 4 # Divisão
(1+1) * 5 # Respeita a ordem comum de operações matemáticas
4^2 # Exponenciação (4**2 também pode ser utilizado)
14 %% 3 # Módulo
## Funções Matemáticas
## ------------------------------------------------------------------------
sqrt(4) # Raiz Quadrada
factorial(4) # Fatorial
exp(1);exp(2) # Exponencial
abs(c(-2,-4,5,7)) # Absoluto
log(2.71828182) # Logaritmo Neperiano
round(2.718281, digits = 2) # Arredondamento com dois dígitos
ceiling(2.718281) # Arredondamento para cima
floor(2.718281) # Arredondamento para baixo
## Funções Estatísticas
## ------------------------------------------------------------------------
length(c(2,3,7,9,10)) # Imprime o comprimento do vetor
mean(c(4,3,7,8)) # Calcula a média
median(c(5,6,7,10)) # Calcula a mediana
min(c(5,6,7,10)) # Imprime o valor mínimo
max(c(5,6,7,10)) # Imprime o valor máximo
var(c(5,6,7,10)) # Calcula a variância
sd(c(5,6,7,10)) # Calcula o desvio padrão
## Criando Sequência de Valores
## ------------------------------------------------------------------------
1:10 # Sequência de números inteiros de 1 a 10.
rep(x=1,times=20) # 20 repetições do número 1.
seq(from=1,to=16, length.out = 7) # Sequência de 1 a 16 com comprimento 7.
seq(from=1,to=20, by = 2) # Sequência de 1 a 20 com intervalo 2.
runif(n = 10, max = 6, min = 5) # 10 números aleatórios no intervalo entre 5 e 6 com base na distribuição uniforme de probabilidade.
rnorm(n = 50, mean = 0, sd = 1) # 50 números aleatórios com base na distribuição normal de probabilidade com média 0 e desvio padrão 1.
##################
# OBJETOS #
# E #
# CLASSES #
##################
## VETOR
## ------------------------------------------------------------------------
##
##
x <- 25
class(x)
## CRIAÇÃO DE VETORES
## ------------------------------------------------------------------------
(meu_primeiro_vetor <- c(1,2,3)) # Vetor com números
(meu_segundo_vetor <- c("S&P","500")) # Vetor com palavras
## CLASSES
## ------------------------------------------------------------------------
var1 <- c(367,352,459)
class(var1)
var2 <- c(367L,352L,459L)
class(var2)
object.size(var1);object.size(var2) # O espaço ocupado na memória para um objeto do tipo 'integer' é menor.
var3 <- c(TRUE,FALSE,FALSE)
class(var3)
var4 <- c(5.2+3i, 3.8+4i)
class(var4)
var5 <- c("João","Matheus","Lineu","Alberto")
class(var5)
var6 <- c("João"=10,"Matheus"=9,"Lineu"=8,"Alberto"=10)
class(var6)
names(var6)
## VERIFICAÇÃO DE CLASSE
## ------------------------------------------------------------------------
var1;is.numeric(var1) # Retorna 'TRUE', pois o objeto 'var1' é do tipo 'numeric'
var2;is.integer(var2) # Retorna 'TRUE', pois o objeto 'var2' é do tipo 'integer'
var3;is.character(var3) # Retorna 'FALSE', pois o objeto 'var3' não é do tipo 'character'. Esse objeto é do tipo 'logical'
var3;is.logical(var3) # Retorna 'TRUE', pois o objeto 'var3' é do tipo 'logical'
var4;is.complex(var4) # Retorna 'TRUE', pois o objeto 'var4' é do tipo 'complex'
var5;is.character(var5) # Retorna 'TRUE', pois o objeto 'var5' é do tipo 'character'
var6;is.character(var6) # Retorna 'FALSE', pois o objeto 'var6' não é do tipo 'character'. Esse objeto é do tipo 'numeric'
## FORÇANDO OBJETO A MUDAR DE CLASSE
## ------------------------------------------------------------------------
var1;as.character(var1) # Transforma a classe do objeto 'var1' para 'character'
var3;as.integer(var3) # Transforma a classe do objeto 'var3' para 'integer'
c(1,0,1);as.logical(c(1,0,1)) # Transforma a classe do vetor 'c(1,0,1)' para 'logical'
var5;as.numeric(var5) # Não transforma a classe do objeto 'var5' para 'numeric'. Por conta disso, retorna um vetor com 'NA'.
## INDEXAÇÃO PARA VETORES
## ------------------------------------------------------------------------
## Vetores com Números
(vetor <- c(1,23,3,47,90,6,7,8, 5 ,6 ,10 , 45)) # Criando um novo vetor de números
vetor[5];vetor[c(1,2,3,4,6)] # Selecionando o elemento de posição 5; Selecionando os elementos da posição 1,2,3,4 e 6.
vetor[1:3];vetor[seq(1, 3)] # Selecionando os primeiros 3 elementos; Selecionando os primeiros 3 elementos utilizando a função 'seq'.
vetor[-3];vetor[-c(1,4)] # Removendo o elemento da posição 3; Removendo os elementos da posição 1 e 4.
## Vetores com Nomes
(chr = c("Barack", "Obama")) # Criando um novo vetor de palavras
(names(chr) = c("Nome", "Sobrenome"))
chr
chr["Nome"]
## SELEÇÃO CONDICIONAL PARA VETORES
## ------------------------------------------------------------------------
(vetor2 <- c(18, 12 , 31 , 56 , 7 , 5 , 9 )) # Criando um novo vetor de números
vetor > 15;vetor[vetor > 15]
vetor > 30 & vetor < 100;vetor[vetor > 30 & vetor < 100] # Operador lógico '&' significa 'e' (and)
vetor > 30 | vetor < 100;vetor[vetor > 30 | vetor < 100] # Operador lógico '|' significa 'ou' (or)
(chr2 <- letters[1:20])
chr2[1:5]
chr2[chr2 == "e"]
chr2[chr2 == "b" | chr2 == "f"]
which(chr2 == "e") # Retorna a posição em que o elemento "e" se encontra no vetor
## FATOR
## ------------------------------------------------------------------------
##
##
## CRIAÇÃO DE FATORES
## ------------------------------------------------------------------------
(vec1 <- c("Macho","Femea","Femea","Macho","Macho"))
(fac_vec1 <- factor(vec1))
class(vec1)
class(fac_vec1)
## VARIÁVEIS CATEGÓRICAS NOMINAIS
## ------------------------------------------------------------------------
(animais <- c("Zebra", "Pantera", "Rinoceronte", "Macaco", "Tigre"))
(fac_animais <- factor(animais))
class(fac_animais)
levels(fac_animais)
## VARIÁVEIS CATEGÓRICAS ORDINAIS
## ------------------------------------------------------------------------
(grad <- c("Mestrado", "Doutorado", "Bacharelado", "Mestrado", "Mestrado"))
(fac_grad <- factor(grad, ordered = TRUE, levels = c("Doutorado", "Mestrado", "Bacharelado")))
levels(fac_grad)
is.ordered(fac_grad)
is.ordered(fac_animais)
summary(grad);summary(fac_grad)
## MATRIZ
## ------------------------------------------------------------------------
##
##
## CRIAÇÃO DE MATRIZES
## ------------------------------------------------------------------------
(matriz <- matrix(1:9, nrow = 3, ncol = 3)) # Preenchimento por coluna
(matriz <- matrix(1:9, nrow = 3, ncol = 3, byrow = TRUE)) # Preenchimento por linha
(matriz <- matrix(c(1,4,5,7),nrow=2,ncol=2)) # Criação de uma matriz de ordem 2
t(matriz) # Transposta
solve(matriz) # Inversa
diag(1:3) # Matriz Diagonal
matriz;rbind(matriz, c(0,1)) # Nova Linha
matriz;cbind(matriz, c(2,3)) # Nova Coluna
## DIMENSÕES DA MATRIZ
## ------------------------------------------------------------------------
dim(matriz) # N° de linhas e colunas
nrow(matriz) # N° de linhas
ncol(matriz) # N° de colunas
## OPERAÇÕES COM MATRIZES
## ------------------------------------------------------------------------
(matriz1 <- matrix(1:16,nrow = 4, ncol = 4))
(matriz2 <- matrix(seq(1,32,by=2),nrow=4,ncol=4))
matriz2 + 3 # Adição
matriz2 - 1 # Subtração
matriz2 * 3 # Multiplicação
matriz2 / 2 # Divisão
matriz1 * matriz2 # Multiplicação elemento por elemento
matriz1 %*% matriz2 # Multiplicação Matricial
## INDEXAÇÃO PARA MATRIZES
## ------------------------------------------------------------------------
(matriz <- matrix(c(1,4,5,7),nrow=2,ncol=2))
matriz[1,2] # Elemento da linha 1 e coluna 2
matriz[1,] # Elementos da linha 1
matriz[, 2] # Elementos da coluna 2
matriz[, 2, drop = FALSE] # Elementos da coluna 2 (Mantendo o formato de matriz)
## SELEÇÃO CONDICIONAL PARA MATRIZES
## ------------------------------------------------------------------------
matriz
matriz == 1;matriz[matriz == 1] # Seleciona os elementos iguais a 1
matriz > 4;matriz[matriz > 4] # Seleciona os elementos maiores que 4
matriz >=4 & matriz <= 5;matriz[matriz >=4 & matriz <= 5] # Seleciona os elementos maiores ou iguais a 4 E menores ou iguais a 5
matriz == 1 | matriz == 5;matriz[matriz == 1 | matriz == 5] # Seleciona o elemento igual a 1 ou igual a 5
## DATAFRAME
## ------------------------------------------------------------------------
##
##
## CRIAÇÃO DE DATAFRAMES
## ------------------------------------------------------------------------
data.frame(Nomes=c("Marcelo","Fernanda"),Notas=c(8,9))
país = c("EUA", "Dinamarca", "Holanda", "Espanha", "Brasil")
nome = c("Maurício", "Pedro", "Aline", "Beatriz", "Marta")
altura = c(1.78, 1.72, 1.63, 1.59, 1.63)
peso = c(50, 76, 62, 55, 120)
(pesquisa <- data.frame(país, nome, altura, peso))
## INFORMAÇÕES SOBRE O DATAFRAME
## ------------------------------------------------------------------------
class(pesquisa) # Classe do dataframe
str(pesquisa) # Estrutura do dataframe
dim(pesquisa) # Dimensões (Número de linhas e Colunas)
nrow(pesquisa) # Número de Linhas
ncol(pesquisa) # Número de Colunas
## Adicionar uma nova coluna ou linha e renomear
## ------------------------------------------------------------------------
rbind(pesquisa,data.frame(país="China",nome="Bruce",altura=1.82,peso=70)) # Nova Linha
cbind(pesquisa,IMC = c(50,76,62,55,120) / c(1.78,1.72,1.63,1.59,1.63)^2 ) # Nova Coluna
rownames(pesquisa) # Nome das Linhas
colnames(pesquisa) # Nome das Colunas
rownames(pesquisa) <- c("Primeiro","Segundo","Terceiro","Quarto","Quinto") # Alterando nome das linhas
colnames(pesquisa) <- c("PAÍS","NOME","ALTURA","PESO") # Alterando nome das colunas
## INDEXAÇÃO PARA DATAFRAMES
## ------------------------------------------------------------------------
pesquisa
pesquisa[4, ] # Selecionando a 4° linha do dataframe
pesquisa[1:5,];head(pesquisa,5) # Selecionando as primeiras 5 linhas
pesquisa[3:5,];tail(pesquisa,3) # Selecionando as últimas 3 linhas
pesquisa[,2];pesquisa$NOME # Selecionando a 2° Coluna (Pelo número da coluna e pelo nome da coluna)
pesquisa[3,1];pesquisa$PAÍS[3] # Selecionando a 3° linha da 1° coluna (Por números e pelo nome da coluna)
pesquisa[1, "NOME"] # Selecionando a 1° linha da 2° coluna
## SELEÇÃO CONDICIONAL PARA DATAFRAMES
## ------------------------------------------------------------------------
pesquisa
pesquisa[pesquisa$PESO == 50,] # Seleciona a linha com peso igual a 50
pesquisa[pesquisa$ALTURA>=1.65,] # Seleciona as linhas com altura maior ou igual a 1.65
pesquisa[pesquisa$ALTURA>=1.65,"NOME"] # Seleciona as linhas com altura maior ou igual a 1.65 da coluna 'NOME'
pesquisa[pesquisa$PESO > 50 & pesquisa$PESO < 70,];subset(pesquisa, PESO > 50 & PESO < 70) # Seleciona as linhas com peso maior que 50 e menor que 70.
pesquisa[pesquisa$PESO > 50 & pesquisa$ALTURA < 1.70,];subset(pesquisa, PESO > 50 & ALTURA < 1.70) # Seleciona as linhas com peso maior que 50 e altura menor que 1.70.
## LISTA
## ------------------------------------------------------------------------
##
##
## CRIAÇÃO DE LISTAS
## ------------------------------------------------------------------------
(lista <- list(1:10,c("Maria", "João", "Alfredo"),rnorm(10),pesquisa))
# Lista Nomeada
(lista <- list(Sequencia=1:10,Nomes=c("Maria", "João", "Alfredo"),NumAleat=rnorm(10),Dataframe=pesquisa))
class(lista)
length(lista)
str(lista)
## INDEXAÇÃO PARA LISTAS
## ------------------------------------------------------------------------
lista[1];str(lista[1]) # Extrai o 1° elemento da lista (Retorna uma lista com um elemento)
lista[[1]];str(lista[[1]]) # Extrai o 1° elemento da lista (Retorna somente o elemento)
lista[[2]];lista[["Nomes"]] # Extrai o 2° elemento da lista
lista[[1]][3] # Extrai o 3° item do 1° elemento da lista
lista$NumAleat[4] # Extrai o 4° item do elemento 'NumAleat' da lista
lista$Nomes[c(2,3)] # Extrai o 2° e 3° item do elemento 'Nomes' da lista
lista$Dataframe$Nome # Extrai a coluna 'NOME' do elemento 'Dataframe' da lista
## SELEÇÃO CONDICIONAL PARA LISTAS
## ------------------------------------------------------------------------
lista
lista[[3]][lista[[3]]>=0] # Extrai os itens positivos do terceiro elemento da lista
lista$Sequencia[lista$Sequencia>=5] # Extrai os itens maiores que 5 do elemento 'Sequencia' da lista
##################
# LEITURA #
# DE #
# DADOS #
##################
## TXT
## ------------------------------------------------------------------------
dados_txt <- read.table(file = "MingotiAnA4.txt",
header = TRUE,
sep = '\t',
dec = '.')
head(dados_txt)
## CSV
## ------------------------------------------------------------------------
dados_csv <- read.csv(file = "MingotiAnA4.csv",
header = TRUE,
sep = ';',
dec = '.')
head(dados_csv)
## XLSX
## ------------------------------------------------------------------------
library(readxl)
dados_xlsx <- read_excel(path = "MingotiAnA4.xlsx",
sheet = 1)
head(dados_xlsx);class(dados_xlsx)
as.data.frame(head(dados_xlsx))
##################
# #
# GRÁFICOS #
# #
##################
summary(dados_txt)
plot(dados_txt)
## ------------------------------------------------------------------------
barplot(table(dados_txt$grp))
plot(dados_txt$x1)
hist(dados_txt$x1)
plot(density(dados_txt$x1))
boxplot(dados_txt$x1)
## ------------------------------------------------------------------------
plot(dados_txt$x1 ~ as.factor(dados_txt$grp))
plot(dados_txt$x1 ~ dados_txt$x2)
## ------------------------------------------------------------------------
plot(dados_txt$x1 ~ dados_txt$x2) # exemplo
plot(dados_txt$x1 ~ dados_txt$x2,
xlab = 'Variável x2',
ylab = 'Variável x3') # eixos
plot(dados_txt$x1 ~ dados_txt$x2,
xlab = 'Variável x2',
ylab = 'Variável x3',
main = 'x3 em função de x1') # título
plot(dados_txt$x1 ~ dados_txt$x2,
xlab = 'Variável x2',
ylab = 'Variável x3',
main = 'x3 em função de x1',
col = 'blue') # cores (nome) - azul
plot(dados_txt$x1 ~ dados_txt$x2,
xlab = 'Variável x2',
ylab = 'Variável x3',
main = 'x3 em função de x1',
col = 2) # cores (número) - vermelho
plot(dados_txt$x1 ~ dados_txt$x2,
xlab = 'Variável x2',
ylab = 'Variável x3',
main = 'x3 em função de x1',
col = '#00cc00') # cores (html) - verde
plot(dados_txt$x1 ~ dados_txt$x2,
xlab = 'Variável x2',
ylab = 'Variável x3',
main = 'x3 em função de x1',
pch = 19) # estilos de pontos (experimente entre 1 e 127)
## ------------------------------------------------------------------------
plot(dados_txt$x1 ~ dados_txt$x2,
xlab = 'Variável x2',
ylab = 'Variável x3',
main = 'x3 em função de x1',
pch = 19,
col = dados_txt$grp)
legend("top", legend=c("Grupo 1", "Grupo 2"),
col=c(1,2), bty="o", pch=c(19,19)) ## Acrescenta legenda
demo(graphics)
|
1fd2b882a12fc37cda0e8b1eef3a69db2d9d133b | c0bce42fcea5993c3d9976248c157f4a4433db0b | /figure_CLL/code/23_analysis_chisquared.R | 6bd7a3db0e40a8aa71195378e0a7b7ba8d97e0b0 | [] | no_license | ChenPeizhan/mtscATACpaper_reproducibility | a01b22f43c6222a56e04e731d68de7440c3cfc76 | e543610bf29dbac1094994c54d3e7edd41609d5a | refs/heads/master | 2022-12-11T11:31:24.877462 | 2020-08-29T18:40:36 | 2020-08-29T18:40:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,748 | r | 23_analysis_chisquared.R | library(data.table)
library(dplyr)
# Set up a data frame for plotting
dt <- readRDS("../output/PT1_X2.rds")
dt <- dt %>% arrange(desc(obs_x2)) %>%
mutate(rank = 1:n(), padj = p.adjust(obs_p), obs_log10p = -1*log10(obs_p))
dt$perm_x2 <- sort(dt$perm_x2, decreasing = TRUE)
dt$perm_log10p <- -1*log10(sort(dt$perm_p, decreasing = FALSE))
sum(dt$padj < 0.01)
p1 <- ggplot(dt, aes(x = rank, y = obs_x2, color = padj < 0.01)) +
geom_point_rast(size = 0.1, raster.dpi = 500) + scale_color_manual(values = c("black", "firebrick")) +
geom_point_rast(inherit.aes = FALSE, data = dt, aes(x = rank, y = perm_x2), color = "lightgrey", size = 0.1, raster.dpi = 500) +
labs(x = "Rank sorted peaks", y = "X2 Statistic") +
pretty_plot(fontsize = 8) + L_border() +
theme(legend.position = "none")
cowplot::ggsave2(p1, file = "../plots/PT1_ChiSquareSummary.pdf", width = 1.8, height = 1.8)
# Set up a data frame for plotting
dt <- readRDS("../output/PT2_X2.rds")
dt <- dt %>% arrange(desc(obs_x2)) %>%
mutate(rank = 1:n(), padj = p.adjust(obs_p), obs_log10p = -1*log10(obs_p))
dt$perm_x2 <- sort(dt$perm_x2, decreasing = TRUE)
dt$perm_log10p <- -1*log10(sort(dt$perm_p, decreasing = FALSE))
sum(dt$padj < 0.01)
p1 <- ggplot(dt, aes(x = rank, y = obs_x2, color = padj < 0.01)) +
geom_point_rast(size = 0.1, raster.dpi = 500) + scale_color_manual(values = c("black", "firebrick")) +
geom_point_rast(inherit.aes = FALSE, data = dt, aes(x = rank, y = perm_x2), color = "lightgrey", size = 0.1, raster.dpi = 500) +
labs(x = "Rank sorted peaks", y = "X2 Statistic") +
pretty_plot(fontsize = 8) + L_border() +
theme(legend.position = "none")
cowplot::ggsave2(p1, file = "../plots/PT2_ChiSquareSummary.pdf", width = 1.8, height = 1.8) |
7c6c5afb4420cacc7e6f69defd2b9c2e731f7dee | 76e1ad0b4fea46946939c33e795b73d86ba2124b | /task4/traintest.R | 75d0909ea6bbe17858273711d595b0e5bbb426a1 | [] | no_license | guidogallopyn/TextPrediction | a559940bd1e05ea9ff1e07a71c87dbc4b51ccba5 | dfeb4d109c686ea004cd1736a60cf418b7f37970 | refs/heads/master | 2021-01-02T09:15:11.169480 | 2015-04-27T18:21:36 | 2015-04-27T18:21:36 | 34,554,810 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,560 | r | traintest.R |
# split Capstone corpus in training, eval and test set and store
# subsample the training set in smaller pieces
# 1% small
# 3% medium
# 10% large
# NOTE(review): setwd() below hard-codes a user-specific path; adjust before running elsewhere.
setwd("~/Documents/Courses/DataScience/CapStone")
library(tm)
# Alternative: a disk-backed permanent corpus (kept for reference)
#corpus <- PCorpus(DirSource("data/final/en_US/"),dbControl = list(dbName = "db/enUSfinal.db"))
corpus <- VCorpus(DirSource("data/final/en_US/"))
# Draw a random subset of indices: `frac * len` values sampled without
# replacement from 1..len (the product is passed to sample() unchanged,
# exactly as in the original one-liner).
sample.fraction <- function(len, frac) {
  n_draw <- frac * len
  sample(len, n_draw)
}
# Build a new PlainTextDocument containing a random fraction `frac` of the
# lines of `doc`; the original document id is preserved in the metadata.
subsample.TextDocument <- function(doc, frac) {
  doc_lines <- content(doc)
  kept_lines <- sample(doc_lines, frac * length(doc_lines))
  PlainTextDocument(kept_lines, id = meta(doc, "id"))
}
# Randomly split every document of `corpus` into two complementary parts:
# a list whose first element holds the sampled fraction `frac` of each
# document's lines and whose second element holds the remaining lines.
# The sampled line indices are stashed in each document's "sample" metadata
# slot first, so both halves are built from the same draw.
# NOTE(review): because of the dot in the name, this also acts as the S3
# split() method for objects of class "Corpus" -- confirm that is intended.
split.Corpus <- function(corpus, frac) {
  tagged <- tm_map(corpus, function(x) {
    meta(x, "sample") <- sample.fraction(length(content(x)), frac)
    x
  })
  kept <- tm_map(tagged, function(x)
    PlainTextDocument(content(x)[meta(x, "sample")], id = meta(x, "id")))
  remainder <- tm_map(tagged, function(x)
    PlainTextDocument(content(x)[-meta(x, "sample")], id = meta(x, "id")))
  list(kept, remainder)
}
# ---------------------------------------------------------------------------
# Split the corpus: 1/20 held out for testing; of the remainder, 1/10 for
# evaluation and the rest for training. Each split is written to its own
# data/<name>/en_US/ directory, then the training set is subsampled into
# large (10%), medium (~3%) and small (~1%) corpora.
# ---------------------------------------------------------------------------
set.seed(123)
split <- list()

# subsample test corpus
split[c("testing","rest")] <- split.Corpus(corpus,1/20)

# store the corpus
dir.create(file.path("data", "testing"), showWarnings = FALSE)
dir.create(file.path("data/testing", "en_US"), showWarnings = FALSE)
writeCorpus(split$testing, path = "data/testing/en_US/")

# subsample evaluation corpus
split[c("evaluation","training")] <- split.Corpus(split$rest,1/10)
dir.create(file.path("data", "eval"), showWarnings = FALSE)
dir.create(file.path("data/eval", "en_US"), showWarnings = FALSE)
writeCorpus(split$evaluation, path = "data/eval/en_US/")
dir.create(file.path("data", "training"), showWarnings = FALSE)
dir.create(file.path("data/training", "en_US"), showWarnings = FALSE)
writeCorpus(split$training, path = "data/training/en_US/")
rm(corpus)  # the full corpus is no longer needed; free the memory

# subsample training corpus
large <- tm_map(split$training, subsample.TextDocument, 1/10)
dir.create(file.path("data", "large"), showWarnings = FALSE)
dir.create(file.path("data/large", "en_US"), showWarnings = FALSE)
writeCorpus(large, path = "data/large/en_US/")
# fixed: was rm(training), but no object of that name exists (the training
# corpus lives in split$training, and `split` is not referenced again below);
# removing the whole list is what actually frees the memory.
rm(split)
medium <- tm_map(large, subsample.TextDocument, 1/3)
dir.create(file.path("data", "medium"), showWarnings = FALSE)
dir.create(file.path("data/medium", "en_US"), showWarnings = FALSE)
writeCorpus(medium, path = "data/medium/en_US/")
rm(large)
small <- tm_map(medium, subsample.TextDocument, 1/3)
dir.create(file.path("data", "small"), showWarnings = FALSE)
dir.create(file.path("data/small", "en_US"), showWarnings = FALSE)
writeCorpus(small, path = "data/small/en_US/")
rm(medium,small)
4fed83361a25808918abec57c97e266048af8790 | fddbf4eb21d1723823810e906d5d747162dabf7c | /basic stats.R | 154cc2cc8e17b3b22038547bfc004d9eab548750 | [] | no_license | Swetapadma94/Basic-Statistics | 9a52cb23db593fd13341500d8e0a18e848f2f8de | 53d1bd278854f294a64ffc71b6df211004cf0fcd | refs/heads/master | 2022-10-03T07:07:43.225895 | 2020-06-05T18:11:58 | 2020-06-05T18:11:58 | 269,731,884 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,064 | r | basic stats.R | data("quakes")
quakes<-datasets::quakes
View(quakes)
summary(quakes)
head(quakes)
head(quakes,20)
tail(quakes)
tail(quakes,10)
sum(quakes$depth)
sum(is.na(quakes))
quakes[,c(1:4 )]
quakes[1:4,2:4]
quakes$dept
colnames(quakes)
plot(quakes)
plot(quakes$long)
plot(quakes$depth,type = "b")
plot(quakes$depth,type = "o")
plot(quakes$depth,type = "l",col="green")
plot(quakes$depth,xlab='sweta',ylab='dalai',main='basic',col="green")
barplot(quakes$depth)
barplot(quakes$depth,main = 'quakes',xlab='sweta',ylab = 'dalai',col = 'blue',horiz = F)
barplot(quakes$stations, main = 'Ozone Concenteration in air',
xlab = 'ozone levels', col= 'red',horiz = F)
hist(quakes$depth,col = 'blue')
boxplot(quakes)
boxplot(quakes$long)
boxplot(quakes[,1:4],
main='Multiple')
## Grid##
par(mfrow=c(3,3),mar=c(2,5,2,1),las=1,bty="n")
plot(quakes)
plot(quakes$long)
plot(quakes$depth,type = "b")
plot(quakes$depth,type = "o")
plot(quakes$depth,type = "l",col="green")
plot(quakes$depth,xlab='sweta',ylab='dalai',main='basic',col="green")
barplot(quakes$depth)
barplot(quakes$depth,main = 'quakes',xlab='sweta',ylab = 'dalai',col = 'blue',horiz = F)
barplot(quakes$stations, main = 'Ozone Concenteration in air',
xlab = 'ozone levels', col= 'red',horiz = F)
hist(quakes$depth,col = 'blue')
### Probability##
pnorm(680,711,29)
pnorm(730,711,29)
pnorm(730,711,29)-pnorm(680,711,29)
pnorm(750,711,29)
a<-scan()
hist(a)
### Confidence Interval###
#Package for CI
install.packages("gmodels")
library(gmodels)
#Data
install.packages('nycflights13')
library(nycflights13)
#Loads the data from package 'nycflights13'
flg<-nycflights13::flights
dep_delay<-flg$dep_delay
#to check NA values
is.na(dep_delay)
#Remove NA values
dep_delay1<-dep_delay[!is.na(dep_delay)]
#CI construction
ci(dep_delay1,confidence = 0.95)
### if we include NA value w will get NA values##
ci(dep_delay,confidence = 0.95)
y<-flg$arr_delay
is.na(y)
sum(is.na(y))
z<-y[!is.na(y)]
ci(z,confidence = 0.95)
## to find 1-sample t test##
pt(2.23,79)
# for Z-test
pnorm()
## 2 tail t test##
2*pt(-1.414,49)
2*(1-pt(1.414,49))
pt(1.414,49)
1-0.91
2*pt(-0.471,49)
2*pt(-0,49)
2*pt(-0.94,49)
## Exercise###
x<-c(0.593,0.142,0.329,0.691,0.231,0.793,0.519,0.392,0.418)
t.test(x,alternative = "greater",mu=0.3) # 1 tail test
summary(x)
mean(x)
sd(x)
var(x)
## 2 tail t test###
Control = c( 91, 87, 99, 77, 88, 91)
Treat = c( 101, 110, 103, 93, 99, 104)
t.test (Control, Treat, alternative='two.sided')
t.test(x, y = NULL, alternative = c("two.sided", "less", "greater"), mu = 0, conf.level = 0.95)
t.test (Control, Treat, alternative='two.sided')
t.test(x, y = NULL, alternative = c("two.sided", "less", "greater"), mu = 0, conf.level = 0.95)
## Linear Regression##
wcat<-read.csv("E:\\srinivas excelr\\WC_AT.csv")
View(wcat)
model<-lm(wcat$AT~.,data = wcat)
summary(model)
wc<-data.frame(Waist=c(40,70,200))
nw<-predict(model,newdata =wc)
summary(nw)
|
181a28246569c6b0db7d567c5af74692ac94154f | 2f3fcf80cd963b69167248d90510d3d0e59c4eb7 | /R/fancy_tree.R | 8ae4c220a65e1e952fc422e41cb8039993e28cd4 | [] | no_license | FabianGrammes/R_scripts | 177e9e93e4538a98773eb4ecf6c00dab9c5ef182 | 60a6a65488b373418e2865f0fc1491b8ab68ad12 | refs/heads/master | 2020-12-24T16:32:59.783829 | 2016-04-04T07:47:00 | 2016-04-04T07:47:00 | 41,821,432 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,458 | r | fancy_tree.R |
# Tip lables do not match in order
# -- function to list the alignment blocks --
# Find the contiguous "aligned" runs in a vector of alignment state codes.
#
# Positions whose value is 24 or 25 are treated as gap codes; every
# maximal run of non-gap positions becomes one interval.
#
# Args:
#   x - integer (or numeric) vector of alignment state codes.
# Returns:
#   list with
#     $range - c(1, length(x)), the full extent of the alignment
#     $ival  - list of c(start, end) pairs, one per non-gap run
#              (empty list when x has no non-gap positions)
find.algn.interval <- function(x){
    out <- list()
    out$range <- c(1, length(x))
    out$ival <- list()
    # TRUE where the position is part of an alignment block (not 24/25)
    keep <- ! x %in% 24:25
    if( length(keep) > 0 ){
        # rle() yields the run-length encoding; converting run lengths to
        # cumulative positions gives each run's start/end.  This also fixes
        # the original `for (i in 1:length(xx))` loop, which iterated over
        # c(1, 0) and failed on empty input.
        runs <- rle(keep)
        ends <- cumsum(runs$lengths)
        starts <- ends - runs$lengths + 1L
        hits <- which(runs$values)
        out$ival <- lapply(hits, function(j) c(starts[j], ends[j]))
    }
    return(out)
}
# SCREWS UP THE FUNCTION:
# find.algn.intervalV <- Vectorize(find.algn.interval, SIMPLIFY = FALSE)
library(grid)
library(gridBase)
# -- function to plot alignments, only 1 --
# Draw one alignment track inside the current grid viewport.
#
# Args:
#   x    - list as returned by find.algn.interval(): $range gives the
#          native x-scale, $ival the c(start, end) blocks to draw.
#   fill - colour used for both the block fill and the border lines.
#   col  - accepted for interface compatibility but currently unused
#          (blocks and borders are drawn with `fill`).
#   lwd  - line width of the top/bottom border lines.
#
# Side effects: draws on the current grid device.
plot.algn <- function(x, fill, col = NA, lwd = 1){
    pushViewport( viewport( width = 0.95, height = 0.75, xscale = x$range) )
    # plot alignment blocks; seq_along() (instead of 1:length()) keeps an
    # empty $ival from iterating over c(1, 0) and failing
    blocks <- x$ival
    for(i in seq_along(blocks)){
        # duplicate start/end so the four rectangle corners pair up with
        # y = c(0, 1, 1, 0)
        xx <- rep(blocks[[i]], each = 2)
        grid.polygon(x = unit(xx, units = 'native'),
                     y = c(0,1,1,0),
                     gp = gpar(fill = fill, col = fill, lwd = 0.5))
    }
    # bottom border line
    grid.lines(x = unit(c(0,1), units = 'npc'),
               y = unit(c(0,0), units = 'npc'),
               gp = gpar(col = fill, lwd = lwd))
    # top border line
    grid.lines(x = unit(c(0,1), units = 'npc'),
               y = unit(c(1,1), units = 'npc'),
               gp = gpar(col = fill, lwd = lwd))
    # leave the viewport we pushed
    popViewport()
}
# Stack one plot.algn() track per alignment in a single-column grid
# layout.  The list is reversed so the first alignment ends up in the
# bottom row, matching the tip order of an adjacent tree plot.
plot.algn.wrapper <- function( algn, col, lwd ){
    n.tracks <- length(algn)
    track.layout <- grid.layout(nrow = n.tracks,
                                ncol = 1,
                                heights = rep(1/n.tracks, n.tracks))
    flipped <- rev(algn)
    pushViewport( viewport(layout = track.layout) )
    for( row in 1:n.tracks ){
        pushViewport( viewport(layout.pos.row = row, layout.pos.col = 1) )
        intervals <- find.algn.interval(flipped[[row]])
        # NOTE(review): lwd is passed positionally and therefore binds to
        # plot.algn()'s `col` argument (which plot.algn ignores); kept
        # as-is to preserve the original behaviour.
        plot.algn(intervals, fill = col, lwd)
        upViewport()
    }
    popViewport()
}
# - wrapping the functions -
plot.tb <- function( tree, algn, block.col, block.lwd, nl = FALSE, ... ){
    # Plot a phylogenetic tree next to per-tip alignment coverage bars on
    # one grid page (tree in the wide left panel, bars in the right one).
    #
    # Args:
    #   tree      - object with a $tip.label vector, drawn via plot()
    #               (presumably an ape "phylo" object -- TODO confirm)
    #   algn      - named list with one alignment vector per tree tip;
    #               its names must contain every tip label
    #   block.col - fill colour forwarded to plot.algn.wrapper()
    #   block.lwd - line width forwarded to plot.algn.wrapper()
    #   nl        - unused in this function body
    #   ...       - extra arguments forwarded to plot() for the tree
    #
    # Side effects: draws on the current device; mixes base graphics (the
    # tree) with grid graphics (layout + bars) via gridBase.
    # --- 1st check: every tip must have an alignment entry
    if( ! all(tree$tip.label %in% names(algn)) )
        stop( 'ERR1: TREE and Alignemnt do not match')
    # --- reorder alignment to match the tree's tip order ---
    ord <- match(tree$tip.label, names(algn))
    algn <- algn[ ord ]
    # --- 2nd check: reordering must have produced an exact name match
    if( ! all(tree$tip.label == names(algn)) )
        stop( 'ERR2: Names in TREE and Alignemnt do not match')
    # -- plot dimensions -- scaling factor for the tree y-axis so that
    # the tree tips line up with the alignment rows (empirical constants)
    yfact <- 1/(0.93 + 0.93/length(algn))
    # page layout: 3 x 4 grid with thin margins; row 2 holds the plots
    glay <- grid.layout(nrow = 3,
                        ncol = 4,
                        widths = c(0.05, 0.6, 0.2, 0.05),
                        heights = c(0.05, 0.9, 0.05))
    pushViewport(viewport(layout = glay))
    # --- plot Dendrogram (base graphics inside a grid viewport) ---
    pushViewport( viewport(layout.pos.col = 2 , layout.pos.row = 2) )
    pushViewport( viewport(height = yfact, width =1))
    # gridPLT() (gridBase) maps the current grid viewport onto the base
    # graphics plot region; par(new=TRUE) keeps the page from advancing
    par( plt = gridPLT());par(new=TRUE)
    plot(tree, cex = 0.5, ...)
    upViewport()
    upViewport()
    # --- plot alignment bars in the right-hand column ---
    pushViewport( viewport(layout.pos.col = 3, layout.pos.row = 2 ) )
    plot.algn.wrapper(algn, col = block.col, lwd = block.lwd)
    upViewport()
}
|
19ef6dfb7567ced5c785322d60ee77772549448a | 4d10b72d98206f2da0fafc4dacdc0c2d8598f174 | /plot24052021.R | 94ecc60c2a051d877a2a7309e378a3fea0615b74 | [] | no_license | d2525456d/ER | a2418cb12da9c48ba3383926db3b19edcceadb4b | d765927a654fe6bac74e1e12d7e2ff8f08cb3c44 | refs/heads/main | 2023-08-01T10:33:42.411642 | 2021-09-30T05:02:54 | 2021-09-30T05:02:54 | 359,309,520 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,885 | r | plot24052021.R | #cau1
methods(plot)
help(plot)
setwd('D:\\downloads\\New folder')
Benthic <-read.table(file="RIKZ2.txt", header=TRUE)
Benthic$fBeach <- factor(Benthic$Beach)
plot(Benthic$Richness,type="p")
plot(Benthic$Richness,type="l")
plot(Benthic$Richness,type="b")
plot(Benthic$Richness,type="o")
plot(Benthic$Richness,type="h")
plot(Benthic$Richness,type="s")
plot(Benthic$Richness,type="n")
#"p": Points
#"l": Lines
#"b": Both
#"c": The lines part alone of "b"
#"o": Both "overplotted"
#"h": Histogram like (or high-density) vertical lines
#"n": No plotting
#cau2
#plot.new
plot.new()
#la ham dung de ket thuc khung ve va sang khung ve moi
#win.graph
win.graph(width = 7, height = 7, pointsize = 12)
plot(Benthic$Richness,type="o")
#mo thiet bi do hoa moi
#windows
plot.window(xlim=c(0,1), ylim=c(5,10))
plot(Benthic$Richness,type="o")
#thiet lap toa do cho graphics window
#savePlot
win.graph(width = 7, height = 7, pointsize = 12)
savePlot(filename = "Rplot",plot(Benthic$Richness,type="o"),device = dev.cur(),
restoreConsole = TRUE)
#luu file tu thiet bi do hoa moi
#locator
locator(n = 512, type = "n")
plot(Benthic$Richness,type="o")
#Doc vi tri cua con tro do hoa khi nhan nut chuot (dau tien).
#range
x = 1995:2005
y = c(81.1, 83.1, 84.3, 85.2, 85.4, 86.5,
88.3, 88.6, 90.8, 91.1, 91.3)
plot.new()
plot.window(xlim = range(x),
ylim = range(y))
lines(x, y, lwd = 2)
title(main = "A Line Graph Example",
xlab = "Time",
ylab = "Quality of R Graphics")
axis(1)
axis(2)
box()
#thiet lap pham vi cho graphics window
xmat <- cbind(rnorm(100, -3), rnorm(100, -1), rnorm(100, 1), rnorm(100, 3))
head(xmat)
plot(xmat[,1], type = 'l')
lines(xmat[,2], col = 'red')
lines(xmat[,3], col = 'green')
lines(xmat[,4], col = 'blue')
matplot(xmat, type = 'l')
matplot(xmat, type = 'l', col = 'black')
matplot(xmat, type = 'l', col = c('red', 'green', 'blue', 'orange'))
matplot(x = seq(0, 10, length.out = 100), y = xmat, type='l')
xes <- cbind(seq(0, 10, length.out = 100),
seq(2.5, 12.5, length.out = 100),
seq(5, 15, length.out = 100),
seq(7.5, 17.5, length.out = 100))
matplot(x = xes, y = xmat, type = 'l')
#Ve cac cot cua mot ma tran so voi cac cot cua ma tran khac.
#persp: perspective plot of the sinc surface (from the ?persp example)
x = seq(-10, 10, length= 30)
y = x
# sinc surface; 0/0 at the origin gives NaN, patched to 1 below
f = function(x, y) { r <- sqrt(x^2+y^2); 10 * sin(r)/r }
z = outer(x, y, f)
z[is.na(z)] = 1
op <- par(bg = "white")
persp(x, y, z, theta = 30, phi = 30, expand = 0.5, col = "lightblue")
# Capture the viewing transformation matrix persp() returns invisibly.
# The original line ended in ") = res", which is not valid R; the
# ?persp example assigns the result with "-> res".
res <- persp(x, y, z, theta = 30, phi = 30, expand = 0.5, col = "lightblue",
             ltheta = 120, shade = 0.75, ticktype = "detailed",
             xlab = "X", ylab = "Y", zlab = "Sinc( r )")
round(res, 3)
#Ham nay ve cac do thi phoi canh cua mot be mat tren mat phang x - y
#cut: cat plot ra
#split: tach plot ra
|
86f63d6c9f495468ee456d8c80f2dd51b117d819 | 2a9e9f7a1537d3d49f1553695a6d473fcbdd24f6 | /man/forecast.Rd | 9f042dcddfdfba836da03d2e200c0e4e0fd14fdf | [] | no_license | asl/rssa | 66a16e6bd373bf0c588aedb2ce595d182cb43575 | a39fc8271eb2a4926f15fe48f331673218c51974 | refs/heads/master | 2022-08-30T02:05:23.745208 | 2022-08-22T14:12:33 | 2022-08-22T14:12:43 | 161,125 | 45 | 25 | null | 2018-07-03T21:48:00 | 2009-03-27T18:14:31 | R | UTF-8 | R | false | false | 3,806 | rd | forecast.Rd | \name{forecast}
\alias{forecast.ssa}
\alias{forecast.1d.ssa}
\alias{forecast.toeplitz.ssa}
\alias{predict.ssa}
\alias{predict.1d.ssa}
\alias{predict.mssa}
\alias{predict.toeplitz.ssa}
\title{Perform SSA forecasting of series}
\description{
All-in-one function to perform SSA forecasting of one-dimensional series.
}
\usage{
\method{forecast}{1d.ssa}(object,
groups, h = 1,
method = c("recurrent", "vector"),
interval = c("none", "confidence", "prediction"),
only.intervals = TRUE,
\dots,
drop = TRUE, drop.attributes = FALSE, cache = TRUE)
\method{forecast}{toeplitz.ssa}(object,
groups, h = 1,
method = c("recurrent", "vector"),
interval = c("none", "confidence", "prediction"),
only.intervals = TRUE,
\dots,
drop = TRUE, drop.attributes = FALSE, cache = TRUE)
\method{predict}{1d.ssa}(object,
groups, len = 1,
method = c("recurrent", "vector"),
interval = c("none", "confidence", "prediction"),
only.intervals = TRUE,
\dots,
drop = TRUE, drop.attributes = FALSE, cache = TRUE)
\method{predict}{toeplitz.ssa}(object,
groups, len = 1,
method = c("recurrent", "vector"),
interval = c("none", "confidence", "prediction"),
only.intervals = TRUE,
\dots,
drop = TRUE, drop.attributes = FALSE, cache = TRUE)
\method{predict}{mssa}(object,
groups, len = 1,
method = c("recurrent", "vector"),
direction = c("column", "row"),
\dots,
drop = TRUE, drop.attributes = FALSE, cache = TRUE)
}
\arguments{
\item{object}{SSA object holding the decomposition}
\item{groups}{list, the grouping of eigentriples to be used in the forecast}
\item{h,len}{the desired length of the forecasted series}
\item{method}{method of forecasting to be used}
\item{interval}{type of interval calculation}
\item{only.intervals}{logical, if 'TRUE' then bootstrap method is used
for confidence bounds only, otherwise --- mean bootstrap forecast is
returned as well}
\item{direction}{direction of forecast in multichannel SSA case, "column"
stands for so-called L-forecast and "row" stands for K-forecast}
\item{\dots}{further arguments passed for forecast routines
(e.g. \code{level} argument to \code{bforecast})}
\item{drop}{logical, if 'TRUE' then the result is coerced to series
itself, when possible (length of 'groups' is one)}
\item{drop.attributes}{logical, if 'TRUE' then the forecast routines do not try
to infer the time index arguments for the forecasted series.}
\item{cache}{logical, if 'TRUE' then intermediate results will be
cached in the SSA object.}
}
\details{
This function is a convenient wrapper over other forecast routines (see
'See Also') turning their value into object of type 'forecast' which
can be used with the routines from \pkg{forecast} package.
}
\value{
object of class 'forecast' for \code{forecast} function call,
predicted series for \code{predict} call.
}
\seealso{
\code{\link{Rssa}} for an overview of the package, as well as,
\code{\link[Rssa:rforecast]{rforecast}},
\code{\link[Rssa:vforecast]{vforecast}},
\code{\link[Rssa:bforecast]{bforecast}},
\code{\link[forecast:forecast]{forecast (package)}}
}
\examples{
s <- ssa(co2)
# Calculate 24-point forecast using first 6 components as a base
f <- forecast(s, groups = list(1:6), method = "recurrent", bootstrap = TRUE, len = 24, R = 10)
\donttest{
# Plot the result including the last 24 points of the series
plot(f, include = 24, shadecols = "green", type = "l")
# Use of predict() for prediction
p <- predict(s, groups = list(1:6), method = "recurrent", len = 24)
# Simple plotting
plot(p, ylab = "Forecasted Values")
}
}
|
e32ad074edb117392d6bd3acf779fca7473d633f | 984569f23dc3be101d69ab868da1f73c235b6c23 | /SummaryWorkbook/data.R | 76bbb06a13db0282630728f66fda31a2f6554a65 | [] | no_license | mobro-git/Hydrogen-Analysis | 01a081b319232d827f6cc0988d72d263ee291982 | 88675124a0bd3ee8caf7d2e45fb84ce2a8b65ca8 | refs/heads/main | 2023-04-17T11:15:19.939613 | 2021-04-29T21:01:19 | 2021-04-29T21:01:19 | 342,646,488 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,625 | r | data.R | ## ----Setup------------------------------------------
setwd("SummaryWorkbook/")
source("library.R")
source("functions.R")
source("graphing.R")
myfiles <- list.files(pattern = "*.csv")
data <- lapply(myfiles, read_csv)
names(data) <- c("com","dummy","elc","emis","flex","ind","res","trn")
list2env(data, envir = globalenv())
setwd(..)
## ----Dummies------------------------------------------
dummy <- read_csv("Dummy Check.csv")
dummy_reg <- dummy %>%
group_by(Scenario,Attribute,Commodity,Process,Period,Region) %>%
summarize(Pv = sum(Pv)) %>%
ungroup()
dummy <- dummy_reg %>%
group_by(Scenario,Attribute,Commodity,Process,Period) %>%
summarize(Pv = sum(Pv)) %>%
ungroup()
## ----Commercial Summary------------------------------------------
com <- read_csv("Commercial Summary.csv")
com_reg <- com %>%
group_by(Scenario,Attribute,Commodity,Process,Period,Region) %>%
summarize(Pv = sum(Pv)) %>%
ungroup()
## ----Electric Summary------------------------------------------
elc <- read_csv("ELC Summary.csv")
## ----Emissions Summary------------------------------------------
emis <- read_csv("Emissions Summary.csv")
## ----Flex Fuels------------------------------------------
flexfuel <- read_csv("ETHB20LPGXCNGX breakdown.csv")
## ----Industrial Summary------------------------------------------
ind <- read_csv("Industrial Summary.csv")
## ----Residential Summary------------------------------------------
res <- read_csv("Residential Summary.csv")
## ----Transportation Summary------------------------------------------
trn <- read_csv("Transportation Summary.csv")
|
07e525772817c6f0070859911a528acf3bec123d | 5e8b99ec5db5a5b65c916b7eb013fb9358f2722c | /man/anynames.Rd | aab9f8bbc529f7b3e2bb47b794a04e57fd06346f | [] | no_license | cran/lambda.tools | 726c6c330e59da4b56f97115bd56c4b49b481145 | d438dde6ecd1655c20cf3b7445ce2f75ce6bde75 | refs/heads/master | 2020-12-12T22:47:16.225850 | 2016-05-11T18:06:28 | 2016-05-11T18:06:28 | 17,696,974 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,560 | rd | anynames.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/any.R
\name{anynames}
\alias{anynames}
\alias{anynames<-}
\title{Get the names of a data structure. This attempts to
create some polymorphism around lists, vectors, and data.frames}
\arguments{
\item{data}{Any indexable data structure}
}
\value{
Returns the names for a data structure.
}
\description{
This function gets the useful names of a data structure. This attempts to
create some polymorphism around lists, vectors, and data.frames.
}
\section{Usage}{
anynames(data)
}
\section{Details}{
Depending on the type of structure utilized in code, one needs to
call either names or \code{colnames} to get information related to
the data sets within the structure. The use of two separate functions
can cause errors and slows development time as data structures
passed from intermediate functions may change over time,
resulting in a broken interface.
By providing a thin layer over underlying accessors,
this function attempts to expedite development and add a
bit of polymorphism to the semantics of names.
The explicit assumption is that data sets in
two dimensional structures are organized by column,
as this is compatible with time-series objects such as
zoo and xts.
}
\examples{
m <- matrix(c(1,2,3,4,5,6), ncol=2)
anynames(m) <- c('d','e')
anynames(m)
v <- c(a=1,b=2,c=3,d=4,e=5)
anynames(v)
l <- list(a=1,b=2,c=3,d=4,e=5)
anynames(l)
df <- data.frame(a=1:10, b=1:10,c=1:10,d=1:10,e=1:10)
anynames(df)
}
\author{
Brian Lee Yung Rowe
}
\keyword{attribute}
|
df5373dbac384502a593c3be10374c29907b5fe9 | 89fb97e3cc23abd1ad7909b80248056462def3f3 | /man/tmodTagcloud.Rd | b9e5cf7c1b30d17e4154077b2d8ef1c1312eb4b0 | [] | no_license | january3/tmod | 2316ade2d253196ca54b68d95ea449154efbafc9 | 1c5f882e78089f62132ff6fe65e9e9b0577e78f5 | refs/heads/master | 2023-04-06T00:43:18.160542 | 2023-04-04T10:17:44 | 2023-04-04T10:17:44 | 195,234,071 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,949 | rd | tmodTagcloud.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariate.R
\name{tmodTagcloud}
\alias{tmodTagcloud}
\title{Tag cloud based on tmod results}
\usage{
tmodTagcloud(
results,
filter = TRUE,
simplify = TRUE,
tag.col = "Title",
min.auc = 0.5,
max.qval = 0.05,
plot = TRUE,
weights.col = "auto",
pval.col = "P.Value",
maxn = NULL,
...
)
}
\arguments{
\item{results}{data frame produced by one of the tmod enrichment tests}
\item{filter}{Whether redundant and not annotated modules should be removed}
\item{simplify}{Whether module names should be simplified}
\item{tag.col}{Which column from results should be used as tags on the plot}
\item{min.auc}{Minimal AUC to show (default: 0.5)}
\item{max.qval}{Maximal adjusted p value to show (default: 0.05)}
\item{plot}{Should the tag cloud be plotted or only returned}
\item{weights.col}{Which column from results should be used as weights for the tag cloud}
\item{pval.col}{Which column contains the P values which will be used to shade the tags}
\item{maxn}{Maximum number of gene set enrichment terms shown on the plot (if NULL – default – all terms will be shown)}
\item{...}{Any further parameters are passed to the tagcloud function}
}
\value{
Either NULL or whatever tagcloud returns
}
\description{
Plot a tag (word) cloud based on results from tmod enrichment.
}
\details{
The tags will be generated based on results from tmod or any other
suitable data frame. The data frame must contain two numeric columns,
specified with "weights.col" and "pval.col", which will be
used to calculate the size and shade of the tags, respectively.
Furthermore, it has to contain a column with tags (parameter "tag.col",
by default "Title").
Any data frame can be used as long as it contains the specified columns.
}
\examples{
data(tmod)
fg <- getModuleMembers("LI.M127")[[1]]
bg <- tmod$gv
result <- tmodHGtest( fg, bg )
tmodTagcloud(result)
}
|
73e77fa18277fbdb26720e925e056d5745718db8 | ca8b5df44305bee5da209a4c73b1a915fb87fa6b | /plot2.R | 24c0c0eab0b8d3e127d5377c5df1762e07c74fe4 | [] | no_license | SelinaChan/ExData_Plotting1 | 95cb205889ad08588bc4dd9f18ad6d05346bd3cb | 20df0df561fadefaa69efef4e4d312166edff521 | refs/heads/master | 2020-05-31T17:57:34.357140 | 2017-06-12T11:05:12 | 2017-06-12T11:05:12 | 94,042,744 | 0 | 0 | null | 2017-06-12T01:17:45 | 2017-06-12T01:17:45 | null | UTF-8 | R | false | false | 1,350 | r | plot2.R | # loading the dataset of household_power_consumption
# and then making plot2
# read the full household power data set; "?" marks missing values
household <- read.table("household_power_consumption.txt",header = TRUE,
                     col.names = c("Date", "Time", "Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
                     sep = ";",colClasses = c('character','character' ,'numeric','numeric','numeric','numeric','numeric','numeric','numeric'),
                     na.strings = "?")
# combine Date + Time into a POSIXlt timestamp and derive the weekday
household$DT <- strptime(paste(household$Date,household$Time), format = "%d/%m/%Y %H:%M:%S")
household$wd <- weekdays(household$DT,abbreviate = TRUE)
# keep only the two analysis days: 2007-02-01 and 2007-02-02
startdate <- strptime("1/2/2007", format = "%d/%m/%Y")
enddate <- strptime("2/2/2007", format = "%d/%m/%Y")
# convert Date from character to a date-time class so it can be compared
household$Date <- strptime(household$Date, format = "%d/%m/%Y")
household_seb <- household[household$Date==startdate|household$Date==enddate, ]
# plot2: Global Active Power over time for the two selected days
## use plot(x, y)
## set the ylab
## set the margin
## save it to png file
par(mar = c(3,4, 3, 3))
plot(household_seb$DT, household_seb$Global_active_power,type = "l",
     ylab = "Global Active Power(Kilowatts)", xlab = NULL)
# copy the on-screen plot to a PNG file, then close the PNG device
dev.copy(png,file = "plot2.png")
dev.off()
|
105a92b7958aa3154c922c9908ff355916dad66a | b869e753d5a4f433b768085235754a0038f42dd4 | /Código Dash.R | 34a386615e057910cb850a7109971c56dc2f315e | [] | no_license | nativanegas/prueba | 835824d07f4ad9efdac62cb580199b57897b7d4c | c5de747ad0079a3f7b35636d45998b755c7c7ac8 | refs/heads/master | 2023-02-27T04:48:12.800905 | 2021-01-31T20:15:00 | 2021-01-31T20:15:00 | 272,299,274 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 3,481 | r | Código Dash.R | library(sf) #Contiene funciones para codificar datos espaciales.
library(ggplot2) #Sirve para crear gráficos y mapear
library(tmap) #Importa mapas temáticos en los cuales se visualizan las distribuciones de datos espaciales.
library(tmaptools) #Conjunto de herramientas para leer y procesar datos espaciales.
library(leaflet) #Crea y personaliza mapas interactivos usando una biblioteca de JavaScript
library(dplyr) #Sirve para manipular datos.
library(rgdal) #Proporciona enlaces a la Biblioteca de abstracción de datos 'geoespaciales' y acceso
#a las operaciones de proyección desde la biblioteca 'PROJ'.
library(sp) #Clases y métodos para datos espaciales.
library(readxl) #Importa archivos de excel a R.
library(RSocrata) #Proporciona una interacción más fácil con los portales de datos abiertos.
library(mapview)
token <- "ew2rEMuESuzWPqMkyPfOSGJgE" #Es una herramienta para poder descargar los datos desde el INS.
df.ins <- read.socrata("https://www.datos.gov.co/resource/gt2j-8ykr.json", app_token = token) #Descarga los datos desde la página de la INS.
mapa=readOGR("/Users/Natalia/Desktop/TESIS/depto/depto.shp",stringsAsFactors=FALSE) #Exporta los datos para crear el mapa de Colombia dividio por departamentos
dashboard=st_read("/Users/Natalia/Desktop/TESIS/depto/depto.shp",stringsAsFactors=FALSE) #Exporta las características de cada departamento de Colombia
plot(mapa,main="COLOMBIA") #Crea el mapa de Colombia
str(mapa) #Muestra de forma compacta la estructura interna de un objeto.
df.ins$codigo=NA #Crea una nueva columna sin datos.
df.ins$codigo=trunc(as.numeric(df.ins$c_digo_divipola)/1000) #A la columna creada le asigna el valor del código divipola
#dividido por 1000, el cual es igual al código del dpto.
tabla=table(df.ins$codigo,df.ins$atenci_n) #Crea una tabla que cuenta la cantidad de atendidos dependiendo del código de dpto.
dashboard=arrange(dashboard, as.numeric(DPTO)) #Organiza de menor a mayor la base dashboard dependiendo del cód. de dpto.
rownames(tabla)=dashboard$NOMBRE_DPT #A cada fila de la tabla (23) se le asigna el nombre de cada departamento
tabla[,1]=tabla[,1]+tabla[,3]+tabla[,4] #Se suma atención: casa, hospital y hospital UCI.
tabla=tabla[,-c(3:5)] #Se eliminan las columnas hospital, hospital UCI y NA.
TOTAL_CONTAGIADOS=tabla[,1]+tabla[,2]+tabla[,3] #Crea una columna con la suma de las columnas existentes.
write.csv(tabla, file="covid19.csv") #Guarda en un archivo .csv la tabla creada.
covid19=read.csv("/Users/Natalia/Desktop/TESIS/DASHBOARD/covid19.csv") #Exporta el archivo creado anteriormente.
covid19=data.frame(covid19,TOTAL_CONTAGIADOS) #Concatena el archivo anterior con el total de contagiados.
names(covid19)=cbind("NOMBRE_DPT","CASOS ACTIVOS","FALLECIDOS","RECUPERADOS","TOTAL CONTAGIADOS") #Renombra las columnas.
dashboard=dashboard[,-1] #Elimina la primera columna de la base dashboard.
mapview(covid19[,2],layer.name="Total")
mapa_datos=inner_join(dashboard,covid19)#Une las bases dashboard y covid 19.
mapa_datos=mapa_datos[,-c(2:4)] #Elimina las columnas 2 a 4 de la base creada anteriormente.
tm_shape(mapa_datos)+
tm_layout(title = "CASOS DE COVID-19 EN COLOMBIA. Realizado por: Natalia Vanegas")+
tm_polygons("MAP_COLORS") #Crea el mapa estático de Colombia dependiendo de los datos anteriores.
test_map=tmap_last() #Guarda el mapa.
tmap_save(test_map,"mapa.html",add.titles = TRUE) #Crea el archivo HTML del mapa dinámico.
|
67f0d7354ff4f8e8eedd8ec1bbb6e3deb5994b76 | 6d3c4121289e2de67100a093fa2b615845eb199b | /R/makebreaks.R | bdf5ddd5f053f92a6f8acce2bcc4fd6e67c3f735 | [] | no_license | zzxxyui/ihm | 057728fe28f1320f6c276ac1761a61c5e2dda3a6 | 82ae8351ce8c9d3b145567d11e1273786cabfe25 | refs/heads/master | 2020-06-14T07:39:42.023810 | 2019-07-03T00:02:49 | 2019-07-03T00:02:49 | 194,950,090 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,218 | r | makebreaks.R | makebreaks <- function(dataset, breaks) {
minbr <- min(dataset, na.rm=TRUE)
maxbr <- max(dataset, na.rm=TRUE)
if(minbr == maxbr) {
stop("min and max are identical in the dataset. Quitting")
}
breaklength <- 256
computebreaks <- TRUE
if(!missing(breaks)) {
if(length(breaks)==1) {
breaklength <- breaks
computebreaks <- TRUE
} else {
mybreaks <- breaks
computebreaks <- FALSE
}
}
if(minbr >= 0) { ## i.e. if there are no negative numbers
if(computebreaks) {
mybreaks <- seq(from=0, to=maxbr, length.out=breaklength)
}
mycolors <- colorpanel(length(mybreaks)-1, "white", "red")
}
if(maxbr <= 0) { ## i.e. if there are no positive numbers
if(computebreaks) {
mybreaks <- seq(from=minbr, to=0, length.out=breaklength)
}
mycolors <- colorpanel(length(mybreaks)-1, "blue", "white")
}
if((minbr < 0) && (maxbr > 0)) {
if(computebreaks) {
mybreaks <- unique(c(seq(from=minbr, to=0, length.out=breaklength),
seq(from=0, to=maxbr, length.out=breaklength)))
}
mycolors <- colorpanel(length(mybreaks)-1, "blue", "white", "red")
}
return(list("breaks"=mybreaks,
"colors"=mycolors))
} |
48e870a18f9298a835bfe0070c6c265076ea9fea | 3329cc000d2caf88175a4933f04533fc460cbebb | /Teste.R | bf759d4724dbfa7a340a7838484f3828452acfed | [] | no_license | brmatheusluiz/PublicoPolicial | a00166403ba710d1e7e14789bc6017b3736c3935 | c878e7c1c1e9f9b0de998a52e04b8e9750cc566b | refs/heads/main | 2023-02-04T16:21:05.472657 | 2020-12-22T15:29:50 | 2020-12-22T15:29:50 | 320,060,703 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,594 | r | Teste.R | #Limpar
rm(list=ls())
library("ggplot2")
library("dplyr")
library("readxl")
library("ggridges")
#Arquivo Conseguido pelo link:
#https://dados.gov.br/dataset/sistema-nacional-de-estatisticas-de-seguranca-publica/resource/feeae05e-faba-406c-8a4a-512aec91a9d1
dados <- read_excel("indicadoressegurancapublicaufjul20.xlsx")
dados
nomes<-c("UF","TipoCrime","Ano","Mes","Ocorrencia")
names(dados)<-nomes
#Análise exloratória serve para entendermos um pouco sobre os nossos dados.
summary(dados)
#Transformar em factor e o numero de ocorrencias
dados$TipoCrime<-as.factor(dados$TipoCrime)
dados$UF<-as.factor(dados$UF)
dados$Mes<-as.factor(dados$Mes)
dados$Ocorrencia<-as.numeric(dados$Ocorrencia)
#Filtrando dados pelo Estado
dados.AC<-filter(dados,UF=="Acre")
dados.AL<-filter(dados,UF=="Alagoas")
dados.AP<-filter(dados,UF=="Amapá")
dados.AM<-filter(dados,UF=="Amazonas")
dados.BA<-filter(dados,UF=="Bahia")
dados.CE<-filter(dados,UF=="Ceará")
dados.DF<-filter(dados,UF=="Distrito Federal")
dados.ES<-filter(dados,UF=="Espírito Santo")
dados.GO<-filter(dados,UF=="Goiás")
dados.MA<-filter(dados,UF=="Maranhão")
dados.MT<-filter(dados,UF=="Mato Grosso")
dados.MS<-filter(dados,UF=="Mato Grosso do Sul")
dados.MG<-filter(dados,UF=="Minas Gerais")
dados.PA<-filter(dados,UF=="Pará")
dados.PB<-filter(dados,UF=="Paraíba")
dados.PR<-filter(dados,UF=="Paraná")
dados.PE<-filter(dados,UF=="Pernambuco")
dados.PI<-filter(dados,UF=="Piauí")
dados.RJ<-filter(dados,UF=="Rio de Janeiro")
dados.RN<-filter(dados,UF=="Rio Grande do Norte")
dados.RS<-filter(dados,UF=="Rio Grande do Sul")
dados.RO<-filter(dados,UF=="Rondônia")
dados.RR<-filter(dados,UF=="Roraima")
dados.SC<-filter(dados,UF=="Santa Catarina")
dados.SP<-filter(dados,UF=="São Paulo")
dados.SE<-filter(dados,UF=="Sergipe")
dados.TO<-filter(dados,UF=="Tocantins")
#Quantidade de Ocorrencias registradas no ano de 2019
#ACRE
dados.AC %>%
filter(Ano==2019) %>%
group_by(TipoCrime) %>%
count(TipoCrime,Quantidade=sum(Ocorrencia)) %>%
ggplot()+
geom_bar(aes(x=Quantidade,y=TipoCrime, fill = TipoCrime),stat='identity') +
ggtitle("Acre")
#Alagoas
dados.AL %>%
filter(Ano==2019) %>%
group_by(TipoCrime) %>%
count(TipoCrime,Quantidade=sum(Ocorrencia)) %>%
ggplot()+
geom_bar(aes(x=Quantidade,y=TipoCrime, fill = TipoCrime),stat='identity') +
ggtitle("Alagoas")
#Amapa
dados.AP %>%
filter(Ano==2019) %>%
group_by(TipoCrime) %>%
count(TipoCrime,Quantidade=sum(Ocorrencia)) %>%
ggplot()+
geom_bar(aes(x=Quantidade,y=TipoCrime, fill = TipoCrime),stat='identity') +
ggtitle("Amapa")
|
7fe0da5933642cead1298af62f3b1ec912c8aaa0 | f9c75fd77a428475a84028e08e3c05d016f9fc9b | /ques5.R | 82aafe0a77a5f054255313831b6fcbef4d319959 | [] | no_license | SathvikKP/Data_Analytics_R_Programs | 8b8055f5f599896500dc6f37684d761543e3c023 | 88be6414954a55a336824cb206367e0a317a562c | refs/heads/main | 2023-03-07T08:00:03.458038 | 2021-02-16T21:02:14 | 2021-02-16T21:02:14 | 339,530,114 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,786 | r | ques5.R | setwd("/home/user/DA_LAB/final_exam_practice")
dataset <- read.csv("ques5.csv",header=TRUE,stringsAsFactors = FALSE)
print(dataset)
# Encode an advertising medium name as a numeric code.
#
# Args:
#   x - a single media name ("newspaper", "radio" or "tv").
# Returns:
#   1, 2 or 3 for the known media, 0 for anything else.
preprocess <- function(x)
{
  # switch() replaces the original if/else chain; the trailing unnamed
  # value is the default for unrecognised media.
  switch(as.character(x),
         newspaper = 1,
         radio = 2,
         tv = 3,
         0)
}
dataset$media <- sapply(dataset$media,preprocess)
print(dataset)
n <- nrow(dataset)
x2avg <- sum(dataset$budget)/n
x1avg <- sum(dataset$media)/n
yavg <- sum(dataset$sales)/n
# Product of centred deviations: (t1 - t1avg) * (t2 - t2avg).
# Used to accumulate the cross-product sums of the normal equations.
mult <- function(t1, t2, t1avg, t2avg) {
  d1 <- t1 - t1avg
  d2 <- t2 - t2avg
  d1 * d2
}
# Centred sums of squares / cross-products for two-predictor OLS.
#x2^2
x2x2 <- sum(mapply(mult,dataset$budget,dataset$budget,x2avg,x2avg))
#x1y
x1y <- sum(mapply(mult,dataset$media,dataset$sales,x1avg,yavg))
#x1x2
x1x2 <- sum(mapply(mult,dataset$media,dataset$budget,x1avg,x2avg))
#x2y
x2y <- sum(mapply(mult,dataset$budget,dataset$sales,x2avg,yavg))
#x1^2
x1x1 <- sum(mapply(mult,dataset$media,dataset$media,x1avg,x1avg))
# Closed-form slope estimates for y ~ x1 + x2 (Cramer's rule on the
# 2x2 normal-equation system), then the intercept from the means.
#beta 1 and beta 2
numerator1 <- x2x2*x1y - x1x2*x2y
denominator <- x1x1*x2x2 - x1x2*x1x2
numerator2 <- x1x1*x2y - x1x2*x1y
beta1 <- numerator1/denominator
beta2 <- numerator2/denominator
beta0 <- yavg - beta1*x1avg - beta2*x2avg
print(paste0("beta 0 = ",beta0," beta 1 = ",beta1," beta2 = ",beta2))
# Interactive prediction: note the prompts read budget (x2) FIRST,
# then the media code (x1) - the on-screen message implies this order.
print("Enter budget and media type : (newspaper-1/radio-2/tv-3)")
x2 <- readline()
x1 <- readline()
x1 <- as.numeric(x1)
x2 <- as.numeric(x2)
y <- beta0 + beta1*x1 + beta2*x2
# Cross-check the manual solution against R's built-in least squares.
linearmod <- lm(sales~media+budget,data=dataset)
modelsummary <- summary(linearmod)
modelcoeffs <- modelsummary$coefficients
print(modelcoeffs)
p <- data.frame(list(x1,x2))
colnames(p) <- c("media","budget")
pred <- predict(linearmod,newdata=p)
# The two predictions should agree (manual betas vs lm()).
print(paste0("Prediction without predefined = ",y))
print(paste0("prediction with predefined = ",pred))
c3422c69a0d048b445be7d6b2d811c9c54819f1a | a456eeb492b0f2511a84b352f5f7ac0b104faf8a | /figures_code/plot_posterior_example.R | e92dda97585c71e93dfd7b1ac92025d241f82326 | [
"MIT"
] | permissive | ImperialCollegeLondon/BART-Int | 81a16ed3a074d24579ba8b03556f7737e609db73 | cbf870aabc562eb105d3d4779097a093fe16cda6 | refs/heads/master | 2023-01-07T12:58:32.983260 | 2020-11-06T10:16:20 | 2020-11-06T10:16:20 | 267,342,223 | 7 | 2 | MIT | 2020-11-06T10:16:21 | 2020-05-27T14:31:12 | R | UTF-8 | R | false | false | 8,408 | r | plot_posterior_example.R | # !/usr/bin/env R
# Load required packages
library(MASS)
library(cubature)
library(lhs)
library(data.tree)
library(dbarts)
library(matrixStats)
library(mvtnorm)
library(doParallel)
library(kernlab)
library(MCMCglmm)
# Semi-transparent (alpha = 0.3) fill colours used for the BART (orangered)
# and GP (dodgerblue) density polygons and scatter overlays below.
orangered <- rgb(1, 0.271, 0, 0.3)
dodgerblue <- rgb(0.118, 0.565, 1, 0.3)
# Python-style string formatting operator: "fmt" %--% values applies
# sprintf() with the elements of `values` as the format arguments,
# e.g. "n = %s" %--% 5  ->  "n = 5".
# (Adapted from https://stackoverflow.com/questions/46085274/is-there-a-string-formatting-operator-in-r-similar-to-pythons)
`%--%` <- function(x, y)
{
  fmt_args <- append(list(x), as.list(y))
  do.call(sprintf, fmt_args)
}
# Global experiment parameters.
# NOTE(review): `dim` masks base::dim for the rest of the script.
# global parameters: dimension
dim <- 1
num_iterations <- 1
whichGenz <- 7
whichKernel <- "matern32"
jumps = 1
# turn on/off sequential design (TRUE = adaptive point selection)
cat("\nBegin testing:\n")
sequential <- TRUE
# NOTE(review): "Sequantial" typo below is a runtime string; left as-is.
cat("Sequantial design set to", sequential, "\n")
# prior measure over the inputs; uniform by default
measure <- "uniform"
cat("Prior measure:", measure, "\n")
# extra parameter for step function
print(c(dim, num_iterations, whichGenz))
source("src/genz/genz.R") # genz function to test
# Select the Genz test integrand by index (1-8); whichGenz = 7 is the
# step function, which takes the extra `jumps` parameter.
if (whichGenz < 1 | whichGenz > 8) { stop("undefined genz function. Change 3rd argument to 1-8") }
if (whichGenz == 3 & dim == 1) { stop("incorrect dimension. Discrete Genz function only defined for dimension >= 2") }
if (whichGenz == 1) { genz <- cont; genzFunctionName <- deparse(substitute(cont)) }
if (whichGenz == 2) { genz <- copeak; genzFunctionName <- deparse(substitute(copeak)) }
if (whichGenz == 3) { genz <- disc; genzFunctionName <- deparse(substitute(disc)) }
if (whichGenz == 4) { genz <- gaussian; genzFunctionName <- deparse(substitute(gaussian)) }
if (whichGenz == 5) { genz <- oscil; genzFunctionName <- deparse(substitute(oscil)) }
if (whichGenz == 6) { genz <- prpeak; genzFunctionName <- deparse(substitute(prpeak)) }
if (whichGenz == 7) { genz <- function(xx) { return(step(xx, jumps = jumps)) }; genzFunctionName <- deparse(substitute(step)) }
if (whichGenz == 8) { genz <- mix; genzFunctionName <- deparse(substitute(mix)) }
print("Testing with: %s" %--% genzFunctionName)
set.seed(2)
# prepare training dataset: 20 design points per dimension, drawn from
# the chosen prior measure (uniform, or truncated normal on [0, 1]).
if (measure == "uniform") {
  trainX <- replicate(dim, runif(20))
} else if (measure == "gaussian") {
  trainX <- replicate(dim, rtnorm(20, lower = 0, upper = 1))
}
trainY <- genz(trainX)
y_pred <- predict(posterior_model$model, x_plot)
y_pred_mean <- colMeans(y_pred)
y_pred_sd <- sqrt(colVars(y_pred))
# obtain posterior samples
integrals <- sampleIntegrals(posterior_model$model, dim, measure)
ymin <- min(posterior_model$trainData[, (dim + 1)]);
ymax <- max(posterior_model$trainData[, (dim + 1)])
integrals <- (integrals + 0.5) * (ymax - ymin) + ymin
# plot(x_plot, y_pred_mean)
# hist(integrals)
#
# if (!sequential){
# figName <- "Figures/%s/drawBART%s%sDimNoSequential.pdf" %--% c(whichGenz, genzFunctionName, dim)
# csvName <- "Figures/%s/drawBART%s%sDimNoSequential.csv" %--% c(whichGenz, genzFunctionName, dim)
# groundTruthName <- "Figures/%s/trainDrawBart%s%sDimNoSequential.csv" %--% c(whichGenz, genzFunctionName, dim)
# } else {
# figName <- "Figures/%s/drawBART%s%sDim.pdf" %--% c(whichGenz, genzFunctionName, dim)
# csvName <- "Figures/%s/drawBART%s%sDim.csv" %--% c(whichGenz, genzFunctionName, dim)
# groundTruthName <- "Figures/%s/trainDrawBart%s%sDim.csv" %--% c(whichGenz, genzFunctionName, dim)
# }
results <- data.frame(
"x_plot" = x_plot,
"y_pred" = y_pred_mean
)
groundTruth <- data.frame(
"trainX" = trainX,
"trainY" = trainY
)
# write.csv(results, file = csvName, row.names=FALSE)
# write.csv(groundTruth, file = groundTruthName, row.names=FALSE)
# Bayesian Quadrature with Gaussian Process
print("Begin Gaussian Process Integration")
library(reticulate)
source("src/optimise_gp.R")
lengthscale <- optimise_gp_r(trainX, trainY, kernel = "matern32", epochs = 500)
source("src/GPBQ.R")
t0 <- proc.time()
# need to add in function to optimise the hyperparameters
predictionGPBQ <- computeGPBQ_matern(trainX, trainY, dim, epochs = num_iterations, kernel = "matern32", FUN = genz, lengthscale, sequential, measure)
t1 <- proc.time()
GPTime <- (t1 - t0)[[1]]
results_models <- list("BART" = posterior_model, "GP" = predictionGPBQ, "trainX" = trainX, "trainY" = trainY, "lengthscale" = lengthscale)
# save(results_models, file = "plot_posterior_example.RData")
K <- predictionGPBQ$K
X <- predictionGPBQ$X
# Y <- Y[order(X)]
Y <- predictionGPBQ$Y
maternKernel <- maternKernelWrapper_2(lengthscale = lengthscale)
k_xstar_x <- kernelMatrix(maternKernel, matrix(x_plot, ncol = 1), X)
k_xstar_xstar <- kernelMatrix(maternKernel,
matrix(x_plot, ncol = 1),
matrix(x_plot, ncol = 1))
jitter = 1e-6
K_inv <- solve(K + diag(jitter, nrow(K)))
gp_post_mean <- k_xstar_x %*% K_inv %*% Y
gp_post_cov <- k_xstar_xstar - k_xstar_x %*% K_inv %*% t(k_xstar_x)
gp_post_sd <- sqrt(diag(gp_post_cov))
#plot of integrals
xx <- seq(0, 1, 0.001)
GPdensity <- dnorm(
xx,
mean = predictionGPBQ$meanValueGP[1],
sd = sqrt(predictionGPBQ$varianceGP[1])
)
KDE_BART <- density(integrals)
# Gaussian kernel density estimate of the BART posterior integral samples,
# evaluated at a single point t.  Uses the global sample vector `integrals`
# and the bandwidth already selected by density() (stored in KDE_BART$bw).
#
# Vectorised over the samples: mean over i of dnorm((t - x_i) / h) / h,
# which is algebraically identical to the original explicit loop
# (sum of per-sample kernel values divided by the number of samples).
kde_fun <- function(t){
  xs <- integrals
  h <- KDE_BART$bw
  mean(dnorm((t - xs) / h) / h)
}
BARTdensity <- sapply(xx, kde_fun)
# Three-panel figure: (1) posterior densities of the integral,
# (2) BART posterior draws over f, (3) GP posterior with 95% band.
# NOTE(review): `ty=` relies on partial matching of `type=`, and `alpha=`
# is not a base-graphics parameter (it is silently swallowed by `...`) -
# the draw transparency comes from the alpha baked into `orangered`.
pdf("Figures/posterior_step.pdf", width=9, height=3)
par(mfrow = c(1,3), pty = "s")
bartlty = 1
# Panel 1: GP-BQ normal density vs BART KDE; dashed line marks the
# true integral value (0.5) of the step function.
plot(
  xx,
  GPdensity,
  ty="l",
  col = "dodgerblue",
  xlim = c(0.3,0.7),
  ylim = c(0, 60),
  xlab = "x",
  ylab = "Posterior density",
  cex.lab = 1.7,
  cex.axis = 1.7,
  lwd=2.5
)
polygon(c(xx, max(xx), min(xx)), c(GPdensity, 0, 0), border=NA, col=dodgerblue)
points(xx, BARTdensity, ty="l", col = "orangered", lwd=2.5, lty=bartlty)
polygon(c(xx, max(xx), min(xx)), c(BARTdensity, 0, 0), border=NA, col=orangered)
abline(v=0.5, lwd=2.5, lty="dashed")
# legend(0.25, 82,
#        legend=c("BART", "GP", expression(Pi*"[f]")),
#        col=c("orangered", "dodgerblue", "black"), lty = c(bartlty,1,1), cex=2.4, bty="n",
#        horiz=TRUE, xpd=TRUE,
# )
# Panel 2: BART posterior mean plus a thinned cloud of posterior draws;
# the black line is the true step function.
plot(x_plot, y_pred_mean, col = "orangered", ty="l", lwd=3,ylim=c(-0.2, 1.5),
     ylab = expression(f(x)), xlab = "x", cex.lab = 1.7,
     cex.axis = 1.7)
points(c(0,0.5,0.5, 1), c(0,0,1,1), ty="l", lwd=1, col="black")
for (i in seq(1, 500, 10)) {
  points(rep(x_plot[i], 1000), y_pred[,i], col=orangered, bg=orangered,
         cex=0.2, alpha=0.001, pch=16)
}
# legend(-0.15, 2.1,
#        legend=c("BART"),
#        col=c("orangered"), lty = c(bartlty), cex=2.4, bty="n",
#        horiz=TRUE, xpd=TRUE,
# )
# NOTE(review): `a` below is computed but never used.
a <-density(integrals)$y
# plot(trainX, trainY, ylim=c(-0.2, 1.3))
# Panel 3: GP posterior mean with a 1.96-sd (95%) credible band.
plot(x_plot,
     gp_post_mean,
     col = "dodgerblue",
     cex=0.2,
     ty="l",
     ylim=c(-0.2, 1.5),
     xlab = "x",
     ylab = expression(f(x)),
     cex.lab = 1.7,
     cex.axis = 1.7,
     lwd=3
)
points(c(0,0.5,0.5, 1), c(0,0,1,1), ty="l", lwd=1, col="black")
# points(trainX[order(trainX),], trainY[order(trainX), ], col = "black", bg='black', pch=21, lwd=3)
polygon(c(x_plot, rev(x_plot)),
        c(
          gp_post_mean + 1.96*gp_post_sd,
          rev(gp_post_mean - 1.96*gp_post_sd)
        ),
        col = adjustcolor("dodgerblue", alpha.f = 0.40),
        border = "dodgerblue", lty = c("dashed", "solid"))
# legend(-0.15, 2.1,
#        legend=c("GP"),
#        col=c("dodgerblue"), lty = c(1), cex=2.4, bty="n",
#        horiz=TRUE, xpd=TRUE,
# )
# Shared legend: draw an invisible full-device plot on top and place
# a single legend for all three panels.
par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), mar = c(0, 0, 0, 0), new = TRUE)
plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n", cex.lab=0.01)
legend(-1.8, 1.1,
       legend=c("BART", "GP", expression(Pi*"[f]")),
       col=c("orangered", "dodgerblue", "black"), lty = c(bartlty, 1, 2), cex=2.4, bty="n", lwd=c(1.5, 1.5, 2),
       horiz=TRUE, xpd=TRUE,
)
dev.off()
|
e111b200a132d70b050e6d1d5880cdf745da0613 | 3ce9f380710a6e69958073ad9c7451a3e50c1cc9 | /updateDataExchanges.R | fe7406edfc8cda9d1d486f6b5233b28e07dc867e | [] | no_license | helixscript/geneTherapyReportPipeline | 78fe9d351cb3cece0af76fd092028d783e312042 | 30740bbdaa1b033af50e97b5c0b05f7055571ccc | refs/heads/main | 2023-08-27T12:19:14.303054 | 2023-08-07T18:14:48 | 2023-08-07T18:14:48 | 350,525,178 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,961 | r | updateDataExchanges.R | #!/opt/R-3.4.4-20180823/lib64/R/bin/Rscript
library(dplyr)
library(readr)
library(RMySQL)
# Root locations: project results tree, public exchange area, and the
# pipeline install directory (holds the bundled readMe template).
projectDir <- '/media/lorax/data/export/projects'
outputDir <- '/media/lorax/data/export/projects/exchange'
softwareDir <- '/media/lorax/data/software/geneTherapyReports'
# Every per-subject intSites.csv beneath the project tree.
d <- data.frame(file = system(paste0('find ', projectDir, ' -name intSites.csv '), intern = TRUE))
# Specimen metadata (GTSP accession number -> sample patient code).
dbConn <- dbConnect(MySQL(), group='specimen_management')
samples <- dbGetQuery(dbConn, 'select * from gtsp')
dbDisconnect(dbConn)
# Trial and subject are encoded in the path: .../<trial>/<subject>/intSites.csv
d$trial <- unlist(lapply(strsplit(as.character(d$file), '/'), function(x) x[length(x)-2]))
d$subject <- unlist(lapply(strsplit(as.character(d$file), '/'), function(x) x[length(x)-1]))
# For each trial: rebuild its exchange folder, copy PDF reports, combine
# all subjects' intSites, rename columns for export, and zip the bundle.
invisible(lapply(split(d, d$trial), function(x){
  f <- unlist(strsplit(as.character(x[1,]$file), '/'))
  trialName <- f[length(f) - 2]
  trialDir <- paste0(f[1:(length(f)-2)], collapse = '/')
  # Start from a clean slate for this trial's exchange folder and zip.
  if(dir.exists(file.path(outputDir, trialName))) unlink(file.path(outputDir, trialName), recursive = TRUE)
  if(file.exists(paste0(file.path(outputDir, trialName), '.zip'))) file.remove(paste0(file.path(outputDir, trialName), '.zip'))
  dir.create(file.path(outputDir, trialName))
  dir.create(file.path(outputDir, trialName, 'reports'))
  reports <- system(paste0('find ', trialDir, ' -name *.pdf '), intern = TRUE)
  invisible(lapply(reports, function(r) system(paste0('cp ', r, ' ', file.path(outputDir, trialName, 'reports')))))
  # Read and row-bind every subject's intSites.csv; columns that can vary
  # in inferred type across files are coerced to character first
  # (presumably so bind_rows() does not fail on type mismatches).
  r <- bind_rows(lapply(as.character(x$file), function(x2){
    x2 <- read_csv(x2)
    x2$patient <- as.character(x2$patient)
    x2$cellType <- as.character(x2$cellType)
    x2$dataSource <- as.character(x2$dataSource)
    x2$timePoint <- as.character(x2$timePoint)
    x2
  })) %>% left_join(select(samples, SpecimenAccNum, SamplePatientCode), by = c('GTSP' = 'SpecimenAccNum'))
  # Select and rename columns to the external exchange schema.
  o <- select(r, seqnames, start, strand, refGenome, reads, patient, SamplePatientCode, GTSP, cellType,
              timePoint, estAbund, relAbund, nearestFeature, inFeature, nearestFeatureStrand, inFeatureExon,
              nearestFeatureDist, nearestOncoFeature, nearestOncoFeatureDist)
  names(o) <- c('chromosome', 'position', 'strand','refGenome', 'reads', 'subject', 'externalSampleID',
                'internalSampleID', 'cellType', 'timePoint', 'estAbund', 'relAbund', 'nearestFeature', 'inFeature',
                'nearestFeatureStrand', 'inFeatureExon', 'nearestFeatureDist', 'nearestOncoFeature', 'nearestOncoFeatureDist')
  write_tsv(o, file.path(outputDir, trialName, 'intSites.tsv'), col_names = TRUE)
  system(paste0('cp ', softwareDir, '/intSites_readMe.txt ', file.path(outputDir, trialName, 'readMe.txt')))
  # Zip from inside the trial directory so archive paths are relative.
  dir <- getwd()
  setwd(file.path(outputDir, trialName))
  #system(paste0('zip -r ', paste0(file.path(outputDir, trialName), '.zip'), ' ', file.path(outputDir, trialName)))
  system(paste0('zip -r ', paste0(file.path(outputDir, trialName), '.zip'), ' *'))
  setwd(dir)
}))
|
dae9585991f9c0418cae2580a1bd11ef413dbd87 | 5e613fdaaf680b7220a9331133d79a7dcbca8acd | /R/deps/taxize-master/tests/test-all.R | 689b52a160c9a3efdfbdf15823e1c8eab9fb5a31 | [
"MIT"
] | permissive | hmarx/Alpine-Sky-Islands | df0fd965ca4e1d4e3071aa9362ee615a5510175d | 72ab7d914fea6c76c9ae105e042e11088a9be87f | refs/heads/master | 2021-05-01T02:44:59.818086 | 2017-08-08T15:02:45 | 2017-08-08T15:02:45 | 39,544,747 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 38 | r | test-all.R | library(testthat)
test_check("taxize") |
112e9df76d652310e569ab01d7b23295f4644695 | 07ca789edc86a0df1ccfc4b7fe89eb4b416f6e78 | /SCRIPTS/Emory/get_ann.r | 89c82ab5835b3e54793e4a6c65ff7ba51ad4f20d | [] | no_license | niaid/h5n1-chi-010 | 1316b5cbcb34b9699ef0405d0d97d66b9bfbbf0d | 35487afdd368bb91c9693d5b79750c98b326614c | refs/heads/main | 2023-05-24T14:47:00.117636 | 2021-06-23T21:24:04 | 2021-06-23T21:24:04 | 379,733,109 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,052 | r | get_ann.r | source("SCRIPTS/0_initialize.r")
# Build a probeset-ID -> gene-symbol map from the pre-downloaded GEO
# platform annotation for GPL13158, then pick one probeset per gene.
fn = file.path(PROJECT_DIR, "DATA_ORIGINAL/Emory/GPL13158.annot.gz")
library(GEOquery)
# geo = getGEO("GPL13158", AnnotGPL=T, getGPL=T) # get annotation directly from GEO
geo = getGEO(filename=fn) # use predownloaded data for reproducibility
ann = geo@dataTable@table
dim(ann)
# NOTE(review): `mat` is not defined in this script (expected from
# 0_initialize.r / earlier processing); `adx` is only used by the
# commented-out sanity check on the next line.
adx = match(rownames(mat), ann$ID)
# all.equal(rownames(mat), as.character(ann$ID)[adx])
# Keep only probesets mapping to a single, non-empty gene symbol
# ("///" marks multi-gene probesets in GEO annotation tables).
out = ann %>% select(ID,`Gene symbol`) %>%
  rename(gene=`Gene symbol`) %>%
  mutate(ID=as.character(ID), gene=as.character(gene)) %>%
  filter(!grepl("///",gene) & gene!="")
fn.map = file.path(PROJECT_DIR, "DATA_PROCESSED/Emory/GPL13158.ann.txt")
write.table(out, file=fn.map, sep="\t", quote=F, col.names=T, row.names=F)
# Select one probeset per gene.  NOTE(review): `eset` must already exist
# in the calling environment; the "_PC1" output suffix suggests a
# first-principal-component criterion - verify in pick.probeset.r.
source(file.path(PROJECT_DIR, "SCRIPTS/functions/pick.probeset.r"))
pick.probeset(eset, fn.map) # generates file.map.pc1
# fn.map = file.path(PROJECT_DIR, "DATA_PROCESSED/Emory/GPL13158.ann_PC1.txt")
# gene.map = read.table(file.map.pc1, sep="\t", header=T, row.names=NULL, stringsAsFactors = F)
|
81403b8170c7420ece1f968f83a5d590aaecae29 | 9ef2241db80df30e3b2201b961e92ebb2a2f869d | /Webscraping Extracting Table.R | 1175b6bc2086654b39202a3f88913f96063d8f62 | [] | no_license | nbizzle23/Web-Scraping | 9a7377a89f4718160e52c604dca686fc0db95923 | 19eaafb0ab93dc8cf2b665cb310cb788549f145e | refs/heads/master | 2022-11-23T11:14:08.217766 | 2020-07-30T18:30:53 | 2020-07-30T18:30:53 | 279,686,359 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 540 | r | Webscraping Extracting Table.R | #Webscraping Extracting Table
library(rvest)
# Download the page HTML and parse it into an XML document.
orlando <- read_html("http://www.bestplaces.net/climate/city/florida/orlando")
# Select every <table> node on the page.
tables <- html_nodes(orlando, css = "table")
html_table(tables, fill = TRUE)
# Keep the first table as a data frame (fill = TRUE pads ragged rows).
dftable <- html_table(tables, fill = TRUE)[[1]]
dftable
# Label the columns of the extracted table.
colnames(dftable) <- c("Climate", "Orlando, Florida", "United States")
#dftable <- dftable[-c(1),] removes first row
View(dftable)
|
3c8965dba4158aee2cd2154a8cce3c47e409606f | 99a16aa031c6d3e6d1a1c9849523c09ee3de11b2 | /Udacity/Project 3 - Prosper in R/Prosper in R.R | 78de6b2069cd8b4a0163fc997a1b793721f10c0a | [] | no_license | Ntweat/Projects | 7d2eb6fd10601323c5f9e70dde860eacc43daf3b | b5ce907f2a8ee8fda55da820c7461fdc5b2b1b89 | refs/heads/master | 2021-09-12T09:44:59.222690 | 2018-04-16T01:21:29 | 2018-04-16T01:21:29 | 99,296,336 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 840 | r | Prosper in R.R | ---
Examine and Summarize Data Using R - Prosper
=============================================
**By: Saurabh Hinduja**
---
## Introduction
This section contains information about the dataset selected, reasons why the dataset was selected
*Dataset Selected:* Prosper
**What is Prosper**
There are many kinds of peer-to-peer networks, serving purposes such as file sharing and meeting new friends. Prosper is America's first and leading peer-to-peer lending network. In this kind of network, lenders research borrowers and lend them money directly.
Prosper works because it offers a lower rate of interest than banks and because multiple lenders can contribute to the same loan. It is therefore attractive to borrowers, who pay a lower rate of interest, and to lenders, because their individual risk is reduced and together they can fund a larger loan amount.
|
ea0feade256bd702b37a4cd64592041b76d82136 | 60f33fdf39113d7c2a4dec149725e596ad227f48 | /R/get_xy_lims.R | ec2e308a4b1a82d038c1c642ea00b5e531ed0264 | [] | no_license | BenaroyaResearch/limmaTools | 958907faef92b7e0ffac1e3f63d1f7483af97c84 | a8c8aa109c95cf852fc48f870d7dda7134aa5a94 | refs/heads/master | 2023-06-02T01:44:15.792587 | 2021-06-22T22:14:01 | 2021-06-22T22:14:01 | 75,264,990 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,574 | r | get_xy_lims.R | #' Accessory function for determining x and y limits for volcano plots
#'
#' This function determines appropriate x and y limits for volcano plots or sets of volcano plots.
#' @param topGenes a data frame or list of data frame. Each data frame typically contains the output of a call to \code{topTable}. Must contain genes, log2 fold-change, and adjusted p-values.
#' @param pairwise logical, whether \code{topGenes} contains multiple comparisons. Defaults to FALSE. If TRUE, the function finds common limits for all plots based on the most extreme values across all comparisons.
#' @param x_symmetric logical, whether the x-axis should be made symmetric.
#' @param adjust_factor numeric, the factor for adjusting the limits of the x- and y-axes. The returned limits will be extended by this much to allow points at the extremes of the plot to fit. Defaults to 0.01.
#' @param y_floor_zero logical, whether to set the lower limit of y to 0. Defaults to TRUE.
#' @param x_col,y_col character or integer, the name or number of the columns containing the data to be plotted on the x- and y-axes. Default to "logFC" and "adj.P.Val", respectively.
#' @param neg_log10_ycol logical, whether the y-axis will be plotted as \code{(-log10(y))}. Defaults to TRUE, as the function is intended for use with p-values.
#' @param min_x_abs numeric, the minimum absolute value for the extent of the x-axis. Defaults to NULL, which sets no default. Typically used when plotting with thresholds, to ensure that threshold value is shown.
#' @param min_y2 numeric, the minimum value for upper limit the y-axis. Defaults to NULL, which sets no default. Typically used when plotting with thresholds, to ensure that threshold value is shown.
#' @export
#' @return A list, with vectors for x- and y-limits.
#' @details This function finds reasonable x- and y-limits for plots; it is specifically designed for use with volcano plots.
#' @usage \code{
#' get_xy_lims(topGenes,
#' pairwise=FALSE,
#' x_symmetric=TRUE,
#' adjust_factor=0.01,
#' y_floor_zero=TRUE,
#' x_col="logFC", y_col="adj.P.Val",
#' neg_log10_ycol=TRUE,
#' min_x_abs=NULL,
#' min_y2=NULL)}
get_xy_lims <-
function(topGenes,
pairwise=FALSE,
x_symmetric=TRUE,
adjust_factor=0.01,
y_floor_zero=TRUE,
x_col="logFC", y_col="adj.P.Val",
neg_log10_ycol=TRUE,
min_x_abs=NULL,
min_y2=NULL) {
if (pairwise) {
x_lims <- range(unlist(lapply(topGenes, function(x) x[,grep(x_col, colnames(x), value=TRUE)])))
y_lims <- range(unlist(lapply(topGenes, function(x) x[,grep(y_col, colnames(x), value=TRUE)])))
} else {
x_lims <- range(topGenes[,x_col])
y_lims <- range(topGenes[,y_col])
}
if (neg_log10_ycol) y_lims <- rev(-log10(y_lims))
if (!is.null(min_y2))
if (y_lims[2] < min_y2) y_lims[2] <- min_y2
if (!is.null(min_x_abs)) {
if (x_lims[1] > -min_x_abs) x_lims[1] <- min_x_abs
if (x_lims[2] < min_x_abs) x_lims[2] <- min_x_abs
}
if (x_symmetric) x_lims <- c(-1,1) * max(abs(x_lims))
x_adjustment <- (x_lims[2] - x_lims[1]) * adjust_factor
x_lims <- x_lims + (c(-1,1) * x_adjustment)
if (y_floor_zero & (y_lims[1] >= 0)) {
y_lims[1] <- 0
y_adjustment <- (y_lims[2] - y_lims[1]) * adjust_factor
y_lims[2] <- y_lims[2] + y_adjustment
} else {
y_adjustment <- (y_lims[2] - y_lims[1]) * adjust_factor
y_lims <- y_lims + (c(-1,1) * y_adjustment)
}
list(x=x_lims, y=y_lims)
}
|
5d2961a9a3bf481fcb714c5616db38ac618051d1 | 037bee00f2fe46ae4708eeed46bf519d87f10f7c | /R/degree.R | 83dd9a4bd6ce0d6a51fc3fff16774bce8a5faed3 | [] | no_license | cran/HyperG | 60e03023748d5cd16fe0ad1b52dbb6b6d9993228 | 48fea6d712fa369a6a85d486ce490a9fb0bac605 | refs/heads/master | 2023-03-23T09:26:59.501007 | 2021-03-04T08:20:11 | 2021-03-04T08:20:11 | 344,524,380 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,764 | r | degree.R | ## HyperG R Package
##
## Copyright (c) 2021 David J. Marchette <dmarchette@gmail.com>
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##
# Vertex degrees of a hypergraph: column sums of the incidence matrix h$M.
# Assumes columns of h$M index vertices (rows index hyperedges) - TODO
# confirm against the hypergraph constructor used elsewhere in HyperG.
# Matrix::colSums handles both dense and sparse incidence matrices.
hdegree <-
function(h)
{
  Matrix::colSums(h$M)
}
# Plot the empirical degree distribution of a graph or hypergraph on
# log-log axes, optionally with a least-squares line fitted in log space
# (a rough power-law exponent estimate).
#
# h:         an igraph graph (degrees via igraph::degree) or a hypergraph
#            (degrees via hdegree).
# xlab,ylab: axis labels.
# add.line:  if TRUE, overlay the log-log least-squares fit.
# lty,lwd,line.col: graphical parameters for the fitted line.
# ...:       further arguments passed to plot().
plotDegreeDistribution <- function(h,
                                   xlab="Degree",
                                   ylab="Density",
                                   add.line=FALSE,
                                   lty=2,lwd=1,line.col=1,
                                   ...)
{
   if(is.igraph(h)) {
      deg <- degree(h)
   } else {
      deg <- hdegree(h)
   }
   # Density on unit-width bins; breaks -1:max(deg) so degree 0 has a bin.
   hi <- hist(deg,-1:max(deg),plot=FALSE)$density
   # Drop empty bins (log of zero is undefined on the log-log plot).
   # BUG FIX: when no bin is empty, which() returns integer(0) and
   # x[-integer(0)] selects *nothing*, so the original code plotted an
   # empty data set.  Only negative-index when there is something to drop.
   z <- which(hi==0)
   if (length(z) > 0) {
      X <- (1:length(hi))[-z]
      Y <- hi[-z]
   } else {
      X <- 1:length(hi)
      Y <- hi
   }
   x <- log(X,10)
   y <- log(Y,10)
   plot(X,Y,xlab=xlab,ylab=ylab,log='xy',...)
   if(add.line){
      # Least-squares fit in log-log space.
      l <- lm(y ~ x,data=data.frame(x=x,y=y))
      abline(reg=l,lty=lty,col=line.col,lwd=lwd)
   }
}
|
f493c3919ff25da6bd4a356bf369522c8933a1d8 | 34e9163c89795d6b398c4418c7047a242581bf43 | /LogisticRegressioncreditcard.R | 774e69e59bdd85d72c32e82544bd5ae0aac0e6e4 | [] | no_license | lokeshlav95/Logistic_regression_creditcard | 50550c5afe1486a791e4e85ff35f9e348ff4a308 | 81c987739233598cab3d3206ab78506e1ac57068 | refs/heads/master | 2022-11-06T23:28:36.814339 | 2020-07-07T19:22:22 | 2020-07-07T19:22:22 | 277,903,418 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,115 | r | LogisticRegressioncreditcard.R | credit_card <- read.csv(file.choose())
# Quick look at the data (all columns except the first).
View(credit_card[-1]) #except first column selecting all
str(credit_card)
# NOTE(review): install.packages() inside a script re-installs on every
# run; normally done once, outside the script.
install.packages("caret")
library(caret)
# Dummy-encode predictors.  NOTE(review): `dmy` is created but never
# applied below - TODO confirm whether it was meant to transform the data.
dmy <- dummyVars(" ~ .", data = credit_card,fullRank = T)
# Logistic regression of card status on all other columns.
model <- glm(card~.,data = credit_card,family = "binomial")
summary(model)
# Fitted probabilities on the training data.
prob <- predict(model,type = c("response"),credit_card)
prob
# Confusion matrix at a 0.5 cut-off and the resulting training accuracy.
confusion <- table(prob>0.5,credit_card$card)
confusion
accuracy <- sum(diag(confusion)/sum(confusion))
accuracy #0.901838
#Roc Curve
install.packages("ROCR")
library(ROCR)
rocpred <- prediction(prob,credit_card$card)
rocperf <- performance(rocpred,'tpr','fpr')
plot(rocperf,colorize=T,text.adj=c(-0.2,4))
# Per-observation hard class labels at the 0.5 cut-off.
# NOTE(review): the hard-coded bound 45211 and the `bank_data` object
# below belong to a different (bank marketing) data set - this tail
# section appears pasted from another script and will not run against
# credit_card as-is.
pred_values <- NULL
yes_no <- NULL
for (i in 1:45211){
  pred_values[i] <- ifelse(prob[i]>=0.5,1,0)
  yes_no[i] <- ifelse(prob[i]>=0.5,"yes","no")
}
bank_data[,"prob"] <- prob
bank_data[,"pred_values"] <- pred_values
bank_data[,"yes_no"] <- yes_no
View(bank_data)
View(bank_data[,c(17,18,19,20)])
# Accuracy recomputed against the term.deposit outcome.
acc <- table(bank_data$term.deposit,pred_values)
acc
Accuracy<-sum(diag(acc)/sum(acc))
Accuracy
|
09d81b06406781fd9d7dd70aa8a659a49aa506df | 7b757b26bf190e35b2917e02d20ff796b6073369 | /scripts/flux-appendix-figs.R | c70a4163ce7ad51fe9625595464dbfbbc1359a56 | [
"MIT"
] | permissive | BNU-Reggie/jgr-co2-flux | 5db923dce3269f039db074a5a5f01227643e13b7 | ab2cfcf37b3e6c0fe4fdad584400a8baa3a7c29b | refs/heads/master | 2020-05-25T04:38:10.562818 | 2018-06-06T18:07:32 | 2018-06-06T18:07:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,084 | r | flux-appendix-figs.R | ## code for appendix figures + reviewer response plots following revision
library('ggplot2')
library("mgcv")
library("plyr")
library("dplyr")
library('reshape2')
library('gridExtra')
library('extrafont')
## read in regvars and co2expl and models
# Pre-computed inputs: regression variables, CO2-explained data, weather
# covariates, and the fitted models (mgcv GAMs, per the diagnostics
# below): the simplified lagged model and its TDN variant.
regvars <- readRDS("../data/private/regvars.rds")
expl <- readRDS("../data/private/co2explained.rds")
weathers <- readRDS('../data/weathers.rds')
egmodlagged <- readRDS("../data/private/egmodlaggedsimp.rds")
egmodtdn <- readRDS("../data/private/egmodlaggedtdn.rds")
## create a theme to save linespace in plots
papertheme <- theme_bw(base_size=10, base_family = 'Arial') +
  theme(legend.position='top')
## create function for shared legends
# Extract the legend grob ("guide-box") from a ggplot object so a single
# legend can be drawn once for a grid of plots (e.g. via gridExtra).
# NOTE(review): on newer ggplot2 versions which() can match several grobs
# ("guide-box-top" etc.) - confirm against the installed version.
g_legend<-function(a.gplot){
  tmp <- ggplot_gtable(ggplot_build(a.gplot))
  leg <- which(sapply(tmp$grobs, function(x) x$name) == "guide-box")
  legend <- tmp$grobs[[leg]]
  return(legend)}
## my lazy labeler fix
# Facet labeller that parses each strip label as an R plotmath expression,
# so labels such as 'plain(TDN)~mu~N~L^{-1}' render as formatted math.
# Modelled on ggplot2's label_parsed; when multi_line = FALSE the values
# for one facet are wrapped in list(...) and parsed as one expression.
mylabel_parsed <- function (labels, multi_line = FALSE)
{
  # label_value() is ggplot2's default labeller: returns the labels as
  # character vectors (one element per facetting variable).
  labels <- label_value(labels, multi_line = multi_line)
  if (multi_line) {
    # One parsed expression per label line.
    lapply(unname(labels), lapply, function(values) {
      c(parse(text = as.character(values)))
    })
  }
  else {
    # Collapse all variables for a facet into list(a, b) and parse once.
    lapply(labels, function(values) {
      values <- paste0("list(", values, ")")
      lapply(values, function(expr) c(parse(text = expr)))
    })
  }
}
## put data together for facetted plot
# Size check before merging the two tables.
nrow(expl)
nrow(regvars)
# Harmonise column names, then append the columns unique to expl.
names(expl)[names(expl) %in% c("YEAR","LAKE")] <- c("Year","Lake")
names(expl)[names(expl) %in% names(regvars)] # some overlap
extras <- names(expl)[!names(expl) %in% names(regvars)]
regvars <- merge(regvars, expl[,c("Year","Lake","Month","Date",extras)])
subdf <- regvars[,c("Date","Lake","Year","Month","DOY","Chl_a_ug_L", "TDN_ug_L","DOC_mg_L","TDP_ug_L",
                    "TIC_mg_L" , "pH_surface")]
# Monthly means per lake and year.
# NOTE(review): this aggregate() result is immediately overwritten by the
# split/mycolMeans approach below (the two differ in NA handling) -
# presumably kept for reference; confirm it can be removed.
monthmeans <- aggregate(cbind(Chl_a_ug_L, TDN_ug_L,DOC_mg_L,TDP_ug_L,
                              TIC_mg_L , pH_surface)~Month + Lake + Year, data=subdf, mean, na.rm=TRUE, na.action=NULL)
# Split into Month x Lake x Year groups and drop empty combinations.
monthmeans <- with(subdf, split(subdf, list(Month, Lake, Year), drop=TRUE))
monthmeans <- monthmeans[sapply(monthmeans, function(x) dim(x)[1]) > 0]
mycolMeans <- function(df, cols) {
  # Collapse one Month x Lake x Year slice to its per-column means.
  #
  # df:   a data frame (or object coercible to one) holding a single
  #       Month/Lake/Year group; must contain Lake, Month and Year columns
  #       plus every column named in `cols`.
  # cols: character vector of numeric column names to average.
  #
  # Returns a one-row data frame: the group's Lake, Month and Year
  # identifiers (taken from the first row) followed by the NA-stripped
  # mean of each requested column.
  df <- as.data.frame(df)
  # Direct indexing instead of subset(): subset()'s non-standard
  # evaluation of `select` is fragile inside functions (see ?subset,
  # Warning section).  drop = FALSE keeps a data frame for length(cols) == 1.
  subdf <- df[, cols, drop = FALSE]
  means <- colMeans(subdf, na.rm = TRUE)
  cbind(data.frame(Lake = df$Lake[1], Month = df$Month[1], Year = df$Year[1], t(means)))
}
# One row of per-group means per Month x Lake x Year combination.
monthmeans <- do.call(rbind, lapply(monthmeans, mycolMeans, cols=c("Chl_a_ug_L", "TDN_ug_L","DOC_mg_L",
                                                                   "TDP_ug_L","TIC_mg_L" , "pH_surface")))
# Long format for facetting; `realname` holds plotmath strip labels.
monthmelt <- melt(monthmeans, id.vars = c("Lake","Year","Month"))
monthmelt$realname <- factor(monthmelt$variable,
                             labels=c('plain(Chl)~italic(a)~mu~L^{-1}', 'plain(TDN)~mu~N~L^{-1}',
                                      'plain(DOC)~plain(mg)~C~L^{-1}', 'plain(TDP)~mu~P~L^{-1}',
                                      'plain(DIC)~mu~C~L^{-1}',"pH"))
monthmelt <- droplevels(monthmelt)
monthmelt$shortlake <- monthmelt$Lake
# NOTE(review): relabelling by position relies on the existing level
# order of Lake - verify the codes map to these full names in this order.
monthmelt$Lake <- factor(monthmelt$Lake, labels=c("Katepwa","Last Mountain","Buffalo Pound",
                                                  "Crooked","Diefenbaker","Wascana","Pasqua"))
## plot: May-August monthly means, facetted by variable and year.
appendixplot <- ggplot(monthmelt[monthmelt$Month > 4 & monthmelt$Month <9,],
                       aes(y=value, x=Month,group=Lake)) +
  papertheme +
  geom_point(aes(col=Lake), alpha=0.8, size=.8) +
  geom_line(aes(col=Lake)) +
  facet_wrap(realname~Year, scales = "free_y",labeller = mylabel_parsed, dir="h")+
  theme(strip.background = element_rect(fill="white", colour = "white")) +
  guides(col=guide_legend(nrow=1, override.aes = list(alpha=1, size=2))) +
  scale_color_manual(values=c('#8c510a','#d8b365','#fc8d59','black','#c7eae5','#5ab4ac','#01665e'))
ggsave("../docs/private/flux-appendix.pdf", appendixplot, width=18, height=10)
## plot diagnostics for the flux model
# check tdn mod if want
#gam.check(egmodtdn)
#summary(egmodtdn)
type <- "deviance" ## "pearson" & "response" are other valid choices
resid <- residuals(egmodlagged, type = type)
# napredict() re-inserts NAs at the positions dropped during fitting so
# these vectors line up with the original data.
linpred <- napredict(egmodlagged$na.action, egmodlagged$linear.predictors)
observed.y <- napredict(egmodlagged$na.action, egmodlagged$y)
## change qq plot to capitalised axis labels
# QQ plot of GLM/GAM deviance residuals: a lightly edited copy of
# mgcv's qq.gam, with the axis labels changed to "Deviance residuals" /
# "Theoretical quantiles" for publication figures.
# Reference quantiles: if the family supplies a quantile function (qf),
# theoretical quantiles are averaged over s.rep shuffles of uniform
# plotting positions; otherwise `rep` simulated residual sets from the
# family's random deviate generator (rd) provide the quantiles and, for
# 0 < level < 1, a pointwise reference envelope.
myQQ <- function (object, rep = 0, level = 0.9, s.rep = 10,
                  type = c("deviance",
                 "pearson", "response"), pch = ".", rl.col = 2, rep.col = "gray80", ...) {
  type <- match.arg(type)
  ylab <- "Deviance residuals"
  if (inherits(object, c("glm", "gam"))) {
    if (is.null(object$sig2))
      object$sig2 <- summary(object)$dispersion
  }
  else stop("object is not a glm or gam")
  # Drop na.action so residuals() returns only the fitted cases.
  object$na.action <- NULL
  D <- residuals(object, type = type)
  # Mixed-model fits: plain normal QQ plot, no simulation machinery.
  if (object$method %in% c("PQL", "lme.ML", "lme.REML", "lmer.REML",
                           "lmer.ML", "glmer.ML")) {
    qqnorm(D, ylab = ylab, pch = pch, ...)
    return()
  }
  lim <- Dq <- NULL
  if (rep == 0) {
    # No simulation requested: use the family quantile function if it
    # exists, otherwise fall back to 50 simulated replicates.
    fam <- fix.family.qf(object$family)
    if (is.null(fam$qf))
      rep <- 50
    level <- 0
  }
  n <- length(D)
  if (rep > 0) {
    # Simulate `rep` response vectors from the fitted model and collect
    # the sorted residuals of each replicate.
    fam <- fix.family.rd(object$family)
    if (!is.null(fam$rd)) {
      dm <- matrix(0, n, rep)
      for (i in 1:rep) {
        yr <- fam$rd(object$fitted.values, object$prior.weights,
                     object$sig2)
        object$y <- yr
        dm[, i] <- sort(residuals(object, type = type))
      }
      Dq <- quantile(as.numeric(dm), (1:n - 0.5)/n)
      alpha <- (1 - level)/2
      if (alpha > 0.5 || alpha < 0)
        alpha <- 0.05
      if (level > 0 && level < 1)
        lim <- apply(dm, 1, FUN = quantile, p = c(alpha,
                                                  1 - alpha))
      else if (level >= 1)
        lim <- level
    }
  }
  else {
    # Direct route: theoretical quantiles from the family's qf, averaged
    # over s.rep random orderings of the plotting positions.
    U <- (1:n - 0.5)/n
    if (!is.null(fam$qf)) {
      dm <- matrix(0, n, s.rep)
      for (i in 1:s.rep) {
        U <- sample(U, n)
        q0 <- fam$qf(U, object$fitted.values, object$prior.weights,
                     object$sig2)
        object$y <- q0
        dm[, i] <- sort(residuals(object, type = type))
      }
      Dq <- sort(rowMeans(dm))
    }
  }
  if (!is.null(Dq)) {
    qqplot(Dq, D, ylab = ylab, xlab = "Theoretical quantiles",
           ylim = range(c(lim, D)), pch = pch, ...)
    abline(0, 1, col = rl.col)
    if (!is.null(lim)) {
      if (level >= 1)
        for (i in 1:rep) lines(Dq, dm[, i], col = rep.col)
      else {
        # Shaded pointwise envelope between the lower and upper bands.
        n <- length(Dq)
        polygon(c(Dq, Dq[n:1], Dq[1]), c(lim[1, ], lim[2,
                                                       n:1], lim[1, 1]), col = rep.col, border = NA)
      }
      abline(0, 1, col = rl.col)
    }
    points(Dq, sort(D), pch = pch, ...)
    return(invisible(Dq))
  }
  else qqnorm(D, ylab = ylab, pch = pch, ...)
}
pdf("../docs/private/appendix2.pdf", onefile=TRUE)
op <- par(mfrow = c(2,2))
myQQ(egmodlagged, rep = 0, level = 0.9, type = type, rl.col = 2,
rep.col = "gray80", main="QQ plot")
hist(resid, xlab = "Residuals", main = "Histogram of residuals")
plot(linpred, resid, main = "Residuals vs linear predictor",
xlab = "Linear predictor", ylab = "Residuals")
plot(fitted(egmodlagged), observed.y, xlab = "Fitted Values",
ylab = "Response", main = "Response vs Fitted Values")
par(op)
dev.off()
## save text of model output
## save summary as txt document
egmodsum <- summary(egmodlagged)
sink("../docs/private/egmodsummary.txt")
egmodsum
sink()
## hypothetical prediction:
## NOTE(review): the original script built a two-row "contrasting lakes"
## newdata here and then immediately overwrote it with the single-scenario
## frame below, so the first assignment was dead code.  It is kept,
## commented out, in case the scenario comparison is wanted again.
# newdata <- data.frame(Chl_a_ug_L = c(200, 5), DOC_mg_L = c(50, 5), Oxygen_ppm = c(8, 3),
#                       SPEI02 = c(-1.5, 0.2), PDOmean = c(1.5, 0.2),
#                       SOImean = c(-1.1, -1.1), Year = c(2009, 2012), Lake = c("WW", "B"), dummy = c(1, 1))
## one 'average' limnological scenario, predicted for each of six lakes
newdata <- data.frame(Chl_a_ug_L = 40,DOC_mg_L = 12, Oxygen_ppm = 9,
                      SPEI02 = 0.2, PDOmean=0.01,
                      SOImean = 0.21, Year=1995, Lake=c("WW","B","D","L","K","C"), dummy=1)
predict(egmodlagged, newdata=newdata, se.fit = TRUE)
#summary(predict(egmodlagged))
#Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
# 7.765 8.648 8.861 8.881 9.108 10.057 169
#summary(egmodlagged$y)
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#7.050 8.590 8.820 8.878 9.200 10.940
## plot of predicted vs observed pH over time
## Rebuild the regression frame so the fitted model can be applied to every
## observation (same transforms as used when fitting: clamp chl/DOC at zero
## then add 1 before the log-link model sees them).
regvarf <- regvars
regvarf <- merge(regvarf, weathers)
regvarf <- transform(regvarf, Year = as.factor(Year)) # make Year into factor for re
regvarf2 <- regvarf
regvarf2$`Chl_a_ug_L`[regvarf2$`Chl_a_ug_L` <=0 ] <- 0
regvarf2$`DOC_mg_L`[regvarf2$`DOC_mg_L` <=0 ] <- 0
regvarf2$`Chl_a_ug_L` <- regvarf2$`Chl_a_ug_L` + 1
regvarf2$`DOC_mg_L` <- regvarf2$`DOC_mg_L` + 1
regvarf2 <- transform(regvarf2, dummy = rep(1, nrow(regvarf2)))
regvarf2$preds <- predict(egmodlagged, newdata=regvarf2, type = "response")
# map lake codes to readable names
# NOTE(review): assumes unique(regvarf2$Lake) comes back in the order that
# matches these seven names -- verify, since unique() order is data order
realnames <- data.frame(Lake = c(as.character(unique(regvarf2$Lake))),
                        LakeName = c("BuffaloPound","Crooked","Diefenbaker","Katepwa",
                                     "LastMountain","Pasqua","Wascana"))
regvarf2 <- merge(regvarf2, realnames)
regvarf2$error <- regvarf2$pH_surface - regvarf2$preds
# monthly means of observed pH, predicted pH, and prediction error per lake
summaries <- ddply(regvarf2, .(LakeName, Year, Month), dplyr::summarise, meanpH = mean(pH_surface, na.rm=TRUE),
                   meanPred = mean(preds, na.rm = TRUE), meanError = mean(error, na.rm = TRUE))
# long format for faceted plotting; Class labels the three series
testdf <- reshape2::melt(regvarf2, id.vars = c('LakeName', 'Date', 'Month', 'Year'),
                         measure.vars = c('preds', 'pH_surface','error'))
testdf$Class <- ifelse(testdf$variable == "preds", "Predicted",
                       ifelse(testdf$variable == "error", "Error", "Measured"))
testdf2 <- melt(summaries, measure.vars = c('meanpH',"meanPred","meanError"))
testdf2$Class <- ifelse(testdf2$variable == "meanPred", "Predicted",
                        ifelse(testdf2$variable == "meanError", "Error", "Measured"))
# drop sparsely sampled years and split the rest into two plotting cohorts
testdf2 <- testdf2[-which(testdf2$Year %in% c(1994, 1996, 2002)),]
testdf2$cohort <- ifelse(testdf2$Year %in% c(2006:2014), "one",'two')
firstyears <- ggplot(testdf2[testdf2$cohort == 'two' & testdf2$Class!="Error",],
                     aes(y=value, x=Month, group=LakeName, col=LakeName)) +
  papertheme +
  geom_point(size=1, alpha=0.8) +
  facet_grid(Class~Year) +
  scale_color_manual(values=c('#8c510a','#d8b365','#fc8d59','black','#c7eae5','#5ab4ac','#01665e')) +
  theme(axis.text.x = element_text(angle=45)) +
  geom_smooth(aes(y=value), se=FALSE, size=0.5, alpha=1) + ylab('pH')
secondyears <-
  ggplot(testdf2[testdf2$cohort == 'one' & testdf2$Class!="Error" & testdf2$Month > 4 & testdf2$Month <10,], aes(y=value, x=Month, group=LakeName, col=LakeName)) +
  papertheme +
  geom_point(size=1, alpha=0.8) +
  facet_grid(Class~Year) +
  scale_color_manual(values=c('#8c510a','#d8b365','#fc8d59','black','#c7eae5','#5ab4ac','#01665e')) +
  theme(axis.text.x = element_text(angle=45)) +
  geom_smooth(aes(y=value), se=FALSE, size=0.5, alpha=1) +ylab('pH')
# all years in one figure: measured pH as points, model predictions as lines
allyears <- ggplot(testdf2[testdf2$Class=="Measured" & testdf2$Month > 4 & testdf2$Month <10,],
                   aes(y=value, x=Month, group=LakeName, col=LakeName)) +
  papertheme +
  geom_point(size=1, alpha=0.8) +
  facet_grid(LakeName~Year) +
  scale_color_manual("Lake", values=c('#8c510a','#d8b365','#fc8d59','black','#542788','#5ab4ac','#01665e')) +
  theme(axis.text.x = element_text(angle=45),
        legend.position = 'none', strip.text = element_text(size=6)) +
  geom_smooth(data=testdf2[testdf2$Class=="Predicted" & testdf2$Month > 4 & testdf2$Month <10,],
              aes(y=value), se=FALSE, size=0.5, alpha=1) +ylab('pH: predicted (line) & measured (points)')
#allyears <- grid.arrange(firstyears, secondyears, ncol=1)
#ggsave(plot = allyears, filename = "../docs/private/truevspred.pdf", height=7, width=10)
ggsave(plot = allyears, filename = "../docs/private/truevspred-allyears.pdf", height=6, width=10)
## =================================================================================================
## indication of intra-annual variation between May and Sep for each lake all years
## Range (max - min) of a numeric vector, ignoring NAs.
## Returns NA_real_ (instead of the -Inf produced by max()/min() on
## all-missing input) when no non-missing values are present; the
## downstream `is.na(x) <- sapply(x, is.infinite)` fix-up still works
## but is no longer required.
myRange <- function(dat) {
  if (all(is.na(dat))) {
    return(NA_real_)
  }
  max(dat, na.rm = TRUE) - min(dat, na.rm = TRUE)
}
# Per-lake, per-year within-season (May-Sep) variability of each variable:
# first as full range (myRange), then as median absolute deviation (mad).
vardf <- ddply(regvars[regvars$Month>4 & regvars$Month <10,], .(Lake, Year), summarise, chl=myRange(Chl_a_ug_L),
               tdn=myRange(TDN_ug_L), doc=myRange(DOC_mg_L), pH=myRange(pH_surface),
               tdp=myRange(TDP_ug_L), resp=myRange(R_h))
# groups with no data yield +/-Inf from max/min; convert those to NA
is.na(vardf) <- sapply(vardf, is.infinite)
vardfmad <- ddply(regvars[regvars$Month>4 & regvars$Month <10,], .(Lake, Year), summarise, chl=mad(Chl_a_ug_L, na.rm = TRUE),
                  tdn=mad(TDN_ug_L, na.rm = TRUE), doc=mad(DOC_mg_L, na.rm = TRUE), pH=mad(pH_surface, na.rm = TRUE),
                  tdp=mad(TDP_ug_L, na.rm = TRUE), resp=mad(R_h, na.rm = TRUE))
is.na(vardfmad) <- sapply(vardfmad, is.infinite)
# NOTE(review): `deg` is not used in this chunk -- presumably used further
# down the original file; confirm before removing
deg <- 29.531
vardf <- droplevels(vardf)
vardfmad <- droplevels(vardfmad)
lakelist <- levels(vardfmad$Lake)
# need to create the difference in values to create a legible rose plot close to origin
# NOTE(review): `switch` shadows base::switch here; harmless locally but a
# confusing name choice
switch <- vardfmad
varmelt <- melt(switch, id.vars = c("Lake","Year"))
varmelt$Lake <- as.character(varmelt$Lake)
# within each Year x variable group, order lakes by value and record the rank
varmelt <- by(varmelt, list(varmelt$Year, varmelt$variable), function(x) {x <- x[order(x$value),]
x$order <- 1:nrow(x)
return(x)})
varmelt <- do.call(rbind, varmelt)
# successive differences so stacked segments add back up to each lake's value
vartest <- by(varmelt, list(varmelt$Year, varmelt$variable), function(x) {
  x$diff <- c(x$value[1], diff(x$value))
  return(x)
})
vartest <- do.call(rbind,vartest)
# drop one extreme Wascana TDP outlier that would dominate the plot scale
vartest$value[vartest$variable=="tdp"& vartest$Lake=="WW"&vartest$value > 1000] <- NA
## order data so that geom_col with identity position plots everything is the correct order
ordered_data <- vartest[order(-vartest$value), ]
ordered_data$Lake <- factor(ordered_data$Lake)
ordered_data$Lake <- mapvalues(ordered_data$Lake, from = c(levels(ordered_data$Lake)),
                               to = c("Buffalo Pound","Crooked","Diefenbaker","Katepwa","Last Mountain",
                                      "Pasqua","Wascana"))
## Polar ("rose") bar chart of one variable's per-lake annual values.
## df      -- long data frame with columns Year, value, variable, Lake
## varname -- which level of df$variable to draw
## Returns a ggplot object; the caller adds titles afterwards.
## Fix: removed the `segments` data frame the original built but never used,
## and made the return value explicit instead of an invisible assignment.
plotall <- function(df, varname) {
  ymax <- max(df$value[df$variable == varname], na.rm = TRUE)
  yinter <- ymax / 8  # spacing of the dotted circular grid lines
  plott <- ggplot(df[df$variable == varname, ], aes(x = factor(Year), y = value)) + #, fill=Lake
    theme_bw(base_size = 9, base_family = 'Arial') +
    geom_hline(yintercept = seq(0, ymax, yinter), size = 0.3, col = "grey60", lty = 3) +
    geom_vline(xintercept = seq(1, 21, 1), size = 0.3, col = 'grey30', lty = 2) +
    geom_col(position = 'identity', width = 1, size = 0.5, aes(fill = Lake)) +
    scale_x_discrete() +
    coord_polar(start = -0.15) +
    scale_fill_manual(name = "Lake",
                      values = c('#1b9e77', '#d95f02', '#7570b3', '#e7298a', '#66a61e', '#e6ab02', '#a6761d')) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
          axis.title.x = element_blank(), axis.title.y = element_blank(), plot.margin = unit(c(17, 0, 0, 0), 'pt'),
          plot.title = element_text(hjust = 0.5, size = 9))
  plott
}
## specify what I want: one rose plot per variable, with pretty math titles
vartitles <- c(expression("Chl"~a~mu*g~L^{-1}),expression("TDN"~mu*g~L^{-1}),
               expression("DOC"~mg~L^{-1}),"pH",expression("TDP"~mu*g~L^{-1}),
               expression("R"~O[2]~h^{-1}))
# NOTE(review): assumes levels(ordered_data$variable) come back in the same
# order as vartitles above -- confirm, since titles are matched by position
iwant <- c(levels(ordered_data$variable))
plotlist <- lapply(iwant, plotall, df=ordered_data)
# attach the titles pairwise to the plots
plotlist <- Map(function(x,y) {x <- x+
  ggtitle(y)}, plotlist, vartitles)
# one copy keeps its legend so it can be harvested for the shared legend row
nullplot <- plotlist[[1]] + theme(legend.position="bottom", legend.direction = "horizontal") +
  guides(fill=guide_legend(nrow=1, ncol = 7))
plotlist <- lapply(plotlist, function(x) {x + theme(legend.position = "none") +
    cowplot::panel_border(remove=TRUE)})
# convert to grobs and force identical panel sizes across all plots
plotlist <- lapply(plotlist, ggplotGrob)
plottest <- lapply(plotlist, function(x) {x$widths <- plotlist[[1]]$widths
x$heights <- plotlist[[1]]$heights
return(x)})
## create plots without legend
justplots <-
  cowplot::plot_grid(plotlist = plottest, ncol=3, labels = c(letters[1:length(plotlist)]),
                     label_fontface = "plain", hjust=-1.3, vjust=2.2) #vjust = 0.9, , hjust=-0.5
## create 'plot' of legend
legendplot <- cowplot::get_legend(nullplot)
## plot the result: panel grid on top, shared legend underneath
p <-
  cowplot::plot_grid( justplots, legendplot, ncol = 1, rel_heights = c(1, .3))
## Fix: pass `p` explicitly -- ggsave() otherwise saves last_plot(), which is
## not guaranteed to be the combined cowplot figure assembled above.  The
## other ggsave() calls in this script also pass `plot =` explicitly.
ggsave(plot = p, filename = "../docs/private/rangeplot.pdf", width = 10, height = 7)
## ================================================================================================
## what is the variability in pH when set against DIC?
## Annual pH range and annual means of DIC and pH, per lake.
dicsum <- ddply(regvars, .(Lake,Year), summarise,
                pHvar = max(pH_surface, na.rm = TRUE)-min(pH_surface, na.rm=TRUE),
                meanDIC=mean(TIC_mg_L, na.rm = TRUE), meanpH = mean(pH_surface, na.rm = TRUE))
# mean DIC against the annual pH range, one panel per lake
ggplot(dicsum, aes(y=meanDIC, x=pHvar, group=Lake)) +
  papertheme +
  geom_point() +
  facet_wrap(~Lake) +
  ylab("Mean annual DIC (mg/L)") + xlab("Annual range in pH")
# mean DIC against mean pH, one panel per lake
ggplot(dicsum, aes(y=meanDIC, x=meanpH, group=Lake)) +
  papertheme +
  geom_point() +
  facet_wrap(~Lake) +
  ylab("Mean annual DIC (mg/L)") + xlab("Mean annual pH")
|
09568306d3a833b100b0e509295c1a41a7a3d0fd | 7d3e7c1d0a80ac881103bef9c1ba8a2a8693fddd | /R_Tutorials/Significance_Testing/conf_5pct.R | 45bddecba1b080ad7d367f6f45f3bc64274ea931 | [] | no_license | wagafo/Applied_Statistics | aa6c7418e14bad7154b1405847147e962d42cd47 | 3d1b9fe9ec4f8187b1bcb80c0b7a2cfa75b1460a | refs/heads/master | 2021-06-26T21:30:43.063498 | 2020-11-22T12:14:44 | 2020-11-22T12:14:44 | 184,111,016 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 391 | r | conf_5pct.R | x <- seq(-8,8, length = 2000)
## Shade the upper 5% rejection region (x > qnorm(0.95)) of a standard
## normal density.  `x` is defined just above as seq(-8, 8, length = 2000).
dat <- data.frame(x=x, y=dnorm(x))
g <- ggplot(dat, aes(x = x, y = y)) + geom_line(size = 1.5) + xlim(-4, 4) + scale_y_continuous(limits=c(0,max(dat$y)))
# Area layer over the tail only: x values at or below the 95th percentile
# become NA so no area is drawn there.  suppressWarnings() hides the
# warnings from the low-level layer() call and from points outside xlim.
suppressWarnings(g <- g+ layer("area", stat="identity", position="identity",mapping = aes(x=ifelse(x>qnorm(.95),x,NA)),
                               params=list(fill="red",alpha=.5, na.rm=TRUE)) )
suppressWarnings(print(g))
|
cacc18aa530989ca94a875a06f8d72185db7e6ae | 97570cb7c88c56149495e2c3056ede83e81960ea | /extra/temp.R | fdbb47302cec842be495ad0b89270654147e6b28 | [] | no_license | Jess050/honours-project | 75438cc7cbef4ee6a4f15bc587019424a635f3b9 | a1a65a9b96c2f1515210968e2d39472f3dd57466 | refs/heads/master | 2020-03-16T06:50:14.921900 | 2018-11-19T07:39:12 | 2018-11-19T07:39:12 | 132,563,664 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,645 | r | temp.R | # analysing temperature data
# load libraries ----------------------------------------------------------
library(tidyverse)
library(ggpubr)
library(dplyr)
library(lubridate)
library(gridExtra)
library(ggplot2)
# load data ---------------------------------------------------------------
# SACTN coastal temperature data; the date column encodes year + fractional month
load("D:/honours/honours-project/data/SACTN_monthly_v4.2.Rdata")
temp <- SACTN_monthly_v4.2 %>%
  mutate(dec1 = date %% 1, # find fractional part of number
         year = ceiling(date), # find integer part of number
         int = round((dec1 * 12) + 1, 1), # convert fraction to integer presenting month
         month = month.abb[int]) # make month abbreviation
# descriptive statistics of temp data
# NOTE(review): na.omit(temp) has no effect here -- the result is discarded,
# not assigned back to temp; NA handling below relies on na.rm = TRUE instead
na.omit(temp)
summary(temp)
# create site data frame: SACTN site names to keep (duplicates reflect
# multiple nearby stations mapped to the same study site)
sites <- c("Muizenberg", "Muizenberg", "Muizenberg", "Kalk Bay", "Miller's Point", "Miller's Point", "Miller's Point",
           "Bordjies", "Buffelsbaai", "Kommetjie", "Kommetjie", "Kommetjie","Kommetjie", "Oudekraal", "Oudekraal", "Yzerfontein")
# separate index into site and source, and replace sites with the new sites df
temp1 <- temp %>%
  separate(index, c("site", "source"), "/") %>%
  filter(site %in% sites)
temp2 <-temp1 %>%
  select(-date, -dec1, -int)
# order of sites
# "St_James_N"
# "St James "
# "St_James_S"
# "Kalk_Bay"
# "Miller`s_A"
# "Miller`s_B"
# "Miller`s_C"
# "Black_rocks"
# "Buffels"
# "Olifantsbos"
# "Soetwater"
# "Slangkop"
# "Kommetjie"
# "Oudekraal"
# "Bakoven"
# "Yzerfontein"
# summarising by site and date : mean of month for all years
dat1 <- temp2 %>%
  group_by(site, month) %>%
  summarise(mn.temp = mean(temp, na.rm = TRUE),
            sd.temp = sd(temp, na.rm = TRUE))
# summarise by unique site and source, mean temp for all years for which there is data ?
# NOTE(review): `mean` shadows base::mean for the rest of this script
mean <- temp2 %>%
  group_by(site) %>%
  summarise(mn.temp = mean(temp, na.rm = TRUE),
            sd.temp = sd(temp, na.rm = TRUE))
#visualise: monthly temperature distributions per site, with overall site
# means overplotted as open triangles
ggplot(temp2, aes(x = site, y = temp)) +
  facet_wrap(~ month) +
  geom_boxplot(aes(colour = site)) +
  geom_point(data = mean, aes(x = site, y = mn.temp), shape = 2) +
  theme_classic()
# ggplot(mean, aes(x = site, y = mn.temp)) +
# geom_col(aes(fill = site)) +
# geom_errorbar(aes(ymin = mn.temp - sd.temp,
# ymax = mn.temp + sd.temp), size = 0.2)
################
# dont have to replicate sites, because morph isnt gona be combined with temp,
# as temp is temp data to be merged with wave data, those sites should correspond.
###############
# plot the monthly temperatures;
# create a facet for each site, and colour-code the src
# plot1 <- ggplot(temp1, aes(x = date, y = temp)) +
# geom_line(aes(colour = site), size = 0.2) +
# facet_wrap(~site, ncol = 3) +
# xlab(NULL) + ylab("Temperature (°C)") + ggtitle("coastal water temperature") +
# theme(axis.text.x = element_text(angle = 90, hjust = 1))
# plot1
# not using this
# map of sea surface temperature ------------------------------------------
# Load libraries
library(tidyverse)
library(ggpubr)
# Load data: coastline polygons, province borders, and binned annual
# coastal temperatures for the map below
load("data/south_africa_coast.RData")
load("data/sa_provinces.RData")
load("data/rast_annual.RData")
# Choose which SST product you would like to use
## The colour palette we will use for ocean temperature
cols11 <- c("#004dcd", "#0068db", "#007ddb", "#008dcf", "#009bbc",
            "#00a7a9", "#1bb298", "#6cba8f", "#9ac290", "#bec99a")
site_list <- read.csv("D:/honours/honours-project/data/sites_updated.csv", sep=";")
# select the required sites, merge long and lat with temperature of those sites
# remove sites and temperatures
new <- mean %>%
  select(site, mn.temp)
# replicate sites from temp data to nearest sites in site_list
# NOTE(review): these positional rbind/[-i,] edits are fragile -- they assume
# a fixed row order in `new`; verify against the current site list
newer <- rbind(new, new[1:6, ])[-12,][-7,]
newest <- rbind(newer, new[2:4,])
sites.new <- as.data.frame(c("Muizenberg", "Oudekraal", "Oudekraal", "Yzerfontein", "Miller's Point","Miller's Point","Miller's Point", "Kommetjie", "Kommetjie", "Kommetjie", "Muizenberg", "Muizenberg", "Kalk Bay"))
# # merge temp to site list
# merge <- cbind(newest, site_list) # problem???
#
# merged <- as.data.frame(merge)
# plot on map of western cape
ggplot(data = south_africa_coast, aes(x = lon, y = lat)) +
  # geom_raster(data = temp, aes(fill = temp)) +
  geom_polygon(colour = "black", fill = "grey70", aes(group = group)) +
  geom_path(data = sa_provinces, aes(group = group)) +
  geom_tile(data = rast_annual, aes(x = lon, y = lat, fill = bins),
            colour = "white", size = 0.1) + # Monthly coastal temperature values
  scale_fill_manual("Temp. (°C)", values = cols11) +
  coord_equal(xlim = c(17, 24), ylim = c(-36, -30), expand = 0)
# using too many datasets?
########
# NOTE(review): everything below is unfinished exploratory code: the
# dplyr::rename() call two lines down is not valid usage (rename takes
# new_name = old_name pairs, not a character vector), and the trailing
# dplyr::arrange() call after this chunk is a dangling no-op.
library(data.table)
site_list$site
#try <- setattr(site_list, "row.names", c("Muizenberg", "Oudekraal", "Oudekraal", "Yzerfontein", "Miller's Point","Miller's Point","Miller's Point", "Kommetjie", "Kommetjie", "Kommetjie", "Muizenberg", "Muizenberg", "Kalk Bay"))
sites.new <- as.data.frame(c("Muizenberg", "Oudekraal", "Oudekraal", "Yzerfontein", "Miller's Point","Miller's Point","Miller's Point", "Kommetjie", "Kommetjie", "Kommetjie", "Muizenberg", "Muizenberg", "Kalk Bay")) %>%
  dplyr::rename(site_new = c("Muizenberg", "Oudekraal", "Oudekraal", "Yzerfontein", "Miller's Point","Miller's Point","Miller's Point", "Kommetjie", "Kommetjie", "Kommetjie", "Muizenberg", "Muizenberg", "Kalk Bay"))
merge <- merge(newest, site_list, by = "site")
new <- c("Muizenberg", "Oudekraal", "Oudekraal", "Yzerfontein", "Miller's Point","Miller's Point","Miller's Point", "Kommetjie", "Kommetjie", "Kommetjie", "Muizenberg", "Muizenberg", "Kalk Bay")
dplyr::arrange() |
0cb70d132cc9e9cf08eb96d8c8f1fa07b3245e5c | 84ca86a2b97ef2856e131433d6fc6076a13534fe | /scripts/functions_richness.R | d436e4f88cb72db161670f15bfd91601abf3a095 | [] | no_license | oueme-fungi/oueme-fungi-transect | 8063947f4c58f2d1309329f4e432348ec66de6b9 | 143b9b746bfbb0213288ca753b242de470688abb | refs/heads/master | 2023-04-06T07:30:27.125367 | 2021-04-12T11:07:43 | 2021-04-12T11:07:43 | 162,268,464 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,470 | r | functions_richness.R | # functions to calculate targets for ASV/OTU richness comparisons
## Pool an ASV (sequence) table to per-sample read counts.
## seq_table -- matrix-like table, one row per demultiplexed file; rownames
##              encode sequencing run, plate, and well (parsed by the regex)
## platemap  -- data frame of plate/well combinations to keep
## Rows from the same run and well are summed, samples ("seqrun_well") with
## fewer than 100 total reads are dropped, and the result is a named list
## with one element per sample holding its nonzero per-ASV read counts.
nonzero_asv_counts <- function(seq_table, platemap) {
  tibble::as_tibble(seq_table, rownames = "file") %>%
    tidyr::extract(
      file,
      c("seq_run", "plate", "well"),
      regex = "([ipS][sbH][-_]\\d{3,4})_([0OT]{2}\\d)_([A-H]1?[0-9])_ITS2"
    ) %>%
    # keep only wells present in the plate map, then pool within run x well
    dplyr::semi_join(platemap, by = c("plate", "well")) %>%
    dplyr::select(-plate) %>%
    dplyr::group_by(seq_run, well) %>%
    dplyr::summarize_all(sum) %>%
    tidyr::unite("sample", c(seq_run, well)) %>%
    tidyr::pivot_longer(-sample, names_to = "ASV", values_to = "nread") %>%
    dplyr::filter(nread > 0) %>%
    # drop low-depth samples (< 100 reads total)
    dplyr::group_by(sample) %>%
    dplyr::filter(sum(nread) >= 100) %>%
    dplyr::ungroup() %>%
    dplyr::select(sample, nread) %>%
    tibble::deframe() %>%
    split(names(.))
}
## Read a tab-separated OTU table (first column "#OTU ID"), keep only the
## plate/well combinations present in `platemap`, pool reads from the same
## sequencing run and well, drop samples with fewer than 100 total reads,
## and return a named list (one element per "seqrun_well" sample) of the
## nonzero per-OTU read counts -- the OTU analogue of nonzero_asv_counts().
##
## Fix: the original called readd(platemap) here, re-reading the target from
## the drake cache instead of using the `platemap` argument, inconsistent
## with nonzero_asv_counts() above.  readr calls are now namespace-qualified
## to match the fully-qualified style used throughout this file.
nonzero_otu_counts <- function(seq_table_file, platemap) {
  readr::read_tsv(
    seq_table_file,
    col_types = readr::cols(.default = readr::col_integer(), `#OTU ID` = readr::col_character())
  ) %>%
    dplyr::rename(OTU = `#OTU ID`) %>%
    tidyr::pivot_longer(-OTU, names_to = "sample", values_to = "nread") %>%
    dplyr::filter(nread > 0) %>%
    tidyr::extract(
      col = sample,
      into = c("seq_run", "plate", "well"),
      regex = "([piS][bsH][-_]\\d{3,4})_(\\d{3})([A-H]1?[0-9])"
    ) %>%
    # use the function argument, not a fresh drake cache read
    dplyr::semi_join(platemap, by = c("plate", "well")) %>%
    tidyr::unite("sample", seq_run, well) %>%
    dplyr::group_by(sample, OTU) %>%
    dplyr::summarize_at("nread", sum) %>%
    dplyr::group_by(sample) %>%
    dplyr::filter(sum(nread) >= 100) %>%
    dplyr::ungroup() %>%
    dplyr::select(sample, nread) %>%
    tibble::deframe() %>%
    split(names(.))
}
## Turn per-sample rarefied richness values into a table of within-well
## strategy pairs for the pairwise comparison plots.  Each sample name is
## split into sequencing run and well, joined to the global `datasets`
## lookup (run -> tech/amplicon), and the self-join on well yields one row
## per unordered pair of strategies (kept once via the ordered-factor
## comparison strategy_x < strategy_y).
reshape_rarefy_data <- function(sample_rarefy) {
  # Ranking of strategies; the ordering drives the pairwise filter below.
  strategy_levels <- c("Ion Torrent Short", "Illumina Short", "PacBio Short",
                       "PacBio Long")
  long <- purrr::map_dfr(sample_rarefy, tibble::enframe)
  long <- tidyr::extract(
    long,
    name,
    c("seq_run", "well"),
    regex = "([iSp][sHb][-_][0-9]+)_([A-H]1?[0-9])"
  )
  long <- dplyr::left_join(long, datasets, by = "seq_run")
  long <- dplyr::mutate(
    long,
    strategy = factor(paste(tech, amplicon), ordered = TRUE,
                      levels = strategy_levels)
  )
  long <- dplyr::select(long, well, strategy, value)
  # Self-join on well, then keep each strategy pair exactly once.
  pairs <- dplyr::left_join(long, long, by = "well", suffix = c("_x", "_y"))
  dplyr::filter(pairs, strategy_x < strategy_y)
}
## Through-origin Deming regression for every strategy pair in the output
## of reshape_rarefy_data().  Returns one row per (strategy_x, strategy_y)
## with the slope, its 95% CI, an R^2 from the correlation of the model
## variables, and plotmath-ready labels for annotation.
fit_deming_slope <- function(rarefy_data) {
  # Fit one pair and collapse the result to a one-row tibble.
  fit_one <- function(dat) {
    fit <- deming::deming(value_y ~ value_x - 1, data = dat)
    tibble::as_tibble(c(
      list(slope = fit$coefficients[2]),
      as.list(fit$ci[2, ]),
      list(r_squared = cor(fit$model)[1, 2])
    ))
  }
  nested <- dplyr::group_nest(rarefy_data, strategy_x, strategy_y, .key = "deming")
  fits <- dplyr::mutate(nested, deming = purrr::map(deming, fit_one))
  fits <- tidyr::unnest(fits, deming)
  dplyr::mutate(
    fits,
    slope_label = sprintf("m==%.2f(%.2f-%.2f)",
                          slope, `lower 0.95`, `upper 0.95`),
    r2_label = sprintf("R^2==%.2f", r_squared)
  )
}
## Faceted scatterplots comparing per-sample richness between every pair of
## sequencing strategies, with the fitted through-origin Deming regression
## (solid blue), its 95% CI lines, slope/R^2 annotations, and a dashed
## grey 1:1 reference line.
## rarefy_data -- paired values from reshape_rarefy_data()
## demingfits  -- one row per strategy pair, from fit_deming_slope()
## cluster     -- axis label prefix, e.g. "ASV" or "OTU"
make_alpha_plot <- function(rarefy_data, demingfits, cluster) {
  ggplot(rarefy_data, aes(x = value_x, y = value_y)) +
    # 1:1 reference line
    geom_abline(slope = 1, intercept = 0, linetype = "dashed",
                color = "gray50") +
    geom_point(shape = 1, alpha = 0.8) +
    # Deming point estimate
    geom_abline(
      aes(slope = slope, intercept = 0),
      color = "blue",
      alpha = 0.8,
      size = 0.5,
      data = demingfits
    ) +
    # lower and upper 95% CI slopes, drawn fainter
    geom_abline(
      aes(slope = `lower 0.95`, intercept = 0),
      color = "blue",
      alpha = 0.5,
      size = 0.2,
      data = demingfits
    ) +
    geom_abline(
      aes(slope = `upper 0.95`, intercept = 0),
      color = "blue",
      alpha = 0.5,
      size = 0.2,
      data = demingfits
    ) +
    # parse = TRUE: labels are plotmath expressions built in fit_deming_slope()
    geom_text(
      aes(x = 0, y = 60, label = slope_label),
      hjust = 0,
      # color = "blue",
      size = 2,
      parse = TRUE,
      data = demingfits
    ) +
    geom_text(
      aes(x = 0, y = 52, label = r2_label),
      hjust = 0,
      # color = "blue",
      size = 2,
      parse = TRUE,
      data = demingfits
    ) +
    facet_grid(strategy_y ~ strategy_x, switch = "both") +
    theme_bw() +
    coord_equal() +
    theme(strip.background = element_blank(),
          strip.placement = "outside") +
    xlab(paste(cluster, "richness")) +
    ylab(paste(cluster, "richness"))
}
## Rarefaction (accumulation) curves from per-sample iNEXT results, one line
## per sample coloured by sequencing strategy, with a zoom panel on the
## low-depth region.  Extrapolated points are excluded; observed endpoints
## are drawn as open circles.
## sample_iNEXT -- nested list of iNEXT size-based data frames, names encode
##                 seq run and well (parsed by the regex below)
## cluster_type -- y-axis label prefix, e.g. "ASV" or "OTU"
## NOTE(review): relies on the globals `datasets` (run -> tech/amplicon) and
## `scale_color_strategy()` defined elsewhere in the project.
make_accum_plot <- function(sample_iNEXT, cluster_type) {
  purrr::flatten(sample_iNEXT) %>%
    dplyr::bind_rows(.id = "sample") %>%
    tidyr::extract(
      sample,
      into = c("seq_run", "well"),
      regex = "([ipS][sbH][-_][0-9]{3,4})_([A-H]1?[0-9])",
      remove = FALSE
    ) %>%
    dplyr::left_join(datasets, by = "seq_run") %>%
    dplyr::mutate(strategy = paste(tech, amplicon)) %>%
    dplyr::filter(method != "extrapolated") %>%
    ggplot(aes(x = m, y = qD, color = strategy)) +
    # line for rarefaction
    geom_vline(xintercept = 100, color = "gray50", linetype = "dashed") +
    # lines for each sample
    geom_line(aes(group = sample), alpha = 0.15) +
    # small points to represent the actual observed read depth and richness
    geom_point(data = ~dplyr::filter(., method == "observed"),
               alpha = 0.8, size = 1, shape = 1) +
    xlab("Number of rarefied reads") +
    ylab(paste(cluster_type, "richness")) +
    scale_color_strategy(name = NULL) +
    ggforce::facet_zoom(
      xlim = c(0, 3500),
      ylim = c(0, 350),
      zoom.size = 1
    ) +
    theme_bw() +
    theme(legend.position = "top")
}
|
7d0864257473512fd1aacd7e9af34daf5f317470 | fef507ac41bb7749840e7f5141ba87fde26d6f95 | /code/analysis/07_layer_differential_expression/deprecated/03_layer_DE.R | 982076cbbfb55b3f332bf0714ad9bd7cc43cdd4b | [] | no_license | LieberInstitute/spatialDLPFC | 84bc2f0b694473182d315b4d39ab3d18c2dd3536 | 840700ae86cdd414e024d9658b09dd11712ef470 | refs/heads/main | 2023-08-04T03:37:18.425698 | 2023-07-25T18:27:36 | 2023-07-25T18:27:36 | 314,001,778 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,657 | r | 03_layer_DE.R | # library(sgejobs)
# sgejobs::job_loop(
# loops = list(spetype = c(
# "wholegenome", "targeted"
# )),
# name = "03_model_pathology",
# create_shell = TRUE,
# queue = "bluejay",
# memory = "15G")
# To execute the script builder, use: sh 03_model_pathology.sh
# Required libraries
# library("getopt")
## Specify parameters
# spec <- matrix(c(
# "spetype", "s", 2, "character", "SPE spetype: wholegenome or targeted",
# "help", "h", 0, "logical", "Display help"
# ), byrow = TRUE, ncol = 5)
# opt <- getopt(spec = spec)
## if help was asked for print a friendly message
## and exit with a non-zero error code
# if (!is.null(opt$help)) {
# cat(getopt(spec, usage = TRUE))
# q(status = 1)
# }
## For testing
# if (FALSE) {
# opt <- list(spetype = "wholegenome")
# }
library("here")
library("sessioninfo")
library("SingleCellExperiment")
library(rafalib)
library("limma")
## output directory
# dir_rdata <- here::here("processed-data","rdata","spe", "07_layer_differential_expression", opt$spetype)
# dir.create(dir_rdata, showWarnings = FALSE, recursive = TRUE)
# stopifnot(file.exists(dir_rdata)) ## Check that it was created successfully
# dir_plots <- here::here("plots", "07_layer_differential_expression", opt$spetype)
# dir.create(dir_plots, showWarnings = FALSE, recursive = TRUE)
# stopifnot(file.exists(dir_plots))
# Cluster count k comes from the SGE array task id (one job per k).
k <- as.numeric(Sys.getenv("SGE_TASK_ID"))
## load spe data: pseudobulked SpatialExperiment for this k
spe_pseudo <-
    readRDS(
        file = here::here(
            "processed-data",
            "rdata",
            "spe",
            "pseudo_bulked_spe",
            paste0("spe_pseudobulk_bayesSpace_normalized_filtered_cluster_k", k, ".RDS")
        )
    )
# boxplots of spots per cluster
pdf(file = here::here("plots", "07_layer_differential_expression", paste0("ncells_per_cluster_k", k, ".pdf")))
boxplot(ncells ~ spe_pseudo$BayesSpace, data = colData(spe_pseudo))
dev.off()
## Extract the data
mat <- assays(spe_pseudo)$logcounts
# make mat_formula: no-intercept model of cluster plus covariates
# var_oi = paste0("bayesSpace_harmony_",k)
var_oi <- "BayesSpace"
covars <- c("region", "age", "sex")
mat_formula <- eval(str2expression(paste("~", "0", "+", var_oi, "+", paste(covars, collapse = " + "))))
# make sure everything is a factor (age stays numeric deliberately)
colData(spe_pseudo)[[var_oi]] <- as.factor(colData(spe_pseudo)[[var_oi]])
colData(spe_pseudo)$region <- as.factor(colData(spe_pseudo)$region)
colData(spe_pseudo)$age <- as.numeric(colData(spe_pseudo)$age)
colData(spe_pseudo)$sex <- as.factor(colData(spe_pseudo)$sex)
colData(spe_pseudo)$diagnosis <- as.factor(colData(spe_pseudo)$diagnosis)
colData(spe_pseudo)$subject <- as.factor(colData(spe_pseudo)$subject)
## Compute correlation: consensus within-sample correlation for limma,
## treating sample_id as the blocking variable
## Adapted from https://github.com/LieberInstitute/Visium_IF_AD/blob/7973fcebb7c4b17cc3e23be2c31ac324d1cc099b/code/10_spatial_registration/01_spatial_registration.R#L134-L150
mod <- model.matrix(mat_formula,
    data = colData(spe_pseudo)
)
message(Sys.time(), " running duplicateCorrelation()")
corfit <- duplicateCorrelation(mat, mod,
    block = spe_pseudo$sample_id
)
message("Detected correlation: ", corfit$consensus.correlation)
######### ENRICHMENT t-stats ######################
## One-vs-all models: for each cluster, an indicator (res) marks its samples
## and a moderated t-test contrasts it against all other clusters.
## Adapted from https://github.com/LieberInstitute/HumanPilot/blob/7049cd42925e00b187c0866f93409196dbcdd526/Analysis/Layer_Guesses/layer_specificity.R#L1423-L1443
# cluster_idx <- splitit(spe_pseudo$bayesSpace_harmony_9) #split by layers not path_grups
cluster_idx <- splitit(colData(spe_pseudo)[, var_oi])
message(Sys.time(), " running the enrichment model")
eb0_list <- lapply(cluster_idx, function(x) {
    res <- rep(0, ncol(spe_pseudo))
    res[x] <- 1
    # ~ res + region + age + sex, rebuilt per cluster
    res_formula <- paste("~", "res", "+", paste(covars, collapse = " + "))
    m <- with(
        colData(spe_pseudo),
        model.matrix(eval(str2expression(res_formula)))
    )
    eBayes(
        lmFit(
            mat,
            design = m,
            block = spe_pseudo$sample_id,
            correlation = corfit$consensus.correlation
        )
    )
})
######### PAIRWISE t-stats ######################
## Adapted from https://github.com/LieberInstitute/HumanPilot/blob/7049cd42925e00b187c0866f93409196dbcdd526/Analysis/Layer_Guesses/layer_specificity.R#L1355-L1383
## Build a group model
## NOTE(review): unlike `mod` above, this pairwise design has no covariates,
## only the cluster means -- confirm this is intended
mod_p <- model.matrix(~ 0 + BayesSpace,
    data = colData(spe_pseudo)
)
# colnames(mod) <- gsub("bayesSpace_harmony_9", "", colnames(mod))
message(Sys.time(), " running the baseline pairwise model")
fit <-
    lmFit(
        mat,
        design = mod_p,
        block = spe_pseudo$sample_id,
        correlation = corfit$consensus.correlation
    )
eb <- eBayes(fit)
## Define the contrasts for each pathology group vs another one
message(Sys.time(), " run pairwise models")
# cluster_combs <- combn(colnames(mod)[grep("BayesSpace",colnames(mod))], 2)
# all pairwise cluster contrasts, e.g. "BayesSpace1-BayesSpace2"
cluster_combs <- combn(colnames(mod_p), 2)
cluster_constrats <- apply(cluster_combs, 2, function(x) {
    z <- paste(x, collapse = "-")
    makeContrasts(contrasts = z, levels = mod_p)
})
rownames(cluster_constrats) <- colnames(mod_p)
colnames(cluster_constrats) <-
    apply(cluster_combs, 2, paste, collapse = "-")
# cluster_constrats <- cluster_constrats[grep("BayesSpace",rownames(cluster_constrats)),]
eb_contrasts <- eBayes(contrasts.fit(fit, cluster_constrats))
######### ANOVA t-stats ######################
## Adapted from https://github.com/LieberInstitute/HumanPilot/blob/7049cd42925e00b187c0866f93409196dbcdd526/Analysis/Layer_Guesses/layer_specificity_fstats.R#L24-L85
## From layer_specificity.R
## Fit the full cluster + covariate model to one (possibly subset)
## SpatialExperiment and return the eBayes fit for F-statistic extraction.
## NOTE(review): this function uses the globals `mod` (design built from the
## FULL spe_pseudo) and `var_oi`; it is only safe to call with spe_pseudo
## itself or a subset that preserves row order -- confirm before reusing.
## Blocking here is on subject, not sample_id as in the models above.
fit_f_model <- function(sce) { # will want to do this with and without white matter, look at original code from link above
    message(paste(Sys.time(), "starting the model run"))
    ## Extract the data
    mat <- assays(sce)$logcounts
    ## For dropping un-used levels
    # sce$bayesSpace_harmony_9 <- factor(sce$bayesSpace_harmony_9)
    colData(sce)[[var_oi]] <- as.factor(colData(sce)[[var_oi]])
    ## Build a group model
    # already made in beginning of script #remember to adjust for age or sex
    ## Takes like 2 min to run
    corfit <-
        duplicateCorrelation(mat, mod, block = sce$subject)
    message(paste(Sys.time(), "correlation:", corfit$consensus.correlation))
    fit <-
        lmFit(
            mat,
            design = mod,
            block = sce$subject,
            correlation = corfit$consensus.correlation
        )
    eb <- eBayes(fit)
    return(eb)
}
# Fit the ANOVA model; the list name ("noWM") labels the resulting columns.
ebF_list <-
    lapply(list("noWM" = spe_pseudo), fit_f_model)
## Extract F-statistics
f_stats <- do.call(cbind, lapply(names(ebF_list), function(i) {
    x <- ebF_list[[i]]
    # keep only cluster-mean coefficients: drop the intercept column (1)
    # and the trailing covariate columns
    # NOTE(review): the "- 4" assumes a fixed number of covariate columns
    # at the end of the design -- re-check if covars changes
    y <- ncol(x$coefficients) - 4
    top <-
        topTable(
            x,
            coef = 2:y,
            # coef = 2:ncol(x$coefficients), # CAREFUL make sure you pick columns from mod that are for your coefiicients of interest. will have 8
            sort.by = "none",
            number = length(x$F)
        )
    # identical(p.adjust(top$P.Value, 'fdr'), top$adj.P.Val)
    res <- data.frame(
        "f" = top$F,
        "p_value" = top$P.Value,
        "fdr" = top$adj.P.Val,
        "AveExpr" = top$AveExpr,
        stringsAsFactors = FALSE
    )
    colnames(res) <- paste0(i, "_", colnames(res))
    return(res)
}))
# attach gene identifiers (rows are in the original spe order)
f_stats$ensembl <- rownames(spe_pseudo)
f_stats$gene <- rowData(spe_pseudo)$gene_name
rownames(f_stats) <- NULL
head(f_stats)
# persist all three sets of statistics for downstream use
save(
    f_stats,
    eb0_list,
    eb_contrasts,
    file = here::here("processed-data", "rdata", "spe", "07_layer_differential_expression", paste0("cluster_modeling_results_k", k, ".Rdata"))
)
## Reproducibility information
print("Reproducibility information:")
Sys.time()
proc.time()
options(width = 120)
session_info()
|
e37611236f5f52296fc1d52f06418311763bd4c8 | 27d63cba653bba104d6632d045195c54b52226ad | /man/unlevered_beta.Rd | 3966d0260f63f92b25b60420713e2444c1158375 | [] | no_license | olaoritsland/finmod | e6cb3a5083a27cb6b53563f5c759fd6403e5416c | 1e09e682130b916709f01da6f2a529977fdb6ce5 | refs/heads/main | 2023-05-01T12:48:02.855987 | 2021-05-04T15:48:40 | 2021-05-04T17:30:54 | 350,452,438 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 425 | rd | unlevered_beta.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unlevered_beta.R
\name{unlevered_beta}
\alias{unlevered_beta}
\title{Unlevered beta}
\usage{
unlevered_beta(
url = "http://www.stern.nyu.edu/~adamodar/pc/datasets/betas.xls",
industry,
time_period = "last",
...
)
}
\arguments{
\item{url}{}
\item{industry}{}
\item{time_period}{}
\item{...}{}
}
\value{
}
\description{
Unlevered beta
}
|
127e62e4375b1a5738f5b5a3009e7d43cf03eaca | 462dc2c286ffa1f61f04ee130ab83adb7be12bda | /man/bigsearch.Rd | c007d8a529e440b436b60c4f7c113d0b4c006627 | [
"MIT"
] | permissive | gaurav/rvertnet | 5b5c26b4247fb01989ff84d3d8afc08a031bbab9 | 0c65bb5c9dfe4c8f81242a8108d66ee1c89b0542 | refs/heads/master | 2021-01-23T04:40:23.252882 | 2017-01-27T20:38:22 | 2017-01-27T20:38:22 | 80,369,929 | 0 | 0 | null | 2017-01-29T20:20:12 | 2017-01-29T20:20:12 | null | UTF-8 | R | false | true | 4,450 | rd | bigsearch.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bigsearch.R
\name{bigsearch}
\alias{bigsearch}
\title{Request to download a large number of VertNet records.}
\usage{
bigsearch(specificepithet = NULL, genus = NULL, family = NULL,
order = NULL, class = NULL, compact = FALSE, year = NULL,
date = NULL, mappable = NULL, error = NULL, continent = NULL,
cntry = NULL, stateprovince = NULL, county = NULL, island = NULL,
igroup = NULL, inst = NULL, id = NULL, catalognumber = NULL,
collector = NULL, type = NULL, hastypestatus = NULL, media = NULL,
rank = NULL, tissue = NULL, resource = NULL, rfile, email,
verbose = TRUE, ...)
}
\arguments{
\item{specificepithet}{(character) Taxonomic specific epithet, e.g. (sapiens
in Homo sapiens)}
\item{genus}{(character) Taxonomic genus}
\item{family}{(character) Taxonomic family}
\item{order}{(character) Taxonomic order}
\item{class}{(character) Taxonomic class}
\item{compact}{Return a compact data frame (logical)}
\item{year}{Year (numeric) or range of years designated by comparison
operators "<", ">", "<=" or ">=". You can pass in more than one of these
queries, in a vector. See example below. (character)}
\item{date}{Event date associated with this occurrence record; yyyy-mm-dd
or the range yyyy-mm-dd/yyyy-mm-dd (character)}
\item{mappable}{Record includes valid coordinates in decimal latitude and
decimal longitude (logical)}
\item{error}{Coordinate uncertainty in meters (numeric) or range of
uncertainty values designated by comparison operators "<", ">", "<="
or ">=" (character)}
\item{continent}{Continent to search for occurrence (character)}
\item{cntry}{Country to search for occurrence (character)}
\item{stateprovince}{State or province to search for occurrence (character)}
\item{county}{County to search for occurrence (character)}
\item{island}{Island to search for occurrence (character)}
\item{igroup}{Island group to search for occurrence (character)}
\item{inst}{Code name for the provider/institution of record (character)}
\item{id}{Provider's unique identifier for this occurrence record (character)}
\item{catalognumber}{Provider's catalog number or other ID for this record
(character)}
\item{collector}{Collector name (character)}
\item{type}{Type of record; "specimen" or "observation" (character)}
\item{hastypestatus}{Specimen associated with this record is identified as a
holotype, paratype, neotype, etc. (character)}
\item{media}{Record also references associated media, such as a film or
video (logical)}
\item{rank}{TBD (numeric)}
\item{tissue}{Record is likely to reference tissues (logical)}
\item{resource}{Identifier for the resource/dataset from which the record was
indexed (character)}
\item{rfile}{A name for the results file that you will download (character). Required.}
\item{email}{An email address where you can be contacted when your records are
ready for download (character). Required.}
\item{verbose}{Print progress and information messages. Default: \code{TRUE}}
\item{...}{Curl arguments passed on to \code{\link[httr]{GET}}}
}
\value{
Prints messages on progress, but returns NULL
}
\description{
Specifies a termwise search (like \code{\link{searchbyterm}}) and requests that all available
records be made available for download as a tab-delimited text file.
}
\details{
\code{\link{bigsearch}} allows you to request records as a tab-delimited text file.
This is the best way to access a large number of records, such as when your search
results indicate that >1000 records are available. You will be notified by email
when your records are ready for download.
}
\section{Reading data}{
We suggest reading data in with \code{fread()} from the package \pkg{data.table} - as it's
very fast for the sometimes large datasets you will get from using this function,
and is usually robust to formatting issues.
}
\examples{
\dontrun{
# replace "big@search.luv" with your own email address
bigsearch(genus = "ochotona", rfile = "pikaRecords", email = "big@search.luv")
# Pass in curl options for curl debugging
library("httr")
bigsearch(genus = "ochotona", rfile = "pikaRecords", email = "big@search.luv", config=verbose())
# Use more than one year query
bigsearch(class = "aves", year = c(">=1976", "<=1986"),
rfile = "test-bigsearch1", email = "big@search.luv")
}
}
\references{
\url{https://github.com/VertNet/webapp/wiki/The-API-search-function}
}
|
21d69bdbd659ca8ba542105ef452a8d296a6a826 | 29585dff702209dd446c0ab52ceea046c58e384e | /lessR/R/Histogram.R | 75075f0814a594a43bbb8cb6863f7f4fa9a962e0 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,343 | r | Histogram.R | Histogram <-
function(x=NULL, data=mydata, n.cat=getOption("n.cat"), Rmd=NULL,
color.fill=getOption("color.fill.bar"),
color.stroke=getOption("color.stroke.bar"),
color.bg=getOption("color.bg"),
color.grid=getOption("color.grid"),
color.box=getOption("color.box"),
color.reg="snow2", over.grid=FALSE,
cex.axis=0.75, color.axis="gray30",
rotate.values=0, offset=0.5,
breaks="Sturges", bin.start=NULL, bin.width=NULL, bin.end=NULL,
prop=FALSE, cumul=c("off", "on", "both"), hist.counts=FALSE,
digits.d=NULL, xlab=NULL, ylab=NULL, main=NULL, sub=NULL,
quiet=getOption("quiet"),
pdf.file=NULL, pdf.width=5, pdf.height=5,
fun.call=NULL, ...) {
# Histogram of one or more numeric variables, with lessR text output
# (summary statistics, frequency distribution, outliers) and optional
# pdf file output. When exactly one variable is plotted, returns an
# "out_all" object carrying the text pieces plus bin statistics; for
# multiple variables the per-variable output is printed instead.
# NOTE(review): depends on lessR internal helpers (.nodf, .xcheck,
# .hst.main, .ss.numeric, .outliers, .is.num.cat, .pdfname, .opendev,
# .ncat, .showfile, .showfile2, .dist.Rmd) and on lessR-managed global
# options (n.cat, quiet, color.*), so it cannot run outside the package.
if (is.null(fun.call)) fun.call <- match.call()
# limit actual argument to alternatives, perhaps abbreviated
cumul <- match.arg(cumul)
# translate the "off" keyword into R's "transparent" color
for (i in 1:length(color.fill))
if (color.fill[i] == "off") color.fill[i] <- "transparent"
for (i in 1:length(color.stroke))
if (color.stroke[i] == "off") color.stroke[i] <- "transparent"
if (color.bg == "off") color.bg <- "transparent"
if (color.grid == "off" ) color.grid <- "transparent"
if (color.box == "off") color.box <- "transparent"
# guarantee a .pdf extension on a user-supplied file name
if (!is.null(pdf.file))
if (!grepl(".pdf", pdf.file)) pdf.file <- paste(pdf.file, ".pdf", sep="")
kf <- FALSE
lbls <- FALSE
dots <- list(...) # check for deprecated parameters
if (length(dots) > 0) {
old.nm <- c("col.fill", "col.stroke", "col.bg", "col.grid", "col.box",
"col.reg", "col.axis")
for (i in 1:length(dots)) {
if (names(dots)[i] %in% old.nm) {
cat("\n"); stop(call.=FALSE, "\n","------\n",
"options that began with the abbreviation col now begin with ",
"color \n\n")
}
if (names(dots)[i] == "knitr.file") kf <- TRUE
if (names(dots)[i] == "labels") lbls <- TRUE
}
}
# reject the removed 'labels' and 'knitr.file' arguments with guidance
if (lbls) {
cat("\n"); stop(call.=FALSE, "\n","------\n",
"labels has multiple definitions in R\n",
"Instead use hist.counts to get the bar labels displayed\n\n")
}
if (kf) {
cat("\n"); stop(call.=FALSE, "\n","------\n",
"knitr.file no longer used\n",
"Instead use Rmd for R Markdown file\n\n")
}
# breaks and bin.start both specify where binning begins -- only one allowed
if (is.numeric(breaks) && !is.null(bin.start)) {
cat("\n"); stop(call.=FALSE, "\n","------\n",
"Choose only one option to specify a start value.\n",
"Either choose the option breaks or the option bin.start.\n\n")
}
# get actual variable name before potential call of data$x
x.name <- deparse(substitute(x)) # could be a list of var names
options(xname = x.name)
df.name <- deparse(substitute(data))
options(dname = df.name)
pdf.nm <- FALSE
if (!missing(pdf.file)) pdf.nm <- TRUE
# -----------------------------------------------------------
# establish if a data frame, if not then identify variable(s)
# Outcome: 'data' is reduced to a data frame of only the selected
# variable(s), whether x named a column, a global vector, or a data frame.
if (!missing(x)) {
if (!exists(x.name, where=.GlobalEnv)) { # x not in global env, in df
.nodf(df.name) # check to see if data frame container exists
.xcheck(x.name, df.name, data) # var in df?, vars lists not checked
all.vars <- as.list(seq_along(data)) # even if only a single var
names(all.vars) <- names(data) # all data in data frame
x.col <- eval(substitute(x), envir=all.vars) # col num selected vars
if (!("list" %in% class(data))) {
data <- data[, x.col]
if (length(x.col) == 1) { # x is 1 var
if (!is.numeric(data)) {
cat("\n"); stop(call.=FALSE, "\n","------\n",
"A histogram is only computed from a numeric variable\n",
"For the frequencies of a categorical variable:\n\n",
"  Plot(", x.name, ", topic=\"count\")\n",
"or\n",
"  BarChart(", x.name, ")\n\n", sep="")
}
data <- data.frame(data)
names(data) <- x.name
}
}
else { # class of data is "list"
data <- data.frame(data[[x.col]])
names(data) <- x.name
}
}
else { # x is in the global environment (vector or data frame)
if (is.data.frame(x)) # x a data frame
data <- x
else { # x a vector in global
if (!is.function(x))
data <- data.frame(x) # x is 1 var
else
data <- data.frame(eval(substitute(data$x))) # x is 1 var
names(data) <- x.name
}
}
}
# ---------------
# do the analysis
go.pdf <- FALSE
if (pdf.nm || ncol(data) > 1) go.pdf <- TRUE
# suppress per-variable suggestions while looping over many variables
if (ncol(data) > 1) {
sug <- getOption("suggest")
options(suggest = FALSE)
}
# one histogram (and optionally one pdf file) per selected column
for (i in 1:ncol(data)) {
nu <- length(unique(na.omit(data[,i])))
x.name <- names(data)[i]
options(xname = x.name)
if (is.numeric(data[,i])) {
# let 1 variable go through, even if num.cat
if (ncol(data) == 1 || !.is.num.cat(data[,i], n.cat)) {
pdf.fnm <- .pdfname("Hist", x.name, go.pdf, pdf.nm, pdf.file)
.opendev(pdf.fnm, pdf.width, pdf.height)
txss <- ""
if (!quiet) {
ssstuff <- .ss.numeric(data[,i], digits.d=digits.d, brief=TRUE)
txss <- ssstuff$tx
}
# nothing returned if quiet=TRUE
stuff <- .hst.main(data[,i], color.fill, color.stroke, color.bg,
color.grid, color.box, color.reg,
over.grid, cex.axis, color.axis, rotate.values, offset,
breaks, bin.start, bin.width,
bin.end, prop, hist.counts, cumul, xlab, ylab, main, sub,
quiet, fun.call=fun.call, ...)
txsug <- stuff$txsug
if (is.null(txsug)) txsug <- ""
txdst <- stuff$ttx
if (is.null(txdst)) txdst <- ""
txotl <- ""
if (!quiet) {
txotl <- .outliers(data[,i])
if (length(txotl)==0) txotl <- "No outliers"
}
if (ncol(data) > 1) { # for a variable range, print the text output
class(txss) <- "out_piece"
class(txdst) <- "out_piece"
class(txotl) <- "out_piece"
output <- list(out_ss=txss, out_freq=txdst, out_outliers=txotl)
class(output) <- "out_all"
print(output)
}
if (go.pdf) {
dev.off()
if (!quiet) .showfile(pdf.fnm, "Histogram")
}
} # nu > n.cat
else
# numeric but effectively categorical (few unique values): advise instead
.ncat("Histogram", x.name, nu, n.cat)
} # is.numeric(data[,i])
} # for
if (ncol(data) > 1) options(suggest = sug)
dev.set(which=2) # reset graphics window for standard R functions
# single-variable case: optionally write R Markdown, then build the
# composite return object from the text pieces and bin statistics
if (ncol(data)==1) {
# R Markdown
txkfl <- ""
if (!is.null(Rmd)) {
if (!grepl(".Rmd", Rmd)) Rmd <- paste(Rmd, ".Rmd", sep="")
txknt <- .dist.Rmd(x.name, df.name, fun.call, digits.d)
cat(txknt, file=Rmd, sep="\n")
txkfl <- .showfile2(Rmd, "R Markdown instructions")
}
class(txsug) <- "out_piece"
class(txss) <- "out_piece"
class(txdst) <- "out_piece"
class(txotl) <- "out_piece"
class(txkfl) <- "out_piece"
output <- list(type="Histogram",
call=fun.call,
out_suggest=txsug, out_ss=txss, out_outliers=txotl, out_freq=txdst,
out_file=txkfl,
bin_width=stuff$bin.width, n_bins=stuff$n.bins,
breaks=stuff$breaks,
mids=stuff$mids, counts=stuff$counts, prop=stuff$prop,
counts_cumul=stuff$counts_cum, prop_cumul=stuff$prop_cum)
class(output) <- "out_all"
return(output)
}
}
|
88feb704e6fcef005fbe00f441a8be45a935288e | cc8df90cfee6af22b716353c1610fe9f4a96e59c | /Visualization.R | 92ed44169e10fbb1f16c9726721b195ac1401bb0 | [] | no_license | kevingeorge0123/Data-Analysis-using-R-Hypothosis_Regression | 8578ea79deb90af1570bd01deb2e70bb426651f2 | 3602ddb6472f54b651ad6ebea8a64e754cf2fbb6 | refs/heads/main | 2022-12-22T06:22:41.759882 | 2020-10-04T20:09:17 | 2020-10-04T20:09:17 | 301,217,342 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,428 | r | Visualization.R | library(tidyverse)
# One-time setup (ggplot2 also ships as part of the tidyverse).
install.packages("ggplot2")

# Human Development Index vs Corruption Perceptions Index data.
hdi <- read.csv("D:/ONLINE COURSES/Udemy/R/hdi-cpi.csv", stringsAsFactors = FALSE)

library(tibble)
library(ggplot2)

hdi <- as_tibble(hdi)
hdi

# Base mapping: CPI on the x axis, HDI on the y axis.
cpi_vs_hdi <- ggplot(hdi, aes(CPI.2015, HDI.2015))
cpi_vs_hdi

# Progressively refined scatter plots of the same mapping.
cpi_vs_hdi + geom_point()
cpi_vs_hdi + geom_point() + facet_grid(Region ~ .)
cpi_vs_hdi + geom_point(aes(color = Region), size = 3) + facet_grid(Region ~ .) + stat_smooth()
cpi_vs_hdi + geom_point(aes(color = Region), size = 3) + facet_grid(Region ~ .) + stat_smooth() + coord_cartesian(xlim = c(0.75, 1))
cpi_vs_hdi + geom_point(aes(color = Region), size = 3) + stat_smooth() + theme_minimal()

# CPI distribution (named cpi_dist so base R's hist() is not shadowed).
cpi_dist <- ggplot(data = hdi, aes(x = CPI.2015))
cpi_dist + geom_histogram()
cpi_dist + geom_histogram(binwidth = 0.2, color = "darkslategrey", fill = "darkslategrey", alpha = 0.5) + ggtitle("CPI") + labs(y = "number", x = "CPI")
###########
####
# Load the tidyverse (dplyr, ggplot2, ...) and the ggthemes extension.
library(tidyverse)
install.packages("ggthemes")  # NOTE(review): installing inside a script re-runs on every execution
library(ggthemes)

# Employee data; the first 23 lines of the file are preamble text.
emp <- read.csv("D:/ONLINE COURSES/Udemy/R/employee-data1.csv", skip = 23, stringsAsFactors = FALSE)
emp <- as_tibble(emp)  # as.tibble() is deprecated; as_tibble() is the current name
emp$gender <- as.factor(emp$gender)
emp$title <- as.factor(emp$title)
emp

# BUG FIX: the original used filter(emp, "salary" > 45000), which compares
# the constant string "salary" with "45000" lexicographically -- always
# TRUE, so no rows were ever dropped. Filter on the salary column instead.
emp.a <- filter(emp, salary > 45000)

# Histogram of the high-salary subset, in $5000 brackets.
hist <- ggplot(emp.a, aes(x = salary))
hist + geom_histogram(binwidth = 5000, color = "darkslategray",
                      fill = "darkseagreen2", alpha = 0.7) +
  labs(title = "Salary distribution in the employee data",
       x = "Salary", y = "Number of employees in the salary bracket") +
  theme_solarized_2(light = FALSE, base_size = 15, base_family = "serif")
####
library(tidyverse)
library(ggthemes)

# Stacked bar chart: one bar per job title, segmented by gender.
title_by_gender <- ggplot(emp, aes(title, fill = gender))
title_by_gender +
  geom_bar() +
  theme_fivethirtyeight() +
  scale_fill_manual(values = c("chartreuse4", "darkorange")) +
  labs(title = "Job Positions by Gender",
       y = "Employee count",
       x = "Job position")
# NOTE: theme_fivethirtyeight() does not render the x/y axis titles set
# above; switch to another theme if you need them. Likewise, customising
# beyond font type/size (e.g. theme() with legend.position) means building
# your own theme on top of a base one -- theme() takes an abundance of
# arguments for virtually every aspect of the visualisation.
# See scale_fill_manual() / scale_color_manual() for manual palettes.

# Inverted mapping: one bar per gender, segmented by job title.
gender_by_title <- ggplot(emp, aes(gender, fill = title))
gender_by_title +
  geom_bar() +
  theme_fivethirtyeight() +
  scale_fill_manual(values = c("magenta", "darkorange", "midnightblue",
                               "springgreen4", "brown1", "gold")) +
  labs(title = "Job Positions by Gender")
# This aes(x = gender, fill = title) orientation is much harder to read;
# about all it makes obvious is that the distribution is suspiciously
# symmetric, i.e. the data has been artificially generated.
library(tidyverse)
library(ggthemes)
install.packages("wesanderson")
library(wesanderson)

# Employee data; the first 23 lines of the file are preamble text.
emp <- read.csv("D:/ONLINE COURSES/Udemy/R/employee_data.csv", skip = 23, stringsAsFactors = FALSE)
emp <- as_tibble(emp)  # as.tibble() is deprecated; as_tibble() is the current name
emp$gender <- as.factor(emp$gender)
emp$title <- as.factor(emp$title)

# BUG FIX: the original used filter(emp, "salary" > 45000), which compares
# the literal string "salary" with "45000" -- always TRUE, so nothing was
# filtered. Filter on the salary column instead.
emp.a <- filter(emp, salary > 45000)  # NOTE(review): emp.a is unused below; kept for parity with the lesson

# Boxplots of salary by job position, with jittered points colored by gender.
boxx <- ggplot(emp, aes(x = title, y = salary))
my.bp <- boxx + geom_boxplot(outlier.color = "orangered1", outlier.shape = 3) +
  geom_jitter(width = 0.3, aes(color = gender)) +
  ggtitle("Salary distribution", subtitle = "based on position and gender") +
  ylab("Salary") + xlab("Job position") +
  theme_economist_white() +
  theme(legend.position = "right", axis.text.x = element_text(angle = 90, hjust = 1)) +
  coord_flip() # this can be added if the axis.text.x doesn't make sense to you; it's also easier to read

# FIX: the "Darjeeling" palette was renamed "Darjeeling1" in wesanderson
# >= 0.3.6; the old name errors on current installs.
my.bp + scale_color_manual(values = wes_palette(name = "Darjeeling1", n = 2))
# or
my.bp + scale_color_brewer(palette = "Set1")
# the palette is part of the RColorBrewer package which you should already have on your
# machines because it comes with the tidyverse
f3ce29c5c5636fdd4c3d8bf774f41363dd196eea | f02395164acf0d52fa73854d5efe7af5df165345 | /additional_analyses/MT_genomes/MT_genomes_new.R | 85684f4400da13984f1748e056ad42a8f201a17c | [] | no_license | shaman-narayanasamy/IMP_manuscript_analysis | 667e5da70c7216530e74aa9833dd19d8aba2abbd | 0b196ae1232f838a6827333b181627a575f777a6 | refs/heads/master | 2021-01-17T15:07:34.549780 | 2016-10-01T11:33:13 | 2016-10-01T11:33:13 | 69,734,814 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,615 | r | MT_genomes_new.R | #!/bin/R
## Libraries for table output, reshaping (reshape::melt) and plotting.
require(xtable)
require(genomeIntervals)
require(ggplot2)
require(gtable)
require(beanplot)
require(reshape)

## Genome-recovery statistics, one file per reference genome ("-" encodes
## missing values). A Dataset label column is prepended and columns 2/4
## are renamed for plotting.
ecoli.stats <- read.table("/home/shaman/Work/Data/integrated-omics-pipeline/MS_analysis/MT_genomes/HF_ref/Escherichia_coli_P12b-stats.tsv", header=TRUE, na.strings="-")
ecoli.stats <- cbind.data.frame(Dataset=c("HF1", "HF2", "HF3", "HF4", "HF5"), ecoli.stats)
colnames(ecoli.stats)[c(2,4)] <- c("Species", "IMP-megahit")

collinsella.stats <- read.table("/home/shaman/Work/Data/integrated-omics-pipeline/MS_analysis/MT_genomes/HF_ref/Collinsella_intestinalis_DSM_13280-stats.tsv", header=TRUE, na.strings="-")
collinsella.stats <- cbind.data.frame(Dataset=c("HF1", "HF2", "HF3", "HF4", "HF5"), collinsella.stats)
colnames(collinsella.stats)[c(2,4)] <- c("Species", "IMP-megahit")

recovery <- rbind.data.frame(ecoli.stats, collinsella.stats)

## Visualize HF1 data: keep only the assembler columns, then melt to long
## format for ggplot.
hf1.cols <- which(colnames(recovery) %in% c("Dataset",
                                            "Species",
                                            "IMP", "IMP-megahit", "MetAmos_MGMT", "MOCAT_MGMT",
                                            "IMP_MG", "MetAmos_MG", "MOCAT_MG",
                                            "IMP_MT"))
hf1.wide <- recovery[which(recovery$Dataset=="HF1"), hf1.cols]
hf1.long <- melt(hf1.wide)
colnames(hf1.long)[3:4] <- c("Assembly", "value")

pdf("/home/shaman/Documents/Publications/IMP-manuscript/figures/second_iteration/genome_recovery.pdf", width=5, height=2.5)
ggplot(hf1.long, aes(x=Assembly, y=value, fill=Assembly)) +
  geom_bar(stat="identity", position="dodge") +
  ylab("% recovery") +
  facet_grid(.~Species) +
  theme_bw() +
  theme(legend.position="bottom",
        axis.text.x=element_blank(),
        axis.ticks.x=element_blank())
dev.off()

## Visualize all data for E coli (all five datasets).
ecoli.cols <- which(colnames(ecoli.stats) %in% c("Dataset",
                                                 "IMP", "IMP-megahit", "MetAmos_MGMT", "MOCAT_MGMT",
                                                 "IMP_MG", "MetAmos_MG", "MOCAT_MG",
                                                 "IMP_MT"))
ecoli.wide <- ecoli.stats[, ecoli.cols]
ecoli.long <- melt(ecoli.wide)
colnames(ecoli.long) <- c("Dataset", "Assembly", "value")

png("/home/shaman/Documents/Publications/IMP-manuscript/figures/second_iteration/genome_recovery-Supp-v2.png",
    width=7.5, height=7.5, units='in', res=200)
ggplot(ecoli.long, aes(x=Assembly, y=value, fill=Assembly)) +
  geom_bar(stat="identity", position="dodge") +
  facet_grid(Dataset~.) +
  ylab("Genome fraction (%)") +
  theme_bw() +
  theme(legend.position="none",
        axis.text.x=element_text(angle=45, hjust=1))
dev.off()

## Export the combined table for the manuscript.
write.table(recovery, "/home/shaman/Documents/Publications/IMP-manuscript/tables/second_iteration/genome_recovery.tsv",
            row.names=FALSE, sep="\t", quote=FALSE)
#save.image("/home/shaman/Work/Data/integrated-omics-pipeline/MS_analysis/MT_genomes/HF_ref/MT_genomes_new.Rdat")
#load("/home/shaman/Work/Data/integrated-omics-pipeline/MS_analysis/MT_genomes/HF_ref/MT_genomes_new.Rdat")
|
acc400734aab594d26c14d8eb477e12da8b7f3ba | 3f858f84495ae252181b9a32ef4807634c8afc93 | /rabbitGUI_code/PlotROCCurve.R | fc77f85751d471691cb7d0ffff78b8dca6c60bc1 | [] | no_license | anabrandusa/rabbitGUI | a8cb73edea9fbc0856034cf8831969a7c879adaa | f9947bf0b67270c6fccc7930f5f11c47af99c12c | refs/heads/master | 2021-01-12T12:06:25.545735 | 2017-03-02T18:42:40 | 2017-03-02T18:42:40 | 72,299,526 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,415 | r | PlotROCCurve.R | plotROCCurve = function(locale.input, input.all.data.table, input.auc.headers, input.auc.means, model.source) {
# Resolve which model's scores to plot for this locale/source.
model.id <- select.auc.model(locale.input, model.source)

# Pull the score, true class and direction columns for that model.
model.rows <- subset(input.all.data.table, Model == model.id)[, c("Score", "TrueClass", "Direction")]
true.classes <- as.numeric(as.factor(model.rows[, "TrueClass"]))
scores <- as.numeric(model.rows[, "Score"])

# NOTE(review): `all(model.rows[, "Direction"]) == 0` evaluates all() first,
# so it is TRUE when at least one Direction value is zero -- not when all of
# them are. If "every direction is 0" was intended, this should read
# all(model.rows[, "Direction"] == 0); confirm against get.roc.result before
# changing (behavior preserved here).
roc.curve <- get.roc.result(auc.order.criterion, true.classes, scores, all(model.rows[, "Direction"]) == 0)

# Look up the model's mean AUC for the chosen criterion and format the title.
auc.value <- subset(input.auc.means, Model == model.id)[, auc.order.criterion]
auc.numeric <- as.numeric(as.character(auc.value))
plot.title <- paste("ROC AUC:", toString(round(as.numeric(auc.numeric), digits = 2)))

# Draw the ROC curve using the globally configured scaling and line width.
plot(roc.curve, main = plot.title, cex.lab = cex.scale, cex.main = cex.scale, lwd = plot.line.width)
}
959fbd69474dd5da96659b2c30e32a89eacb7a91 | a0c1151adda8ae89726602a35051fbc056cb3902 | /MLEmethod2.r | d88d0253075ca945428f34482259a47cd704fca2 | [] | no_license | Zerthimon21/Gaussian-Mixture-MALDI-TOF | 7f251e5529b416bfb1cf9be6a1522edff4ed9fae | 82eaee7c75bc24925a089f0cbfbcad0058833204 | refs/heads/master | 2022-11-11T16:05:58.727858 | 2020-06-26T20:52:30 | 2020-06-26T20:52:30 | 275,242,223 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,122 | r | MLEmethod2.r | rm(list = ls())
# MLE-style estimators for the m/z offset (delta) and peak width (sigma) of
# a simulated Gaussian-mixture isotope pattern for peptide DRVYIHPF.
# NOTE(review): MassCalc, IsotopicCountFFT and MultiNormCalc are presumably
# defined in the sourced file below -- this script cannot run without it.
FilePath <- "C:/Documents and Settings/Zerthimon21/Desktop/mzxml"
setwd(FilePath)
source("IsotpoicFitLoad11.R")
# Generate a model with no baseline noise or AR(1) error.
PepMass=MassCalc('DRVYIHPF')
PepIsotope <- IsotopicCountFFT('DRVYIHPF') ##this must match peaks in figure
length(PepIsotope)
# Normalize the isotope abundances so they sum to 1.
PepIsotope =PepIsotope/sum(PepIsotope)
sum(PepIsotope)
# True parameter values used to generate the model (recovered below).
mzError = .09
Sigma = .1
# m/z grid: 2 neutron masses below the monoisotopic mass to 9 above.
Range = seq(PepMass-(2*1.008664916),PepMass+(9*1.008664916),.02)
# This acts as if already standardized.
PD <- MultiNormCalc(Range,'DRVYIHPF',PepMass,Sigma,mzError)
# Restrict to the window spanning the isotope envelope.
SpectraAverage <- PD[PD[,1]>=(PepMass-1.008664916) & PD[,1]<=(PepMass+(length(PepIsotope)*1.008664916)),]
plot(PD[,1],PD[,2],type='l')
plot(SpectraAverage[,1],SpectraAverage[,2],type='l') ##No normalization
range(SpectraAverage[,1])
##SpectraAverage[,2]=SpectraAverage[,2]/sum(SpectraAverage[,2])
# DeltaHat / mzError estimator.
# Expected value of kappa: sum(point distribution * isotope index); this is for (K-1)N.
ExK = 0
for(i in 1:length(PepIsotope)){
ExK <- ExK + PepIsotope[i]*i
}
# Double sum of h * lambda * x over all grid points and isotopes,
# normalized by the total intensity sum(h_i).
DoubleSum=0
for (i in 1:length(SpectraAverage[,2])){
for(k in 1:length(PepIsotope)){
DoubleSum <- DoubleSum+as.numeric((SpectraAverage[i,2]/sum(SpectraAverage[,2]))*PepIsotope[k]*SpectraAverage[i,1])
}
}
# mzError estimator, delta hat: intensity-weighted mean m/z minus the
# expected isotope-envelope center.
mzErrorHat = DoubleSum - PepMass - ExK*1.008664916 + 1.008664916 ##
mzErrorHat
# Relative error of the estimate against the true mzError.
(mzErrorHat-mzError)/mzError
########################################################################################################
## Sigma estimate from one peak of the simulation (second isotope window).
SpectraAverage <- PD[PD[,1]>=(PepMass+.5*1.008664916) & PD[,1]<=(PepMass+1.5*1.008664916),]
SSigma = 0
for (i in 1:length(SpectraAverage[,2])){
bump <- SpectraAverage[i,2]*((SpectraAverage[i,1]-(PepMass+1.008664916+mzErrorHat))^2)
bump <- bump/sum(SpectraAverage[,2])
SSigma <- SSigma+bump
}
SSigma
Sigma=sqrt(SSigma)
Sigma
# Per-peak sigma estimates for the series of 7 isotope windows; note both
# of these omit the *PepIsotope[k] weight in the bump line.
for(k in 1:length(PepIsotope)){
SpectraAverage <- PD[PD[,1]>=(PepMass+(k-1.5)*1.008664916) & PD[,1]<=(PepMass+(k-.5)*1.008664916),]
SSigma = 0
StdSum=sum(SpectraAverage[,2])
for (i in 1:length(SpectraAverage[,2])){
bump <- SpectraAverage[i,2]*((SpectraAverage[i,1]-(PepMass+((k-1)*1.008664916)+mzErrorHat))^2)
bump <- bump/StdSum
SSigma <- SSigma+bump
}
##print(sum(SpectraAverage[,2]))
SSigma
Sigma=sqrt(SSigma)
print(Sigma)
}
##########################################################################################################
# SigmaSquared / variance / resolution SigmaHat estimator across the whole
# envelope, weighting each squared deviation by the isotope abundance.
SSigma = 0
SpectraAverage <- PD[PD[,1]>=(PepMass-(.5*1.008664916)) & PD[,1]<=(PepMass+((length(PepIsotope)-.5)*1.008664916)),]
for (i in 1:length(SpectraAverage[,2])){
for(k in 1:length(PepIsotope)){
bump <- SpectraAverage[i,2]*PepIsotope[k]*((SpectraAverage[i,1]-(PepMass+((k-1)*1.008664916)+mzErrorHat))^2)
bump <- bump/sum(SpectraAverage[,2])
SSigma <- SSigma+bump
}}
# Variance estimate and its square root (sigma hat).
SSigma/sum(SpectraAverage[,2])
sqrt(SSigma/sum(SpectraAverage[,2]))
8751b2c2acb6800bef154a29f3fa5bf18ea3bd4e | 768a5e8713ed0751fdea1fc0512dc5e87c1c06b0 | /R/PotSolarInst.R | ee6dc99d58369e08c4f403a695b6945e92cdb0fb | [] | no_license | cran/EcoHydRology | c854757a7f70f91b3d33d6f7c5313752bf2819e3 | d152909c12e6bb0c1f16b987aa7f49737cdcf3d3 | refs/heads/master | 2020-05-16T21:01:18.881492 | 2018-09-24T11:52:33 | 2018-09-24T11:52:33 | 17,691,749 | 6 | 6 | null | 2018-08-29T19:54:05 | 2014-03-13T02:26:21 | R | UTF-8 | R | false | false | 1,022 | r | PotSolarInst.R | PotSolarInst <-
function(Jday, hour = 12, lat = 42.44*pi/180, sunrise = NULL, sunset = NULL, SolarNoon = mean(c(sunrise,sunset)), units = "Wm2", latUnits = "unknown"){
# Instantaneous potential solar radiation.
#   Jday       day of year (may be a vector)
#   hour       hour of day, 0-24
#   lat        latitude [rad]; degrees are auto-converted (see latUnits)
#   sunrise/sunset or SolarNoon: solar noon in hours, 0-24
#   units      "Wm2" converts the result to W/m2; anything else leaves kJ/m2/d
#   latUnits   "degrees", "radians", or "unknown" (guess from magnitude)
# Returns potential solar radiation clamped at 0, to 3 significant figures.
# Convert latitude to radians when explicitly given in degrees, or when an
# "unknown" value is too large in magnitude to plausibly be radians.
if ((abs(lat) > pi/2 & latUnits == "unknown") | latUnits == "degrees" ){
lat <- lat*pi/180
} else if (latUnits == "unknown"){
warning("in PotSolarInst call: Latitude assumed to be in radians, if using degrees, please set latUnits = 'degrees'")
}
SolarConstant <- 118000 # [kJ/m2/d]
AngVeloc <- 0.2618 # Earth's angular velocity [rad/hr] = 15 deg/hr
# FIX: removed dead code -- DayAngle was computed here but never used.
dec <- declination(Jday) # solar declination [rad], package helper
# Zenith angle from latitude, declination and the hour angle about solar noon.
ZenithAngle <- acos(sin(lat)*sin(dec) + cos(lat)*cos(dec)*cos(AngVeloc*(hour-SolarNoon)))
convert <- if (units == "Wm2") 86.4 else 1 # kJ/m2/d -> W/m2 divisor
PotSol <- SolarConstant * cos(ZenithAngle) / convert
# which() deliberately retained: it skips NA/NaN entries that a plain
# logical subscript assignment would error on.
PotSol[which(PotSol < 0)] <- 0 # sun below the horizon contributes nothing
return ( signif(PotSol, 3) )
}
|
a281878450c9914cdcc68e251e9b33e33434c2ad | 524b2f5ec8e4a59deda1cc55286b9f86ca451473 | /tests/testthat.R | f98dd5f9cb3cc9126595d0de23dcd1f10d83a306 | [
"MIT"
] | permissive | nschiett/fishflux | cf725405fda185e5bc3f252bdb7768af2464f3ba | 418bfd4a412a0bc269bc3b1df126f775edd308f6 | refs/heads/master | 2022-05-12T15:48:56.974176 | 2022-05-02T16:26:17 | 2022-05-02T16:26:17 | 148,555,709 | 7 | 4 | NOASSERTION | 2022-04-07T16:59:50 | 2018-09-12T23:48:09 | R | UTF-8 | R | false | false | 60 | r | testthat.R | library(testthat)
# Load the package under test, then run its full testthat suite
# (invoked automatically by R CMD check).
library(fishflux)
test_check("fishflux")
|
d8955ee6b59cb92c686fb223246d982f94a9cfe5 | 9ae449a15a7a15daef7635ebc81b02fb9002c912 | /complexheatmap_QAS/SVG/ComplexHeatmap.R | 3afe6dbdf8dab83cd0334bc9929d040e322cc2e1 | [] | no_license | TMCjp/QAS | f516e4ea8cf51aae86e64ea99ce787fb07924904 | 35640dda8f154941e9237e3889bd25d003b663c3 | refs/heads/master | 2021-01-13T13:59:07.230601 | 2017-01-19T06:44:20 | 2017-01-19T06:44:20 | 79,102,770 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,653 | r | ComplexHeatmap.R | #安装需要的R包
# Command-line driven script: reads an expression matrix for selected
# samples from a MySQL database, k-means-clusters the differentially
# expressed genes, and renders a ComplexHeatmap to an SVG file.
# Determine which of the required packages are already installed.
pkgs <- rownames(installed.packages())
# Packages required by this program.
packages_1=c('ComplexHeatmap','circlize','colorspace','GetoptLong','RSvgDevice','data.table','RMySQL')
# Packages that still need to be installed.
packages_2=setdiff(packages_1,pkgs)
# Install any missing packages (CRAN first, then Bioconductor).
if(length(packages_2)>0){
install.packages(packages_2, repos="http://cran.rstudio.com/")
source("http://bioconductor.org/biocLite.R")
biocLite(packages_2)
}
suppressWarnings(suppressMessages(library(ComplexHeatmap)))
suppressWarnings(suppressMessages(library(circlize)))
suppressWarnings(suppressMessages(library(colorspace)))
suppressWarnings(suppressMessages(library(GetoptLong)))
suppressWarnings(suppressMessages(library(RSvgDevice)))
# argv[1]: all-genes table, argv[2]: differential-genes table,
# argv[3]: cancer type, argv[4..24]: heatmap options, argv[25]: output list file.
argv <- commandArgs(TRUE);
library(data.table)
library(RMySQL)
all_gene <- fread(argv[1])
diff_gene <- fread(argv[2])
symbols <- diff_gene$symbol
samples <- colnames(all_gene)[2:ncol(all_gene)]
rm(all_gene,diff_gene)
# Load expression values directly from the backend database.
# Build a REGEXP alternation ("s1|s2|...") matching the selected samples.
selected_sample <- toString(samples)
selected_sample <- gsub(', ','|',selected_sample,fixed = TRUE)
# Connect to the MySQL database.
# NOTE(review): credentials are hardcoded here -- move them to a protected
# configuration file or environment variables.
conn <- dbConnect(MySQL(),
dbname = "questionworkflow",
username="root", password="lihl@1123",
host="111.198.139.95",port=3306) #192.168.99.20
cancer <- argv[3]
# NOTE(review): argv-derived values are pasted directly into the SQL string;
# sanitize them or use parameterized queries to avoid SQL injection.
selected_matrix <- dbGetQuery(conn, paste0("SELECT * FROM ",cancer,"_uncv2_mrnaseq_rsem ", " where sample_name REGEXP '",selected_sample,"'"))
dbDisconnect(conn)
rm(conn,cancer,selected_sample)
# Data processing stage: each DB row holds one sample's expression values
# as a single '#'-separated string (presumably one value per gene, in the
# order given by geneSort.csv -- confirm against the loader that built the
# table).
genename <- fread("/home/ubuntu/galaxy/tools/prodigy_test/genenames/geneSort.csv",header = FALSE)
selected_value <- as.character(selected_matrix[,2])
sample_name <- as.character(selected_matrix[,1])
rm(selected_matrix)
value_list <- alist()
for(i in 1:length(selected_value)){
value_list[i] <- strsplit(selected_value[i],'#',fixed = TRUE)
}
rm(i)
# Genes as rows, samples as columns.
selected_value <- as.matrix(as.data.frame(value_list))
rm(value_list)
rownames(selected_value) <- as.character(as.matrix(genename)[1,])
colnames(selected_value) <- sample_name
rm(genename,sample_name)
# Convert to a numeric matrix and drop rows with missing values.
suppressWarnings(storage.mode(selected_value) <- "numeric")
selected_value <- na.omit(selected_value)
# Keep only the rows matching the differential-gene symbols.
math <- match(symbols,rownames(selected_value))
math <- na.omit(math)
selected_value <- selected_value[math,]
# Set plotting parameters (from the original ComplexHeatmap code).
cluster_rows = as.logical(argv[4])
row_hclust_side = argv[5]
row_names_side = argv[6]
cluster_columns = as.logical(argv[7])
column_names_side = argv[8]
column_hclust_side = argv[9]
km = as.numeric(argv[10])
outPath = "/var/www/heatmapoutput/"
numberRange = c(as.numeric(argv[11]),as.numeric(argv[12]),as.numeric(argv[13]))
colorRange = c(argv[14],argv[15],argv[16])
# "__pd__" is an escaped "#" (pound) coming from the web front end.
colorRange = gsub(pattern="__pd__", replacement="#", colorRange)
ha_mix_col_his_val = argv[17]
ha_mix_col_den_val = argv[18]
ha_mix_col_vio_val = argv[19]
ha_mix_col_heatmap_val = argv[20]
ha_mix_row_his_val = argv[21]
ha_mix_row_den_val = argv[22]
ha_mix_row_vio_val = argv[23]
ha_mix_row_heatmap_val = argv[24]
# Random prefix so concurrent runs do not overwrite each other's files.
fileNamePrefix <- runif(1,0,1000000000)
fileNamePrefix <- round(fileNamePrefix)
# Build the complex heatmap: order columns by hierarchical clustering,
# then k-means the rows into km clusters.
ff_1 <- hclust(dist(t(selected_value)))
selected_value <- selected_value[, ff_1$order]
kc <- kmeans(selected_value, km)
cla <- kc$cluster
clas <- unique(kc$cluster)
data <- NULL
# Write each row cluster (hclust-ordered) to its own CSV file.
for(i in 1:km) {
cs <- names(cla[cla==i])
#cs <- names(cla[cla==clas[i]])
protein_us <- selected_value[cs, ]
ff <- hclust(dist(protein_us))
write.csv(as.data.frame(protein_us[ff$order, ]), paste(outPath,fileNamePrefix,"inte_", i,".csv", sep="", collapse = ""))
}
# Pre-built column/row annotations; which ones are attached is decided by
# the argv flags below ("NULL" disables a column annotation).
ha_mix_col_his = anno_histogram(selected_value, which = "column")
ha_mix_col_den = anno_density(selected_value, type = "line", which = "column")
ha_mix_col_vio = anno_density(selected_value, type = "violin", which = "column")
ha_mix_col_heatmap = anno_density(selected_value, type = "heatmap", which = "column")
ha_mix_row_his = HeatmapAnnotation(histogram = anno_histogram(selected_value, which = "row"),which = "row",width = unit(2, "cm"))
ha_mix_row_den = HeatmapAnnotation(density_line = anno_density(selected_value, type = "line", which = "row"),which = "row",width = unit(2, "cm"))
ha_mix_row_vio = HeatmapAnnotation(violin = anno_density(selected_value, type = "violin", which = "row"),which = "row",width = unit(2, "cm"))
ha_mix_row_heatmap = HeatmapAnnotation(heatmap = anno_density(selected_value, type = "heatmap", which = "row"), which = "row",width = unit(2, "cm"))
col_anno <- c(ha_mix_col_his_val, ha_mix_col_den_val, ha_mix_col_vio_val, ha_mix_col_heatmap_val)
row_anno <- c(ha_mix_row_his_val, ha_mix_row_den_val, ha_mix_row_vio_val, ha_mix_row_heatmap_val)
col_anno <- col_anno[which(col_anno!="NULL")]
# Deeply nested grid calls can exceed R's default expression limit.
options(expressions=50000)
devSVG(paste(outPath,fileNamePrefix,"complex.SVG",sep=""))
# One branch per number of enabled column annotations (0-4); the top
# annotation height grows by 2 cm per annotation.
if(length(col_anno)==0) {
Heatmap(selected_value, name = "foo") + get(row_anno[1]) + get(row_anno[2]) + get(row_anno[3]) +get(row_anno[4])
} else if(length(col_anno)==1) {
Heatmap(selected_value, name = "foo", cluster_rows =cluster_rows,row_dend_side=row_hclust_side,row_names_side=row_names_side,cluster_columns=cluster_columns,column_names_side=column_names_side,column_dend_side=column_hclust_side, split = data.frame(cla), combined_name_fun = NULL,col = colorRamp2(numberRange,colorRange),top_annotation = HeatmapAnnotation(col_anno_1=get(col_anno[1])), top_annotation_height = unit(2, "cm")) + get(row_anno[1]) + get(row_anno[2]) + get(row_anno[3]) +get(row_anno[4])
} else if(length(col_anno)==2) {
Heatmap(selected_value, name = "foo", cluster_rows =cluster_rows,row_dend_side=row_hclust_side,row_names_side=row_names_side,cluster_columns=cluster_columns,column_names_side=column_names_side,column_dend_side=column_hclust_side, split = data.frame(cla), combined_name_fun = NULL,col = colorRamp2(numberRange,colorRange),top_annotation = HeatmapAnnotation(col_anno_1=get(col_anno[1]), col_anno_2=get(col_anno[2])), top_annotation_height = unit(4, "cm")) + get(row_anno[1]) + get(row_anno[2]) + get(row_anno[3]) +get(row_anno[4])
} else if(length(col_anno)==3){
Heatmap(selected_value, name = "foo", cluster_rows =cluster_rows,row_dend_side=row_hclust_side,row_names_side=row_names_side,cluster_columns=cluster_columns,column_names_side=column_names_side,column_dend_side=column_hclust_side, split = data.frame(cla), combined_name_fun = NULL,col = colorRamp2(numberRange,colorRange),top_annotation = HeatmapAnnotation(col_anno_1=get(col_anno[1]), col_anno_2=get(col_anno[2]), col_anno_3=get(col_anno[3])), top_annotation_height = unit(6, "cm")) + get(row_anno[1]) + get(row_anno[2]) + get(row_anno[3]) +get(row_anno[4])
} else if(length(col_anno)==4){
Heatmap(selected_value, name = "foo", cluster_rows =cluster_rows,row_dend_side=row_hclust_side,row_names_side=row_names_side,cluster_columns=cluster_columns,column_names_side=column_names_side,column_dend_side=column_hclust_side, split = data.frame(cla), combined_name_fun = NULL,col = colorRamp2(numberRange,colorRange),top_annotation = HeatmapAnnotation(col_anno_1=get(col_anno[1]), col_anno_2=get(col_anno[2]), col_anno_3=get(col_anno[3]), col_anno_4=get(col_anno[4])), top_annotation_height = unit(8, "cm")) + get(row_anno[1]) + get(row_anno[2]) + get(row_anno[3]) +get(row_anno[4])
}
dev.off()
# Record the generated SVG path in the file named by argv[25].
write.table(paste(outPath,fileNamePrefix,"complex.SVG",sep=""),argv[25],row.names=FALSE,col.names=FALSE)
|
f9061c18cacf49f0a00b586bfa3cefd067f904a6 | 066a5c53525c100c453e7369c9825a9ce582baa9 | /output/Efficient-Responsive-Single-delta.R | 0416c03ddd64faf533370fab594cf6bb7cff1823 | [
"MIT"
] | permissive | tong-wang/Efficient-Responsive | a44817925f345353f29c79616d903beaba3bf71a | 66d6efc1ce8150569bfd45ad5cdc71aa2306658f | refs/heads/master | 2016-09-05T18:02:11.061236 | 2014-05-17T07:30:50 | 2014-05-17T07:30:50 | 9,501,843 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,139 | r | Efficient-Responsive-Single-delta.R | #################
# R code for analyzing output and plot figures
# v1.0 (organized on 2013-04-17)
# for Figure 12 and 13 in the paper
#################
#NEED TO FIRST SET R WORKING DIRECTORY TO WHERE THE FILES ARE LOCATED!!!
# Working directory must contain the Nash-equilibrium output file read below.
setwd("/Users/buxx/Desktop/test/")
#read the output file
data <- read.table("Efficient-Responsive-Single-Nash-delta.txt", header=TRUE)
#prepare data for Figure 12 and 13
# The value 20 (the top of the plotted y-range) is replaced with NA so that
# curves simply stop instead of drawing a flat line at the axis ceiling.
# NOTE(review): presumably 20 is a "no finite threshold" sentinel/cap in the
# solver output -- confirm against the numerical code that wrote the file.
data.delta2 <- data[data$delta==1.2,]
if (nrow(data.delta2[data.delta2$TS12==20,]) >0 ) data.delta2[data.delta2$TS12==20,]$TS12 <- NA
data.delta5 <- data[data$delta==1.5,]
# NOTE(review): for delta = 1.5 the sentinel is cleared from NashE, NashR and
# TS12, but for delta = 1.2 only from TS12 -- presumably the delta = 1.2 Nash
# curves never reach 20; verify against the source data.
if (nrow(data.delta5[data.delta5 $NashE==20,]) >0 ) data.delta5[data.delta5 $NashE==20,]$NashE <- NA
if (nrow(data.delta5[data.delta5 $NashR==20,]) >0 ) data.delta5[data.delta5 $NashR==20,]$NashR <- NA
if (nrow(data.delta5[data.delta5 $TS12==20,]) >0 ) data.delta5[data.delta5 $TS12==20,]$TS12 <- NA
#plot figure 12 (delta=1.2)
# One page, two side-by-side panels sharing axis limits:
#   (a) the firms' Nash-equilibrium choices, (b) the socially efficient
#   switching thresholds, both plotted over b in [0, 1] with VOI on the y-axis.
pdf('Figure12-delta12.pdf', width = 12, height = 7)
par(oma=c(0,0,2,0))
par(mfrow=c(1,2))
xrange = c(0, 1)
yrange = c(0, 20)
plot(xrange, yrange, type="n", xlab="b", ylab=expression(paste("VOI (", sigma^2, ")")) , xaxt="n", yaxt="n")
# Panel (a): NashE curve in red, NashR curve in blue
lines(data.delta2 $b, data.delta2 $NashE, lty=1, lwd=3, col="red")
lines(data.delta2 $b, data.delta2 $NashR, lty=1, lwd=3, col="blue")
axis(side=1, at=seq(0,1,0.1), labels=seq(0,1,0.1))
axis(side=2, at=seq(0,20,2), labels=seq(0,20,2))
title(main="(a) Firms' Strategic Choices")
plot(xrange, yrange, type="n", xlab="b", ylab=expression(paste("VOI (", sigma^2, ")")) , xaxt="n", yaxt="n")
# Panel (b): socially efficient thresholds TS01 and TS12 (same colour)
lines(data.delta2 $b, data.delta2 $TS01, lty=1, lwd=3, col="darkolivegreen")
lines(data.delta2 $b, data.delta2 $TS12, lty=1, lwd=3, col="darkolivegreen")
axis(side=1, at=seq(0,1,0.1), labels=seq(0,1,0.1))
axis(side=2, at=seq(0,20,2), labels=seq(0,20,2))
title(main="(b) Socially Efficient Strategies")
title(main=expression(paste("Figure 12. ", Delta, " = 1.2")), outer=T)
dev.off()
# Plot Figure 13 (delta = 1.5): one page with two side-by-side panels that
# share the same axis limits -- (a) the firms' Nash-equilibrium choices and
# (b) the socially efficient thresholds, plotted over b in [0, 1].
pdf("Figure13-delta15.pdf", width = 12, height = 7)
par(oma = c(0, 0, 2, 0))
par(mfrow = c(1, 2))
x_lim <- c(0, 1)
y_lim <- c(0, 20)
voi_label <- expression(paste("VOI (", sigma^2, ")"))

# Panel (a): NashE curve in red, NashR curve in blue.
plot(x_lim, y_lim, type = "n", xlab = "b", ylab = voi_label, xaxt = "n", yaxt = "n")
lines(data.delta5$b, data.delta5$NashE, lty = 1, lwd = 3, col = "red")
lines(data.delta5$b, data.delta5$NashR, lty = 1, lwd = 3, col = "blue")
axis(side = 1, at = seq(0, 1, 0.1), labels = seq(0, 1, 0.1))
axis(side = 2, at = seq(0, 20, 2), labels = seq(0, 20, 2))
title(main = "(a) Firms' Strategic Choices")

# Panel (b): socially efficient thresholds TS01 and TS12 (same colour).
plot(x_lim, y_lim, type = "n", xlab = "b", ylab = voi_label, xaxt = "n", yaxt = "n")
lines(data.delta5$b, data.delta5$TS01, lty = 1, lwd = 3, col = "darkolivegreen")
lines(data.delta5$b, data.delta5$TS12, lty = 1, lwd = 3, col = "darkolivegreen")
axis(side = 1, at = seq(0, 1, 0.1), labels = seq(0, 1, 0.1))
axis(side = 2, at = seq(0, 20, 2), labels = seq(0, 20, 2))
title(main = "(b) Socially Efficient Strategies")

title(main = expression(paste("Figure 13. ", Delta, " = 1.5")), outer = TRUE)
dev.off()
|
80a66cc723e8626d4f75025fe8c050d5c69ddfc7 | a90e67b9f5dfc4f935ac90a07a751244a33ca545 | /man/diagonalize.Rd | 11271ab546f0d7757d72127a99850ad62fedf6e5 | [
"MIT"
] | permissive | ccrandall07/mmtable2 | 0a64255dd3d887a5c6f349b06db06eabe684b35b | 7226a1cff99bb721f639a9969b4275559c3719b3 | refs/heads/master | 2023-04-19T21:01:32.535104 | 2021-05-02T06:41:00 | 2021-05-02T06:41:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 360 | rd | diagonalize.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagonalize.R
\name{diagonalize}
\alias{diagonalize}
\title{Diagonalize a data frame column}
\usage{
diagonalize(df, var_char)
}
\arguments{
\item{df}{a data frame}
\item{var_char}{the column to be diagonalized}
}
\value{
data frame
}
\description{
Diagonalize the specified column of a data frame
}
|
1c443092bc24db3ac41056a92cab47bb62f49496 | a4948192e6e43df1a62ee476f21176c93d0c9232 | /unfiled/indi2lineup_model.R | 0fea2e677a3a920102650dd40f463e10b7e8a080 | [] | no_license | vjl110/bball | 81c2f42fabb55bea7cbb3d6ff000e0eff35f5db9 | daec40c73779e4abdeb34f06b3d894dc35431b6f | refs/heads/master | 2021-01-21T18:00:29.804311 | 2015-06-20T17:29:39 | 2015-06-20T17:29:39 | 34,592,858 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,731 | r | indi2lineup_model.R | setwd("~/GitHub/bball")
# Load the individual-level (indi) and lineup-level (lnp) datasets.
indi <- read.csv("data/indi2lineup_data.csv", strip.white = T)
lnp <- read.csv("data/lineup_data.csv", strip.white = T)
# Keep lineups with a meaningful sample (> 50 offensive possessions) from
# recent seasons only.
lnp <- subset(lnp, Poss.O_lnp > 50)
lnp <- subset(lnp, Season > 2005)
# Names must be character (not factor) for the exact-match lookups below.
lnp$Name1 <- as.character(lnp$Name1)
lnp$Name2 <- as.character(lnp$Name2)
lnp$Name3 <- as.character(lnp$Name3)
lnp$Name4 <- as.character(lnp$Name4)
lnp$Name5 <- as.character(lnp$Name5)
indi$Name <- as.character(indi$Name)
indi$Tm <- as.character(indi$Tm)
lnp$Tm <- as.character(lnp$Tm)
# Organize Indi dataset
# ADDDDD TIPS ONCE I GET INTERNONEONTZZZZ
# Derived shot-location aggregates: mid-range = jumpers + hooks, rim =
# layups + dunks + tips; overall FG/FGA and free-throw rate per attempt.
indi$MID <- indi$JMP + indi$HOOK
indi$MIDA <- indi$JMPA + indi$HOOKA
indi$RIM <- indi$LAY + indi$DNK + indi$TIP
indi$RIMA <- indi$LAYA + indi$DNKA + indi$TIPA
indi$FG <- indi$X2P + indi$X3P
indi$FGA <- indi$X2PA + indi$X3PA
indi$FTR <- indi$FT/(indi$X2PA + indi$X3PA)
# convert basic stats to per100
# NOTE(review): columns are selected by positional index (10:23, 39:48,
# 50:55) -- fragile if the input CSV's column order ever changes; verify.
for(i in c(10:23, 39:48, 50:55)){
indi[,i] <- (indi[,i]/indi$Poss_O)*100
}
# Merge in the "get buckets" offensive metrics (dropping columns 8:10),
# matched on player name and season.
getB <- read.csv("data/getbucks_off.csv", strip.white = T)
getB <- getB[-c(8:10)]
indi <- merge(indi, getB, by = c("Name", "Season"), all.x = T)
# PUlLING DATA FOR LINEUPS
# Sum, per lineup, the five named players' individual offensive four-factor
# stats (O.eFG, O.TOV, O.REB, O.FT). Each player is looked up in `indi` by
# exact Name + Season match; the five values are summed. Lineups that do not
# match exactly five player-seasons are left NA.
# BUG FIX: the original guard read `if(length(vals == 5))` -- the length of a
# logical vector, which is truthy whenever any value was found -- so partial
# lineups summed out-of-range (NA) entries. The intended test is
# `length(vals) == 5`.
top5_sum <- function(lnp, indi, stat) {
  out <- rep(NA_real_, nrow(lnp))
  for (i in seq_len(nrow(lnp))) {
    season_match <- indi$Season == lnp$Season[i]
    vals <- c(
      indi[[stat]][indi$Name == lnp$Name1[i] & season_match],
      indi[[stat]][indi$Name == lnp$Name2[i] & season_match],
      indi[[stat]][indi$Name == lnp$Name3[i] & season_match],
      indi[[stat]][indi$Name == lnp$Name4[i] & season_match],
      indi[[stat]][indi$Name == lnp$Name5[i] & season_match]
    )
    vals <- sort(vals, decreasing = TRUE)  # sort() also drops NAs
    if (length(vals) == 5) {
      out[i] <- sum(vals)
    }
  }
  out
}
lnp$O.eFG <- top5_sum(lnp, indi, "O.eFG")
lnp$O.TOV <- top5_sum(lnp, indi, "O.TOV")
lnp$O.REB <- top5_sum(lnp, indi, "O.REB")
lnp$O.FT <- top5_sum(lnp, indi, "O.FT")
# For each lineup: pair each player's shot volume (FGA) with his efficiency
# (eFG%), order the five players by volume, and spread into columns
# ATT1..ATT5 / eFG1..eFG5 (ATT1 = highest-volume shooter). Lineups that do
# not match exactly five player-seasons in `indi` are skipped (columns NA).
# NOTE(review): the guard uses elementwise `&` on two length-1 conditions;
# `&&` would be idiomatic in an if(), but behaves identically here.
for(i in 1:nrow(lnp)){ #
att <- c(indi$FGA[lnp$Name1[i] == indi$Name & lnp$Season[i] == indi$Season], indi$FGA[lnp$Name2[i] == indi$Name & lnp$Season[i] == indi$Season], indi$FGA[lnp$Name3[i] == indi$Name & lnp$Season[i] == indi$Season], indi$FGA[lnp$Name4[i] == indi$Name & lnp$Season[i] == indi$Season], indi$FGA[lnp$Name5[i] == indi$Name & lnp$Season[i] == indi$Season])
efg <- c(indi$eFG.[lnp$Name1[i] == indi$Name & lnp$Season[i] == indi$Season], indi$eFG.[lnp$Name2[i] == indi$Name & lnp$Season[i] == indi$Season], indi$eFG.[lnp$Name3[i] == indi$Name & lnp$Season[i] == indi$Season], indi$eFG.[lnp$Name4[i] == indi$Name & lnp$Season[i] == indi$Season], indi$eFG.[lnp$Name5[i] == indi$Name & lnp$Season[i] == indi$Season])
if(length(att) == 5 & length(efg) == 5){
vals <- data.frame(att, efg)
vals <- vals[order(vals$att, decreasing = T), ]
lnp$ATT1[i] <- vals[1,1]
lnp$eFG1[i] <- vals[1,2]
lnp$ATT2[i] <- vals[2,1]
lnp$eFG2[i] <- vals[2,2]
lnp$ATT3[i] <- vals[3,1]
lnp$eFG3[i] <- vals[3,2]
lnp$ATT4[i] <- vals[4,1]
lnp$eFG4[i] <- vals[4,2]
lnp$ATT5[i] <- vals[5,1]
lnp$eFG5[i] <- vals[5,2]
}else{
}
}
# Same pattern for playmaking: pair assist rate (AST%) with turnover rate
# (TOV%), order by assist rate, and spread into AST1..AST5 / TOV1..TOV5
# (AST1 = primary playmaker).
for(i in 1:nrow(lnp)){ #
ast <- c(indi$AST.[lnp$Name1[i] == indi$Name & lnp$Season[i] == indi$Season], indi$AST.[lnp$Name2[i] == indi$Name & lnp$Season[i] == indi$Season], indi$AST.[lnp$Name3[i] == indi$Name & lnp$Season[i] == indi$Season], indi$AST.[lnp$Name4[i] == indi$Name & lnp$Season[i] == indi$Season], indi$AST.[lnp$Name5[i] == indi$Name & lnp$Season[i] == indi$Season])
tov <- c(indi$TOV.[lnp$Name1[i] == indi$Name & lnp$Season[i] == indi$Season], indi$TOV.[lnp$Name2[i] == indi$Name & lnp$Season[i] == indi$Season], indi$TOV.[lnp$Name3[i] == indi$Name & lnp$Season[i] == indi$Season], indi$TOV.[lnp$Name4[i] == indi$Name & lnp$Season[i] == indi$Season], indi$TOV.[lnp$Name5[i] == indi$Name & lnp$Season[i] == indi$Season])
if(length(ast) == 5 & length(tov) == 5){
vals <- data.frame(ast, tov)
vals <- vals[order(vals$ast, decreasing = T), ]
lnp$AST1[i] <- vals[1,1]
lnp$TOV1[i] <- vals[1,2]
lnp$AST2[i] <- vals[2,1]
lnp$TOV2[i] <- vals[2,2]
lnp$AST3[i] <- vals[3,1]
lnp$TOV3[i] <- vals[3,2]
lnp$AST4[i] <- vals[4,1]
lnp$TOV4[i] <- vals[4,2]
lnp$AST5[i] <- vals[5,1]
lnp$TOV5[i] <- vals[5,2]
}else{
}
}
# Spread each lineup's five players' offensive-rebound rates (ORB1..ORB5) and
# free-throw counts (FT1..FT5) into columns ranked from highest (col 1) to
# lowest (col 5). Lineups without exactly five player-season matches in
# `indi` are left NA.
# BUG FIX: the FT loop previously tested `length(orb) == 5`, reusing the
# stale `orb` vector left over from the ORB loop's last iteration instead of
# checking the freshly built `ft` vector.
top5_ranked_cols <- function(lnp, indi, stat, prefix) {
  ranked <- matrix(NA_real_, nrow = nrow(lnp), ncol = 5)
  for (i in seq_len(nrow(lnp))) {
    season_match <- indi$Season == lnp$Season[i]
    vals <- c(
      indi[[stat]][indi$Name == lnp$Name1[i] & season_match],
      indi[[stat]][indi$Name == lnp$Name2[i] & season_match],
      indi[[stat]][indi$Name == lnp$Name3[i] & season_match],
      indi[[stat]][indi$Name == lnp$Name4[i] & season_match],
      indi[[stat]][indi$Name == lnp$Name5[i] & season_match]
    )
    vals <- sort(as.numeric(vals), decreasing = TRUE)  # sort() drops NAs
    if (length(vals) == 5) {
      ranked[i, ] <- vals
    }
  }
  for (k in 1:5) {
    lnp[[paste0(prefix, k)]] <- ranked[, k]
  }
  lnp
}
lnp <- top5_ranked_cols(lnp, indi, "ORB.", "ORB")
lnp <- top5_ranked_cols(lnp, indi, "FT", "FT")
# Sum, per lineup, the five players' shot-profile quantities: mid-range
# attempts (MIDA), rim attempts (RIMA), three-point attempts (X3PA), and
# FGA-weighted ASD. Lineups without exactly five player-season matches in
# `indi` are left NA.
# BUG FIX: the original guards read `if(length(vals == 5))` (the length of a
# logical vector -- truthy whenever any value was found), so partial lineups
# summed out-of-range (NA) entries; the intended test is `length(vals) == 5`.
top5_profile_sum <- function(lnp, indi, value_of) {
  out <- rep(NA_real_, nrow(lnp))
  for (i in seq_len(nrow(lnp))) {
    names_i <- c(lnp$Name1[i], lnp$Name2[i], lnp$Name3[i],
                 lnp$Name4[i], lnp$Name5[i])
    vals <- unlist(lapply(names_i, function(nm) {
      value_of(indi$Name == nm & indi$Season == lnp$Season[i])
    }))
    vals <- sort(vals, decreasing = TRUE)
    if (length(vals) == 5) {
      out[i] <- sum(vals)
    }
  }
  out
}
lnp$MIDA <- top5_profile_sum(lnp, indi, function(sel) indi$MIDA[sel])
lnp$RIMA <- top5_profile_sum(lnp, indi, function(sel) indi$RIMA[sel])
lnp$X3PA <- top5_profile_sum(lnp, indi, function(sel) indi$X3PA[sel])
# ASD is weighted by each player's field-goal attempts before summing.
lnp$ASD <- top5_profile_sum(lnp, indi, function(sel) indi$ASD[sel] * indi$FGA[sel])
#######
# Exploratory fits: each assignment below overwrites m1, so only the last
# fit is retained in the session; the summary() calls were for interactive
# inspection. All models are weighted by lineup minutes played (MP).
# NOTE(review): FFD.reb and DRB1..DRB5 are never created in this script as
# shown -- those two fits will fail unless the columns come from elsewhere.
m1 <- lm(scale(FFO.efg) ~ scale(eFG1) + scale(eFG2) + scale(eFG3) + scale(eFG4) + scale(eFG5) + scale(X3PA) + scale(MIDA) + scale(RIMA) + scale(AST) + scale(I(ATT1 + ATT2 + ATT3 + ATT4 + ATT5)), data = lnp, weights = MP)
summary(m1)
m1 <- lm(FFO.efg ~ eFG1 + eFG2 + eFG3 + eFG4 + eFG5 + X3PA + MIDA + RIMA + AST + I(ATT1 + ATT2 + ATT3 + ATT4 + ATT5), data = lnp, weights = MP)
summary(m1)
m1 <- lm(FFO.efg ~ O.eFG + eFG1 + eFG2 + eFG3 + eFG4 + eFG5 + I(ASD/(AST1 + AST2 + AST3 + AST4 +AST5)) + I(X3PA/(RIMA + MIDA)), data = lnp, weights = MP)
summary(m1)
m1 <- lm(FFO.reb ~ ORB1 + ORB2 + ORB3 + ORB4 + ORB5, data = lnp, weights = MP)
summary(m1)
m1 <- lm(FFD.reb ~ DRB1 + DRB2 + DRB3 + DRB4 + DRB5, data = lnp, weights = MP)
m1 <- lm(FFD.reb ~ I(DRB1 + DRB2 + DRB3 + DRB4 + DRB5) + DRB1 + DRB5, data = lnp, weights = MP)
summary(m1)
m1 <- lm(FFO.efg ~ eFG1 + eFG2 + eFG3 + eFG4 + eFG5 + X3PA + MIDA + RIMA + I(AST1 + AST2 + AST3 + AST4 + AST5) + I(ATT1 + ATT2 + ATT3 + ATT4 + ATT5), data = lnp, weights = MP)
m1 <- lm(FFO.tov ~ I(AST1 + AST2 + AST3 + AST4 + AST5) + I(TOV1 + TOV2 + TOV3 + TOV4 + TOV5) + X3PA + RIMA, data = lnp, weights = MP)
m1 <- lm(FFO.reb ~ ORB1 + ORB2 + ORB3 + ORB4 + ORB5 + X3PA + MIDA + RIMA, data = lnp, weights = MP)
m1 <- lm(FFO.ftr ~ I(FT1 + FT2 + FT3 + FT4 + FT5) + X3PA + MIDA + RIMA + I(AST1 + AST2 + AST3 + AST4 + AST5), data = lnp, weights = MP)
#########
# Final standardized models for the four offensive factors: effective FG%,
# turnover rate, offensive rebounding, and free-throw rate.
oefg <- lm(scale(FFO.efg) ~ scale(O.eFG) + scale(eFG1) + scale(eFG2) + scale(eFG3) + scale(eFG4) + scale(eFG5) + scale(I(ASD/AST1)) + scale(I(X3PA/(X3PA + RIMA + MIDA))), data = lnp, weights = MP)
#summary(oefg)
#
otov <- lm(scale(FFO.tov) ~ scale(O.TOV) + scale(TOV5) + scale(I(TOV1 + TOV2 + TOV3 + TOV4)) + scale(I(AST1 + AST2 + AST3 + AST4 + AST5)) + scale(I(ATT1 + ATT2 + ATT3 + ATT4 + ATT5)), data = lnp, weights = MP)
#summary(otov)
#
oreb <- lm(scale(FFO.reb) ~ scale(O.REB) + scale(ORB1) + scale(ORB2) + scale(ORB3) + scale(ORB4) + scale(ORB5) + scale(X3PA), data = lnp, weights = MP)
#summary(oreb)
#
oftr <- lm(scale(FFO.ftr) ~ scale(O.FT) + scale(I(FT1 + FT2 + FT3 + FT4 + FT5)) + scale(I(ATT1 + ATT2 + ATT3 + ATT4 + ATT5)) + scale(X3PA) + scale(MIDA) + scale(RIMA), data = lnp, weights = MP)
#summary(oftr)
########
# Append each model's fitted values (rounded to 2 dp) back onto the lineup
# table for downstream use.
lnp$oefg <- round(as.numeric(predict(oefg, newdata = lnp)), 2)
lnp$otov <- round(as.numeric(predict(otov, newdata = lnp)), 2)
lnp$oreb <- round(as.numeric(predict(oreb, newdata = lnp)), 2)
lnp$oftr <- round(as.numeric(predict(oftr, newdata = lnp)), 2)
|
35bb0b7f7be05e6ff1624afe7928a1bdbbf4d0fc | 986d1737c5a1d8b1c20f3f6490949c1865487a83 | /tests/testthat/errors.R | a20f2d3a7e97cfee9ee0651edf1568f1c314abb3 | [] | no_license | jhollist/rio | 7bceacf8548f52452a90d7f708d784929a4b315e | f278e545752c78143216ac955c25b204559fbf0b | refs/heads/master | 2021-01-21T18:33:54.351956 | 2015-07-01T12:38:20 | 2015-07-01T12:38:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,227 | r | errors.R | context("Errors")
# Each test below pins the exact error/message text rio emits for a class of
# unsupported or partially supported file formats.
test_that("Function suggestions for unsupported import and export", {
expect_error(import("test.jpg"), "jpg format not supported. Consider using the `jpeg::readJPEG` function")
expect_error(export(data.frame(1), "test.jpg"), "jpg format not supported. Consider using the `jpeg::writeJPEG` function")
})
test_that("Error for unsupported file types", {
expect_error(import("test.mat"), "Unrecognized file format")
expect_error(export(data.frame(1), "test.mat"), "Unrecognized file format")
})
# "Mixed support": formats where only one direction (import or export) has a
# suggested fallback.
test_that("Error for mixed support file types", {
expect_error(import("test.gnumeric"), "gnumeric format not supported. Consider using the `gnumeric::read.gnumeric.sheet` function")
expect_error(export(data.frame(1), "test.gnumeric"), "Unrecognized file format")
expect_error(import("test.por"), "The system cannot find the file specified")
expect_error(export(data.frame(1), "test.por"), "Unrecognized file format")
})
test_that("Only export data.frame or matrix", {
expect_error(export(1, "test.csv"), "`x` is not a data.frame or matrix")
})
# Fixed-width export reports the chosen column widths via message().
test_that("Column widths printed for fixed-width format", {
expect_message(export(data.frame(1), "test.txt", format = "fwf"))
})
|
d38153731865e1f91789f4ba00e3bd9577c425bc | c734ca3b2f2eda498461049be11c2c007e99d367 | /tools/Rscripts/Hidden_proteome_explained_by_TU.R | 346dbf6c7a5a4f393125a9f73b3f9091113df9ea | [] | no_license | nnalpas/Proteogenomics_reannotation | 95b7387f3c81dbe1c9c26409fc5118b2572c79cb | b054de55f29bd095001f37db09adfda8b155f2f2 | refs/heads/master | 2023-08-22T14:38:55.164844 | 2023-08-08T10:33:31 | 2023-08-08T10:33:31 | 111,789,188 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,397 | r | Hidden_proteome_explained_by_TU.R |
library(magrittr)
library(ggplot2)

# All figures are collected here and written to one PDF at the end.
my_plots <- list()

# Transcriptional-unit (TU) expression table (Kopf 2014).
my_transcri_f <- "H:/data/Synechocystis_6frame/Kopf_2014_TU/Transcriptional_units_expr.txt"
my_transcri <- data.table::fread(
    input = my_transcri_f, sep = "\t", quote = "", header = TRUE,
    stringsAsFactors = FALSE)

# Reference proteome: micro-protein predictions plus annotated CDS
# translations, combined into one named list of sequences.
my_fasta_f <- c(
    "H:/data/Synechocystis_6frame/Genome/micro_proteins_Synechocystis_sp_PCC6803_20180419.fasta",
    "H:/data/Synechocystis_6frame/Genome/Synechocystis_sp_PCC_6803_cds_aa.fasta"
)
my_fasta <- lapply(my_fasta_f, function(x) {
    seqinr::read.fasta(file = x, seqtype = "AA", as.string = TRUE)
}) %>%
    unlist(., recursive = FALSE)
complete_proteome <- data.frame(Proteins = names(my_fasta), Proteome = TRUE)

# MaxQuant proteinGroups tables: one in-house processing (SCyCode) and four
# public-dataset (PXD) processings.
my_pg_files <- c(
    SCyCode = "H:/data/Synechocystis_6frame/MQ_6frame_valid/combined/txt/proteinGroups.txt",
    pxd_nolabel = "H:/data/Synechocystis_6frame/ORF_validation/pxd_nolabel/combined/txt/proteinGroups.txt",
    pxd_005085 = "H:/data/Synechocystis_6frame/ORF_validation/pxd005085/combined/txt/proteinGroups.txt",
    pxd_014662_1 = "H:/data/Synechocystis_6frame/ORF_validation/pxd014662_1/combined/txt/proteinGroups.txt",
    pxd_014662_2 = "H:/data/Synechocystis_6frame/ORF_validation/pxd014662_2/combined/txt/proteinGroups.txt"
)
# One row per (processing, protein): split protein groups into individual
# IDs, drop decoys/contaminants and chr*/pca/pcb/psy entries, and derive a
# single Intensity (label-free value, or the sum of L/M/H channels when the
# run was labelled). Type = "SCyCode" or "pxd" (prefix before first "_").
my_data <- lapply(my_pg_files, function(x) {
    data.table::fread(
        input = x, sep = "\t", quote = "", header = TRUE,
        stringsAsFactors = FALSE, colClasses = "character",
        na.strings = "NaN") %>%
        tidyr::separate_rows(data = ., `Protein IDs`, sep = ";") %>%
        dplyr::filter(., !grepl("^(REV__|CON__|chr|pca|pcb|psy)", `Protein IDs`))
    #.[["Protein IDs"]] %>%
    #grep("^(REV|CON|chr|pca|pcb|psy)__", ., invert = TRUE, value = TRUE) %>%
    #unique(.)
}) %>%
    plyr::ldply(., data.table::data.table, .id = "Processing") %>%
    dplyr::select(
        ., Processing, `Protein IDs`, Intensity, `Intensity L`,
        `Intensity M`, `Intensity H`) %>%
    dplyr::rowwise(.) %>%
    dplyr::mutate(., Intensity = dplyr::case_when(
        !is.na(Intensity) ~ as.double(Intensity),
        TRUE ~ sum(as.double(c(
            `Intensity L`, `Intensity M`, `Intensity H`)), na.rm = TRUE)
    )) %>%
    dplyr::select(., Processing, Proteins = `Protein IDs`, Intensity) %>%
    tidyr::separate(
        data = ., col = Processing, into = c("Type"),
        sep = "_", remove = FALSE, extra = "drop")
# Number of distinct proteins identified per processing run.
toplot <- my_data %>%
    dplyr::group_by(., Processing) %>%
    dplyr::summarise(., Count = dplyr::n_distinct(Proteins)) %>%
    dplyr::ungroup(.)
# One colour per processing run, in toplot's row order.
my_cols <- c("#387eb8", "#404040", "#e21e25", "#fbaf3f", "#d1d2d4") %>%
    set_names(toplot$Processing)
my_plots[["histogram_proc_identification"]] <- ggplot(
    data = toplot,
    mapping = aes(x = Processing, y = Count, fill = Processing, colour = Processing)) +
    geom_bar(stat = "identity", position="dodge", alpha = 0.5) +
    ggpubr::theme_pubr() +
    scale_fill_manual(values = my_cols) +
    scale_colour_manual(values = my_cols)
# Presence/absence table per protein of the reference proteome: seen by
# SCyCode and/or any PXD dataset (missing combinations become FALSE).
toplot <- my_data %>%
    dplyr::select(., -Processing, -Intensity) %>%
    unique(.) %>%
    dplyr::mutate(., value = TRUE) %>%
    tidyr::pivot_wider(data = ., names_from = Type, values_from = value) %>%
    dplyr::left_join(x = complete_proteome, y = .) %>%
    dplyr::mutate_all(~tidyr::replace_na(data = ., replace = FALSE))
my_plots[["venn_identification_processing"]] <- ggvenn::ggvenn(
    data = toplot,
    columns = c("Proteome", "SCyCode", "pxd"),
    fill_color = c("#387eb8", "#d1d2d4", "#e21e25"))
# Partition the proteome into: never identified anywhere, identified only
# in-house (SCyCode), and identified only in public data (PXD). Relies on
# the presence/absence `toplot` built just above.
never_identified <- list(
    never = toplot %>%
        dplyr::filter(Proteome == TRUE & SCyCode == FALSE & pxd == FALSE) %>%
        .[["Proteins"]],
    unique_SCyCode = toplot %>%
        dplyr::filter(Proteome == TRUE & SCyCode == TRUE & pxd == FALSE) %>%
        .[["Proteins"]],
    unique_PXD = toplot %>%
        dplyr::filter(Proteome == TRUE & SCyCode == FALSE & pxd == TRUE) %>%
        .[["Proteins"]]
)
# Export each category as a FASTA file; headers come from each sequence's
# "Annot" attribute with the leading ">" stripped.
for (x in names(never_identified)) {
    tmp <- my_fasta[never_identified[[x]]]
    tmp_names <- lapply(tmp, function(y) {attr(x = y, which = "Annot")}) %>%
        unlist(.) %>%
        sub("^>", "", .)
    seqinr::write.fasta(
        sequences = tmp, names = tmp_names,
        file.out = paste0("Never_identified_in_", x, ".fasta"),
        open = "w", as.string = TRUE)
}
# Overlap of the PXD-only proteins across the four individual PXD runs.
toplot <- my_data %>%
    dplyr::filter(., Proteins %in% never_identified$unique_PXD) %>%
    dplyr::select(., -Type, -Intensity) %>%
    #unique(.) %>%
    dplyr::mutate(., value = TRUE) %>%
    tidyr::pivot_wider(data = ., names_from = Processing, values_from = value) %>%
    #dplyr::left_join(x = complete_proteome, y = .) %>%
    dplyr::mutate_all(~tidyr::replace_na(data = ., replace = FALSE))
my_plots[["venn_identification_PXD"]] <- ggvenn::ggvenn(
    data = toplot,
    columns = c("pxd_nolabel", "pxd_005085", "pxd_014662_1", "pxd_014662_2"),
    fill_color = c("#387eb8", "#d1d2d4", "#e21e25", "#fbaf3f"))
# Flag proteins identified in only one source (SCyCode-only or PXD-only) and
# rank identifications by intensity within each processing run.
my_data_format <- my_data %>%
    dplyr::mutate(., Unique = dplyr::case_when(
        Proteins %in% c(
            never_identified$unique_SCyCode, never_identified$unique_PXD) ~ TRUE,
        TRUE ~ FALSE
    )) %>%
    dplyr::group_by(., Processing) %>%
    dplyr::arrange(., dplyr::desc(Intensity)) %>%
    dplyr::mutate(., Rank = 1:dplyr::n()) %>%
    dplyr::ungroup(.)
# Intensity distributions: source-unique identifications vs all others,
# faceted by processing run (log10 x-axis).
my_plots[["density_unique_identification"]] <- ggplot(data = my_data_format,
    mapping = aes(x = Intensity, fill = Unique)) +
    geom_density(alpha = 0.5) +
    ggpubr::theme_pubr() +
    scale_x_log10() +
    facet_grid(rows = vars(Processing), scales = "free_y") +
    scale_fill_manual(values = c(`FALSE` = "#d1d2d4", `TRUE` = "#387eb8"))
my_plots[["histogram_unique_identification"]] <- ggplot(
    data = my_data_format,
    mapping = aes(x = Intensity, fill = Unique)) +
    geom_histogram(position="identity", alpha = 0.5, colour = "black") +
    ggpubr::theme_pubr() +
    scale_x_log10() +
    facet_grid(rows = vars(Processing), scales = "free_y") +
    scale_fill_manual(values = c(`FALSE` = "#d1d2d4", `TRUE` = "#387eb8"))
# Build over-representation-analysis (OA) input tables: one row per
# hidden/source-unique protein, pivoted wide by processing run and by type
# ("Proteome" rows mark the never-identified set).
my_data_oa <- my_data_format %>%
    dplyr::bind_rows(
        .,
        complete_proteome %>%
            dplyr::mutate(
                .,
                Processing = "Proteome",
                Type = "Proteome",
                Unique = dplyr::case_when(
                    Proteins %in% never_identified$never ~ TRUE,
                    TRUE ~ FALSE
                ))) %>%
    dplyr::filter(., Unique == TRUE) %>%
    dplyr::select(., Processing, Type, Proteins) %>%
    dplyr::mutate(., value = 1)
my_data_oa_proc <- my_data_oa %>%
    dplyr::select(., -Type) %>%
    tidyr::pivot_wider(data = ., names_from = Processing, values_from = value) %>%
    dplyr::mutate_all(~tidyr::replace_na(data = ., replace = 0))
my_data_oa_type <- my_data_oa %>%
    dplyr::select(., -Processing) %>%
    unique(.) %>%
    tidyr::pivot_wider(data = ., names_from = Type, values_from = value) %>%
    dplyr::mutate_all(~tidyr::replace_na(data = ., replace = 0))
# Write both OA input tables as tab-separated text.
data.table::fwrite(
    x = my_data_oa_proc, file = "Hidden_proteome_for_oa_by_processing.txt",
    append = FALSE, quote = FALSE, sep = "\t",
    row.names = FALSE, col.names = TRUE)
data.table::fwrite(
    x = my_data_oa_type, file = "Hidden_proteome_for_oa_by_type.txt",
    append = FALSE, quote = FALSE, sep = "\t",
    row.names = FALSE, col.names = TRUE)
# Mark TUs whose id contains a never-identified protein; keep only
# multi-feature TU ids (those containing "~").
# NOTE(review): the collapsed grepl pattern treats protein IDs as regex
# alternatives -- fine for plain locus tags (e.g. sll0001) but would
# misbehave if an ID contained regex metacharacters; confirm the vocabulary.
my_transcri_format <- my_transcri %>%
    dplyr::mutate(
        ., Hidden = grepl(
            paste0(never_identified$never, collapse = "|"), id)) %>%
    dplyr::filter(., grepl(".+~", id))
data.table::fwrite(
    x = my_transcri_format, file = "Hidden_proteome_found_by_TU.txt",
    append = FALSE, quote = FALSE, sep = "\t",
    row.names = FALSE, col.names = TRUE)
# sigB performed in Perseus
# Re-import the table after significance-B testing in Perseus; label TUs
# that are both significant and hidden.
my_transcri_format_sig <- data.table::fread(
    input = "H:/data/Synechocystis_6frame/2022-02-02_Hidden_proteome/Hidden_proteome_found_by_TU_sigB.txt",
    sep = "\t", quote = "", header = TRUE,
    stringsAsFactors = FALSE) %>%
    dplyr::mutate(., Label = ifelse(
        `Max/Avg B significant` == "+" & Hidden == TRUE, id, NA))
# TU expression distributions, hidden vs non-hidden TUs (log10 x-axis).
my_plots[["density_unique_TU_identification"]] <- ggplot(
    data = my_transcri_format,
    mapping = aes(x = Average, fill = Hidden)) +
    geom_density(alpha = 0.5) +
    ggpubr::theme_pubr() +
    scale_x_log10() +
    scale_fill_manual(values = c(`FALSE` = "#d1d2d4", `TRUE` = "#387eb8"))
my_plots[["histogram_unique_TU_identification"]] <- ggplot(
    data = my_transcri_format,
    mapping = aes(x = Average, fill = Hidden)) +
    geom_histogram(position="identity", alpha = 0.5, colour = "black") +
    ggpubr::theme_pubr() +
    scale_x_log10() +
    scale_fill_manual(values = c(`FALSE` = "#d1d2d4", `TRUE` = "#387eb8"))
# Classify each never-identified protein by TU membership: not in any TU,
# the sole gene of its TU (id starts "<protein>~"), or one of several genes.
toplot <- data.frame(
    Proteins = never_identified$never) %>%
    dplyr::rowwise(.) %>%
    dplyr::mutate(
        ., TUid = dplyr::case_when(
            !any(grepl(Proteins, my_transcri_format$id)) ~ "No TU",
            any(grepl(paste0("^", Proteins, "~"), my_transcri_format$id)) ~ "Single gene in TU",
            TRUE ~ "Multiple genes in TU"
        ))
# Ordered factor fixes the bar order in the plot below.
toplot$TUid <- factor(
    x = toplot$TUid, levels = c(
        "No TU", "Single gene in TU", "Multiple genes in TU"),
    ordered = TRUE)
my_cols <- c(
    `Single gene in TU` = "#d1d2d4", `Multiple genes in TU` = "#e21e25",
    `No TU` = "#387eb8")
my_plots[["histogram_proteins_per_TU"]] <- ggplot(
    data = toplot,
    mapping = aes(x = TUid, fill = TUid, colour = TUid)) +
    geom_bar(stat = "count", position = "dodge", alpha = 0.9) +
    geom_text(stat = "count", aes(label = ..count..), vjust = -0.3) +
    ggpubr::theme_pubr() +
    scale_fill_manual(values = my_cols) +
    scale_colour_manual(values = my_cols)
# Volcano-style view of TU abundance: Max/Avg ratio vs average expression,
# labelling the significant hidden TUs.
my_plots[["volcano_TU_abundance"]] <- ggplot(
    data = my_transcri_format_sig,
    mapping = aes(
        x = `Max/Avg`, y = Average,
        fill = Hidden,
        size = `Max/Avg B significant`, shape = `Max/Avg B significant`,
        label = Label)) +
    geom_point() +
    ggrepel::geom_text_repel() +
    ggpubr::theme_pubr() +
    #scale_y_log10() +
    scale_fill_manual(values = c(`FALSE` = "#d1d2d4", `TRUE` = "#387eb8")) +
    scale_shape_manual(values = c(21, 22)) +
    scale_size_manual(values = c(2, 3.5))
# Write every collected figure to a single PDF and snapshot the session.
pdf("Hidden_proteome_explained_by_TU.pdf", 10, 10)
my_plots
dev.off()
save.image("Hidden_proteome_explained_by_TU.RData")
|
9bff7b149083e5a5a8de6a0a96598c9064663d5c | 99a695f64f28d9da2981e3d8447b5b6a287d69d0 | /Get_EPA_National_Emissions_Inventory_PM25_Data.R | bcf62b29a0af9d908ce8348f3c257c6444800cee | [] | no_license | JimCallahanOrlando/ExData_Plotting2 | 5ce37891809a7aa36ae076d18034449e01997871 | 50b4c3267665e8d1cd42fc95410fd9a855238592 | refs/heads/master | 2020-04-21T09:02:43.874118 | 2015-09-23T18:55:52 | 2015-09-23T18:55:52 | 41,051,321 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,248 | r | Get_EPA_National_Emissions_Inventory_PM25_Data.R | ### FILE: Get_EPA_Natoinal_Emissions_Inventory_Data.R
### PURPOSE: Download EPA Air Pollution Data Zip file
###
### LANGUAGE: R statistical programming language
### R version 3.2.2 (2015-08-14) -- "Fire Safety"
### Copyright (C) 2015 The R Foundation for Statistical Computing
### Platform: x86_64-w64-mingw32/x64 (64-bit)
###
### IDE: RStudio
### Version 0.98.1103 © 2009-2014 RStudio, Inc.
###
### PLATFORM: Microsoft Windows 7 Professional [Version 6.1.7601]
### MODIFIED: 08/09/2015 Changed to use /ExData_Plotting1/.data
### NOTE: This project has a 20 meg datafile that must be ignored by git/GitHub.
### NOTE: .data folder is intended to be both "hidden" and ignored by git/GitHub.
### NOTE: Creating an RStudio project will create ".gitignore".
### NOTE: MUST edit ".gitignore" to add /data and /.data directories.
### NOTE: This program has a KISS/brute force philosophy
### NOTE: though it is possible to read directly from .zip; we unzip to .txt.
### descr has the file.head function for dumping the first few lines of a file without interpretation
### descr provides file.head() for dumping the first few lines of a file
### without interpretation (used for spot-checking downloads).
### library() (not require()) so a missing package fails loudly up front.
library(descr)
### If you comment out this specific setwd(); use the getwd() to note what directory you are in.
setwd("~/GitHub/ExData_Plotting2")
### Where am I? (in the directory tree -- useful for debugging file string)
getwd()
### What is the date? (useful for bibliography: retrieved URL on date)
dateDownloaded <- date()
### Get National Emissions Inventory Zip file (binary mode for Windows)
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(url,'~\\GitHub\\ExData_Plotting2\\.data\\exdata%2Fdata%2FNEI_data.zip', mode="wb")
### Unzip the data file
### NOTE(review): exdir = "" extracts into the *current* working directory,
### not into the .data folder where the zip was saved -- confirm intended.
unzip("~\\GitHub\\ExData_Plotting2\\.data\\exdata%2Fdata%2FNEI_data.zip",
overwrite = TRUE, exdir = "", unzip = "internal", setTimes = TRUE)
## dump the first few lines of file using file.head() from the descr package
## could have used readlines() for this, but easier to remember head and file.head .
# file.head("~\\GitHub\\ExData_Plotting1\\.data\\household_power_consumption.txt")
### Data file should be downloaded and unzipped; ready for next program read into R.
### End of: Get_EPA_Natoinal_Emissions_Inventory_Data.R
b52c88b2d130a82e7649646eb34d3943be53ee66 | c9e0c41b6e838d5d91c81cd1800e513ec53cd5ab | /man/gtkToolbarGetDropIndex.Rd | 4ac83fc2077739dfb750f462ef0c728781fca77a | [] | no_license | cran/RGtk2.10 | 3eb71086e637163c34e372c7c742922b079209e3 | 75aacd92d4b2db7d0942a3a6bc62105163b35c5e | refs/heads/master | 2021-01-22T23:26:26.975959 | 2007-05-05T00:00:00 | 2007-05-05T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 820 | rd | gtkToolbarGetDropIndex.Rd | \alias{gtkToolbarGetDropIndex}
\name{gtkToolbarGetDropIndex}
\title{gtkToolbarGetDropIndex}
\description{Returns the position corresponding to the indicated point on
\code{toolbar}. This is useful when dragging items to the toolbar:
this function returns the position a new item should be
inserted.}
\usage{gtkToolbarGetDropIndex(object, x, y)}
\arguments{
\item{\code{object}}{[\code{\link{GtkToolbar}}] a \code{\link{GtkToolbar}}}
\item{\code{x}}{[integer] x coordinate of a point on the toolbar}
\item{\code{y}}{[integer] y coordinate of a point on the toolbar}
}
\details{\code{x} and \code{y} are in \code{toolbar} coordinates.
Since 2.4}
\value{[integer] The position corresponding to the point (\code{x}, \code{y}) on the toolbar.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
accd9da7f1dd07def6f33b09a62829924baea47a | bb62ca9fc6d9ced52229a2655802a40ad9c4f40a | /Archived/X - Prepare quantity variables (county).R | d6e70b30970a5e9665d6f908cddf54cd08034354 | [] | no_license | BrianAronson/Indiana-MAT-Opioid | 48c1ab64fb10684444e6ed1705b3175ac9a0056d | 5a74f0b7b19bcb900322c20087d92c104fcb7355 | refs/heads/master | 2020-12-05T02:52:41.496168 | 2020-01-05T23:42:54 | 2020-01-05T23:42:54 | 231,986,208 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,230 | r | X - Prepare quantity variables (county).R | #0) Prepare workspace
#a) load packages
library(docshop)
#b) find main directory; use AWS if working from EC2
#   (local-machine branches are commented out; this copy always runs on AWS)
# if(grepl("briarons",getwd())){
# main.dir<-"C:/Users/briarons/Desktop/Analysis - Data/Postdoc"
# AWS=F
# }
# if(grepl("bda13",getwd())){
# main.dir<-"C:/Users/bda13/Desktop/Analysis - Data/Postdoc"
# AWS=F
# }else{
main.dir<-"hidden"
AWS=T
# }
#c) name files and directories to be created
quantdir<-file.path(main.dir,"Pill quantities")
quantpath<-file.path(quantdir,"Pill quantities (county yearly).fst")
#d) create directory for files to save (locally via dir.create, on S3 via put_folder)
if(!AWS){
dir.create(quantdir,showWarnings = F)
}else{
put_folder(quantdir)
}
#d) load data for identifying opioids and MAT
# NDC codes are zero-padded to 11 digits so they match the prescription data.
df.drugs_all = s3read_csv(file.path(
"hidden",
"additional_data",
"drug.tsv"))
df.drugs_all[,NDC:=create_leading_zeros(NDC,11)]
df.drugs_opioid = df.drugs_all[Class=='Opioid',]
df.drugs_MAT = s3read_csv(file.path(
"hidden",
"additional_data",
"MAT_drug.tsv"
))
df.drugs_MAT[,NDC:=create_leading_zeros(NDC,11)]
#1) Prepare objects for use in loop
#a) list to append looped datatables to
l.prescriptions_by_county<-list()
#b) v.dates/quarters to read: 2007q1 .. 2018q1 (last three quarters dropped)
# NOTE(review): paste0() has no sep argument -- the `sep=""` below is pasted
# as an (empty) extra string, which is harmless but misleading.
v.dates<-paste0(rep(2007:2018,each=4),c("q1","q2","q3","q4"),sep="")
v.dates<-v.dates[-((length(v.dates)-2):length(v.dates))]
for(i in 1:length(v.dates)){
#3) Iteratively input names of raw prescription data and merged geo data
v.raw_prescriptions_name <-
paste("hidden",
v.dates[i],
".fst",
sep = "")
v.merged_geo_name <-
paste(
"hidden",
v.dates[i],
".csv.gz",
sep = ""
)
#4) Load data, just grabbing necessary variables (I am assuming that "QUANTITY" refers to number of pills)
df.raw_prescriptions <-
s3read_any(v.raw_prescriptions_name,
columns = c("PATID", "NDC", "QUANTITY"))
df.merged_geo <-
s3read_any(v.merged_geo_name, select = c("PATID", "county"))
#5) Estimate pills sold
#a) merge datasets
df.prescriptions_geo<-merge(x=df.raw_prescriptions,y=df.merged_geo,by="PATID",all.x = T)
#b) identify whether drug is opioid or MAT
df.prescriptions_geo$opioid <-
df.prescriptions_geo$NDC %in% df.drugs_opioid$NDC
df.prescriptions_geo$MAT <-
df.prescriptions_geo$NDC %in% df.drugs_MAT$NDC
#c) count pills per county
df.prescriptions_by_county <-
df.prescriptions_geo[, .(
MAT_pill_total = sum(QUANTITY[MAT],na.rm=T),
opioid_pill_total = sum(QUANTITY[opioid],na.rm=T)
),
by = "county"]
#d) append to list
df.prescriptions_by_county$date<-v.dates[i]
l.prescriptions_by_county[[i]]<-df.prescriptions_by_county
print(i)
} #end of loop
#4) Save pill quantities
#a) bind list to data.table
df.quarterly_prescriptions_by_county<-rbindlist(l.prescriptions_by_county)
#b) aggregate by year (rather than by quarter, as is currently)
df.quarterly_prescriptions_by_county$date <-
substr(df.quarterly_prescriptions_by_county$date, 1, 4)
df.yearly_prescriptions_by_county <-
df.quarterly_prescriptions_by_county[, .(
MAT_pill_total = sum(MAT_pill_total),
opioid_pill_total = sum(opioid_pill_total)
),
by = c("county", "date")]
#c) save
if(!AWS){
write.fst(df.yearly_prescriptions_by_county,quantpath)
}else{
s3write_fst(df.yearly_prescriptions_by_county,quantpath)
}
|
92c3383e45fa81e66182f33d29a6d69e4e16d2ca | 33981f84bca1f1a75d920921310997d211d29de2 | /app/anova.R | 1002c43ca04a777495255ab3e084c85120a30dc5 | [] | no_license | gwerbin/statcomm-11a | 087e0296891699c2db3046073d5fb6ec399cdee3 | 68ea1d1b1e62fae0a7480e0644da5bf359175781 | refs/heads/master | 2020-05-18T02:46:46.059420 | 2014-11-18T16:39:44 | 2014-11-18T16:39:44 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,505 | r | anova.R | # Input ----
n_group <- 15
m_group <- 0
sd_group <- 1
n_rep <- 1000
alpha = 0.05
input_list <- list(
list(rng = rnorm, inv_F = qnorm, n = n_group, mean = m_group + 0.6, sd = sd_group),
list(rng = rnorm, inv_F = qnorm, n = n_group, mean = m_group, sd = sd_group),
list(rng = rnorm, inv_F = qnorm, n = n_group, mean = m_group, sd = sd_group),
list(rng = rnorm, inv_F = qnorm, n = n_group, mean = m_group, sd = sd_group)
#list(rng = rt, inv_F = qt, n = n_group, df = 2, ncp = 1)
)
K <- length(input_list)
group_names <- paste0("G", 1:K)
## Generate data ----
library(testthat)
library(dplyr)
library(reshape2)
library(colorspace)
sample_unif <- function(n_sample) seq(0 + 1/n_sample, 1 - 1/n_sample, 1/n_sample)
y_pretty <- lapply(input_list, function(item) {
do.call(item$inv_F, c(list(sample_unif(2000)), item[-(1:3)]))
})
N <- sum(vapply(input_list, `[[`, 0, 3))
group_sizes <- vapply(input_list, function(grp) grp$n, 0)
group_padding <- max(group_sizes) - group_sizes
between_df <- K - 1
within_df <- sum(group_sizes - 1)
y_rep <- vector("list", n_rep)
group_means <- matrix(0, n_rep, K)
grand_mean <- numeric(n_rep)
between_ms <- between_ss <- numeric(n_rep)
within_ms <- within_ss <- numeric(n_rep)
f <- numeric(n_rep)
progress <- txtProgressBar(max = n_rep, style = 3)
for(r in 1:n_rep) {
y_r <- lapply(input_list, function(item) do.call(item[[1]], item[-(1:2)]))
y_rep[[r]] <- y_r
names(y_r) <- group_names
grand_mean[r] <- mean(unlist(y_r))
for(k in 1:K) {
n_k <- input_list[[k]]$n
group_means[r, k] <- mean(y_r[[k]])
between_ss[r] <- n_k * (group_means[r, k] - grand_mean[r])^2 + between_ss[r]
within_ss[r] <- sum((y_r[[k]] - group_means[r, k])^2) + within_ss[r]
}
between_ms[r] <- between_ss[r] / between_df
within_ms[r] <- within_ss[r] / within_df
f[r] <- between_ms[r] / within_ms[r]
setTxtProgressBar(progress, r)
}
close(progress)
# plotting ----
plot_colors <- rainbow_hcl(K, start = 30, end = 300)
data_boxplot <- function() {
layout(matrix(c(1,1,1,2), nrow = 1))
## group boxplot
par(mar = c(3, 2, 1, 0) + .1)
boxplot(y_pretty, col = plot_colors, xaxt = "n", las = 1)
axis(1, at = 1:K, labels = group_names, tick = FALSE)
## overall boxplot
par(mar = c(3, 0, 1, 1) + .1)
boxplot(unlist(y_pretty), xaxt = "n", yaxt = "n")
axis(1, at = 1, labels = "Overall", tick = FALSE)
}
sampling_distribution_plot <- function(){
## sampling distributions
# par(mar = c(3, 1, 2.5, 1) + .1)
plot(density(grand_mean), lwd = 2, lty = "dashed", col = "darkgray",
xlab = "", ylab = "", main = "Sampling distribution", yaxt = "n",
xlim = c(min(group_means), max(group_means)))
for(k in 1:K) lines(density(group_means[, k]), lwd = 2, col = plot_colors[k])
}
mean_squares_plot <- function() {
between_den <- density(between_ms)
within_den <- density(within_ms)
## between MS
# par(mar = c(1, 0, 2, 1) + .1)
plot(between_den, lwd = 2, lty = "dashed",
main = "Mean Squares", xlab = "", ylab = "", yaxt = "n",
ylim = c(0, max(within_den$y)))
## within MS
# par(mar = c(1, 0, 1, 1) + .1)
lines(within_den, lwd = 2, lty = "dashed",
main = "", xlab = "", ylab = "", yaxt = "n", col = "red")
legend("topright", legend = c("Between", "Within"), lty = "dashed", col = c("black", "red"))
}
f_stat_plot <- function() {
h <- hist(f, breaks = 50, plot = FALSE)
xlim <- c(min(h$breaks), max(h$breaks))
x <- seq(xlim[1], xlim[2], 0.1)
x_den <- df(x, between_df, within_df)
ylim <- c(0, max(max(x_den), max(h$density)) + 0.1)
plot(h, freq = FALSE, ylim = ylim, main = "", xlab = "", ylab = "")
lines(x, x_den, xlim = xlim)
f_alpha <- qf(1 - alpha, between_df, within_df)
rect(0, xlim[1], f_alpha, ylim[2],
border = NA,
col = do.call("rgb", as.list(c(col2rgb("steelblue")/255, 1/4))))
rect(f_alpha, xlim[1], xlim[2], ylim[2],
border = NA,
col = do.call("rgb", as.list(c(col2rgb("wheat")/255, 1/4))))
legend("topright",
c(sprintf("theoretical coverage: %0.0f%%", pf(f_alpha, between_df, within_df) * 100),
sprintf("empirical coverage: %0.0f%%", ecdf(f)(f_alpha) * 100)),
bty = "n", inset = 5/100)
mtext(
side = 3,
text = "100 - coverage%\n=\n% chance of generating an F at least as large, purely by chance",
cex = 0.8
)
title(
main = "Empirical distribution of ANOVA F statistic",
line = 3
)
title(
sub = "Curve is theoretical F density",
line = 2
)
}
|
56821a0a0831b0d2406bb466037b4afa738c436a | 92e597e4ffc9b52cfb6b512734fb10c255543d26 | /R/commonNames.R | 87252568d9656657d55055c5ce62ecfd5e2c5d70 | [
"MIT"
] | permissive | KWB-R/kwb.utils | 3b978dba2a86a01d3c11fee1fbcb965dd15a710d | 0930eaeb9303cd9359892c1403226a73060eed5b | refs/heads/master | 2023-05-12T15:26:14.529039 | 2023-04-21T04:28:29 | 2023-04-21T04:28:29 | 60,531,844 | 9 | 1 | MIT | 2023-04-21T04:28:30 | 2016-06-06T13:52:43 | R | UTF-8 | R | false | false | 415 | r | commonNames.R | #' Common Names in two Objects with Names Attribute
#'
#' @param x object with names attribute, e.g. data.frame, named vector
#' @param y object with names attribute, e.g. data.frame, named vector
#' @return vector of names occurring in both \code{x} and \code{y}
#' @export
#' @examples
#' x <- data.frame(a = 1:2, b = 2:3)
#' y <- c(a = 1, c = 2)
commonNames <- function(x, y)
{
intersect(names(x), names(y))
}
|
005847e92d339ee7a958af68b8cfaac3c32a6085 | 301593bbe3b525a24d9adaf442454e2261f3c747 | /R/utilSolarTime.R | e5a68919b4cd6211c0b347abcf60c0685b097598 | [] | no_license | mt-climate-office/solartime | 9121e322dbd1054494aadd70b56f1e883ef413f1 | 485840b653a50de82d19c7fffaceb546b8741f08 | refs/heads/master | 2023-07-24T13:31:58.338865 | 2021-09-02T09:40:14 | 2021-09-02T09:40:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,168 | r | utilSolarTime.R | #' @export
computeSunPosition <- function(
### Calculate the position of the sun
timestamp ##<< POSIXct having a valid tzone attribute,
, latDeg ##<< Latitude in (decimal) degrees
, longDeg ##<< Longitude in (decimal) degrees
) {
if (is.null(attributes(timestamp)$tzone)) stop(
"Expected timestamp to have a timezone, but has none. "
, "Please assign the correct time zone,"
, " e.g. by structure(mytimestamp, tzone='UTC')"
, " and check that times are still correct.")
# express same time in different time zone for correct doy and hour
timestampLoc <- setLocalTimeZone(timestamp, longDeg)
doy = yday(timestampLoc) #as.POSIXlt(timestampLoc)$yday + 1 ##<<
## Data vector with day of year (DoY) starting at 1
## , same length as Hour or length 1
hour = getFractionalHours(timestampLoc) ##<<
## Data vector with time as fractional decimal hour of local time zone
hoursAheadOfUTC = getHoursAheadOfUTC(timestampLoc) ##<< Time zone (in hours)
##value<< as returned by \code{\link{computeSunPositionDoyHour}}
computeSunPositionDoyHour(doy, hour, latDeg, longDeg, hoursAheadOfUTC)
}
#' @export
setLocalTimeZone <- function(
### modify tzone attribute of timestamp to 'GMT+x' for local to given longitude
timestamp ##<< POSIXct
, longDeg ##<< Longitude in (decimal) degrees
) {
hourUTCDiff <- round(longDeg/15) # difference to UTC
signchar = if (hourUTCDiff > 0) "-" else "+" # tzone uses opposite sign
tzone_str = paste0("Etc/GMT", signchar, hourUTCDiff)
##value<< \code{timestamp} with modified tzone attribute. Its the same time
## point expressed in another time zone. E.g. "2019-04-04 00:00:00 UTC"
## becomes "2019-04-04 10:00:00 +10" for a longitude of +150 (Sydney, Australia)
structure(timestamp, tzone = tzone_str)
}
#' @export
getHoursAheadOfUTC <- function(
### get the time difference to UTC in hours
timestamp ##<< POSIXt vector
){
tUTC <- force_tz(timestamp, tzone = "UTC")
##value<< integer vector of how many hours noon of timestamp is ahead of
## noon in UTC
as.integer((as.numeric(tUTC) - as.numeric(timestamp))/3600)
}
#' @export
getFractionalHours <- function(
### get the time difference to previous midnight in fractional hours
timestamp ##<< POSIXt vector
){
##value<< numeric vector of fractional hours
(as.numeric(timestamp) -
as.numeric(floor_date(timestamp, unit = "days"))) / 3600
}
#' @export
computeSunriseHour <- function(
### Compute the hour of sunrise for given day and coordinates
timestamp ##<< POSIXt vector
, latDeg ##<< Latitude in (decimal) degrees
, longDeg=NA ##<< Longitude in (decimal) degrees
## (not required if solar time is sufficient)
, timeZone=getHoursAheadOfUTC(timestamp) ##<< Time zone (in hours) ahead
## of UTC (Central Europe is +1) (not required if solar time is sufficient)
, ... ##<< further arguments to \code{\link{computeSunriseHourDoy}}
){
doy <- as.POSIXlt(timestamp)$yday + 1L
##value<< result of \code{\link{computeSunriseHourDoy}}
computeSunriseHourDoy(doy, latDeg, longDeg, timeZone, ...)
}
#' @export
computeSunriseHourDoy <- function(
### Compute the hour of sunrise for given day and coordinates
doy ##<< integer vector with day of year [DoY, 1..366]
, latDeg ##<< Latitude in (decimal) degrees
, longDeg=NA ##<< Longitude in (decimal) degrees
## (not required if solar time is sufficient)
, timeZone=NA ##<< Time zone (in hours) ahead
## of UTC (Central Europe is +1) (not required if solar time is sufficient)
, isCorrectSolartime = TRUE ##<< sunrise hour is computed first for solar time
## (where noon is exactly at 12:00)
## If TRUE (default) then sunrise hour is converted to local winter time,
## based on timeZone and longitude.
){
if (isCorrectSolartime & any(!is.finite(c(longDeg, timeZone)))) stop(
"if isCorrectSolartime, one needs to provide finite longDeg and timeZone")
fracYearInRad <- 2 * pi * (doy - 1) / 365.24
solDeclRad <- (
(0.33281 - 22.984*cos(fracYearInRad)
- 0.34990*cos(2*fracYearInRad) - 0.13980*cos(3*fracYearInRad)
+ 3.7872*sin(fracYearInRad) + 0.03205*sin(2*fracYearInRad)
+ 0.07187*sin(3*fracYearInRad))/180*pi )
# compute time in radians
# solved equation for SolElevRad in computeSunPositionDoyHour for elevation == 0
# , i.e. sunrise
solTimeRad <- {
cosSolTimeRad <- pmax(-1, pmin(
+1,(sin(solDeclRad) * sin(latDeg/180*pi)) /
(cos(solDeclRad) * cos(latDeg/180*pi))
))
acos( cosSolTimeRad )
}
# # sunrise equation cos(solTimeRad) = -tan(latDeg) * tan(decl)
# sunriseRad <- {
# # https://www.quora.com/How-can-you-calculate-the-length-of-the-day-on-Earth-at-a-given-latitude-on-a-given-date-of-the-year
# cosSunriseRad <- -tan(latDeg/180*pi) * tan(solDeclRad)
# acos(cosSunriseRad)
# }
# sunsetHour <- 12 - sunriseRad/pi*12
# convert to hours
solTimeHour <- solTimeRad/pi*12
if (!isCorrectSolartime) return( solTimeHour )
hour <- solTimeHour - computeSolarToLocalTimeDifference(
longDeg = longDeg
, timeZone = timeZone, fracYearInRad = fracYearInRad)
##value<< numeric vector of length(doy) giving the time of sunrise
##in hours after midnight.
## Polar night is indicated by 12h, polar day by 0h.
hour
}
attr(computeSunriseHourDoy,"ex") <- function(){
today <-
as.POSIXlt(Sys.Date())$yday
(sunrise <- computeSunriseHourDoy(today, latDeg = 51, isCorrectSolartime = FALSE))
(sunrise <- computeSunriseHourDoy(today, latDeg = 51, longDeg = 11.586, timeZone = +1))
# elevation near zero
computeSunPositionDoyHour(160, sunrise, latDeg = 51, isCorrectSolartime = FALSE)
#
doy <- 1:366
plot( computeSunriseHourDoy(doy, latDeg = 51, isCorrectSolartime = FALSE) ~ doy )
# north pole: daylength 0 and 24 hours
plot( computeSunriseHourDoy( doy, latDeg = +80, isCorrectSolartime = FALSE) ~ doy )
plot( computeSunriseHourDoy( doy, latDeg = -80, isCorrectSolartime = FALSE) ~ doy )
}
#' @export
computeSunsetHour <- function(
### Compute the hour of sunrise for given day and coordinates
timestamp ##<< POSIXt vector
, latDeg ##<< Latitude in (decimal) degrees
, longDeg=NA ##<< Longitude in (decimal) degrees
## (not required if solar time is sufficient)
, timeZone=getHoursAheadOfUTC(timestamp) ##<< Time zone (in hours) ahead
## of UTC (Central Europe is +1) (not required if solar time is sufficient)
, ... ##<< further arguments to \code{\link{computeSunsetHourDoy}}
){
doy <- as.POSIXlt(timestamp)$yday + 1L
##value<< result of \code{\link{computeSunsetHourDoy}}
computeSunsetHourDoy(doy, latDeg, longDeg, timeZone, ...)
}
#' @export
computeSunsetHourDoy <- function(
### Compute the hour of sunrise for given day and coordinates
doy ##<< integer vector with day of year [DoY, 1..366]
, latDeg ##<< Latitude in (decimal) degrees
, longDeg=NA ##<< Longitude in (decimal) degrees
## (not required if solar time is sufficient)
, timeZone=NA ##<< Time zone (in hours) ahead
## of UTC (Central Europe is +1) (not required if solar time is sufficient)
, isCorrectSolartime = TRUE ##<< sunrise hour is computed first for solar time
## (where noon is exactly at 12:00)
## If TRUE (default) then sunrise hour is converted to local winter time,
## based on timeZone and longitude.
){
if (isCorrectSolartime & any(!is.finite(c(longDeg, timeZone)))) stop(
"if isCorrectSolartime, one needs to provide finite longDeg and timeZone")
# compute solar sunrise hour, that one is symmetric around noon
sunriseSolarHour <- computeSunriseHourDoy(
doy, latDeg = latDeg, isCorrectSolartime = FALSE)
sunsetSolarHour <- 24 - sunriseSolarHour
sunsetHour <- if (isCorrectSolartime) {
hourDiff <- computeSolarToLocalTimeDifference(
longDeg, timeZone, doy = doy)
sunsetSolarHour - hourDiff
} else sunsetSolarHour
##value<< numeric vector of length(doy) giving the time of sunset
##in hours after midnight.
## Polar night is indicated by 12h, polar day by 24h.
sunsetHour
}
attr(computeSunsetHourDoy,"ex") <- function(){
today <-
as.POSIXlt(Sys.Date())$yday
(sunset <- computeSunsetHourDoy(today, latDeg = 51, isCorrectSolartime = FALSE))
(sunset <- computeSunsetHourDoy(today, latDeg = 51, longDeg = 11.586, timeZone = +1))
#
doy <- 1:366
plot( computeSunsetHourDoy(doy, latDeg = 51, isCorrectSolartime = FALSE) ~ doy )
# north pole: daylength 0 and 24 hours
plot( computeSunsetHourDoy( doy, latDeg = +80, isCorrectSolartime = FALSE) ~ doy )
plot( computeSunsetHourDoy( doy, latDeg = -80, isCorrectSolartime = FALSE) ~ doy )
}
#' @export
computeSolarToLocalTimeDifference <- function(
### computes the time difference in hours between (apparent) solar time and local time
longDeg ##<< Longitude in (decimal) degrees
, timeZone ##<< Time zone (in hours) ahead of UTC (Berlin is +1)
, doy = NA ##<< integer vector with day of year [DoY, 1..366],
## Specify NA get mean solar time across the year instead of apparent solar
## time (i.e. with differences throughout the year due to eccentricity
## of earth orbit)
, fracYearInRad = 2 * pi * (doy - 1)/365.24 ##<< may specify instead
## of doy for efficiency.
){
# convert solar time to local winter time
# Equation of time in hours, accounting for changes in the time of solar noon
# to local time zone
eqTimeHour <- ifelse(is.na(fracYearInRad), 0,
(0.0072*cos(fracYearInRad) - 0.0528*cos(2*fracYearInRad)
- 0.0012*cos(3*fracYearInRad) - 0.1229*sin(fracYearInRad)
- 0.1565*sin(2*fracYearInRad) - 0.0041*sin(3*fracYearInRad)))
# Local time in hours
localTimeHour <- (longDeg/15 - timeZone)
##value<< time difference in hours to be added to local winter time
## to get solar time
localTimeHour + eqTimeHour
}
attr(computeSolarToLocalTimeDifference,"ex") <- function(){
# Jena: 50.927222, 11.586111
longDeg <- 11.586
doi <- 1:366
# due to longitude: west of timezone meridian: sun culminates later,
# solar time is less than local time
(localDiff <- computeSolarToLocalTimeDifference(longDeg, 1L)*60)
# taking into account shift during the year due to earth orbit eccentricity
plot( computeSolarToLocalTimeDifference(longDeg, 1L, doi)*60 ~ doi )
abline(h = localDiff)
}
#' @export
computeDayLength <- function(
### Compute the Day-length in hours for given time and coordinates
timestamp ##<< POSIXt vector
, latDeg ##<< Latitude in (decimal) degrees
, ... ##<< further arguments to \code{\link{computeDayLengthDoy}}
){
doy <- as.POSIXlt(timestamp)$yday + 1L
##value<< result of \code{\link{computeDayLengthDoy}}
computeDayLengthDoy(doy, latDeg, ...)
}
#' @export
computeDayLengthDoy <- function(
### Compute the Day-length in hours for given time and coordinates
doy ##<< integer vector with day of year [DoY, 1..366],
## same length as Hour or length 1
, latDeg ##<< Latitude in (decimal) degrees
){
solTimeHour <- computeSunriseHourDoy(
doy = doy, latDeg = latDeg, isCorrectSolartime = FALSE)
##value<< numeric vector of length(doy) giving the
## time between sunrise and sunset in hours
24 - 2*solTimeHour
}
attr(computeDayLengthDoy,"ex") <- function(){
doy <- 1:366
plot( computeDayLengthDoy(doy, latDeg = 51) ~ doy)
# north pole: daylength 0 and 24 hours
plot( computeDayLengthDoy( doy, latDeg = +80) ~ doy )
plot( computeDayLengthDoy( doy, latDeg = -80) ~ doy )
}
#' @export
getSolarTimeHour <- function(
### Get the fractional hour of solar time
timestamp ##<< POSIXt vector in local time
, longDeg ##<< Longitude in (decimal) degrees
){
doy = yday(timestamp) #as.POSIXlt(timestamp)$yday + 1 ##<<
## Data vector with day of year (DoY) starting at 1
## , same length as Hour or length 1
hour = getFractionalHours(timestamp) ##<<
## Data vector with time as fractional decimal hour of local time zone
timeZone = getHoursAheadOfUTC(timestamp) ##<< Time zone (in hours)
# Fractional year in radians
fracYearInRad <- 2 * pi * (doy - 1) / 365.24
##value<< fractional hour corrected by difference to local time
hour + computeSolarToLocalTimeDifference(
longDeg, timeZone, fracYearInRad = fracYearInRad)
}
#' @export
computeSunPositionDoyHour <- function(
### Compute the position of the sun (solar angle)
doy ##<< integer vector with day of year
## [DoY, 1..366], same length as Hour or length 1
, hour ##<< numeric vector with local winter time
## as decimal hour [0..24)
, latDeg ##<< Latitude in (decimal) degrees
, longDeg=NA ##<< Longitude in (decimal) degrees
, timeZone=NA ##<< Time zone (in hours) ahead of UTC
## (Central Europe is +1)
, isCorrectSolartime = TRUE ##<< by default corrects hour
## (given in local winter time) for latitude to solar time
## (where noon is exactly at 12:00). Set this to FALSE if times are
## specified already as solar times.
){
# adapted from REddyProc, credits to Antje Moffat
# and Alessandro Cescatti's C++ code
#
##details<<
## This code assumes that Hour is given in local winter time zone.
## By default, it corrects by longitude to solar time (where noon
## is exactly at 12:00).
## Set argument \code{isCorrectSolartime} to FALSE to use the given
## local winter time instead.
#
if (isCorrectSolartime & any(!is.finite(c(longDeg, timeZone)))) stop(
"if isCorrectSolartime, one needs to provide finite longDeg and timeZone")
# Fractional year in radians
fracYearInRad <- 2 * pi * (doy - 1) / 365.24
# Solar time, corrected for local time and equation of time
solarTimeHour <- if (!isCorrectSolartime ) hour else {
hour + computeSolarToLocalTimeDifference(
longDeg, timeZone, fracYearInRad = fracYearInRad)
}
# Conversion to radians
# with correction for solar time < -pi to positive, important
# for SolAzim_rad.V.n below
SolTimeRad <- {
SolTimeRad0 <- (solarTimeHour - 12) * pi / 12.0
ifelse(SolTimeRad0 < -pi, SolTimeRad0 + 2*pi, SolTimeRad0)
}
#Solar declination in radians, accounting for the earth axis tilt
SolDeclRad <- ((0.33281 - 22.984*cos(fracYearInRad)
- 0.34990*cos(2*fracYearInRad) - 0.13980*cos(3*fracYearInRad)
+ 3.7872*sin(fracYearInRad) + 0.03205*sin(2*fracYearInRad)
+ 0.07187*sin(3*fracYearInRad))/180*pi )
# Solar elevation (vertical, zenithal angle) in radians with zero for horizon
SolElevRad <- asin( sin(SolDeclRad) * sin(latDeg/180*pi)
+ cos(SolDeclRad) * cos(latDeg/180*pi) * cos(SolTimeRad))
# Solar azimuth (horizontal angle) with zero for North
SolAzimRad <- {
SolAzimCos <- ((cos(SolDeclRad) * cos(SolTimeRad)
- sin(SolElevRad) * cos(latDeg/180*pi) )
/ (sin(latDeg/180*pi) * cos(SolElevRad) ) )
# Correction if off edge values
SolAzimCos[SolAzimCos > +1] <- 1
SolAzimCos[SolAzimCos < -1] <- 1
# Conversion to radians
SolAzimRad0 <- acos(SolAzimCos)
# Determine if solar azimuth is East or West depending on solar time
ifelse(SolTimeRad < 0, pi - SolAzimRad0, pi + SolAzimRad0)
}
##value<< named numeric matrix with one row for each time with entries
ans <- cbind( # cbind creates matrix also if components are single values
hour = solarTimeHour ##<< Solar time in fractional hours after
## midnight, (or given hour if isCorrectSolartime = FALSE).
, declination = SolDeclRad ##<< Solar declination (rad)
, elevation = SolElevRad ##<< Solar elevation (rad)
## with 0 at horizon increasing towards zenith
, azimuth = SolAzimRad ##<< Solar azimuth (rad)
## with 0 at North increasing eastwards
)
ans
}
attr(computeSunPositionDoyHour,"ex") <- function(){
computeSunPositionDoyHour(
160, hour = 0:24, latDeg = 51, longDeg = 13.6, timeZone = 1L)
}
#' @export
computeIsDayByHour <- function(
### tell for each date, whether its daytime
date ##<< POSIXct vector
, sunriseHour = 7 ##<< sunrise as fractional hour (0..24)
## (vector of length date or length 1)
, sunsetHour = 18 ##<< sunset as fractional hour
## (vector of length date or length 1)
, duskOffset = 0 ##<< integer scalar: time in hours after dusk for
## which records are still regarded as day
){
# need to convert to numeric, otherwise minus may return in any unit
# get fractional hour of the day
hourOfDay <- (as.numeric(date) - as.numeric(trunc(date, units = "days")))/3600
isDay <- hourOfDay >= sunriseHour & hourOfDay <= (sunsetHour + duskOffset)
##value<< logical vector (length(date)): true if its daytime
isDay
}
#' @export
computeIsDayByLocation <- function(
### tell for each timestamp, whether its daytime
timestamp ##<< POSIXct vector
, latDeg ##<< Latitude in (decimal) degrees
, longDeg ##<< Longitude in (decimal) degrees
, timeZone = getHoursAheadOfUTC(timestamp) ##<< Time zone (in hours)
## ahead of UTC (Central Europe is +1)
, duskOffset = 0 ##<< integer scalar: time in hours after dusk for
## which records are still regarded as day
, isCorrectSolartime = TRUE ##<< set to FALSE to omit correction between
## local time and solar time, e.g. if coordinates cannot be provided
){
##details<< computes hour of sunrise and sunset from given date in timezone
## hour (assuming dates are given in timezone instead of solartime)
doy <- as.POSIXlt(timestamp)$yday + 1L
# correct for solar time only afterwards to get symmetric hours around noon
sunriseSolarHour <- computeSunriseHourDoy(
doy, latDeg = latDeg, isCorrectSolartime = FALSE)
#sunriseLocal <- computeSunriseHourDoy(
# doy, latDeg = latDeg, longDeg = longDeg, timeZone = timeZone)
sunsetSolarHour <- 24 - sunriseSolarHour
hourDiff <- if (!isCorrectSolartime) 0 else
computeSolarToLocalTimeDifference(longDeg, timeZone, doy = doy)
sunriseTimezoneHour <- sunriseSolarHour - hourDiff
sunsetTimezoneHour <- sunsetSolarHour - hourDiff
##value<< logical vector (length(date)): true if its daytime
computeIsDayByHour(
timestamp, sunriseHour = sunriseTimezoneHour,
sunsetHour = sunsetTimezoneHour, duskOffset = duskOffset )
}
attr(computeIsDayByLocation,"ex") <- function(){
dateSeq <- seq( as.POSIXct("2017-03-20", tz = "Etc/GMT-1")
,as.POSIXct("2017-03-21", tz = "Etc/GMT-1")
, by = "30 min")
tmp <- computeIsDayByLocation(
dateSeq, latDeg = 50.93, longDeg = 11.59, timeZone = 1)
plot( tmp ~ dateSeq )
yday <- as.POSIXlt(dateSeq[1])$yday + 1L
sunrise <- computeSunriseHourDoy(
yday, latDeg = 50.93, longDeg = 11.59, timeZone = 1)
sunset <- computeSunsetHourDoy(
yday, latDeg = 50.93, longDeg = 11.59, timeZone = 1)
abline( v = trunc(dateSeq[1], units = "days") + c(sunrise,sunset)*3600L )
}
|
f78c32a7936aca2c09176a7fc6482718b268b0b7 | 6852894908a932f71a040239b65c0a2e31634097 | /App/ui.R | fac4ef798f8c00e12fb0cc43e324739019125be5 | [] | no_license | honto-ming/Coursera_DevDataProds_Proj | 7adf6f8ce717600e67dbfa1ceda74bd637891ba3 | 26ec1a863a6def1369200906db1641750a09cd2f | refs/heads/master | 2021-01-01T19:09:59.369301 | 2015-09-15T04:36:57 | 2015-09-15T04:36:57 | 42,482,904 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,030 | r | ui.R | library(shiny)
instructions.sidebar.1 <- paste("This Shiny app will classify a patient as being AT RISK or NOT AT RISK of ",
"being diagnosed with Coronary Heart Disease (CHD) within the next 10 years.")
instructions.sidebar.2 <- paste("To obtain a prediction, provide the patient's characteristics through the",
"inputs on this sidebar and click the 'Predict' button.")
instructions.predict.1 <- paste("The figure below shows the predicted probability a patient with the",
"given characteristics will be diagnosed with CHD in the next 10 years.")
instructions.predict.2 <- paste("Based on the provided threshold, a statement below the figure will classify",
"the patient as AT RISK or NOT AT RISK of CHD.")
instructions.model.1 <- paste("The figure below shows the ROC curve for the logistic regression model",
"used for predictions. Numbers below shows the performance metrics for the model.")
instructions.model.2 <- paste("Adjust the threshold through the slide-bar on the side panel on the left",
"to see the impact to accuracy, sensitivity, and specificity.")
shinyUI(fluidPage(
titlePanel("Predicting Coranary Heart Disease"),
sidebarLayout(
sidebarPanel(
helpText(p(instructions.sidebar.1), p(instructions.sidebar.2)),
sliderInput("threshold",
label="Threshold:",
min=0.0,
max=1.0,
value=0.4,
step=0.1),
selectInput("male",
label="Sex:",
choices=c("male", "female"),
selected="male",
multiple=FALSE),
numericInput("age",
label="Age (integers only between 0-100):",
value=30,
min=0,
max=100,
step=1),
numericInput("cigsPerDay",
label="Cigarettes Per Day (integers only > 0):",
value=10,
min=0,
step=1),
selectInput("prevalentHyp",
label="Prevalent to Hyptertension?",
choices=c("True", "False"),
selected="Fals",
multiple=FALSE),
numericInput("sysBP",
label="Systolic Blood Pressure:",
value=120,
min=0),
numericInput("glucose",
label="Glucose levels (integers only > 0):",
value=80,
min=0,
step=1),
actionButton("predBtn", "Predict")
),
mainPanel(
tabsetPanel(
tabPanel("Prediction",
h3("Probability of Coronary Heart Disease in the Next 10 Years:"),
p(instructions.predict.1),
p(instructions.predict.2),
plotOutput("predPlot"),
p(textOutput("classByThreshold"))
),
tabPanel("Model Info",
h3("ROC Curve for Logistic Regression Model"),
p(instructions.model.1),
p(instructions.model.2),
plotOutput("rocPlot"),
p("Adjust Threshold slider input on the left Sidebar to obtain desired accuracy, sensitivity, and specificity"),
p(textOutput("accuracy")),
p(textOutput("sensitivity")),
p(textOutput("specificity")),
p(textOutput("auc"))
)
)
)
)
)) |
52c2f1e13b046f62379b2e89dca140888d524555 | ec16c798bf80bcbde5d0bb621554d3f2d906a974 | /man/probability.Rd | 1daa849de77ea6899f33bd0e398d2042007b2ce0 | [
"Apache-2.0"
] | permissive | mickash/SimpleGPs | 8dfec3353dd38ec259f0306657ba11d78ae4fc61 | 4c5c8adfa5b7cb20a73fd93998fdb7cbd4a07144 | refs/heads/master | 2020-09-09T11:45:45.032197 | 2019-11-13T12:33:47 | 2019-11-13T12:33:47 | 221,213,117 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 488 | rd | probability.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit.R
\name{probability}
\alias{probability}
\title{Find the probability of points}
\usage{
probability(model, X, Y, logprob = T)
}
\arguments{
\item{model}{The gp model}
\item{X}{The input data}
\item{Y}{The target data}
\item{logprob}{Whether the probabilities should be log}
}
\value{
A scalar giving the probability or log probability
}
\description{
Find the probability of points
}
\keyword{internal}
|
4f535347121618910853e032be206735ca001071 | cce0cf573debcd09e9b47f00df1e2f3363e08bb8 | /man/generic-c.Rd | ebc685d4429f9f231ab51809fbf224cfdb1dba86 | [] | no_license | cran/facebook.S4 | 5cee55e7ec5a211a2aafe4935c96f061e110f2d2 | 31422f8a89f9fd1be94b88dfa555ea9f19a4ffaa | refs/heads/master | 2020-06-26T07:19:31.595531 | 2017-07-12T11:49:30 | 2017-07-12T11:49:30 | 97,008,724 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 679 | rd | generic-c.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FacebookGenericCollection-methods.R
\docType{methods}
\name{c,FacebookGenericCollection-method}
\alias{c,FacebookGenericCollection-method}
\title{Combine two or more Facebook collections}
\usage{
\S4method{c}{FacebookGenericCollection}(x, ..., recursive = FALSE)
}
\arguments{
\item{x}{the first collection to chain into}
\item{...}{the other collections to chain from}
\item{recursive}{not used in this context}
}
\description{
This method combines two or more Facebook collections of the same kind. Please note that duplicates are removed unless they have
different parents.
}
|
bd502084b5808ad2b6e0d99db634e3daf6d68d4f | 52e73beccb12fa13be06237e850ad5973c9ba5ef | /Exploration_of_Data_Analytics.r | 9648c347f5ab5a079ef258996396e76d5b3c66c0 | [] | no_license | DarrenTsungjenWu/R-Learning | 9f66b36fed29c9b1943f355f0dae24490601cd56 | a990f7f86b10f15c0b6af360ed41412e575abd91 | refs/heads/master | 2023-02-06T14:43:39.491368 | 2020-12-27T12:05:35 | 2020-12-27T12:05:35 | 324,491,427 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,662 | r | Exploration_of_Data_Analytics.r | #Read loan data
#stringsAsFacctors=False, False should be capital
loan = read.csv("D:\\prosperLoanData.csv", stringsAsFactors=FALSE, na.strings=c(""))
#Check data
str(loan)
head(x, n) #where x is data and n = the number of data shown from begining
###See which is state is the borrower
#Use the bar to see the trend or details
gloan = ggplot(loan, aes(BorrowerState))+geom_bar()
#Fill the bar by color(not by other variables)
gloan = ggplot(loan, aes(BorrowerState))+
geom_bar(col=I("black"), fill=I("#FFCC22"))+
theme(axis.text.x=element_text(angle=90))
#angle means the angle of the text (here it refers to the text on x axis becasue of our setting)
#axis.text.x 為x軸刻度標籤
#There are so many parameter in fn. theme() that can implement our code
###See Income range of the borrower
#Show the distribution of variable IncomeRange by bar chart
gloan = ggplot(loan, aes(loan$IncomeRange))+
geom_bar(col=I("black"), fill=I("#FFCC22"))
#We wanna rearrange the sequence of data
loan$IncomeRange = ordered(loan$IncomeRange,
levels = c("Not displayed", "Not employed", "$0", "$1-24,999", "$25,000-49,999", "$50,000-74,999", "$75,000-99,999", "$100,000+"))
gloan = ggplot(loan, aes(loan$IncomeRange))+
geom_bar(col=I("black"), fill=I("#FFCC22"))
#Result: Can not seperate the data from income "$1-100,000".
#Need to debug
####See occupation
gloan = ggplot(loan, aes(loan$Occupation))+ #loan$occupation can be turned as "Occupation"
geom_bar()
#We can see all the texts on x axis are not clear in our code
#We need to made some adjustment
gloan = ggplot(loan, aes(loan$Occupation))+ #loan$occupation can be turned as "Occupation"
geom_bar(col = I("black"), fill = I("#FFCC22"))+
theme(axis.text.x = element_text(angle = 90))
#Still vague, so we do the further transforamtion based on theme()
#The perfect result
gloan = ggplot(loan, aes(loan$Occupation))+ #loan$occupation can be turned as "Occupation"
geom_bar(col = I("black"), fill = I("#FFCC22"))+
theme(axis.text.x = element_text(angle = 90, vjust=0.5, hjust=1))
#Another trial
#the value of vjust and hjust can be 0 but we do not try this
#1
gloan = ggplot(loan, aes(loan$Occupation))+ #loan$occupation can be turned as "Occupation"
geom_bar(col = I("black"), fill = I("#FFCC22"))+
theme(axis.text.x = element_text(angle = 90, vjust=1, hjust=1))
#2
gloan = ggplot(loan, aes(loan$Occupation))+ #loan$occupation can be turned as "Occupation"
geom_bar(col = I("black"), fill = I("#FFCC22"))+
theme(axis.text.x = element_text(angle = 90, vjust=1, hjust=0.5))
#3
gloan = ggplot(loan, aes(loan$Occupation))+ #loan$occupation can be turned as "Occupation"
geom_bar(col = I("black"), fill = I("#FFCC22"))+
theme(axis.text.x = element_text(angle = 90, vjust=0.5, hjust=0.5))
###Debt to Income ratio
gloan = ggplot(loan, aes(DebtToIncomeRatio))+
geom_histogram()
#The distribution is not clear at all. We zoom in and focus on a specific interval of the x axis
gloan = ggplot(loan, aes(DebtToIncomeRatio))+
geom_histogram()+
xlim(0,1)
#The result is much better, but the individual bars in the histogram are still hard to distinguish
#Use color to make them clear
gloan = ggplot(loan, aes(DebtToIncomeRatio))+
geom_histogram(col=I("black"), fill=I("#FFCC22"))+
xlim(0,1)
###Build up the bar chart based on the variables we insterest. We create Bank Card Use as the measurement
#Define "Mild": The value lower than 0.25 quantile in variable of Bank Card Utilization
loan$BankCardUse[loan$BankCardUtilization<quantile(loan$BankcardUtilization, probs=0.25, "na.rm"=TRUE)] <- "Mild"
#Define "Medium": The 0.25 =< value < 0.5 in quantile in of Bank Card Utilization
loan$BankCardUse[loan$BankCardUtilization>=quantile(loan$BankcardUtilization, probs=0.25, "na.rm"=TRUE) &
(loan$BankCardUtilization<quantile(loan$BankCardUtilization, probs=0.5, na.rm=TRUE))] <- "Medium"
#Define "Heavy": The value >= 0.5 quantile in variable of Bank Card Utilization
loan$BankCardUse[loan$BankCardUtilization>=quantile(loan$BankcardUtilization, probs=0.5, "na.rm"=TRUE)] <- "Heavy"
#Define "Super": The value >= 1 quantile in variable of Bank Card Utilization
loan$BankCardUse[loan$BankCardUtilization>=quantile(loan$BankcardUtilization, probs=1, "na.rm"=TRUE)] <- "Super"
#Make the 4 variables in one
loan$BankCardUse = factor(loan$BankCardUse, levels = c("Mild", "Medium", "Heavy", "Super"))
#Draw he bar chart based on these 4 varaibles we defined
p = ggplot(subset(!is.na(loan$BankCardUse), aes(loan$BankCardUse)))+
geom_bar(color = I('black'),fill = I('#FFBB66'))
###Result: Something wrong happened in <- "xxx" and hence the result of bar chart can not be shown
###Preprocessing data by transforming chr. into date
loan$LoanOriginationDate = as.Date(loan$LoanOriginationDate)
loan$ListingCreationDate = as.Date(loan$ListingCreationDate)
loan$DateCreditPulled = as.Date(loan$DateCreditPulled)
#Measuring Loan Credit of customer by the median of upper and lower credit loan credit index
loan$CreditScore = (loan$CreditScoreRangeLower + loan$CreditScoreRangeUpper)/2
#Preprocessing
#将两个衡量信用等级的数据转换 >> From "chr" to "ord.factor" that is shown in order where NC < HR < E<...<AA
loan$CreditGrade = ordered(loan$CreditGrade, levels = c("NC","HR","E","D","C","B","A","AA"))
loan$ProsperRating..Alpha. = ordered(loan$ProsperRating..Alpha., levels = c("HR","E","D","C","B","A","AA"))
#Seperate data into 2 parts based on 2009-07-01
#Create a new variable Phase in loan
loan$Phase[loan$LoanOriginationDate > "2009-07-01"] <- "After 2009-07-01"
loan$Phase[loan$LoanOriginationDate < "2009-07-01"] <- "Before 2009-07-01"
#Check loan$Phase and loan$LoanOriginationDate
head(loan$LoanOriginationDate, n=50)
head(loan$Phase, n=50) #No actual date were shown but only shwon in two types: Before and After 2009-07-01
#对之前未在prosper的客户建立库(数据解释:0或NA是未使用过prosper的客户,反之是使用过的)
#Create a new variable Customer in loan
loan$Customer[loan$TotalProsperLoans > 0] <- "Previous Customer"
loan$Customer[loan$TotalProsperLoans == 0 ] <- "New Customer"
loan = replace_na(loan, replace = list(Customer = "New Customer"))
loan$Customer = factor(loan$Customer)
###客戶信用等級探索
#Create a credit score as variable measuring credit level
loan$CreditScore = (loan$CreditScoreRangeLower + loan$CreditScoreRangeUpper)/2
#Visualize the credit score by bar chart
p = ggplot(loan, aes(loan$CreditScore))+
geom_bar(col=I("black"), fill = I("#00AA55"))
#Focus on the specific interval to see it clearly
p = ggplot(loan, aes(loan$CreditScore))+
geom_bar(col=I("black"), fill = I("#00AA55"))+xlim(300,950)
#Add the label on the chart graph
p = ggplot(loan, aes(loan$CreditScore))+
geom_bar(col=I("black"), fill = I("#00AA55"))+xlim(300,950)
+ggtitle("Customer Credit Score", "Credit Score") #Title and Subtitle(Suggest to do so so that we can report it in clear way)
#Note: subset(): 從data frame中挑選符合某個條件的數據/列
#挑選loan資料中Loan Origination Date早於2009-07-01的數據,查看資料中這些人的credit score
q = ggplot(data = subset(loan, LoanOriginationDate < "2009-07-01"), aes(CreditGrade))+
#We may not use loan$LoanOriginationDate in fn. subset and loan$CreditGrade in aes of ggplot
geom_bar(col=I("black"), fill = I("orange"))+
scale_x_discrete(limits = c("NC","HR","E","D","C","B","A","AA"))
#Let the bar shown in order from NC to AA (instead of default: AA to NC )
+ggtitle("Consumer Credit Grade before 2009-07-01")
#挑選loan資料中Loan Origination Date晚於2009-07-01的數據,查看資料中這些人的credit socre
q2 = ggplot(data = subset(loan, LoanOriginationDate > "2009-07-01"), aes(ProsperRating..Alpha.))+
geom_bar(col = I("black"), fill = I("blue"))+
scale_x_discrete(limits = c("HR","E","D","C","B","A","AA"))
+ggtitle("Consumer Credit Grade after 2009-07-01")
#Show three graphs in one graph on device
grid.arrange(p, q, q2)
###Delinquencies
#Use density plot to demonstrate continuous variable
delin = ggplot(loan, aes(loan$DelinquenciesLast7Years))+
geom_density(col = I("black"), fill = I("yellow"))
#We can focus on the specific interval based on the way of setting quantile
delin = ggplot(loan, aes(loan$DelinquenciesLast7Years))+
geom_density(col = I("black"), fill = I("yellow"))+
scale_x_continuous(limits = c(-1, quantile(loan$DelinquenciesLast7Years, probs = 0.25, na.rm = TRUE)))
#Probs表第x百分位數的值,此函數表示呈現-1到資料中第25%的值的分布
delin = ggplot(loan, aes(loan$DelinquenciesLast7Years))+
geom_density(col = I("black"), fill = I("yellow"))+
scale_x_continuous(limits = c(-1, quantile(loan$DelinquenciesLast7Years, probs = 0.5, na.rm = TRUE)))
delin = ggplot(loan, aes(loan$DelinquenciesLast7Years))+
geom_density(col = I("black"), fill = I("yellow"))+
scale_x_continuous(limits = c(-1, quantile(loan$DelinquenciesLast7Years, probs = 0.75, na.rm = TRUE)))
delin = ggplot(loan, aes(loan$DelinquenciesLast7Years))+
geom_density(col = I("black"), fill = I("yellow"))+
scale_x_continuous(limits = c(-1, quantile(loan$DelinquenciesLast7Years, probs = 0.95, na.rm = TRUE)))
###Show the pattern of Borrower Rate and Term of loan
#Borrower Rate
b = ggplot(loan, aes(BorrowerRate))+
geom_histogram(col = I("black"), fill = I("blue"))
#Term
t = ggplot(loan, aes(Term))+
geom_bar(col = I("black"), fill = I("yellow"))
##0513 ch07 ppt 24
|
9c079b97bc63184beb241da1bc6923580c7966b0 | 79eb7c6958b247770731ee20a5d9be525d8f5ed0 | /exercises/practice/tournament/.meta/example.R | 7505465aa6081e0412782699f8e1b4be5d3d9731 | [
"CC-BY-SA-4.0",
"CC-BY-3.0",
"CC-BY-4.0",
"MIT"
#' Tally match results into a league standings table.
#'
#' Each element of `input` is expected to be "home;away;outcome", where
#' outcome is one of "win" (first-listed team won), "loss" (first-listed
#' team lost) or "draw". Malformed entries (wrong field count or an
#' unrecognised outcome) are silently skipped, as in the original.
#'
#' @param input Character vector of semicolon-separated match results.
#' @return A data.frame with columns Team, MP (matches played), W, D, L
#'   and P (3 points per win, 1 per draw), sorted by points descending
#'   then team name ascending, with row names "1".."n".
tournament <- function(input) {
  # Build one small two-row data.frame per valid match and bind them all
  # at once at the end. This avoids the O(n^2) rbind-in-a-loop growth of
  # the previous version, removes the "tempTeam" sentinel-row hack, and
  # keeps the stat columns numeric throughout (no character round trip).
  per_match <- vector("list", length(input))
  for (i in seq_along(input)) {
    fields <- unlist(strsplit(input[[i]], ";"))
    # Skip anything that is not exactly "team;team;outcome" with a
    # recognised outcome.
    if (length(fields) != 3 || !(fields[3] %in% c("draw", "win", "loss"))) {
      next
    }
    per_match[[i]] <- if (fields[3] == "draw") {
      data.frame(Team = c(fields[1], fields[2]),
                 MP = 1, W = 0, D = 1, L = 0, P = 1)
    } else {
      # "win" means the first-listed team won; "loss" means the second did.
      winner <- if (fields[3] == "win") fields[1] else fields[2]
      loser  <- if (fields[3] == "win") fields[2] else fields[1]
      data.frame(Team = c(winner, loser),
                 MP = 1, W = c(1, 0), D = 0, L = c(0, 1), P = c(3, 0))
    }
  }
  # NULL entries (skipped malformed rows) are dropped by rbind.
  results <- do.call(rbind, per_match)
  # Sum every statistic per team, then rank by points, ties broken by name.
  output <- aggregate(. ~ Team, results, sum)
  output <- output[order(-output$P, output$Team), ]
  row.names(output) <- seq_len(nrow(output))
  output
}
|
6b6c6423151d3a771d6a2438e8f1c1cbd9a5df37 | cd7a55841d48cc3393f449e3955bff13820369d5 | /R/w45_2020.R | 7a449fc45b70ff4bf898713e40c2c8e14f18a21d | [] | no_license | gejielin/-TidyTuesday | 9d03bec78ea0d49350278985bbab8c64d9c17ffe | aa4ceea316ad4e14342be9c0c92f149d19b89780 | refs/heads/master | 2023-02-28T02:31:36.885017 | 2021-02-04T15:13:14 | 2021-02-04T15:13:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,636 | r | w45_2020.R | library(tidyverse)
library(extrafont)
library(ggtext)
library(glue)
library(here)
ikea <- read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-11-03/ikea.csv')
ikea.clean <- ikea %>%
mutate(designer = strsplit(as.character(designer), "/")) %>%
unnest(designer) %>%
filter(!designer %in% "IKEA of Sweden") %>%
filter(!str_detect(designer, "\\d")) %>%
filter(!str_detect(designer, "cabinets")) %>%
filter(!str_detect(designer, "glass")) %>%
filter(!str_detect(designer, "handles")) %>%
mutate(
designer = case_when(
designer == "Andreas Fredriksson" ~ "A Fredriksson",
designer == "Ebba Strandmark" ~ "E Strandmark",
designer == "Anna Efverlund" ~ "A Efverlund",
designer == "Anna Palleschitz" ~ "A Palleschitz",
designer == "Annie Huldén" ~ "A Huldén",
designer == "Carina Bengs" ~ "C Bengs",
designer == "Carl Öjerstam" ~ "C Öjerstam",
designer == "Charlie Styrbjörn" ~ "C Styrbjörn",
designer == "Chenyi Ke" ~ "C Ke",
designer == "Chris Martin" ~ "C Martin",
designer == "David Wahl" ~ "D Wahl",
designer == "E Lilja Löwenhielm" ~ "E L Löwenhielm",
designer == "Ehlén Johansson" ~ "E Johansson",
designer == "Elizabet Gutierrez" ~ "E Gutierrez",
designer == "Eva Lilja Löwenhielm" ~ "E L Löwenhielm",
designer == "Eva Schildt" ~ "E Schildt",
designer == "Francis Cayouette" ~ "F Cayouette",
designer == "Fredriksson" ~ "A Fredriksson",
designer == "Gillis Lundgren" ~ "G Lundgren",
designer == "Gustav Carlberg" ~ "G Carlberg",
designer == "Henrik Preutz" ~ "H Preutz",
designer == "Johan Kroon" ~ "J Kroon",
designer == "Johanna Asshoff" ~ "J Asshoff",
designer == "Jomi Evers" ~ "J Evers",
designer == "Jon Karlsson" ~ "J Karlsson",
designer == "Johanna Jelinek" ~ "J Jelinek",
designer == "Jonas Hultqvist" ~ "J Hultqvist",
designer == "Jonas" ~ "J Hultqvist",
designer == "Jooyeon Lee" ~ "J Lee",
designer == "Karl Malmvall" ~ "K Malmvall",
designer == "Lars Norinder" ~ "La Norinder",
designer == "Lisa Hilland" ~ "L Hilland",
designer == "Hilland" ~ "L Hilland",
designer == "Lisa Norinder" ~ "Li Norinder",
designer == "Lisel Garsveden" ~ "L Garsveden",
designer == "Lycke von Schantz" ~ "L von Schantz",
designer == "Magnus Elebäck" ~ "M Elebäck",
designer == "Maja Ganszyniec" ~ "M Ganszyniec",
designer == "Malin Unnborn" ~ "M Unnborn",
designer == "Marcus Arvonen" ~ "M Arvonen",
designer == "Maria Vinka" ~ "M Vinka",
designer == "Mia Lagerman" ~ "M Lagerman",
designer == "Mikael Axelsson" ~ "M Axelsson",
designer == "Mikael Warnhammar" ~ "M Warnhammer",
designer == "Monika Mulder" ~ "M Mulder",
designer == "Nada Debs" ~ "N Debs",
designer == "Nicholai Wiig Hansen" ~ "N W Hansen",
designer == "Niels Gammelgaard" ~ "N Gammelgaard",
designer == "Nike Karlsson" ~ "N Karlsson",
designer == "Noboru Nakamura" ~ "N Nakamura",
designer == "Ola Wihlborg" ~ "O Wihlborg",
designer == "Olle Lundberg" ~ "O Lundberg",
designer == "Paulin Machado" ~ "P Machado",
designer == "Sarah Fager" ~ "S Fager",
designer == "Synnöve Mork" ~ "S Mork",
designer == "Thomas Sandell" ~ "T Sandell",
designer == "Tina Christensen" ~ "T Christensen",
designer == "Tom Dixon" ~ "T Dixon",
designer == "Tord Björklund" ~ "T Björklund",
designer == "Virgil Abloh" ~ "V Abloh",
designer == "Wiebke Braasch" ~ "W Braasch",
TRUE ~ designer)) %>%
mutate(
category = case_when(
category == "Bar furniture" ~ "Bar Furniture",
category == "Bookcases & shelving units" ~ "Bookcases & Shelving",
category == "Cabinets & cupboards" ~ "Cabinets & Cupboards",
category == "Café furniture" ~ "Café Furniture",
category == "Chests of drawers & drawer units" ~ "Drawers (Units/Chests)",
category == "Children's furniture" ~ "Furniture: Children",
category == "Nursery furniture" ~ "Furniture (Nursery)",
category == "Outdoor furniture" ~ "Furniture (Outdoor)",
category == "Room dividers" ~ "Room Dividers",
category == "Sideboards, buffets & console tables" ~ "Sideboards, Buffets & Console Tables",
category == "Sofas & armchairs" ~ "Sofas & Armchairs",
category == "Tables & desks" ~ "Tables & Desks",
category == "TV & media furniture" ~ "TV & Media Furniture",
TRUE ~ category)) %>%
count(designer, category) %>%
group_by(designer) %>%
mutate(sum = sum(n)) %>%
filter(sum >=10) %>%
group_by(category) %>%
mutate(unique = length(unique(designer))) %>%
mutate(
designer = glue("{designer} <span style='font-size:6pt;color:#FFDA1A;'> ({sum}) </span>")
)
ggplot(ikea.clean, aes(reorder(category, desc(unique)), reorder(designer, sum), size = n)) +
geom_point(colour = "#FFDA1A") +
labs(title = "THE IKEA FURNITURE CATÅLOGUE",
subtitle = "THE KEY DESIGNERS",
caption = "@CSHoggard | #TidyTuesday Week 45 | Source: Kaggle / IKEA",
size = "Number of Designs") +
scale_y_discrete(expand = c(0,0.8)) +
theme_minimal() +
theme(
plot.margin = margin(30,20,30,20),
plot.background = element_rect(colour = "#0051BA", fill = "#0051BA"),
plot.title = element_text(family = "Noto Sans", face = "bold", colour = "white", size = 28, hjust = 0.5, margin = margin(20,0,20,0)),
plot.title.position = "plot",
plot.subtitle = element_text(family = "Noto Sans", face = "bold", colour = "white", size = 18, hjust = 0.5, margin = margin(0,0,20,0)),
plot.caption = element_text(family = "Noto Sans", face = "bold", colour = "white", size = 11, hjust = 0.13, margin = margin(40,0,10,0)),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
axis.text.x = element_text(family = "Noto Sans", face = "bold", colour = "white", size = 10, angle = 90, hjust = 1, vjust = 0.2),
axis.text.y = element_markdown(family = "Noto Sans", colour = "white", size = 8),
panel.grid.major = element_line(colour = "#0064EA"),
legend.position = c(0.4, -0.3),
legend.direction = "horizontal",
legend.text = element_text(family = "Noto Sans", colour = "white", size = 11),
legend.title = element_text(family = "Noto Sans", face = "bold", colour = "white", size = 12))
ggsave("images/Week_45_Ikea.png", plot = last_plot(), width = 210, height = 380, units = "mm", dpi = 400)
|
fe7acff15a4feaa427d737250cb4585e15fae5cd | 13c4547fe1b981a0e93fcec1fc5d117efd011585 | /Single_cell_RNA-seq/invitro_single_cell_RNAseq/pagoda_visualization_KEGG_Aging_Activation.R | 93312257654b08c80fec9dd453a03730586c777f | [] | no_license | dtcdtcdtcdtc/Mahmoudi_et_al_2018 | 5da000052c1b5108c74cafa9d96ce6e5dbbcd047 | abdba9e585aa9510e5677ecb2ff0f7ce7a50c1a1 | refs/heads/master | 2020-09-21T17:40:12.620290 | 2019-07-21T00:12:56 | 2019-07-21T00:12:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,778 | r | pagoda_visualization_KEGG_Aging_Activation.R | library(Biobase)
library(scde)
library(RColorBrewer)
setwd("~/Salah_Katja/SingleCells/Pagoda")
load("../Data/eset.RData")
## load pwpca:
load("Data/pathway.wPCA/pathway.wPCA_KEGG_Aging_Activation_varnorm_knn.error.models.RData")
## load clpca:
load("Data/gene.clusters/gene.clusters_subtract.aspect_varnorm_knn.error.models.RData")
## load varinfo.new:
load("Data/varnorm/subtract.aspect_varnorm_knn.error.models.RData")
## KEGG Pathways:
library(GSA)
filename <- "../Data/KEGG_2016_Aging_Activation.gmt"
gs <- GSA.read.gmt(filename)
## number of gene sets
n <- length(gs$geneset.names)
## create environment
env <- new.env(parent=globalenv())
invisible(lapply(1:n,function(i) {
genes <- as.character(unlist(gs$genesets[i]))
name <- as.character(gs$geneset.names[i])
assign(name, genes, envir = env)
}))
go.env <- env
class(go.env)
## De novo gene sets:
#load("Data/de.novo.gene.sets_gene.clusters_subtract.aspect_varnorm_knn.error.models.gmt")
## read in Broad gmt format
library(GSA)
filename <- "Data/de.novo.gene.sets_gene.clusters_subtract.aspect_varnorm_knn.error.models.gmt"
gs <- GSA.read.gmt(filename)
## number of gene sets
n <- length(gs$geneset.names)
## create environment
env <- new.env(parent=globalenv())
invisible(lapply(1:n,function(i) {
genes <- as.character(unlist(gs$genesets[i]))
name <- as.character(gs$geneset.names[i])
assign(name, genes, envir = env)
}))
go.env.de.novo <- env
class(go.env.de.novo)
### Visualize significant aspects of heterogeneity:
## Evaluate statistical significance of the observed overdispersion for each gene set (also de novo gene sets):
pdf("Results/top.aspects_gene.clusters_pathway.wPCA_KEGG_Aging_Activation_subtract.aspect_varnorm_knn.error.models.pdf")
tam <- pagoda.top.aspects(pwpca,
clpca,
return.table = TRUE,
return.genes = TRUE, ## whether set of genes driving significant aspects should be returned
plot = TRUE,
z.score = 1.96)
dev.off()
head(tam)
## Determine overall cell clustering (hclust) based on weighted correlation of genes underlying the top aspects transcriptional heterogeneity.
## For some reason, return.table and return.genes have to be FALSE in order to get the list with all results for clustering:
tam <- pagoda.top.aspects(pwpca,
clpca,
return.table = FALSE,
return.genes = FALSE,
plot = FALSE,
z.score = 1.96)
save(tam,
file = "Data/top.aspects_gene.clusters_pathway.wPCA_KEGG_Aging_Activation_subtract.aspect_varnorm_knn.error.models.RData")
hc <- pagoda.cluster.cells(tam,
varinfo = varinfo.new,
include.aspects = TRUE,
verbose = 1,
return.details = TRUE) ## Whether to return also a the distance matrix and gene values
## clustering cells based on 405 genes and 97 aspect patterns
save(hc,
file = "Data/cluster.cells_return.details_top.aspects_gene.clusters_pathway.wPCA_KEGG_Aging_Activation_subtract.aspect_varnorm_knn.error.models.RData")
## I need to run it again with return.details = FALSE in order to pass it to the plot functions:
rm(hc)
hc <- pagoda.cluster.cells(tam,
include.aspects = TRUE,
varinfo = varinfo.new)
save(hc,
file = "Data/cluster.cells_top.aspects_gene.clusters_pathway.wPCA_KEGG_Aging_Activation_subtract.aspect_varnorm_knn.error.models.RData")
n.clusters <- 5
col.pal <- brewer.pal(n.clusters, "Paired")
col.cols <- rbind(#groups = col.pal[cutree(hc, n.clusters)],
pData(eset)[colnames(tam$xv), "col.mouse.ID"],
pData(eset)[colnames(tam$xv), "col.age"])
dir.create("Results/KEGG_Aging_Activation")
## Reduce redundant aspects part 1:
## Combine pathways/aspects that are driven by the same sets of genes.
## Examines PC loading vectors underlying the identified aspects and clusters based on a product of loading and score correlation.
## Clusters of aspects driven by the same genes are determined based on the distance.threshold and collapsed.
pdf("Results/KEGG_Aging_Activation/reduce.loading.redundancy_top.aspects_gene.clusters_pathway.wPCA_KEGG_Aging_Activation_subtract.aspect_varnorm_knn.error.models.pdf",
width = 10, height = 13)
tamr <- pagoda.reduce.loading.redundancy(tam,
pwpca,
clpca,
cell.clustering = hc,
distance.threshold = 0.01, ## similarity threshold for grouping interdependent aspects, default: 0.01
abs = TRUE, ## Whether to use absolute correlation.
plot = TRUE,
col.cols = col.cols,
cols = colorRampPalette(c("cornflowerblue", "white", "palevioletred4"), ## low neutral high
space = "Lab")(1024),
margins = c(7, 30))
dev.off()
save(tamr,
file = "Data/reduce.loading.redundancy_top.aspects_gene.clusters_pathway.wPCA_KEGG_Aging_Activation_subtract.aspect_varnorm_knn.error.models.RData")
## Extract the names of all aspects (from function 'pagoda.view.aspects'):
top <- Inf
top.aspects <- tam
rcmvar <- apply(top.aspects$xv, 1, var)
vi <- order(rcmvar, decreasing = TRUE)[1:min(length(rcmvar), top)]
top.aspects$xv <- top.aspects$xv[vi, ]
top.aspects$xvw <- top.aspects$xvw[vi, ]
top.aspects$cnam <- top.aspects$cnam[vi]
top.aspects <- gsub("#PC1# ", "", as.character(rownames(top.aspects$xv)))
## Show genes in top pathways:
dir.create("Results/KEGG_Aging_Activation/Heatmaps")
for(i in 1:length(top.aspects)){
aspect.i <- top.aspects[i]
aspect.i.file <- gsub("/", ".", aspect.i)
file.name <- paste0("Results/KEGG_Aging_Activation/Heatmaps/",
aspect.i.file,
".pdf")
pdf(file.name, width = 5, height = 4)
if(grepl("geneCluster.", aspect.i)){
pagoda.show.pathways(pathways = aspect.i,
varinfo = varinfo.new,
goenv = go.env.de.novo,
cell.clustering = hc,
n.genes = 20,
colcols = col.cols,
margins = c(1,5),
show.cell.dendrogram = TRUE,
showRowLabels = TRUE,
showPC = TRUE)
} else {
pagoda.show.pathways(pathways = aspect.i,
varinfo = varinfo.new,
goenv = go.env,
cell.clustering = hc,
n.genes = 20,
colcols = col.cols,
margins = c(1,5),
show.cell.dendrogram = TRUE,
showRowLabels = TRUE,
showPC = TRUE)
}
dev.off()
}
### Correct for: Cell cycle_Homo sapiens_hsa04110, geneCluster.37, gene.cluster.119
## write out the genes in geneCluster.37 and geneCluster.119:
cluster.37 <- get("geneCluster.37", go.env.de.novo)
cluster.119 <- get("geneCluster.119", go.env.de.novo)
write.table(data.frame(geneCluster.37 = cluster.37),
file = "Data/geneCluster.37.csv",
row.names = FALSE,
sep = "\t")
write.table(data.frame(geneCluster.119 = cluster.119),
file = "Data/geneCluster.119.csv",
row.names = FALSE,
sep = "\t")
|
49b3ec3c27d4a24dee977bde6af5e7df7c683f0d | 86ffa3585968eafd21bd52d699569a921e31c0b9 | /man/presentation_commit.Rd | 5a73aee4b6a28f24c1a92012820cd04a366267b4 | [
"MIT"
] | permissive | d3v3l0/gitdown | 66e99599111620b580f2a38b7e83a7d36214e68c | 0eb068cbbffccb3f007fc89f86f7ddeee22c3ed4 | refs/heads/master | 2022-02-24T05:43:02.920903 | 2019-09-28T19:33:05 | 2019-09-28T19:33:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 412 | rd | presentation_commit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{presentation_commit}
\alias{presentation_commit}
\title{Presentation of commit}
\usage{
presentation_commit(write_in, data, i)
}
\arguments{
\item{write_in}{function to write in good place}
\item{data}{data from get_commits_pattern}
\item{i}{line number to present the commit}
}
\description{
Presentation of commit
}
|
679fdedf325fe6e78b65e1ed88e03c5126d9dab7 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /blorr/inst/testfiles/blr_pairs_cpp/libFuzzer_blr_pairs_cpp/blr_pairs_cpp_valgrind_files/1609956826-test.R | bc8bcf82bf595d8f5a0bdb25b5b953d8bd86b1b8 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,903 | r | 1609956826-test.R | testlist <- list(x = Inf, y = c(NaN, NaN, 7.27386870933034e-313, 0, 0, 2.77447923392645e+180, 2.77448001762441e+180, 2.77448013398363e+180, 8.40476764068988e-315, 2.0971286527899e-314, 1.42740904212728e+181, 3.12372766183029e-12, -6.67115915940654e+306, -5.82900682309329e+303, -1.08193841662861e+307, 6.40610294902753e-145, 5.48674796870944e-310, -2.6355489919319e-82, 3.53374805464393e+72, NA, 0, -1.491667821162e-154, -2.4220830504079e+24, -1.34765550943381e+28, 2.26379555505062e-308, -4.19987381709656e-140, 7.20834935387592e-304, 7.05604443522227e-158, 3.39833555023577e+178, 0, 3.07908049665631e-146, 1.42760931252157e+181, 5.07597049803999e-299, 3.04572577107473e-115, 6.71868006059153e-308, -2.01345623369759e-193, -5.17325432914172e+25, 9.36335417439512e-97, 8.38205310386356e-309, 3.13329174323522e-314, 1.42760931482206e+181, 1.3860909794621e-309, 8.85449459061078e-159, 8.37218665953851e-315, 3.53369412955677e+72, 2.76383203481844e-259, 3.13703838348227e-115, 2.45008553842286e+35, 2.4937306366528e-317, -3.7305643668306e+305, NaN, 3.25263802794957e+178, 9.45476457624714e-310, NaN, 7.54642448019094e+179, 1.26707338462669e-279, NaN, -3.749311022013e-253, 3.32306998945247e+37, 4.8993279890826e-306, -4.80948513475625e+306, 5.48684425442826e-310, 0, 0, -2.43339720278629e-209, 5.50339608387342e-310, 0, 1.390671161567e-308, -4.73574659118105e+305, NaN, 5.0453059431966e+182, 7.03090767247763e+34, 
2.4173705217461e+35, 9.46922070913769e-307, -6.67113314602392e+306, -5.82852024984172e+303, 8.85449539944218e-159, 2.14327978499502e-312, NaN, NaN, 7.29112201955627e-304, 1.66906428131715e-308, 9.36372775213068e-97, 8.85449539937594e-159, 2.5898117484415e-307, 1.03211490025416e-296, 5.2181291954879e+279, -2.42208305039457e+24, -1.3476552055185e+28, NaN, 8.90771002988687e-159, 9.34665314051611e-307, 0))
result <- do.call(blorr:::blr_pairs_cpp,testlist)
str(result) |
a49f3d125043cea4941e8158cdd65f09600c902b | 334c4deecc96b8100e33b1380e19be0ae7d8da2c | /run_evaluation_times.R | 2424d7121fea74d90d0314ced98d7eff7c26ee8d | [] | no_license | Sciathlon/running_races_analysis | c05b4dee1419c492d945044046e1bb4bd149a3fb | ad3a0e9e23248497b579787c798daaf76c79cec8 | refs/heads/master | 2020-03-18T05:01:09.031084 | 2018-07-07T15:09:46 | 2018-07-07T15:09:46 | 134,319,376 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,042 | r | run_evaluation_times.R | library(devtools)
## Pulls an athlete's activity history from the Strava API (via rStrava)
## and explores distance/time trends with ggplot2 and sqldf.
## NOTE: the credentials below are redacted placeholders ('beeep!').
#devtools::install_github('fawda123/rStrava') #for installing the package
library(rStrava)
library(ggplot2)
library(curl)
library(sqldf)
# loading the athlete data (the athlete id is hard-coded further down)
athlete_data <- athl_fun('beeep!', trace = FALSE)
app_name <- 'beeep!' # chosen by user
app_client_id <- 'beeep!' # an integer, assigned by Strava
app_secret <- 'beeeeep!' # an alphanumeric secret, assigned by Strava
# create the authentication token
stoken <- httr::config(token = strava_oauth(app_name, app_client_id, app_secret))
# monthly activity summary for this athlete id
barplot(athlete_data$`21915362`$monthly)
# get activities, get activities by location, plot
my_acts <- get_activity_list(stoken)
# Flatten the activity list into one data frame:
# distance in km, moving time rounded to whole minutes, dates bucketed by month.
elevation_gain <- unlist(lapply(my_acts, function(x) x$total_elevation_gain))
distance <- unlist(lapply(my_acts, function(x) x$distance))
distance <- distance/1000
times <- unlist(lapply(my_acts, function(x) x$moving_time))
times <- round(times/(60), 0)
dates <- unlist(lapply(my_acts, function(x) x$start_date))
dates <- format(as.Date(dates), "%Y-%m")
df <- data.frame(dates, distance, times, elevation_gain)
# distance vs time scatter, coloured by month, then with linear fits
ggplot(df, aes(x = distance, y=times, col=dates)) + geom_point()
ggplot(df, aes(x = distance, y=times)) + stat_summary(fun.data=mean_cl_normal) + geom_smooth(method='lm')
ggplot(df, aes(x = distance, y=times, col=dates)) + stat_summary(fun.data=mean_cl_normal) + geom_smooth(method='lm')
ggplot(df, aes(x = distance, y=dates)) + geom_point()
# longest run per month
df_max <- sqldf("select max(distance) as max_dist, dates from df group by dates")
ggplot(df_max, aes(x = dates, y=max_dist, group=1)) + geom_point() + stat_summary(geom="line")
# number of runs and total monthly distance, overlaid on one plot
df_num_amount <- sqldf("select count(distance) as num_run,dates,sum(distance) as sum_dist from df group by dates")
ggplot(df_num_amount, aes(x = dates)) + geom_point(aes(x = dates, y=num_run)) + geom_point(aes(x = dates, y=sum_dist)) + geom_line(aes(x = dates, y=num_run, group=1, col="red")) + geom_line(aes(x = dates, y=sum_dist, group=1, col="blue")) + theme(legend.position="none", axis.title.y=element_blank())
# df_round is currently unused below; kept for interactive exploration
df_round <- df
df_round$distance <- round(df_round$distance, 0)
# Bucket runs by length. NOTE(review): runs of exactly 5, 10 or 15 km fall
# between buckets because of the strict </> comparisons -- left as in the
# original queries; confirm whether boundaries should be inclusive.
sub_5 <- sqldf("select count(distance) as dist_count from df where distance < 5")
sub_10 <- sqldf("select count(distance) as dist_count from df where distance > 5 and distance < 10")
sub_15 <- sqldf("select count(distance) as dist_count from df where distance > 10 and distance < 15")
sub_20 <- sqldf("select count(distance) as dist_count from df where distance > 15 and distance < 20")
# FIX: this previously repeated the 15-20 km filter (copy-paste error),
# double-counting that bucket and never counting runs of 20 km or more.
over_20 <- sqldf("select count(distance) as dist_count from df where distance >= 20")
df_partition <- rbind(sub_5, sub_10, sub_15, sub_20, over_20)
row.names(df_partition) <- c('<5', '<10', '<15', '<20', '>=20')
# NOTE(review): the second scale_x_discrete() replaces the first (ggplot2
# warns about duplicate scales); only the labels= call takes effect.
ggplot(df_partition, aes(x= row.names(df_partition),y = dist_count)) + geom_col() + scale_x_discrete(breaks=c(0, 5, 10, 15, 20)) + scale_x_discrete(labels=c("<5", "<10","<15", "<20", ">=20")) + ggtitle("Number of runs of different lengths") + theme(axis.title.x=element_blank())
# monthly distance distributions
ggplot(df, aes(x = dates, y=distance)) + geom_boxplot()
|
04502cf168bbed01a2ab2a8114cc2b2c4df7e445 | a84cc9c6af9c81b23576be27bff610cd00536ff1 | /man/bh_slack_correct.Rd | c4793542f0375b74c1df50e7ba442fdbf4d87a30 | [] | no_license | yadbor/bluer | 3f0ca8e8c9018309de1dd4d6d633c294accecea7 | 18d1be170f0bb9eae01392423cdc341213d50702 | refs/heads/master | 2023-01-23T08:42:22.839075 | 2023-01-18T07:17:18 | 2023-01-18T07:17:18 | 174,983,637 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 982 | rd | bh_slack_correct.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bluehill_functions.R
\name{bh_slack_correct}
\alias{bh_slack_correct}
\title{Slack Correction}
\usage{
bh_slack_correct(DT)
}
\arguments{
\item{DT}{a \code{data.table} with \code{Load} and \code{Extension} channels to be corrected.}
}
\value{
The original series with any initial slack portion removed.
}
\description{
Remove any slack at the start of a test (or cycle)
}
\details{
The Bluehill version sets the limits at 2\% and 80\% of maximum then uses
AutoSlope to get the estimated slope and sets the new zero to where that line
intersects y = 0.
This version just fits a single line to the range from 2\% to 80\% of maximum
and uses the intercept from that to define the zero point. Rows before that point are
removed and the first row subtracted from both \code{Time} and \code{Extension}
so that they start from zero. \code{Load} is unchanged.
Really just a wrapper for \code{trim_slack()}.
}
|
fbc4bb669338e254957be119d1d42bdbda87541f | 72d6b9c39f97b1eaa479bce4d6ee3e8dc62cdd6c | /tests/testthat/test-getStratifiesBinaryData.R | 19b76b5bbe9a1daaa14de51f748fbced349a753e | [] | no_license | philippstats/RRmisc | 35877668fdbde8586c5fae7fd03762d6daebbd06 | 08df544ed10e1ccf8f4694e36baa7c61dde232bf | refs/heads/master | 2020-04-10T14:13:08.763526 | 2016-07-19T16:12:29 | 2016-07-19T16:12:29 | 51,521,084 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 523 | r | test-getStratifiesBinaryData.R | # test
# Unit test for getStratifiedBinaryData().
# Builds a binary classification task from the BreastCancer data set and
# checks that stratified subsampling returns the requested number of
# observations with an even class split.
library(testthat)
library(mlbench)
library(mlr)
###############################################################################
test_that("check basic properties", {
  # BreastCancer has a binary outcome in column "Class"; the Id column is
  # an identifier rather than a feature, so it is dropped before building
  # the mlr classification task.
  data(BreastCancer, package = "mlbench")
  df = BreastCancer
  df$Id = NULL
  tsk = makeClassifTask(id = "BreastCancer", data = df, target = "Class")
  # Draw a stratified sample of 100 observations from the task.
  t = getStratifiedBinaryData(task = tsk, size = 100L)
  # Tabulate the sampled targets: r holds the per-class counts.
  r = as.vector(table(getTaskTargets(t)))
  # Expect exactly 100 rows, split 50/50 across the two classes -- i.e.
  # the stratified sampler is expected to balance the classes exactly.
  expect_equal(getTaskSize(t), 100L)
  expect_equal(r, c(50L, 50L))
})
|
0a56b845a7453ab6833c735c44de2e6731e82360 | 974b2751d8d447b086d75eb35f5316f0a2801b4c | /codes/mod.mediation.study2.R | 33443cd51ecbe648dce370916611dc2a5936b088 | [] | no_license | daij9412/whtdisid | 5cb186c875555cf39613bcdadc2434774fff91e6 | 7c1abb5442763e84cb535ccdf5105c99199b08a7 | refs/heads/master | 2020-08-09T07:47:01.030940 | 2019-10-14T06:21:34 | 2019-10-14T06:21:34 | 214,039,825 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,467 | r | mod.mediation.study2.R | ###########################################################
## Below is the code used for moderated mediation tests in#
## Study 2, with DV as intentions to support for racial ##
## equity. Code can be used for replication along with ##
## redacted datasets.
## Note: Datasets will not be released to public due to ##
## confidentiality concern. Researchers interested in the #
## redacted or full datasets could contact the research ##
## team. ##
###########################################################
#####################
##load required packages
require(tidyverse); require(dplyr); require(readr); require(lavaan); require(processr)
###########load data#################
####################
## Study 2 ##
####################
##########################
## test using lavaan ##
##########################
##Note: b1a3 = Index of moderated mediation, (a1b1+b1a3 -1) and (a1b1+b1a3 1) = conditional indirect effects
# Build the analysis frame: effect-coded condition, mean-centered mediator,
# and the moderator-by-condition product terms used in the SEM a-paths below.
study2_contrast_mod <- study2_contrast %>%
  # Effect-code condition: "Actual threat" = -1, the comparison condition = +1.
  mutate(condition_contrast = ifelse(condition_actual == "Actual threat", -1, 1)) %>%
  # Mean-center the White-discrimination composite (scale=F centers only).
  mutate(WhtDis_comp_centered = scale(WhtDis_comp, scale=F)) %>%
  # Interaction (product) terms: ideology x condition and mediator x condition.
  mutate(LibCon_condition = LibCon_centered * condition_contrast,
         DisId_condition = WhtDis_comp_centered * condition_contrast)
#head(study2_contrast,5)
#tail(study2_contrast,5)
##Close-ended Individual advocacy for racial equity
mod_IndivAct <- " #a path
WhtDis_comp_centered ~ 1 + a1 * LibCon_centered
WhtDis_comp_centered ~ a2 * condition_contrast
WhtDis_comp_centered ~ a3 * LibCon_condition
# b path
IndivAction ~ b1 * WhtDis_comp_centered
# c prime path
IndivAction ~ 1 + cp * LibCon_centered
# index of moderated mediation and conditional indirect effects
b1a3 := b1 * a3
actualss := a1 + a3 * -1
highss := a1 + a3 * 1
actual := a1 * b1 + b1a3 * -1
high := a1 * b1 + b1a3 * 1"
set.seed(1234)
sem.fit_IndivAct <- sem(mod_IndivAct, data = study2_contrast_mod, se = "bootstrap", bootstrap = 5000, likelihood = "wishart")
parameterestimates(sem.fit_IndivAct, boot.ci.type = "bca.simple", standardized = TRUE)
##Support for policies designed to help racially minoritized groups
#policy support
mod_PolicySupport <- " #a path
WhtDis_comp_centered ~ 1 + a1 * LibCon_centered
WhtDis_comp_centered ~ a2 * condition_contrast
WhtDis_comp_centered ~ a3 * LibCon_condition
# b path
PolicySupport ~ b1 * WhtDis_comp_centered
# c prime path
PolicySupport ~ 1 + cp * LibCon_centered
# index of moderated mediation and conditional indirect effects
b1a3 := b1 * a3
actualss := a1 + a3 * -1
highss := a1 + a3 * 1
actual := a1 * b1 + b1a3 * -1
high := a1 * b1 + b1a3 * 1"
set.seed(1234)
sem.fit_PolicySupport <- sem(mod_PolicySupport, data = study2_contrast_mod, se = "bootstrap", bootstrap = 5000, likelihood = "wishart")
parameterestimates(sem.fit_PolicySupport, boot.ci.type = "bca.simple", standardized = TRUE)
#Open-ended Individual advocacy for racial equity
mod_OpenIndiv <- " #a path
WhtDis_comp_centered ~ 1 + a1 * LibCon_centered
WhtDis_comp_centered ~ a2 * condition_contrast
WhtDis_comp_centered ~ a3 * LibCon_condition
# b path
IndivActionOpen ~ b1 * WhtDis_comp_centered
# c prime path
IndivActionOpen ~ 1 + cp * LibCon_centered
# index of moderated mediation and conditional indirect effects
b1a3 := b1 * a3
actualss := a1 + a3 * -1
highss := a1 + a3 * 1
actual := a1 * b1 + b1a3 * -1
high := a1 * b1 + b1a3 * 1"
set.seed(1234)
sem.fit_OpenIndiv <- sem(mod_OpenIndiv, data = study2_openInd, se = "bootstrap", bootstrap = 5000, likelihood = "wishart")
parameterestimates(sem.fit_OpenIndiv, boot.ci.type = "bca.simple", standardized = TRUE)
#############################
### cross-validating ###
### using processr ###
#############################
#Close-ended Individual advocacy for racial equity
set.seed(1234)
mod7.Ind <- model7(iv="LibCon_centered", dv = "IndivAction", med = "WhtDis_comp_centered", mod = "condition_contrast", study2_contrast)
kable(mod7.Ind)
#Policy support
set.seed(1234)
mod7.Policy <- model7(iv="LibCon_centered", dv = "PolicySupport", med = "WhtDis_comp_centered", mod = "condition_contrast", study2_contrast)
kable(mod7.Policy)
#Open-ended Individual advocacy for racial equity
set.seed(1234)
mod7.openInd <- model7(iv="LibCon_centered", dv = "IndivActionOpen", med = "WhtDis_comp_centered", mod = "condition_contrast", study2_openInd)
kable(mod7.openInd)
|
d758fc57b306c3c2e0568dfe844ec2ddf3e31239 | ca2802548f8a961ca6e0fe57d7906f912eb3f221 | /R/optimize_variables.R | 013d6cf7f1cda45cd8079ccfc328c0f5002ace9c | [] | no_license | O1sims/FootballStats | 4dca2ebb135922f5ca3beafa7c3206022faea490 | 266d476f5f15d57960f8715d33a766f8fa091daa | refs/heads/master | 2023-06-08T08:48:39.646688 | 2021-06-29T09:07:44 | 2021-06-29T09:07:44 | 381,303,716 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,327 | r | optimize_variables.R | #' @title Optimize Variables
#'
#' @export
optimize_variables <- function(total.metrics, GRIDS, optimizeModels = TRUE,
overwrite = FALSE, types = c("xgboost", "neuralnetwork"),
saveModels = c(), colNames = list(localID = "localID", awayID = "awayID")) {
# Create directory
if (saveModels %>% length %>% `>`(0)) {
modelDir <- getwd() %>% paste0("/mymodels/")
if (modelDir %>% dir.exists %>% `!`()) modelDir %>% dir.create
}
# Only save models when requested
if (optimizeModels) {
if (saveModels %>% length %>% `>`(0)) { # nocov start
cat(" ## If you want to save any models, then set optimizeModels to FALSE! \n\n")
saveModels <- c()
} # nocov end
} else {
saveModels %<>% intersect(types)
}
# Must supply a valid type
if (types %>% length %>% `>`(0) %>% `!`()) stop("Must supply some _types_")
# Redfine list from GRIDS
DAYS <- GRIDS$DAYS
GRID_PTS <- GRIDS$GRID_PTS
GRID_BOUND <- GRIDS$GRID_BOUND
DECAY <- GRIDS$DECAY
TOTAL_PERC <- GRIDS$TOTAL_PERC
# Only write to files if need be
if (optimizeModels) { # nocov start
# Make sure the base directory exists
resultsDir <- getwd() %>% paste0("/results_optimization/")
if (resultsDir %>% dir.exists %>% `!`()) resultsDir %>% dir.create
# Read in the existing data frame of results here
resultsFile <- resultsDir %>% paste0("results.csv")
if (resultsFile %>% file.exists) {
# Read the CSV
existing.topscore <- read.csv2(
file = resultsFile,
header = TRUE,
sep = ',',
stringsAsFactors = FALSE
)[ , 1:6]
# Get matches with existing data frame
totMatches <- existing.topscore %>%
footballstats::get_grid_matches(
fullGrid = expand.grid(DAYS, GRID_PTS, GRID_BOUND, DECAY, TOTAL_PERC, types)
)
} else {
totMatches <- 0
}
} else {
totMatches <- 0
} # nocov end
# Define neural network input list
NN <- list(
REP = GRIDS$NN_REP,
THRESH = GRIDS$NN_THRESH %>% min(0.3)
)
# Define XGBoost input list
XGB <- list(
ROUNDS = GRIDS$XG_ROUNDS,
DEPTH = GRIDS$XG_DEPTH,
ETA = GRIDS$XG_ETA,
GAMMA = GRIDS$XG_GAMMA,
BOUNDARY = GRIDS$XG_BOUNDARY
)
# Initialise values for generating and tracking results
bestResult <- icount <- 0
totalOps <- (DAYS %>% length) *
(GRID_PTS %>% length) *
(GRID_BOUND %>% length) *
(DECAY %>% length) *
(TOTAL_PERC %>% length)
totalOps %<>% `-`(totMatches)
# Load up the odds frame
odds.frame <- footballstats::odds.frame
# Start looping the grid
for (i in 1:(DAYS %>% length)) {
for (j in 1:(GRID_PTS %>% length)) {
for (k in 1:(GRID_BOUND %>% length)) {
for (l in 1:(DECAY %>% length)) {
for (m in 1:(TOTAL_PERC %>% length)) {
# Check for any matched rows
if (totMatches > 0) {
check <- data.frame(
day = DAYS[i],
gridPoints = GRID_PTS[j],
gridBoundary = GRID_BOUND[k],
decay = DECAY[l],
totalPercentage = TOTAL_PERC[m],
type = types,
stringsAsFactors = FALSE
)
# If there is a direct match then move onto the next iteration
matched <- check %>%
footballstats::get_grid_matches(
fullGrid = existing.topscore,
r = TRUE
)
if (matched) next
}
icount %<>% `+`(1)
cat(' ## Analysing operation', icount, '/', totalOps, ' (Loading data first) \n')
# Determine expected goals/ accuracy etc
all.results <- total.metrics %>%
footballstats::sub_metrics(
colNames = colNames,
GRIDS = GRIDS,
odds.frame = odds.frame
)
# Prepare data - get the scales and scale results
scaled.results <- all.results$data %>%
mltools::scale_data()
# Create plots + get feature metrics
feat.metrics <- scaled.results$data %>%
footballstats::create_plot(
day = DAYS[i],
gridPoints = GRID_PTS[j],
gridBoundary= GRID_BOUND[k],
decayFactor = DECAY[l],
totalPer = TOTAL_PERC[m],
savePlot = FALSE
)
# Initialise all methods
allMethods <- list()
# Build XGBoost model using CV
if ("xgboost" %in% types) {
startTime <- Sys.time()
# Build and save XGBoost
allMethods$xgb <- scaled.results$data %>%
mltools::gen_xgb(
XGB = XGB
)
# Now calculate odds
allMethods$xgb$totalStats$netWinnings <- sapply(
X = allMethods$xgb$results,
FUN = function(x) all.results$odds %>% footballstats::calculate_winnings(x)
)
endTime <- Sys.time()
tDiff <- difftime(
time1 = endTime,
time2 = startTime
) %>% format
cat(" XGBoost took :", tDiff, "\n")
# Save the models
if ("xgboost" %in% saveModels) { # nocov start
xgModel <- allMethods$xgb$model
xgScales <- scaled.results$scaler
save(xgModel, file = modelDir %>% paste0("xgModel.rda"))
save(xgScales, file = modelDir %>% paste0("xgScales.rda"))
} # nocov end
}
# Build Neural network model using CV
if ("neuralnetwork" %in% types) {
startTime <- Sys.time()
# Build and save NN
allMethods$neuralnetwork <- scaled.results$data %>%
mltools::gen_nn(
NN = NN,
logs = TRUE
)
# Now calculate odds
allMethods$neuralnetwork$totalStats$netWinnings <- sapply(
X = allMethods$neuralnetwork$results,
FUN = function(x) odds.results %>% footballstats::calculate_winnings(x)
)
endTime <- Sys.time()
tDiff <- difftime(
time1 = endTime,
time2 = startTime
) %>% format
cat(" Neural Network took :", tDiff, "\n")
# Save the models
if ("neuralnetwork" %in% saveModels) { # nocov start
nnModel <- allMethods$neuralnetwork$model
nnScales <- scaled.results$scaler
save(nnModel, file = modelDir %>% paste0("nnModel.rda"))
save(nnScales, file = modelDir %>% paste0("nnScales.rda"))
} # nocov end
}
# Write metrics to file
if (optimizeModels) { # nocov start
allMethods %>%
footballstats::optimize_save_metrics(
resultsFile = resultsFile,
resultsDir = resultsDir
)
} # nocov end
}
}
}
}
}
if (!optimizeModels) return(allMethods)
}
|
96592fdff719d71b1a8a14c880caefc5b66981eb | 82592f877c695a45a258a479e40394a5f994530a | /src/R/zzz.R | f19278f52cf1f95265243b7fadbe042340ec35e4 | [] | no_license | whcsu/vtg.coxph | af2d3cb774561408554adbef024ebf1bac0cb766 | d82f2786719a6f0df6620027f7e86b4b0f4892b9 | refs/heads/master | 2023-06-25T06:22:18.015822 | 2021-07-27T09:01:33 | 2021-07-27T09:01:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 698 | r | zzz.R | .onLoad <- function(libname, pkgname) {
# writeln()
# writeln('vantage.basic .onLoad')
# writeln(paste(' libname:', libname))
# writeln(paste(' pkgname:', pkgname))
# writeln()
# fileName <- system.file('extdata', 'startup.message.txt', package=pkgname)
# msg <- readChar(fileName, file.info(fileName)$size)
# packageStartupMessage(msg)
}
# Package attach hook. Currently a no-op placeholder (debug output disabled).
.onAttach <- function(libname, pkgname) {
    # writeln('vantage.basic .onAttach')
    # writeln(paste('  libname:', libname))
    # writeln(paste('  pkgname:', pkgname))
    # writeln()
}
# FIXME: This is as close as I can get to a package-wide 'import x from y'
# Anyone know a better way?
# Re-export vtg::writeln into this package's namespace so internal code can
# call writeln() unqualified.
writeln <- vtg::writeln
1f8636690196bec81f7b2c6f800731a0969ac81b | bc6a1ddc9b9374f28ffa9c662212be3608c29f1b | /PRJ16_201520934_배재훈/ex_tm.R | 8cebc7ad0dcf09e86f149c077ee72f89b1b726f2 | [] | no_license | jaypae95/rproject | 68af022e12c4ddddb632175fbc0ae5c0523e8653 | fa83cb7d849aaabc438f5a137c7ca472adb8ef93 | refs/heads/master | 2021-10-10T00:27:54.109993 | 2019-01-05T02:09:52 | 2019-01-05T02:09:52 | 164,172,959 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,767 | r | ex_tm.R | ## ----------------------------------------------
#
# PROJECT16. [EX] 텍스트 마이닝
#
# COPYRIGHT (c) 2018 AJOU University
# Author : Seo, Jooyoung
# History : 2018/09/01
#
## ----------------------------------------------
#install.packages(c('KoNLP', 'stringr', 'wordcloud2', 'tm', 'qgraph', 'scales'))
library(tm)
library(KoNLP) # 한글 자연어 처리(형태소 분석) 패키지
library(stringr)
library(wordcloud2)
library(qgraph)
options(mc.cores=1)
# Step 1. 한글 형태소 분석 함수
# Extract Korean noun/pronoun keywords from a document.
# Tags the text with KoNLP's SimplePos09 tagger and keeps only the Hangul
# tokens tagged "/N" or "/P", with non-matches removed.
ko.words <- function(doc) {
  tagged <- paste(SimplePos09(as.character(doc)))
  matches <- str_match(tagged, '([가-힣]+)/[NP]')
  nouns <- matches[, 2]
  # Tokens that did not match the pattern come back as NA -- drop them.
  nouns[!is.na(nouns)]
}
# Step 2. Read in the text file (Naver movie reviews)
#txt <- c("행복은 마음에", "착한 마음에는 행복이")
txt <- readLines("2017_마스터_네이버영화평.txt")
#txt <- readLines(file.choose())

# Step 3. Text-mining analysis: keyword extraction -> corpus -> term matrix
words <- lapply(txt, ko.words)
cps <- Corpus(VectorSource(words))
tdm <- TermDocumentMatrix(cps, control = list(removePunctuation=T, removeNumbers=T))
tdm.matrix <- as.matrix(tdm)

# Step 4. Find keywords via word-frequency analysis.
# NOTE(review): `word.order` is computed but never used below -- confirm.
word.count <- rowSums(tdm.matrix)
word.order <- order(word.count, decreasing = T)
freq.words <- tdm.matrix
keyword.df <- data.frame(rownames(freq.words), rowSums(freq.words))

# Step 5. Visualise the keywords as a word cloud
wordcloud2(keyword.df)

# Step 6. Visualise word co-occurrence relations as a network graph
co.matrix <- freq.words %*% t(freq.words)
qgraph(co.matrix, labels=rownames(co.matrix), diag=F, layout='spring', edge.color='blue')
|
06cccc32f33c649d046f825d1f166622eeed2770 | 2c191434506e5d8940c1ae0c0ae0038de7bb8f7d | /3_fractional_regressions.r | 34e36f1f80fb916c3059c05dafa2206af5c7dfb8 | [] | no_license | briatte/epsa2018-paper | 7f583edbbf14d20ec7a8f9980d310ce12fa45120 | b19786916b81e46e086d5df84166e708b73e9468 | refs/heads/master | 2023-05-30T17:18:06.632835 | 2021-06-21T16:28:13 | 2021-06-21T16:28:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,668 | r | 3_fractional_regressions.r | # --- pkgs ---------------------------------------------------------------------
library(frm) # https://home.iscte-iul.pt/~jjsro/FRM.htm
library(readr)
library(strict) # https://github.com/hadley/strict
d <- readr::read_csv("data/parlnet_panel.csv")
# covariates:
# - time period (start year of legislature),
# - duration (of legislature, in years),
# - statutory size of parliamentary chamber,
# - number of sponsors (proxy for party size),
# - participation in government (party-level, 0 or 1)
# - ideological score (party-level, 0-10 towards right-wing; time-invariant)
# - fraction of senior MPs in party of sponsor (bounded 0-1)
# - fraction of female MPs in party of sponsor (bounded 0-1)
#
# NOTE -- The ideological score should not technically be time-invariant: it
# was built by aggregating ParlGov that do not vary much throughout the time
# period under observation, but that do vary still. This limitation makes it
# impossible to specify 'pure' panel effects for the models, using the frmpd
# package. [fmrpd]: https://cran.r-project.org/package=frmpd
#
X <- with(d, cbind(t, duration, size, n_j, g_j, lr_ij_sd, p_sen, p_fem))
# --- WAP ----------------------------------------------------------------------
# dependent variable:
#   weighted fraction of cosponsorship ties across PARTY lines
#
Y_WAP <- d$p_wap
length(Y_WAP) # N = 617
# n (% N) -- counts sum to N = 617
table(Y_WAP == 0)            # 70 (.11)
table(Y_WAP > 0 & Y_WAP < 1) # 512 (.83)
table(Y_WAP == 1)            # 35 (.06)  # was annotated ".55"; 35/617 is ~.06
# fractional logit
WAP_flogit <- frm::frm(
Y_WAP,
X,
type = "1P",
linkfrac = "logit",
#
# clustered standard errors at the country-chamber-party level
var.type = "cluster",
var.cluster = factor(d$id),
intercept = FALSE # no-constant estimates
)
# frm::frm.pe(WAP_flogit) # average partial effects
# RESET tests
# frm::frm.reset(WAP_flogit, 2, version = c("Wald", "LM"))
# frm::frm.reset(WAP_flogit, 3, version = c("Wald", "LM"))
# frm::frm.reset(WAP_flogit, 4, version = c("Wald", "LM"))
# fractional probit
WAP_fprobit <- frm::frm(
Y_WAP,
X,
type = "1P",
linkfrac = "probit",
#
# clustered standard errors at the country-chamber-party level
var.type = "cluster",
var.cluster = factor(d$id),
intercept = FALSE # no-constant estimates
)
# frm::frm.pe(WAP_fprobit) # average partial effects
# RESET tests
# frm::frm.reset(WAP_fprobit, 2, version = c("Wald", "LM"))
# frm::frm.reset(WAP_fprobit, 3, version = c("Wald", "LM"))
# frm::frm.reset(WAP_fprobit, 4, version = c("Wald", "LM"))
# --- WAG ----------------------------------------------------------------------
# dependent variable:
# weighted fraction of cosponsorship ties across GOVERNMENT lines
#
Y_WAG <- d$p_wag
length(Y_WAG) # N = 617
# n (% N)
table(Y_WAG == 0) # 151 (.24)
table(Y_WAG > 0 & Y_WAG < 1) # 460 (.74)
table(Y_WAG == 1) # 6 (.01)
# fractional logit
WAG_flogit <- frm::frm(
Y_WAG,
X,
type = "1P",
linkfrac = "logit",
#
# clustered standard errors at the country-chamber-party level
var.type = "cluster",
var.cluster = factor(d$id),
intercept = FALSE # no-constant estimates
)
# frm::frm.pe(WAG_flogit) # average partial effects
# RESET tests
# frm::frm.reset(WAG_flogit, 2, version = c("Wald", "LM"))
# frm::frm.reset(WAG_flogit, 3, version = c("Wald", "LM"))
# frm::frm.reset(WAG_flogit, 4, version = c("Wald", "LM"))
# fractional probit
WAG_fprobit <- frm::frm(
Y_WAG,
X,
type = "1P",
linkfrac = "probit",
#
# clustered standard errors at the country-chamber-party level
var.type = "cluster",
var.cluster = factor(d$id),
intercept = FALSE # no-constant estimates
)
# frm::frm.pe(WAG_fprobit) # average partial effects
# RESET tests
# frm::frm.reset(WAG_fprobit, 2, version = c("Wald", "LM"))
# frm::frm.reset(WAG_fprobit, 3, version = c("Wald", "LM"))
# frm::frm.reset(WAG_fprobit, 4, version = c("Wald", "LM"))
# --- P-tests ------------------------------------------------------------------
# frm::frm.ptest(WAP_flogit, WAP_fprobit, version = c("Wald", "LM"))
# frm::frm.ptest(WAG_flogit, WAG_fprobit, version = c("Wald", "LM"))
# --- Residual standard errors -------------------------------------------------
round(sd(Y_WAP - WAP_flogit$yhat), 2)
round(sd(Y_WAG - WAG_flogit$yhat), 2)
# --- save ---------------------------------------------------------------------
save(list = ls(pattern = "WA?_*"), file = "data/parlnet_frm.rda")
# ----------------------------------------------------- have a nice day --------
# rm(list = ls())
# gc()
# kthxbye
|
b206a4097404ff7696b1c365582d2b2b5a389a06 | b4294e86bd4aa13da2da944c3499eff23ac9e0c6 | /mixtures.R | 4999ceca92edda906cb74e3c1e6c574765809c0c | [] | no_license | sylvansecrets/signal-coursework | 62f0a5e7522eb478b6a6d6adc01609cebfeeeeca | caacf893d0e8c8fdff6c7fc0433422f54ea04309 | refs/heads/master | 2021-01-12T10:18:53.326551 | 2016-12-14T03:06:14 | 2016-12-14T03:06:14 | 76,419,676 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,223 | r | mixtures.R | # Mixture Models
library("ggplot2")
library("datasets")
library("mixtools")
library("mclust")
# Convenience function
# Print, for each of the k clusters, the rows of `data` assigned to that
# cluster (selected food-consumption columns only).
#
# `data` defaults to the global `protein_df`, which this script only loads
# further down -- with the default, call this after `protein_df` exists.
# The explicit argument removes the hard global dependency of the original.
print_clusters <- function(labels, k, data = protein_df) {
  for (i in seq_len(k)) {
    print(paste("cluster", i))
    print(data[labels == i, c("RedMeat", "Fish", "Fr.Veg")])
  }
}
# Convenience function
# Scale `d`, cluster it hierarchically with the given agglomeration `method`,
# cut the dendrogram into `k` groups, and draw a cluster plot.
#
# clusplot() lives in the `cluster` package (a recommended package shipped
# with R) which this script never attaches -- call it namespace-qualified so
# the function works without a prior library("cluster").
hclust_plot <- function(d, method, k) {
  scaled <- scale(d)
  distances <- dist(scaled)
  tree <- hclust(distances, method = method)
  # Renamed from `cut`, which masked base::cut.
  groups <- cutree(tree, k = k)
  return(
    cluster::clusplot(scaled, groups, color = TRUE, shade = TRUE, labels = 2, lines = 0)
  )
}
# Load dataset
df_faithful = faithful
scale_faithful = as.data.frame(scale(faithful))
# plot
ggplot(scale_faithful)+geom_histogram(aes(x=waiting), alpha=0.5, fill="black")+geom_histogram(aes(x=eruptions), alpha=0.5, fill="red")
# MULTIVARIATE MODELS
# plot log-likelihood and guassian density estimates vs. histogram
nme = normalmixEM(df_faithful$waiting)
plot(nme, density=TRUE)
# run a few times to look at variation
nme = normalmixEM(df_faithful$waiting)
summary(nme)
# comp 1 comp 2
# lambda 0.639113 0.360887
# mu 80.091102 54.614907
# sigma 5.867708 5.871255
# loglik at estimate: -1034
# Try 2: numbers fairly close but swapped... number of iterations changed
# Run a few times with k = 3 to look at variation.
# BUG FIX: the original fitted `rme` but then plotted/summarised the earlier
# 2-component fit `nme`, so the k = 3 results were never inspected.
rme <- normalmixEM(df_faithful$waiting, k = 3)
plot(rme, density = TRUE)
summary(rme)
# jumped from ~30 iterations to 287, didn't converge 2nd time, 767 on third
# gave no third component
# otherwise the samish when it does converge
# semi-parametric models
# looks about right with n = 2, b = 3
n=20
b=0.5
semi_model = spEMsymloc(df_faithful$waiting, mu0=n, bw = b)
plot(semi_model, sub = paste("b =", b, "n =", n))
# Add outliers and compare plots
alt_wait = c(0,200, -5, df_faithful$waiting)
alt_nme = normalmixEM(alt_wait, k=2)
plot(alt_nme, which=2)
alt_semi = spEMsymloc(alt_wait, mu0=2, bw=3)
plot(alt_semi)
# Scatter eruptions and waiting times
ggplot(data=df_faithful)+geom_point(aes(x= waiting, y=eruptions))
# There looks to be two clusteres
#
multi_lust = Mclust(scale_faithful)
plot(multi_lust)
# Try this on proteins
setwd("C:/Users/User/Documents/GitHub/Signal-Data-Science")
protein_df = read.delim("~/GitHub/Signal-Data-Science/protein.txt")
multi_protein = Mclust(scale(protein_df))
plot(multi_protein)
# it's got four components!
pc = prcomp(scale(protein_df))
qplot(1:9, pc$sdev)
multi_pc = Mclust(scale(pc$x[,1:2]))
plot(multi_pc)
#PCA and then multi leads to readable results
# Non-parameteric
np_faith = npEM(scale(df_faithful), mu0=2, bw=.3)
np_protein = npEM(scale(protein_df), mu0=2, bw=.3)
np_pc_protein = npEM(scale(pc$x[,1:2]), mu0=2, bw=0.5)
plot(np_faith)
plot(np_protein)
plot(np_pc_protein)
silenced = cbind(pc$x[,1:2],np_protein$posteriors)
silenced = as.data.frame(silenced)
colnames(silenced)[3:4] = c("G1", "G2")
library("tidyr")
test = gather(silenced, "Group", "GroupValue", G1)
test = gather(test, "PC", "PCvalue", PC1:PC2)
ggplot(test, aes(x=GroupValue, y=PCvalue, color=Group, size=PC, shape=PC)) + geom_jitter(width=.3, height=.3)
# ????????? |
f27de3a235ded24d2217c10ef972784807323f1c | 5bd444d45603c382153607c3d258c58e3db4fdaa | /Rscripts/user_function.R | 9b06f9b108fe20d7525fa32d716cfb7c9f50787a | [] | no_license | Davide-bll/teleworkingproject | f05d8c9ffd675750abdf6a212c0fde1f6ed3ecd9 | 0874dcdd6804c3c69ebe4bcaf9eeb62f803753f0 | refs/heads/master | 2022-06-13T20:20:12.912285 | 2020-05-07T10:03:02 | 2020-05-07T10:03:02 | 258,636,559 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,847 | r | user_function.R | # user function used in the converttocsv.R script (OSemosys model)
# USER FUNCTIONS----
read_osemosys <- function(path, rownames = "code", pattern = "X", replacement = "", header = TRUE) {
x <- read.table(path, header = header, stringsAsFactors = FALSE) %>%
as_tibble()
names(x) <- str_replace(names(x), pattern = pattern, replacement = replacement)
x
}
read_osemosys_2 <- function(path, header = TRUE, pattern = "X", replacement = "", sep = ",") {
x <- read.csv(path, header = header, sep = sep, dec = ".", stringsAsFactors = FALSE) %>%
as_tibble()
names(x) <- str_replace(names(x), pattern = pattern, replacement = replacement)
x
}
convert_tb <- function(x, into = c("country", "commodity", "technology", "energy_level", "age", "size"),
sep = c(2,4,6,7,8), col = "code", from = NULL) {
# separate col
x <- separate(x, col = col, into = into, sep = sep)
# re-arrange cols
chr <- select_if(x, ~(!is.numeric(.)))
num <- select_if(x, is.numeric)
x <- cbind(chr, num) %>% as_tibble
if(is.null(from)) {
from <- dim(chr)[[2]] + 1}
pivot_longer(x, cols = c((from): dim(x)[[2]]), names_to = "year", values_to = "value")
}
# detect the first TRUE-- very specific for this data
detect_first_true <- function(x) {
x <- as.numeric(x)
y <- c()
for (i in 2:length(x)) {
y[[i-1]] <- isTRUE((x[[i]] - x[[i-1]]) < 0)
}
y <- c(1, y)
cumsum(y)
}
# aggregation -----
# Aggregate the `value` column of `x` with function `f`, grouped by the
# column names given in `grp` (character vector); returns an ungrouped tibble.
my_group_by <- function(x, grp, f) {
  grouped <- group_by_at(x, .vars = grp)
  summarised <- summarise(grouped, value = f(value))
  ungroup(summarised)
}
# str 2 3 -------
# Convert values from the OSeMOSYS representation to the GAM one via the
# linear mapping matrix A (matrix product A %*% x).
fromOSEtoGAM <- function(A, x) A %*% x
# Return the name of column `col` of `x`, followed by the entries of that
# column whose text matches the pattern `code` (default "IT").
find_tech <- function(x, code = "IT", col = 1) {
  column <- x[[col]]
  hits <- column[which(str_detect(column, code))]
  c(names(x)[[col]], hits)
}
# remove_fake
# Drop the rows of `x` whose first column matches the pattern `code` -- used
# to strip the placeholder ("fake") technology rows tagged with that code.
remove_fake <- function(x, code = "IT") {
  first_col <- names(x)[[1]]
  filter(x, !str_detect(x[[first_col]], code))
}
# add col
add_col <- function(x, old, new) {
x[[old]] <- new
rename_at(x, .vars = vars(old), function(x) "technology")
}
# structure 3 strategy
convert_struct3 <- function(x) {
techs <- find_tech(x)
x <- add_col(remove_fake(x), old = techs[[1]], new = techs)
# deal with different modes: this works only if the first observation is mode 1, and mode 2
mode <- x %>% select(technology) %>%
mutate(mode = 1) %>%
group_by(technology) %>%
mutate(mode_of_operation = as.character(cumsum(mode))) %>%
select(-mode) %>%
ungroup
x %>% left_join(mode, by = "technology")
}
# arrange cols
arrange_cols <- function(x) {
cbind(select_if(x, ~(!is.numeric(.))), select_if(x, is.numeric)) %>%
as_tibble()
}
# Greedily build a mapping matrix A (length(output) x length(input)) whose
# entries are the fractions of each input quantity allocated to each output
# bucket, filling each output row in order until its total is met.
# Each column of A sums to at most 1 (`bound`); `res` tracks how much of the
# current output row has been filled so far.
# NOTE(review): `positive_part` is defined elsewhere in this package (not in
# this file); `nrow`/`ncol` locals mask the base functions of the same name.
find_mapping <- function(input, output) {
  nrow <- length(output)
  ncol <- length(input)
  A <- matrix(0, nrow = nrow, ncol = ncol)
  # residual (not-yet-allocated) portion of each input
  res_input <- input
  for(i in 1:nrow) {
    res <- 0
    for(j in 1:ncol) {
      # remaining allocatable fraction of input j
      bound <- 1 - colSums(A[1:i,j,drop = FALSE])
      if(res + input[[j]] <= output[[i]]) {
        A[i,j] <- ifelse(input[[j]] == 0, 0, min(res_input[[j]]/input[[j]], bound))
      }
      if(res + input[[j]] > output[[i]] & res < output[[i]]) {
        # A[i,j]*res_input[[j]] + res = output[[j]] must be satisfied
        A[i,j] <- ifelse(res_input[[j]] == 0, 0, min(bound, (output[[i]] - res)/input[[j]]))
      }
      if(res > output[[i]]) A[i,j] <- 0
      # update the filled amount of output row i
      res <- res + (A[i,j]*input[j])
    }
    # update residual input (clamped at zero)
    res_input <- positive_part((1 - colSums(A[1:i,,drop = FALSE]))*input)
  }
  A
}
}
|
03e299277e6a35e1f94efd072a741e15449828b8 | cf2cc54e5cec62abcf8d409efdefd524600a34e0 | /pyRAD-fasta-filtering/species_filtering.R | 8dc8d5af4fcdf483b44213531b60edfefbfd7689 | [
"MIT"
] | permissive | laninsky/non_active_repos | 1f59cc87c97bde271bc3c2219560a376531bc97f | fb22042115be68070c005f304271565a5d9d3162 | refs/heads/master | 2022-01-31T19:09:35.491413 | 2019-08-13T02:02:23 | 2019-08-13T02:02:23 | 44,968,638 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 754 | r | species_filtering.R | library(stringr)
intable <- read.table("temp",header=FALSE,stringsAsFactors=FALSE,sep="\t")
species <- read.table("species_assignments",header=FALSE,stringsAsFactors=FALSE,sep="\t")
locusname <- read.table("tempname",header=FALSE,stringsAsFactors=FALSE,sep="\t")
rows <- dim(intable)[1]
species_no <- dim(species)[1]
i <- 1
while (i < rows) {
if ((length(grep(">",intable[i,1])))>0) {
seqlength <- nchar(gsub("N","",intable[(i+1),1]))
if (seqlength > 0) {
output <- rbind(intable[i,1],intable[(i+1),1])
name <- (gsub(">","",intable[i,1]))
for (j in 1:species_no) {
if(name==species[j,1]) {
outputname<-paste(species[j,2],"/",locusname[1,1],sep="")
write.table(output, outputname,quote=FALSE, col.names=FALSE,row.names=FALSE)
break
}
}
}
i <- i + 2
}
}
|
81f611daea291610d2148d93654c4baffb6de8e1 | 05de00b8c0512fe56bb0727b1210e4c6755f5fc4 | /man/BaseRule.Rd | d3a16d5aaac1a76746c5c0e7ce64053f648d1590 | [
"Apache-2.0"
] | permissive | DyfanJones/aws-step-functions-data-science-sdk-r | 8b4a221d6b18d9015a002c50d3063f6e0e405e16 | ab2eb28780791f8e8bb50227b844fa240b1252da | refs/heads/main | 2023-04-28T21:24:34.920961 | 2021-05-30T21:58:04 | 2021-05-30T21:58:04 | 362,764,798 | 1 | 0 | NOASSERTION | 2021-05-30T21:58:05 | 2021-04-29T09:37:47 | R | UTF-8 | R | false | true | 1,564 | rd | BaseRule.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/steps_choice_rule.R
\name{BaseRule}
\alias{BaseRule}
\title{BaseRule}
\description{
Abstract class
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-to_list}{\code{BaseRule$to_list()}}
\item \href{#method-format}{\code{BaseRule$format()}}
\item \href{#method-clone}{\code{BaseRule$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-to_list"></a>}}
\if{latex}{\out{\hypertarget{method-to_list}{}}}
\subsection{Method \code{to_list()}}{
Convert class to list ready to be translated for
Amazon States Language \url{https://states-language.net/spec.html}.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{BaseRule$to_list()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-format"></a>}}
\if{latex}{\out{\hypertarget{method-format}{}}}
\subsection{Method \code{format()}}{
Format class
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{BaseRule$format()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{BaseRule$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
4efd3f7652030af6b17279de12e7b49d4971b910 | f74b0585237f2f35719c8fbc01b1ae76249696d8 | /man/beep.Rd | 696abb6c1cbd30f26711640862e2b666b38a2832 | [] | no_license | sjorsvanheuveln/beepr2 | cafc28176bbee032e5ae08fbd7d861deb1c9a0d8 | f3a95ba09250dd28114b1957576be645ab2399f9 | refs/heads/master | 2021-01-18T02:21:36.000847 | 2016-12-15T14:17:47 | 2016-12-15T14:17:47 | 45,794,698 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,290 | rd | beep.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/beepr2.R
\name{beep}
\alias{beep}
\title{Play an awesome sound}
\usage{
beep(sound = 1, expr = NULL)
}
\arguments{
\item{sound}{character string or number specifying what sound to be played by
either specifying one of the built in sounds, specifying the path to a wav
file or specifying an url. The default is 1. Possible sounds are:
\enumerate{ \item \code{"ping"} \item \code{"coin"} \item \code{"fanfare"}
\item \code{"complete"} \item \code{"treasure"} \item \code{"ready"} \item
\code{"shotgun"} \item \code{"mario"} \item \code{"wilhelm"} \item
\code{"facebook"} \item \code{"sword"} \item \code{"elephant"} \item \code{"1-up"}
\item \code{"airship_clear_smb3"} \item \code{"castle_clear_smw"} \item \code{"congrats"}
\item \code{"course_clear_smw"} \item \code{"data"} \item \code{"fortress_clear_smb3"}
\item \code{"happy_message_sm64"} \item \code{"highscore_sm64"} \item \code{"key_get_sm64"}
\item \code{"level_clear_smb3"} \item \code{"lostlife_smb3"} \item \code{"msuhroom_smb3"}
\item \code{"pause_smb3"} \item \code{"pipe_smb3"} \item \code{"new_item_smb3"}
\item \code{"star_appear_sm64"}\item \code{"warp_wistle_smb3"}} If \code{sound} does not match any
of the sounds above, or is a valid path or url, a random sound will be
played. Currently \code{beep} can only handle http urls, https is not
supported.}
\item{expr}{An optional expression to be executed before the sound.}
}
\description{
\code{beep} plays a short sound which is useful if you want to get notified,
for example, when a script has finished. As an added bonus there are a number
of different sounds to choose from. Clip indices 13-30 are derived from Super Mario Brothers 3, Super Mario World and Super Mario 64.
}
\details{
If \code{beep} is not able to play the sound a warning is issued rather than
an error. This is in order to not risk aborting or stopping the process that
you wanted to get notified about.
}
\examples{
# If no argument given, the first sound is played, which is a "ping" sound
beep()
\dontrun{
# Play a pipemaze sound instead of "ping".
beep("pipemaze")
# or
beep(27)
# Play a random sound
beep(0)
# Update all packages and "ping" when it's ready
update.packages(ask=FALSE); beep()
}
}
|
e6df97d79a803a94b5ff01972f3d246cbbd8d521 | 547f1edb833a6020777eb83ab6bacbaf3d9799fb | /Neural Network/neuralNetwork.R | f6f5eed7a13c806c9e2d138b60c94fc4045d4365 | [] | no_license | mbaumer09/UCSC-Winter-Masters-Econometrics | 635ef5ba8cab7b9d9f2bc2b21e7470a57b4d09d0 | 93e006321fd720044962d11710b1c8bd5591ed75 | refs/heads/master | 2021-01-10T07:26:34.710928 | 2015-12-21T22:29:22 | 2015-12-21T22:29:22 | 48,151,526 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,911 | r | neuralNetwork.R | # This example will construct a basic decision tree using the rpart package
# to predict whether an individual's income is greater or less than 50k USD
# based on 14 observable predictors
library(caret)
library(nnet)
library(NeuralNetTools)
url.train <- "http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data"
url.test <- "http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test"
url.names <- "http://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.names"
download.file(url.train, destfile = "adult_train.csv")
download.file(url.test, destfile = "adult_test.csv")
download.file(url.names, destfile = "adult_names.txt")
# Read the training and test data into memory
train <- read.csv("adult_train.csv", header = FALSE)
# The test data has an unnecessary first line that messes stuff up, this fixes that problem
all_content <- readLines("adult_test.csv")
skip_first <- all_content[-1]
test <- read.csv(textConnection(skip_first), header = FALSE)
# The data file doesn't have the column names in its header, add those in manually...
varNames <- c("Age",
              "WorkClass",
              "fnlwgt",
              "Education",
              "EducationNum",
              "MaritalStatus",
              "Occupation",
              "Relationship",
              "Race",
              "Sex",
              "CapitalGain",
              "CapitalLoss",
              "HoursPerWeek",
              "NativeCountry",
              "IncomeLevel")
names(train) <- varNames
names(test) <- varNames
# NOTE(review): this renames the test labels *by position* to match the
# training labels. It is only correct if both factors list their levels in
# the same order -- presumably the test labels are the training labels with
# a trailing period (e.g. ">50K."), so the sort order matches; verify this
# if the source files ever change.
levels(test$IncomeLevel) <- levels(train$IncomeLevel)
# The downloaded copies are no longer needed once read into memory.
file.remove("adult_train.csv")
file.remove("adult_test.csv")
# Train a single-hidden-layer neural net (nnet via caret) on all 14 predictors.
set.seed(1414)
start <- proc.time()[3]
model.nn <- train(IncomeLevel ~ .,
                  data = train,
                  method = "nnet")
print(model.nn)

# Score the held-out test set; accuracy is the share of correctly
# predicted labels. mean() of a logical vector is equivalent to the
# original sum(...)/length(...), and naming the column avoids the magic
# index 15.
predictions <- predict(model.nn, test[, 1:14])
accuracy <- mean(predictions == test$IncomeLevel)
print(accuracy)
end <- proc.time()[3]
print(paste("This took ", round(end - start, digits = 1), " seconds", sep = ""))
# Feature selection: rank predictors with a quick linear discriminant
# analysis model, then keep only the most important ones.
set.seed(1414)
model.lda <- train(IncomeLevel ~ .,
                   data = train,
                   method = "lda")
plot(varImp(model.lda))

keeps <- c("EducationNum",
           "Relationship",
           "Age",
           "HoursPerWeek",
           "MaritalStatus",
           "IncomeLevel")
# Logical subsetting preserves the original column order (the positional
# indexing below relies on IncomeLevel remaining the last column); the
# original wrapped this in a redundant which().
train.reduced <- train[, names(train) %in% keeps]
test.reduced <- test[, names(test) %in% keeps]

# Retrain the neural net on the reduced predictor set.
set.seed(1414)
start <- proc.time()[3]
model.nn <- train(IncomeLevel ~ .,
                  data = train.reduced,
                  method = "nnet")
print(model.nn)
predictions <- predict(model.nn, test.reduced[, 1:5])
accuracy <- mean(predictions == test.reduced$IncomeLevel)
print(accuracy)
end <- proc.time()[3]
print(paste("This took ", round(end - start, digits = 1), " seconds", sep = ""))
# For visualization purposes, keep only columns that are non-factors (or
# binary factors, like Sex).
keeps <- c("EducationNum",
           "Age",
           "HoursPerWeek",
           "Sex",
           "CapitalGain",
           "IncomeLevel")
# Logical subsetting preserves column order; IncomeLevel stays last.
train.reduced <- train[, names(train) %in% keeps]
test.reduced <- test[, names(test) %in% keeps]

set.seed(1414)
start <- proc.time()[3]
model.nn <- train(IncomeLevel ~ .,
                  data = train.reduced,
                  method = "nnet")
print(model.nn)
predictions <- predict(model.nn, test.reduced[, 1:5])
accuracy <- mean(predictions == test.reduced$IncomeLevel)
print(accuracy)
end <- proc.time()[3]
print(round(end - start, digits = 1))

# Visualize the fitted network with NeuralNetTools.
plotnet(model.nn$finalModel)
# Bonus: Garson's algorithm gives another variable-importance view.
garson(model.nn$finalModel)
|
a5d1fdf12bc3ff36f4de69d9e7ff71edd07da9d9 | e593603c04404f80498b134ffbd94c92a5666014 | /man/dashboardUI.Rd | b3e445aeca0f0fdbc6c21e3c96d6a073576ec240 | [] | no_license | byadu/modcubqry | 9891dd06966f553b9c458c4d9a9076d582281749 | 72cb65467dc9df774422a6deeb747d4eb67c8eae | refs/heads/master | 2022-11-18T07:17:01.696979 | 2020-07-19T11:12:36 | 2020-07-19T11:12:36 | 280,848,766 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 408 | rd | dashboardUI.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dash.R
\name{dashboardUI}
\alias{dashboardUI}
\title{dashboardUI}
\usage{
dashboardUI(id, dashid, dashname, M)
}
\arguments{
\item{id}{is the caller id}
\item{dashid}{is the folder id of the reports}
\item{dashname}{is the name of the folder}
\item{M}{is the metadata connection structure}
}
\description{
UI to display a dashboard
}
|
142f81a7970af4f444c11b82ec4a6eec7bdba8a3 | 86b88b328fb6a008fe72b229ef518f019f8285b3 | /Outliers checking.R | 51fe3a4d82114fff3a7edd99e79101b7d88865f5 | [] | no_license | DanielKhokhlov/Modelling-Housing-Prices-in-R | aee6a8c408b57db7cc6b535fdf93aef05d8c6486 | 00fe5d702d91b24745e9f8eb8c65a2443fa60cd6 | refs/heads/master | 2020-09-16T18:55:05.108508 | 2019-11-25T04:19:27 | 2019-11-25T04:19:27 | 223,859,398 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 926 | r | Outliers checking.R | library(car)
data = read.csv("TamSales8.csv", header = T)
names(data)
# Initial test (basic model)
boxplot(data$SALES) # boxplot looks bad (right skewed data)
model = lm(SALES~LAND+IMP+NBHD, data=data)
outlierTest(model)
# Potential outliers 5, 25, 4
# Potential outliers were not suspicious
# New ln Data
data.1 = data
data.1$SALES = log(data.1$SALES)
data.1$LAND = log(data.1$LAND)
data.1$IMP = log(data.1$IMP)
# Matrix Scatterplot for LAND, IMP and NBHD
pairs(~LAND + IMP + as.numeric(NBHD), data = data.1
main = "Scatterplot Matrix of Explanatory Variables")
# Outlier test for ln model
boxplot(data.1$SALES, main = "Sales Boxplot", ylab = "ln(Sales)") # 3 outliers (top 3 sales)
model.1 = lm(SALES~LAND+IMP+NBHD, data=data.1)
outlierTest(model.1)
# Potential outliers 252, 107, 234, 115
# Potential outliers were not suspicious
summary(model)
summary(model.1)
pairs(~LAND*IMP + as.factor(NBHD), data=data)
|
b21388251a44adbe62eb809211a19c89f1d8c44e | 773d610b7eb43bca7c28ee5bcbb2653ac9b77fc7 | /R_Music.R | 0b2599b81d74ec72646d6e91424d689b66b071dd | [] | no_license | damonzon/MEDIUM_Tutorials | 0fa2517511f2756a07503873344b6e3350258950 | 201a267fd0e6a276ee5ea495346bff23da012937 | refs/heads/master | 2021-01-04T21:27:57.391982 | 2020-02-15T18:51:53 | 2020-02-15T18:51:53 | 240,765,393 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 819 | r | R_Music.R | # Let's make music
# https://towardsdatascience.com/compose-and-play-music-in-r-with-the-rmusic-package-b2afa90761ea
library(Rmusic)
mali_pitch <- paste(
"F F G",
"F D F",
"A A Bb",
"A F A",
"C5 C5 C5",
"D5 A C5",
"A A G",
"D A F",
"C5 C5 C5",
"D5 A C5",
"A A G",
"D A F",
"F F F G",
"F D F",
"A A Bb",
"A F A"
)
mali_duration <- c(
2, 3, 1.3,
2, 2, 5,
2, 3, 1.3,
2, 2, 4,
2, 3, 1.3,
2, 2, 4,
2, 3, 1.3,
2, 2, 4,
2, 3, 1.3,
2, 2, 4,
2, 3, 1.3,
2, 2, 4,
1,1,1.2,.9,
2.2,2,3,
2, 3, 1.3,
2, 2, 4
)
mali_pitch <- strsplit(mali_pitch, " ")[[1]]
Rmusic::play_music(mali_pitch,
mali_duration, tempo = 200)
Rmusic::save_music(mali_pitch, mali_duration,
tempo = 200, output_file = "/Users/patrickkelly/Desktop/MEDIUM/mali2.wav")
|
9bdd1d36a39efe7fac0e0070a5c8e50455c29417 | 9f9c1c69adb6bc2ac097ae368c8ba2f293d01b64 | /man/plot_data.Rd | 461693caf56fdfd831b7b3531d9c6bbd20a1e950 | [] | no_license | izabelabujak/MixSIAR | a8be0e62a1299b6cd365ab0fa9ff315ad8c306f9 | 2b1b2545638fcfaacbcaaa695e2bec436a91806f | refs/heads/master | 2020-08-30T03:45:42.453711 | 2019-08-02T22:02:57 | 2019-08-02T22:02:57 | 218,252,966 | 1 | 0 | null | 2019-10-29T09:43:47 | 2019-10-29T09:43:47 | null | UTF-8 | R | false | true | 1,953 | rd | plot_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_data.R
\name{plot_data}
\alias{plot_data}
\title{Plot biotracer data}
\usage{
plot_data(filename, plot_save_pdf, plot_save_png, mix, source, discr)
}
\arguments{
\item{filename}{name of the plot file(s) to save (e.g. "isospace_plot")}
\item{plot_save_pdf}{T/F, save the plot(s) as a pdf?}
\item{plot_save_png}{T/F, save the plot(s) as a png?}
\item{mix}{output from \code{\link{load_mix_data}}}
\item{source}{output from \code{\link{load_source_data}}}
\item{discr}{output from \code{\link{load_discr_data}}}
}
\description{
\code{plot_data} creates plot(s) of the biotracer data and saves the plot(s)
to file(s) in the working directory. All 3 required data files must have been
loaded by \code{\link{load_mix_data}}, \code{\link{load_source_data}},
and \code{\link{load_discr_data}}. Behavior depends on the number of tracers:
\itemize{
\item 1 tracer: calls \code{\link{plot_data_one_iso}} to create a 1-D plot.
\item 2 tracers: calls \code{\link{plot_data_two_iso}} to create a biplot.
\item >2 tracers: calls \code{\link{plot_data_two_iso}} in a loop to create
biplots for each pairwise combination of biotracers.
}
}
\details{
An important detail is that \code{plot_data_two_iso} and \code{plot_data_one_iso}
plot the raw mix data and \emph{add the TDF to the source data}, since this is
the polygon that the mixing model uses to determine proportions. The plotted
source means are:
\deqn{\mu_{source} + \mu_{discr}}
The source error bars are +/- 1 standard deviation, \emph{calculated as a
combination of source and TDF variances:}
\deqn{\sqrt{\sigma^2_{source} + \sigma^2_{discr}}}
\code{plot_data} looks for 'C', 'N', 'S', and 'O' in the biotracer column
headers and assumes they are stable isotopes, labeling the axes with, e.g.,
expression(paste(delta^13, "C (u2030)",sep="")).
}
\seealso{
\code{\link{plot_data_two_iso}}, \code{\link{plot_data_one_iso}}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.