blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3277a208a70a815fc401ab911ff060fc22bcff1b | c88b0cbeda0edf9e745e324ef942a504e27d4f87 | /Yerkes/Drew version/make data.R | 6ce7cc64c5f4642ccbcfbdeb92f448fd260e5538 | [] | no_license | Diapadion/R | 5535b2373bcb5dd9a8bbc0b517f0f9fcda498f27 | 1485c43c0e565a947fdc058a1019a74bdd97f265 | refs/heads/master | 2023-05-12T04:21:15.761115 | 2023-04-27T16:26:35 | 2023-04-27T16:26:35 | 28,046,921 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,596 | r | make data.R | ### Don't run this!
### final_data.csv has had post script fixes made
# NOTE(review): hard-coded, machine-specific working directory; the commented
# alternative below suggests the script was run from two different machines.
setwd('C:/Users/s1229179/Dropbox/R/blood chemistry/Drew version/')
#setwd('C:/Users/Diapadion/Dropbox/R/blood chemistry/Drew version/')
# Keep character columns as character when reading CSVs (pre-R 4.0 the
# default was to convert them to factors).
options(stringsAsFactors = FALSE)
# merge 2 personality datasets together and generate avg item scores for individuals that had 2 raters
# Read the two raters' personality item scores for the same chimps.
persdata1 <- read.csv('personality wdh rater1 4 june 2007.csv')
persdata2 <- read.csv('personality wdh rater2 4 june 2007.csv')
# Full outer join on chimp ID (all = TRUE keeps chimps seen by only one
# rater); rater-1 columns get suffix .x, rater-2 columns get .y.
combined_pers <- merge(persdata1, persdata2, by="chimp", all = T)
# Compute the per-item consensus score (<item>.z) for every adjective item.
# rowMeans(..., na.rm = TRUE) averages the two raters when both scored a
# chimp and falls back to the single available rating when one is missing --
# the behaviour the original code applied only to "fear" (via apply/mean),
# and the single-rater fallback that previously required a manual Excel step.
# Chimps scored by neither rater come out as NaN.
# This also removes the attach()/detach() pair, which is an R anti-pattern.
item_names <- c(
  "fear", "pers", "caut", "stbl", "aut", "stngy", "jeals", "reckl",
  "soc", "tim", "symp", "play", "sol", "actv", "help", "buly",
  "aggr", "manp", "gntl", "affc", "exct", "impl", "inqs", "subm",
  "depd", "irri", "pred", "decs", "depr", "sens", "defn", "intll",
  "prot", "invt", "clmy", "errc", "frdy", "lazy", "dsor", "unem",
  "imit", "indp"
)
for (item in item_names) {
  rater_cols <- paste0(item, c(".x", ".y"))  # rater 1 / rater 2 columns
  combined_pers[[paste0(item, ".z")]] <-
    rowMeans(combined_pers[rater_cols], na.rm = TRUE)
}
write.csv(combined_pers, 'combined_pers and item scores.csv')
#here you have to manually go into Excel and create the items.a, which is either the average item rating for an individual or its single rater item score from rater 1.
#would be great if someone could provide code in R to do the step above so we don't have to do this again in the future?
# Read the blood chemistry panel.
blood <- read.csv("blood chemistry data.csv")
#combine persdata3 with cholesterol, triglyceride, and blood pressure data
# Re-read the item-score file written above (after the manual Excel edit).
persdata3 <- read.csv('combined_pers and item scores.csv')
# Each merge below is a full outer join on chimp ID (all = TRUE), and each
# intermediate result is written to disk as a checkpoint CSV.
blood_pers_combined <- merge(blood, persdata3, by="chimp", all = T)
write.csv(blood_pers_combined, 'combinedPersItemBlood1.csv')
# Add blood pressure / metabolic syndrome measurements.
blood_pressure <- read.csv("Chimpanzee Metabolic Syndrome Worksheet.csv")
blood_pers_combined1 <- merge(blood_pers_combined, blood_pressure, by="chimp", all=T)
write.csv(blood_pers_combined1, 'combinedPersItemBlood2.csv')
# Add the hematology panel.
hematology <- read.csv("yerkes hematology data.csv")
final_minus_height <- merge(blood_pers_combined1, hematology, by="chimp", all=T)
write.csv(final_minus_height, 'combinedPersItemBlood3.csv')
# Add height measurements to produce the final merged dataset.
height <- read.csv("Chimpanzee heights bak.csv")
final_data <- merge(final_minus_height, height, by="chimp", all=T)
#write.csv(final_data, 'C:/Users/s1229179/Dropbox/R/blood chemistry/Drew version/final_data.csv')
#write.csv(final_data, 'C:/Users/Diapadion/Dropbox/R/blood chemistry/Drew version/final_data.csv')
#-------#
# recompute aggregate personality scores
# compare_data$chimp_Dom_CZ <-
# (Dominant-Submissive-Dependent.Follower+Independent-Fearful+Decisive-Timid-Cautious+
# Intelligent+Persistent+Bullying+Stingy.Greedy+40)/12
#
# compare_data$chimp_Ext_CZ <-
# (-Solitary-Lazy+Active+Playful+Sociable-Depressed+Friendly+Affectionate+Imitative+24)/9
#
# compare_data$chimp_Con_CZ <-
# (-Impulsive-Defiant-Reckless-Erratic-Irritable+Predictable-Aggressive-Jealous-Disorganized+64)/9
#
# compare_data$chimp_Agr_CZ <-
# (Sympathetic+Helpful+Sensitive+Protective+Gentle)/5
#
# compare_data$chimp_Neu_CZ <-
# (-Stable+Excitable-Unemotional+16)/3
#
# compare_data$chimp_Opn_CZ <-
# (Inquisitive+Inventive)/2
#
#
|
4c5c51243e62d6579ea00ef85b3eb0a7a1d13758 | e3c9a5bec7c4a595506b5181848628187126aca0 | /man/ndex_update_network.Rd | 9361990be90d0a09ee30864040bcad02f03a2139 | [
"CC-BY-NC-4.0",
"BSD-3-Clause"
] | permissive | frankkramer-lab/ndexr | d1d26a9abeb4e688319d8fd78cc0f1b15c084bbc | 949bffcc7f89ca319925d1c752be26ca94abef33 | refs/heads/master | 2023-04-06T15:42:35.464578 | 2023-03-23T09:57:09 | 2023-03-23T09:57:09 | 68,606,558 | 8 | 2 | BSD-3-Clause | 2023-03-23T10:08:03 | 2016-09-19T13:04:57 | R | UTF-8 | R | false | true | 1,636 | rd | ndex_update_network.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ndex_networks_simple_operations.r
\name{ndex_update_network}
\alias{ndex_update_network}
\title{Update an Entire Network as CX}
\usage{
ndex_update_network(ndexcon, rcx, networkId)
}
\arguments{
\item{ndexcon}{object of class NDExConnection, as returned by \code{\link{ndex_connect}}}
\item{rcx}{\code{\link[RCX]{RCX-object}} object}
\item{networkId}{(optional); unique ID of the network}
}
\value{
UUID of the updated network
}
\description{
\strong{Note: In future `ndexr` uses the \link[RCX]{RCX-object} from the corresponding package to handle the networks!}
}
\details{
This method updates/replaces a existing network on the NDEx server with new content from the given RCX object.
The UUID can either be specified manually or it will be extracted from the RCX object (i.e. from rcx$ndexStatus$externalId).
}
\note{
Requires an authorized user! (ndex_connect with credentials)
Compatible to NDEx server version 1.3 and 2.0
}
\section{REST query}{
PUT (multipart/form-data): ndex_config$api$network$update$url
data: CXNetworkStream = data
}
\examples{
## Establish a server connections with credentials
# ndexcon = ndex_connect('MyAccountName', 'MyPassword')
## Find one of your networks and get its UUID
# networks = ndex_find_networks(ndexcon, accountName='MyAccountName')
# networkId = networks[1,"externalId"]
## Get the network data
# rcx = ndex_get_network(ndexcon, networkId)
## Do some changes to rcx..
## and update the network
# networkId = ndex_update_network(ndexcon, rcx, networkId)
# networkId = ndex_update_network(ndexcon, rcx) ## same as previous
NULL
}
|
5468002612bbde3c2c5170b6bf43b21bbefd07b9 | 9777d178fb0545428077345b06bce98605a4bd1c | /EconAnalysis/R/HRQoL_cea.R | da8ecbb61673e6418c8658f48f604fe4d1ce0078 | [] | no_license | ejanderson1/EconAnalysis | c9ff12997a9044d1ae1f90e7c743fd1f7ed0f065 | 79cf171d0d752316c1427d79507c856b5a776dba | refs/heads/master | 2021-01-25T08:19:30.621315 | 2017-07-12T18:47:20 | 2017-07-12T18:47:20 | 93,753,611 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,311 | r | HRQoL_cea.R | #' @title Health Related Quality of Life Module (HRQoL)
#'
#' @description Calculates the total HRQoL accumulated from
#' person-time spent in each relevant health state,
#' with a final output of total quality-adjusted
#' life years (QALYs)
#'
#' @return This function returns an object with total QALYs accumulated
#' from person-time spent in each health state over the duration of
#' the model
#'
#' @details
#' HRQoL is calculated by multiplying the estimated utility for a
#' particular health state by the amount of person-time spent in that health
#' state. Total QALYs are then calcultated by summing accross the HRQoL for
#' all relevant health states.
#'
#' @keywords CEA
#'
#' @export
#'
# Load the EpiModel simulation output and source the CEA parameter
# definitions. NOTE(review): relative paths -- this assumes the working
# directory is the repository root; confirm what objects each file provides.
load("EconAnalysis/EpiModel SIMS/sim.n3001.rda")
source("EconAnalysis/R/params_cea.r")
# General QALY calculation - Sum states separately (one at a time)
# QALYs accrued in a single health state: the state's utility weight times
# the person-time spent in that state. Vectorised, so both arguments may be
# vectors (elementwise product).
qaly_calc <- function(utility, pt_state) {
  utility * pt_state
}
# Calculate QALYs for each state and sum to get total QALYs
# Total QALYs across all health states.
#
# @param util_vec Numeric vector of utility weights, one per health state.
# @param pt_vec   Numeric vector of person-time per state, aligned with util_vec.
# @return Single numeric: sum over states of utility * person-time
#         (0 for empty input).
total_qaly_calc <- function(util_vec, pt_vec) {
  # Guard against silent recycling when the two vectors are misaligned.
  stopifnot(length(util_vec) == length(pt_vec))
  # The original summed qaly_vec[1:length(qaly_vec)], which is just
  # sum(qaly_vec) -- and 1:length() returns c(1, 0) on empty input,
  # producing NA instead of 0.
  sum(util_vec * pt_vec)
}
# Function specific to EpiModel STI project
# QALY calculation specific to the EpiModel STI/HIV project.
#
# Multiplies the utility weight of each modelled HIV health state by the
# person-time accumulated in that state, then sums across states.
#
# @param *.util       Utility weight (HRQoL) for each health state.
# @param time.hivneg  Person-time spent HIV-negative.
# @param stage.time.* Person-time in each infection stage; "ndx"/"dx"/"art"
#                     denote undiagnosed / diagnosed / on-ART, and "ar"/"af"
#                     are the two acute sub-stages (TODO confirm the ar/af
#                     expansion against the EpiModel codebase).
# @param ...          Ignored; lets callers pass a larger parameter set.
# @return A list of the 13 per-state QALY totals followed by total.qalys.
#         Elements are now named (e.g. $total.qalys) but keep the original
#         positional order, so existing index-based access still works.
HIV_qaly_em <- function(HIVneg.util,
                        acute.undx.util,
                        acute.dx.util,
                        early.chron.undx.util,
                        early.chron.dx.yr1.util,
                        early.chron.dx.postyr1.util,
                        early.chron.art.util,
                        late.chron.undx.util,
                        late.chron.dx.util,
                        late.chron.art.util,
                        aids.undx.util,
                        aids.dx.util,
                        aids.art.util,
                        time.hivneg,
                        stage.time.ar.ndx,
                        stage.time.af.ndx,
                        stage.time.ar.dx,
                        stage.time.af.dx,
                        stage.time.early.chronic.ndx,
                        stage.time.early.chronic.dx.yrone,
                        stage.time.early.chronic.dx.yrstwotolate,
                        stage.time.early.chronic.art,
                        stage.time.late.chronic.ndx,
                        stage.time.late.chronic.dx,
                        stage.time.late.chronic.art,
                        stage.time.aids.ndx,
                        stage.time.aids.dx,
                        stage.time.aids.art,
                        ...) {
  # Per-state QALYs: utility weight x person-time in state. The two acute
  # sub-stages (ar/af) share a utility, so their times are pooled.
  qalys <- list(
    hivneg.qaly = HIVneg.util * time.hivneg,
    acute.undx.qaly = acute.undx.util * (stage.time.ar.ndx + stage.time.af.ndx),
    acute.dx.qaly = acute.dx.util * (stage.time.ar.dx + stage.time.af.dx),
    early.chron.undx.qaly = early.chron.undx.util * stage.time.early.chronic.ndx,
    early.chron.dx.yr1.qaly = early.chron.dx.yr1.util *
      stage.time.early.chronic.dx.yrone,
    early.chron.dx.postyr1.qaly = early.chron.dx.postyr1.util *
      stage.time.early.chronic.dx.yrstwotolate,
    early.chron.art.qaly = early.chron.art.util * stage.time.early.chronic.art,
    late.chron.undx.qaly = late.chron.undx.util * stage.time.late.chronic.ndx,
    late.chron.dx.qaly = late.chron.dx.util * stage.time.late.chronic.dx,
    late.chron.art.qaly = late.chron.art.util * stage.time.late.chronic.art,
    aids.undx.qaly = aids.undx.util * stage.time.aids.ndx,
    aids.dx.qaly = aids.dx.util * stage.time.aids.dx,
    aids.art.qaly = aids.art.util * stage.time.aids.art
  )
  # Grand total across all states; summing the list replaces the original
  # error-prone hand-written 13-variable sum.
  qalys$total.qalys <- sum(unlist(qalys))
  qalys
}
|
02ae4bc67f5cefaa38f7fac2c7ebef9d3f2c17af | 143881b207e34ff3531b04797b5624e772b89313 | /proyecto3.R | c20bc240845d06ea5bfb99025a7c5babf2535627 | [] | no_license | seherrera/Proyecto3 | d6f113327ab5761a320fb02e48e4a6060a67b744 | 0fbb2fd65cbd2dcbff25a3a510e28e398862e8a8 | refs/heads/main | 2023-06-11T02:21:41.857685 | 2021-06-25T17:20:34 | 2021-06-25T17:20:34 | 380,308,801 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 7,658 | r | proyecto3.R | # Análisis de deportes de resistencia
#librerias
```{r librerias}
library(tidyverse)
library(tidymodels)
library(pROC)
library(e1071)
library(dplyr)
library(cluster)
```
## Cargar datos
el data frame consta de 167615 datos con 17 variables los cuales seran almacenados en data.
```{r cargando datos}
# Print the current working directory (interactive sanity check), then move
# to the machine-specific folder where the data lives.
getwd()
setwd("D:/U/mineria de datos")
# Endurance-activity dataset: 167,615 rows x 17 columns per the write-up above.
data <- readRDS("endurance.rds")
summary(data)
```
## Limpieza de datos
Antes de evaluar los datos hay que hacer una limpieza de estos.eliminaremos las variables id,athlete, device_name, Start_date_local, Records y has_heartrate
```{r limpiando datos}
# Drop identifier/metadata columns that are not used as model features.
drop_cols <- c("id", "athlete", "device_name", "start_date_local",
               "records", "has_heartrate")
for (col in drop_cols) {
  data[[col]] <- NULL
}
```
existen variables tipo character que hay que cambiar a numéricas.utilizaremos un buleano para definir como 1 las actividades en bicicleta y 0 las que son a pie.
```{r pre procesamiento}
# Columns stored as character that are really numeric measurements.
numeric_cols <- c("elev_low", "elev_high", "max_speed", "average_speed")
for (col in numeric_cols) {
  data[[col]] <- as.numeric(data[[col]])
}
# Encode activity type as a binary indicator: 1 for cycling activities
# ("Ride"/"EBikeRide"), 0 otherwise; then drop the original character column.
is_ride <- data$type == "Ride" | data$type == "EBikeRide"
data$type_code <- as.numeric(is_ride)
data$type <- NULL
```
a continuación buscaremos los NA
```{r NAs}
# For incomplete observations, recode empty strings as NA so they can be
# removed in the next step.
data[data == ""] <- NA
# Count NAs per column (printed for inspection only; result is not stored).
# funs() was deprecated in dplyr 0.8 -- use a purrr-style lambda instead.
data %>%
  summarise_all(~ sum(is.na(.)))
# Drop every observation with NA in the key measurement columns.
data_pre <- data %>%
  filter(!(is.na(calories)|is.na(elev_low)|is.na(elev_high)|is.na(total_elevation_gain)))
# Verify that no NAs remain in those columns.
data_pre %>%
  summarise_all(~ sum(is.na(.)))
```
Ademas se puede ver que existen entradas en distance con valores = 0 e igual lo haremos con los demas datos
```{r eliminar distancias 0}
# A value of exactly 0 in any of these measurements marks a missing/invalid
# record: recode 0 as NA, then drop every row with an NA in any of them
# (or in type_code).
zero_as_na <- c("distance", "calories", "elev_high", "elev_low",
                "total_elevation_gain", "moving_time", "elapsed_time",
                "average_speed")
for (col in zero_as_na) {
  data_pre[[col]][data_pre[[col]] == 0] <- NA
}
data_pre <- data_pre %>%
  filter(!(is.na(distance)|is.na(calories)|is.na(elev_high)|is.na(elev_low)|is.na(total_elevation_gain)|is.na(moving_time)|is.na(elapsed_time)|is.na(average_speed)|is.na(type_code)))
```
como sabemos que existen datos atipicos realizaremos boxplot para cada una de las variables.
```{r boxplot }
# Horizontal boxplots to inspect spread and outliers of each measurement.
# NOTE(review): these plot columns of the raw `data`, not the cleaned
# `data_pre` built above -- confirm that is intentional. Each assignment
# captures the statistics list returned by boxplot() (and shadows the
# column name with it).
calories=boxplot(data$calories, horizontal = TRUE)
distance=boxplot(data$distance, horizontal = TRUE)
elev_low=boxplot(data$elev_low, horizontal = TRUE)
elev_high=boxplot(data$elev_high, horizontal = TRUE)
moving_time=boxplot(data$moving_time, horizontal = TRUE)
max_speed=boxplot(data$max_speed, horizontal = TRUE)
elapsed_time=boxplot(data$elapsed_time, horizontal = TRUE)
average_speed=boxplot(data$average_speed, horizontal = TRUE)
total_elevation_gain=boxplot(data$total_elevation_gain, horizontal = TRUE)
```
Los gráficos de boxplot comprueban lo mencionado acerca de la presencia de datos atípicos en cada una de las variables, por lo que se aplicarán filtros a cada una de estas para así eliminar las data que sean anormales.
```{r eliminando datos atipicos}
# BUG FIX: each original line re-filtered the raw `data` and overwrote
# data_pre, so only the last condition (total_elevation_gain < 2000) ever
# took effect and the NA cleaning done above was discarded. Apply all the
# outlier cut-offs cumulatively to data_pre instead (conditions in a single
# filter() are combined with AND).
data_pre <- data_pre %>%
  filter(calories < 2000,
         distance < 50000,
         elev_low < 2000,
         elev_low > -1000,
         elev_high < 5000,
         moving_time < 25000,
         max_speed < 60,
         elapsed_time < 15000,
         average_speed < 30,
         total_elevation_gain < 2000)
```
borramos ......
#escalamiento
```{r escalamiento}
# Fix the RNG seed so the train/test split is reproducible.
set.seed(500)
library(rsample)
# Standardise the nine numeric feature columns (z-scores). Note: R drops a
# 0 index, so data_pre[0:9] is simply columns 1-9.
data_scal <-data.frame(scale(data_pre[0:9]))
# Re-attach the unscaled 10th column (type_code) and treat it as a factor
# so the models below do classification.
data_scal <- cbind(data_scal,data_pre[10])
data_scal$type_code <- data_scal$type_code %>% as.factor()
# 80/20 split. NOTE(review): data_train2/data_test2 are identical copies of
# data_train/data_test (same split object), kept for the tree-model section.
datasplit <- initial_split(data_scal,prop= 0.8, strata = NULL)
data_train <- training(datasplit)
data_test <- testing(datasplit)
data_train2 <- training(datasplit)
data_test2 <- testing(datasplit)
```
#regresion multiple
utilizaremos una regresion multiple para ver cuales son las variables mas relevantes.
```{r reg}
library(regclass)
# Multiple linear regression of the (numeric) activity-type code on all nine
# features -- used to gauge which predictors matter and to check
# multicollinearity, not as the final classifier.
reg_mult <- lm(type_code %>% as.numeric() ~ calories + distance + elev_low +
                 elev_high + max_speed + moving_time + elapsed_time +
                 average_speed + total_elevation_gain, data = data_scal)
summary(reg_mult)
# Variance inflation factors (regclass::VIF) to flag collinear predictors.
VIF(reg_mult)
```
los resultados de la regresion multiple nos muestra un coeficiente de determinacion de un 40% y se puede ver que existen variables que no son tan significativas
como elev_low, elev_hig, elapsed_time
# Modelo Naive Bayes
utilizaremos el método Naive Bayes
```{r naive bayes modelo}
# Fit a naive Bayes classifier (e1071) for activity type on the training
# split, using the nine scaled features as predictors.
DP_model <- naiveBayes(type_code ~calories + distance + elev_low +
                         elev_high + max_speed + moving_time + elapsed_time +
                         average_speed + total_elevation_gain,
                       data = data_train)
```
Luego corresponde evaluar el modelo, obtener su curva ROC y posteriormente su AUC.
```{r naive bayes predictions}
# Predict hard class labels on the test set and keep them on data_test.
PredictionModel <- predict(DP_model, newdata = data_test, type = "class")
data_test$Predclass <- PredictionModel
# NOTE(review): the ROC curve is built from predicted classes rather than
# class probabilities, so it has a single operating point -- consider
# predict(..., type = "raw") for a proper ROC.
curvaROC <- roc(data_test$type_code %>% as.numeric(), data_test$Predclass %>% as.numeric())
plot(curvaROC)
```
```{r naive bayes roc auc}
# Area under the ROC curve for the naive Bayes predictions.
auc(curvaROC)
# Confusion matrix of truth vs prediction, rendered as a heatmap.
data_test %>%
  conf_mat(type_code, Predclass) %>%
  autoplot(type = "heatmap")
```
el modelo nos entrega un AUC bastante bajo con un valor de de 74.29%
por esto buscaremos otro modelo
#arbol de decisión
```{r receta}
# Minimal preprocessing recipe: predict type_code from every other column of
# the training data (no additional transformation steps).
receta <-
  recipe(type_code ~ ., data = data_train2)
receta
```
ahora procedemos a crear nuestro modelo de arbol de decision con 5 capas de decision, y un minimo numero de entides por hoja de 10
```{r modelo_trees}
library(rpart)
# Decision-tree specification: at most 5 levels deep, at least 10
# observations per leaf; classification mode with the rpart engine.
modelo_tree <- decision_tree(tree_depth = 5, min_n = 10) %>%
  set_engine("rpart") %>%
  set_mode("classification")
modelo_tree
```
```{r AUC modelo arbol de decisión}
# Fit a parsnip model specification on the training split and return its
# test-set ROC AUC (using the probability of class 0 as the score).
#
# The data/recipe are now explicit parameters with backward-compatible
# defaults, instead of hidden references to global state.
#
# @param mod   A parsnip model specification.
# @param rec   Preprocessing recipe (defaults to the global `receta`).
# @param train Training data (defaults to the global `data_train2`).
# @param test  Evaluation data (defaults to the global `data_test2`).
# @return A one-row tibble with the roc_auc metric.
fit_mod <- function(mod, rec = receta, train = data_train2, test = data_test2){
  fitted_wf <-
    workflow() %>%
    add_model(mod) %>%
    add_recipe(rec) %>%
    fit(data = train)
  # Class probabilities on the held-out data, joined back to the true labels.
  preds <-
    predict(fitted_wf, test, type = "prob") %>%
    bind_cols(test)
  preds %>%
    roc_auc(truth = type_code, .pred_0)
}
fit_mod(modelo_tree)
```
el modelo nos entrega un AUC bastante alto con un valor de de 92.2% por lo que lo utilizaremos para extraer nuestra data. de esto extraeremos la data que nos explicara la probabilidad de que pertenezca al grupo 0 o 1
```{r extraccion de la data del arbol de decision}
# Refit the decision-tree workflow (same steps as inside fit_mod) so the
# fitted model object is available for extracting per-row probabilities.
modelo_fit <-
  workflow() %>%
  add_model(modelo_tree) %>%
  add_recipe(receta) %>%
  fit(data = data_train2)
# Test-set class probabilities (.pred_0 / .pred_1) joined to the test rows.
model_pred <-
  predict(modelo_fit, data_test2, type = "prob") %>%
  bind_cols(data_test2)
```
al ya tener asignada las probabilidades de que la actividad pertenezca a cada grupo veremos a cual grupo corresonde esto comparando las probabilidades dejando la mayor entre 0 y 1 en su grupo correspondiente
```{r asignacion categoria }
# Hard label = class with the larger predicted probability (ties go to 0).
data_test2$prediccion <- ifelse(model_pred$.pred_0 >= model_pred$.pred_1, 0, 1)
```
```{r seleccion de data mal clasificada}
# Rows whose true label disagrees with the tree's prediction; nrow() gives
# the count of misclassified activities. (type_code is a factor and
# prediccion numeric; != compares the factor's levels against the number.)
data_error <- data_test2 %>% filter(type_code != prediccion)
nrow(data_error)
```
nos dieron un total de 2284 actividades que estaban mal etiquetadas segun nuestro modelo. |
e4a6e86a75a8ac0b46e5b37a0f808ac24cc7422a | 639eed947fe7315626bf13a1b9391229b9e9c22f | /figures01/carsPriceVsWeight/carsPriceVsWeight.R | 79cb0dac26444e8f6b45ff273fde93cc93a6f9a7 | [
"CC0-1.0"
] | permissive | Cstats-learner/statistics | 8bcdcbb92239f75a58cbe691b4e05484a422cda9 | dc75b9de0c331acee8d4ea8d6fce33c3b4ed897d | refs/heads/master | 2022-12-25T02:42:49.088456 | 2020-09-30T18:50:49 | 2020-09-30T18:50:49 | 300,021,890 | 0 | 0 | CC0-1.0 | 2020-09-30T18:50:50 | 2020-09-30T18:44:24 | HTML | UTF-8 | R | false | false | 530 | r | carsPriceVsWeight.R | library(openintro)
# Example data set (cars) and color palette (COL) shipped with openintro.
data(cars)
data(COL)
# Open the PDF device -- presumably openintro's myPDF wrapper -- at
# 5in x 3.3in with tight margins.
myPDF("carsPriceVsWeight.pdf", 5, 3.3, mar=c(3.6,3.6,1,1), mgp=c(2.5,0.7,0))
# Scatterplot of price against weight, y-axis anchored at 0.
plot(cars$weight, cars$price, xlab='Weight (Pounds)', ylab='Price ($1000s)', pch=19, col=COL[1,2], ylim=c(0, max(cars$price)))
# Grid of weights at which to evaluate the fitted curve.
w <- seq(1000, 5000, 100)
# Rough Model
# Weighted least squares (weights 1/weight^2): g1 linear (only used by the
# commented-out anova below), g2 quadratic.
g1 <- lm(price ~ weight, cars, weights=1/weight^2)
g2 <- lm(price ~ weight + I(weight^2), cars, weights=1/weight^2)
# Overlay the quadratic fit as a dashed line, then close the device.
p <- predict(g2, data.frame(weight=w))
lines(w, p, lty=2, col=COL[5,3])
dev.off()
#anova(g1, g2)
#hist(cooks.distance(g2))
|
3d5d0797427b6907479e3aed8656ff6c16b89c95 | 22b6f9c00d3d00be67f8d960b807f3a51cb4a63a | /man/store_micro.Rd | 0e4437048b861002888e58db011b668e964211f5 | [
"MIT"
] | permissive | llrs/integration-helper | 244fa55cd7ae4e33cea54248b9536851bb3c415b | bd6b13bb125ea63e78fb57336adca9f9cbb88bfb | refs/heads/master | 2021-09-23T21:18:08.426431 | 2021-09-21T08:53:54 | 2021-09-21T08:53:54 | 141,977,937 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 360 | rd | store_micro.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correlations.R
\name{store_micro}
\alias{store_micro}
\title{Store result by microorganism}
\usage{
store_micro(x, o2, label)
}
\arguments{
\item{x}{Name of microorganism}
\item{o2}{Filter samples}
\item{label}{Label for the files}
}
\description{
Store result by microorganism
}
|
77e0ec2b09cc2aafafb8a9df23887040a3036568 | 0000272576e5af458ea4c7b5beb5ce9a5aeb8351 | /AutoDetectionExocytosis_JCB_current/Analysis_rfiles/fit_fusion_exp_line.R | 18b8ebd0aae79f51dfc18e85d53ff957dc138066 | [] | no_license | GuptonLab/Automated-Exocytosis-Detection | f0a2c2469863784fb404e3b8d1e1958c3e7f2479 | 97751721e2ea7ae1a8c14f72bbc5ae590dd70d4d | refs/heads/master | 2021-06-05T03:46:17.665003 | 2020-08-19T14:39:07 | 2020-08-19T14:39:07 | 115,131,614 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,256 | r | fit_fusion_exp_line.R |
#You have to individually change the file paths for this piece
#code. I had to fidget a bit with it and dropped the automation, and for time this is the non-automated.
#This is the final code and your output file name should be/contain the detected events.
# Input: per-track fluorescence traces; output: per-track fusion statistics.
# Both paths are machine-specific.
file_path_name = "E:/Libraries/Documents/SCIENCE_GUPTON/TESTING/DataFiles/Stream_4-1_PreProcessed_fluorescence_traces.csv"
output_file_name = "E:/Libraries/Documents/SCIENCE_GUPTON/TESTING/DataFiles/Stream_4_fusion_stats.csv"
#read in the file
wt1_fin <- read.table(file=file_path_name, header=T, sep=",")
#separate out the data portion
# Columns 24-55: 32 fluorescence samples per track; columns 5-23: 19
# background samples. (Column meanings inferred from the indexing -- confirm
# against the CSV header.)
data_only <- wt1_fin[c(24:55)]
background <- wt1_fin[c(5:23)]
#separate out the centroid,time point, and length of track
frame_obj <- wt1_fin[c(3:4)]
#transpose the matrixes back and forth to turn it into a matrix data type
# t() on a data frame coerces to a matrix; transposing twice restores the
# original orientation while keeping the matrix type.
frame_obj = t(frame_obj)
frame_obj = t(frame_obj)
data_only = t(data_only)
data_only = t(data_only)
background <- t(background)
background <- t(background)
# Background-normalise every trace: for each track (row), convert raw
# fluorescence F to (F - F_bg) / F_bg, where F_bg is the mean of that row's
# background samples. rowMeans plus R's column-major recycling (mean_bac has
# one entry per row, so `matrix - mean_bac` subtracts it row-wise) replaces
# the original element-by-element double loop, and avoids the
# 1:length(...) idiom, which misbehaves on zero rows.
mean_bac <- rowMeans(background)
data_only <- (data_only - mean_bac) / mean_bac
#Taking the centroid x,y values
centroid <- wt1_fin[c(1:2)]
#transform the data into a different data type
centroid <- t(centroid)
centroid <- t(centroid)
#Set up the time points in seconds
# 32 time points at 0.1 s spacing (0.1 .. 3.2 s), matching the 32 trace columns.
x_time = seq(0.1,3.2, by = 0.1)
#Add a non-negative term for log transform
# NOTE(review): log() below is applied to x_time, not to the intensities;
# presumably the +10 offset exists so the 10.1 signal threshold works on
# strictly positive values -- confirm against the analysis write-up.
data_only = data_only + 10
#creat the vector for the R-square, half_life, and max intensity
# Per-track result vectors, filled by indexed assignment in the loop below.
r_sq <- vector()
half_life <- vector()
max_int <- vector()
# Diagnostic print: number of tracks to process.
print(length(data_only[,1]))
# For each track (row): record its peak offset intensity and, when the track
# rises above the +10 baseline (i.e. shows real signal), fit a log-time
# linear model intensity ~ a + b*log(t). The negated slope is stored as
# "half_life" and the fit's R-squared as a quality score; flat tracks get NA
# so they are removed by the na.omit() step below.
# (seq_len replaces 1:length(...), the peak is computed once, and summary()
# is called once per fit instead of twice.)
for (i in seq_len(nrow(data_only))) {
  track <- data_only[i, ]
  max_int[i] <- max(track)
  if (max_int[i] > 10.1) {
    fit <- lm(track ~ log(x_time))
    fit_summary <- summary(fit)
    # Coefficient 2 is the slope on log(t); sign-flipped by convention here.
    half_life[i] <- -fit_summary$coefficients[2]
    r_sq[i] <- fit_summary$r.squared
  } else {
    # No detectable signal above the offset baseline: mark as missing.
    half_life[i] <- NA
    r_sq[i] <- NA
  }
}
# Assemble the per-track stats table: after the binds below the columns are
# max_int, the frame/track info, centroid x/y, r_sq and half_life.
tea = rbind(r_sq,half_life)
tea = t(tea)
tea = cbind(centroid,tea)
tea = cbind(frame_obj,tea)
# Undo the +10 offset added before fitting so the reported max intensity is
# on the original background-normalised scale.
max_int = max_int - 10
tea = cbind(max_int,tea)
# Tracks that failed the signal threshold carry NA fit stats; drop them so
# only fitted (detected) events remain.
full_fusion <- na.omit(tea)
write.csv(full_fusion,file = output_file_name)
|
46058d8cc9599a502020317c4b06b80984a772bb | 9e059240cb494f4b43868fac4f6c0416411139a1 | /man/Coordinate_Covnerter6.Rd | e5fd03aa6be17ad6a6de4a26b985292eb4597ec6 | [] | no_license | kingyang728/genomicCoordinateConverter | b161448b4d595069aa30d12f6ea1921c661ef583 | 66ac990688137b89c8431b3c91e37adfc572f5fb | refs/heads/master | 2022-10-24T05:09:52.679835 | 2020-06-16T08:56:52 | 2020-06-16T08:56:52 | 272,298,011 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 922 | rd | Coordinate_Covnerter6.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Coordinate_Converter.R
\name{Coordinate_Covnerter6}
\alias{Coordinate_Covnerter6}
\title{Coordinate Converter 6}
\usage{
Coordinate_Covnerter6(
chromosome,
genomic_start,
genomic_end,
genomeVersion,
targetGenomeVersion
)
}
\arguments{
\item{chromosome}{sequence name like "chrX"}
\item{genomic_start}{genomic start position}
\item{genomic_end}{genomic end position}
\item{genomeVersion}{original genome version}
\item{targetGenomeVersion}{target genome version which want to transfer}
}
\value{
dataframe which contains the corresponding converted target genome coordinates.
}
\description{
This function takes a genomic location plus the original and target genome versions as input and converts these to a dataframe which contains
the corresponding genomic position.
}
\examples{
DF6 <- Coordinate_Covnerter6("chrX",45060024,45060024,"hg16","hg18")
}
|
55d457d033ab1332f49c0ad0d5fa10353fa728ee | f58a1c8b5043afb99cfcb386dbddc30fb43becf5 | /tests/doRUnit.R | d481b74b95480dd931e0a56c3e7a0d548bf44993 | [] | no_license | MassBank/RMassBank | b00fe69f7c0026c13b7fe337522ceaa469ee8340 | 3b61006a1a4bac9c94e780ad82834a1dae9ce417 | refs/heads/main | 2023-08-16T18:02:42.074467 | 2023-05-04T09:51:53 | 2023-05-04T09:51:53 | 9,348,686 | 12 | 15 | null | 2023-07-28T09:20:16 | 2013-04-10T15:01:01 | R | UTF-8 | R | false | false | 956 | r | doRUnit.R | #### doRUnit.R --- Run RUnit tests
####------------------------------------------------------------------------
### Structure borrowed from rcppgls:
### https://github.com/eddelbuettel/rcppgsl/blob/master/tests/doRUnit.R
# Run the package's RUnit test suite, but only when the optional test
# dependencies are available (quiet require() so a missing package skips
# the tests instead of erroring).
if(require("RUnit", quietly = TRUE)) {
  # Tests also need RMassBankData at version >= 1.99.0;
  # compareVersion(...) == -1 means "installed version is older".
  if(require("RMassBankData", quietly = TRUE) && !(compareVersion(installed.packages()["RMassBankData","Version"],"1.99.0") == -1)) {
    pkg <- "RMassBank"
    print("Starting tests")
    require(pkg, character.only=TRUE)
    # Locate the unit tests shipped inside the installed package and run them.
    path <- system.file("unitTests", package = pkg)
    stopifnot(file.exists(path), file.info(path.expand(path))$isdir)
    source(file.path(path, "runTests.R"), echo = TRUE)
  } else {
    ## Taking this message out until the new RMassBankData is on bioc, just to avoid confusion.
    # message("Package RMassBankData with version > 1.99 not available, cannot run unit tests")
  }
} else {
  message("Package RUnit not available, cannot run unit tests")
}
|
8af53100368f66b60e100df7d9052328819c36fb | 6832cf147e551caf2ab219357549b1af2584c3fb | /TN_SoIB_list.R | 500c1aafac39f20be8e4f9007967482dbe048904 | [] | no_license | ashwinv2005/analyses-for-policy-eBird-SoIB | 0561a6ee5a7923d721f6bc9be76f667a1301c2bb | ca8bf423f000366e72b48fd54dbf86f0e6904024 | refs/heads/master | 2022-07-19T09:16:44.114826 | 2020-05-28T19:11:16 | 2020-05-28T19:11:16 | 261,872,950 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,343 | r | TN_SoIB_list.R | ##################################
# Load pre-built workspace objects. `data` (used below) comes from these
# .RData files; maps/clips presumably hold spatial layers -- the object
# names are not visible here.
load("data.RData")
load("maps.RData")
load("clips.RData")
require(tidyverse)
require(rgdal)
require(sp)
require(sf)
require(rgeos)
require(extrafont)
# SoIB master table and the eBird-name <-> India-checklist-name mapping.
soib = read.csv("stateofindiasbirdsfull.csv")
map = read.csv("Map to Other Lists - map.csv")
# Drop the listed ambiguous taxa, keeping only the 2019 eBird name and the
# India checklist name for joining.
map1 = map %>%
  filter(!eBird.English.Name.2018 %in% c("Sykes's Short-toed Lark","Green Warbler","Sykes's Warbler",
                                         "Taiga Flycatcher","Chestnut Munia","Desert Whitethroat",
                                         "Hume's Whitethroat","Changeable Hawk-Eagle")) %>%
  dplyr::select(eBird.English.Name.2019,India.Checklist.Name)
# Attach the 2019 eBird name to each SoIB row; keep only name + status.
soib = left_join(soib,map1,by = c("Common.Name" = "India.Checklist.Name"))
soib = soib %>% select(eBird.English.Name.2019,Concern.Status)
# Tag every observation with its species' SoIB concern status.
data = left_join(data,soib,by = c("COMMON.NAME" = "eBird.English.Name.2019"))
# Tamil Nadu records only; species/subspecies-level taxa, one row per taxon.
tn = data %>% filter(ST_NM == "Tamil Nadu")
tnsp = tn %>% filter(CATEGORY %in% c("species","issf")) %>% distinct(COMMON.NAME,Concern.Status)
# Add scientific names from the Clements checklist.
clem = read.csv("eBird-Clements-v2019-integrated-checklist-August-2019.csv")
tnsp = left_join(tnsp,clem,by = c("COMMON.NAME" = "English.name"))
tnsp = tnsp %>% select(scientific.name,Concern.Status)
require(splitstackshape)
require(magrittr)
require(reshape2)
#listmap <- read.delim2("Checklist Mapper.csv", sep=',')
# eBird bar-chart export: tab-separated, no header; quoting disabled because
# species names can contain apostrophes.
dat <- read.delim("ebird_IN__1900_2020_1_12_barchart.txt",
                  na.strings = c("NA", "", "null"),
                  as.is=TRUE,
                  sep="\t",
                  header = FALSE,
                  quote="")
# Extract Sample Size into an array
# Row 4 of the export carries the 48 per-period sample sizes (columns 2-49).
sample_size <- dat[4,][2:49]
colnames(sample_size) <- 1:48
# Remove first four rows that has no data
dat <- dat[-c(1,2,3,4),]
# Split the species name
# Column V1 apparently embeds the species label before an "=" separator.
dat <- cSplit(dat, 'V1', sep="=", type.convert=FALSE)
colnames(dat) <- c(1:49,"COMMON.NAME","SCIENTIFIC.NAME")
# Clean the species name
# Fixed-offset trimming around the names; the 11/6/7-character offsets are
# specific to this export format -- verify if the export changes.
dat <- dat %>%
  within (COMMON.NAME <- substr(COMMON.NAME,1,nchar(COMMON.NAME)-11)) %>%
  within (SCIENTIFIC.NAME <- substr(SCIENTIFIC.NAME,1,nchar(SCIENTIFIC.NAME)-6)) %>%
  within (SCIENTIFIC.NAME <- substr(SCIENTIFIC.NAME,7,nchar(SCIENTIFIC.NAME)))
# Join concern status to the bar-chart taxa and write the final TN list.
tnsp = left_join(tnsp,dat,by = c("scientific.name" = "SCIENTIFIC.NAME"))
tnsp = tnsp %>% select(COMMON.NAME,scientific.name,Concern.Status)
write.csv(tnsp,"TN_SoIB_species.csv",row.names=F)
|
4fdc2c3630e122b7d8dee27d4bb4f59f90780f14 | fb1be7d45fa43eea1c6c455fd2cb8bd25e49bccb | /man/PSU_strat.Rd | 2235eca30e3e1a0fdee0f293e65c69912169d7ab | [] | no_license | cran/R2BEAT | c0ada01d1b238fd6dd2cea3b678bc2e9b19b6690 | 2dce446a7971399e1a1eb131930bc9d1d54b3be3 | refs/heads/master | 2023-06-17T21:51:57.676157 | 2023-05-25T09:20:06 | 2023-05-25T09:20:06 | 236,877,643 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 850 | rd | PSU_strat.Rd | \name{PSU_strat}
\Rdversion{1.0}
\alias{PSU_strat}
\docType{data}
\title{
Information on Primary Stage Units (PSUs) stratification
}
\description{
Example data frame containing information on Primary Stage Units (PSUs) stratification.
}
\usage{
data(beat.example)
}
\format{
The PSU_strat data frame contains a row for each Primary Stage Units (PSUs) with the following variables:
\describe{
\item{STRATUM}{
Identifier of the stratum (numeric)
}
\item{PSU_MOS}{
Measure of size of the primary stage unit (numeric)
}
\item{PSU_ID}{
Identifier of the primary stage unit (numeric)
}
}
}
\details{
Note: the names of the variables must be the ones indicated above.
}
\examples{
\dontrun{
# Load example data
data(beat.example)
PSU_strat
str(PSU_strat)
}
}
\keyword{datasets}
|
f0084efc80a1c9015adb4e3ffede926d9f625c5f | 82ebc73dc6869319a2bf47f57ac41fe306dc12f2 | /R/calcSig.R | 6a94f26ec6bcb71211426f82ae542d67b4843c44 | [] | no_license | Sage-Bionetworks/snm | d5418b0f989089e342f7505756c2c304101f80c9 | 1ef4124d2819577b4428ffd13a25c443f71cf916 | refs/heads/master | 2021-01-01T18:49:14.757942 | 2012-08-24T22:30:22 | 2012-08-24T22:30:22 | 5,533,556 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,038 | r | calcSig.R |
calcSig <-
function(Y=NULL, bio.var=NULL, adj.var=NULL, df.full=NULL, df.null=NULL) {
  # Compute per-row (probe-level) significance of the biological variables in
  # `bio.var`, adjusting for `adj.var`, by fitting full and null models with
  # edge.fit()/edge.glr() and estimating q-values with edge.qvalue().
  #
  # Args:
  #   Y        numeric matrix of data, probes x arrays.
  #   bio.var  model matrix of the biological variables of interest
  #            (required; constant columns are removed, and an
  #            intercept-only matrix is rejected).
  #   adj.var  model matrix of adjustment variables (optional; an intercept
  #            column is always prepended).
  #   df.full  degrees of freedom of the full model; defaults to
  #            ncol(bio.var) + ncol(adj.var).
  #   df.null  degrees of freedom of the null model; defaults to ncol(adj.var).
  #
  # Returns: an object of class "edge" carrying coefficients (cfs), p-values,
  #   q-values and pi0 (NA when q-value estimation fails), the test statistic,
  #   and the total / full-model / reduced-model sums of squares.
  if(!is.matrix(Y)) {
    stop("Y must be a matrix.")
  }
  if(is.null(adj.var)) {
    adj.var = cbind(rep(1,ncol(Y)))
  } else if(!is.matrix(adj.var)) {
    stop("adj.var must be a matrix.")
  }
  if(is.null(bio.var)) {
    stop("bio.var must be supplied by the user (and cannot be an intercept term only).")
  } else if(!is.matrix(bio.var)) {
    stop("bio.var must be a matrix.")
  }
  if(!all(nrow(bio.var)==nrow(adj.var), nrow(bio.var)==ncol(Y))) {
    stop("The dimensions of Y, bio.var, and adj.var are incompatible. Please correct these objects.")
  }
  # Redundant with the individual checks above, but kept so that the error
  # behavior is unchanged for callers relying on this message.
  if(!all(is.matrix(bio.var), is.matrix(adj.var), is.matrix(Y))) {
    stop("bio.var, adj.var, and Y must all be matrices.")
  }
  # Drop constant (intercept-like) columns from bio.var; at least one
  # non-constant column must remain.  bio.var is never NULL at this point.
  if(!is.null(bio.var)) {
    bio.var = cbind(bio.var[,!apply(bio.var, 2, function(x) {length(unique(x))==1})])
    if(ncol(bio.var)==0) {
      stop("bio.var must have at least one column (that is not an intercept term).")
    }
  }
  # Prepend an intercept column and drop constant columns from adj.var.
  adj.var = cbind(rep(1,ncol(Y)), adj.var[,!apply(adj.var, 2, function(x) {length(unique(x))==1})])
  # Assemble the "edge" object consumed by edge.fit()/edge.glr().
  inf.obj = list()
  inf.obj$n.arrays = ncol(Y)
  inf.obj$n.probes = nrow(Y)
  inf.obj$bio.var = bio.var
  inf.obj$adj.var = adj.var
  if(is.null(df.full)) {
    inf.obj$df.full = ncol(bio.var) + ncol(adj.var)
  } else {
    inf.obj$df.full = df.full
  }
  if(is.null(df.null)) {
    inf.obj$df.null = ncol(adj.var)
  } else {
    inf.obj$df.null = df.null
  }
  inf.obj$dat = Y
  class(inf.obj) = "edge"
  # Fit the models and form the generalized likelihood-ratio statistics.
  obs.fit <- edge.fit(inf.obj, odp=FALSE)
  obs.stat <- edge.glr(obs.fit, df1=inf.obj$df.full, df0=inf.obj$df.null, norm.pval=TRUE)
  inf.obj$cfs <- t(obs.fit$c1)
  inf.obj$pval <- obs.stat$pval
  # q-value estimation can fail (e.g. on degenerate p-value distributions);
  # fall back to NA rather than aborting the whole computation.
  tmp <- try(edge.qvalue(obs.stat$pval))
  # FIX: use inherits() instead of `class(tmp) == "try-error"`; comparing
  # class() with == is fragile when an object carries more than one class.
  if(inherits(tmp, "try-error")){
    inf.obj$pi0 <- NA
    inf.obj$qvalues <- NA
  } else {
    inf.obj$pi0 <- tmp$pi0
    inf.obj$qvalues <- tmp$qvalues
  }
  inf.obj$total.ssq = sum(sweep(Y,1,rowMeans(Y))^2)
  inf.obj$full.resid.ssq <- obs.stat$s.rss1
  inf.obj$red.resid.ssq <- obs.stat$s.rss0
  inf.obj$stat <- obs.stat$stat
  inf.obj$dat = NULL
  inf.obj
}
|
9bdd1e5feb00648936af5d516e16149a8527fa1c | 99db43ee4d48db933031e6765d17a25287c20e13 | /Point-raster specific extent.R | 05fed110579f7b4ff98d8537342be80f87c59503 | [] | no_license | FrankGuldstrand/point-into-raster | 035f38cd5e7118e9601b9c68dd46711a42c31eae | 35023c23fb1ccfecd9b01d6e0bb626fc76cab677 | refs/heads/master | 2021-11-03T13:14:41.749566 | 2019-04-26T08:55:10 | 2019-04-26T08:55:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,157 | r | Point-raster specific extent.R | pointtorasnter=function(df=df,shapeData=Lux_shapeData){
# Body of pointtorasnter(df, shapeData): interpolates the point values in df
# (expects columns x, y and num) onto a regular grid via inverse-distance
# weighting (gstat::idw), clips the result to the shapefile, and stretches the
# raster extent to the shapefile's bounding box.
# NOTE(review): relies on sp / gstat / raster being attached by the caller's
# environment -- confirm against the loading script.
spg = na.omit(df)
# coerce to SpatialPixelsDataFrame
coordinates(spg) = ~ x + y
proj4string(spg) = CRS("+proj=longlat +ellps=WGS84")
#extend the SpatialPixelsDataFrame into the regular box
spg@bbox=shapeData@bbox
# Create an empty grid where n is the total number of cells
grd = as.data.frame(spsample(spg, "regular", n=50000))
names(grd) = c("X", "Y")
coordinates(grd) = c("X", "Y")
gridded(grd) = TRUE # Create SpatialPixel object
fullgrid(grd) = TRUE # Create SpatialGrid object
# Add P's projection information to the empty grid
proj4string(grd) = CRS("+proj=longlat +ellps=WGS84")
# Interpolate the grid cells using a power value of 2 (idp=2.0)
P.idw = gstat::idw(num ~ 1, spg, newdata=grd, idp=2.0)
# Convert to raster object then clip to shapefile
r = raster(P.idw)
r.m = mask(r, shapeData)
# Stretch the raster extent to the shapefile's bounding box
# (bbox slots: [1]=xmin, [2]=ymin, [3]=xmax, [4]=ymax).
extent(r.m)=extent(shapeData@bbox[1], shapeData@bbox[3],
                   shapeData@bbox[2], shapeData@bbox[4])
return(r.m)
}
|
38d2d79feb4c77cfb82a8a60e876857cf5993873 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/OCA/examples/Risk.Rd.R | b9f3da5e11b946d7ff47787b2dfc2de721223cb9 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 824 | r | Risk.Rd.R | library(OCA)
### Name: Risk
### Title: Risk measures such as Value at Risk (VaR) and Expected
###   Shortfall (ES) with normal and t-student distributions.
### Aliases: Risk
### Keywords: manip
### ** Examples
# Reproducing Table 2.1 in page 47 of
# McNeil A., Frey R. and Embrechts P. (2005).
# Risk() returns gross values for a unit position, so subtract 1 and scale by
# 10000 to express the results per 10,000 units of exposure.
alpha <- c(.90, .95, .975, .99, .995)
# Both measures (VaR and ES) under both models (normal and t with 4 df).
(Risk(Loss=1, varcov=(0.2/sqrt(250))^2, alpha=alpha,
     measure='both', model='both', df=4)-1)*10000
# only VaR results
(Risk(Loss=1, varcov=(0.2/sqrt(250))^2, alpha=alpha,
     measure='VaR', model='both', df=4)-1)*10000
# only normal VaR results (defaults: measure and model left unspecified)
(Risk(Loss=1, varcov=(0.2/sqrt(250))^2, alpha=alpha)-1)*10000
# only ES based on a 4 degrees-of-freedom t-student.
(Risk(Loss=1, varcov=(0.2/sqrt(250))^2, alpha=alpha,
     measure='ES', model='t-student', df=4)-1)*10000
|
62a3c13fd5b06bf7938fba1a6d372ba5e9c8eeeb | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/paramlink/examples/transferMarkerdata.Rd.R | 952f66f80e5231ccc83d43e584370018020c9968 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 388 | r | transferMarkerdata.Rd.R | library(paramlink)
### Name: transferMarkerdata
### Title: Transfer marker data
### Aliases: transferMarkerdata
### ** Examples
# Source pedigrees: a singleton (id 5) plus a nuclear family with two
# offspring; simulate 5 markers with alleles 1:5, keeping individuals 4 and 5
# "available" (presumably the only ones with retained genotypes -- see
# markerSim's documentation).
x = list(singleton(id=5), nuclearPed(noffs=2))
x = markerSim(x, N=5, alleles=1:5, verbose=FALSE, available=4:5)
# Target pedigree: a nuclear family with three offspring.
y = nuclearPed(noffs=3)
y = transferMarkerdata(x, y)
# Round-trip check: extracting the matching sub-pedigrees from y must
# reproduce the source components exactly.
stopifnot(all.equal(x[[1]], branch(y,5)))
stopifnot(all.equal(x[[2]], subset(y,1:4)))
|
ce9294990980c2b6aacf7a630efff37a59aa38ee | d99c8aec01078fddf9de2cb530ab76584f876812 | /R_Code/plot_top10.r | e8b66d0fb09deda42597d9cefaf9db8093fd4342 | [] | no_license | jdreed1954/MWA-Tools | d6ffc43dec3f6f75b02daccc5c821629ffcb05ef | e68299b2401190c60b738a1cd0feae0483bd0a2a | refs/heads/master | 2020-07-03T11:53:26.110317 | 2016-11-24T05:07:45 | 2016-11-24T05:07:45 | 74,175,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,432 | r | plot_top10.r | #
# <plot_top10.r> - R script to produce pdf plot file of the top ten in each of ten metrics.
#
# Example execution:
#
# > source('C:/Users/jxreed/Desktop/MyProjects/bin/load30.r')
# [1] "sea-hp03"
# [1] "sea-hp04"
# [1] "sea-hp11"
# [1] "sea-hp12"
# [1] "twoface"
# There were 20 warnings (use warnings() to see them)
#
# James D Reed (james.reed@hp.com)
# September 29, 2013
#
## # use all data frames in current workspace to generate graphs.
## Enter the name of the plot file in the pdf command below
pdf("top10_plots.pdf")
require(ggplot2)
require(grid)
require(scales)
require(RColorBrewer)
require(graphics)
Customer <- ""
credit_title<- paste("James D Reed (james.reed@hp.com) Hewlett-Packard Company Solution Architect")
################################################################################################################
# F U N C T I O N S #
################################################################################################################
newPage <- function() {
  # Start a fresh page on the active grid device with a 1x1 layout pushed,
  # ready for printPage()/vplayout() to target cell (1, 1).
  # BUG FIX: the original body ended with a bare `return` -- that evaluates to
  # the `return` function object instead of calling it; return invisibly.
  grid.newpage()
  pushViewport(viewport(layout = grid.layout(1, 1)))
  invisible(NULL)
}
printPage <- function(plot, pnum) {
  # Draw `plot` into the page's single layout cell, then stamp the grid
  # header/footer for page number `pnum`.
  # BUG FIX: dropped the trailing bare `return` (it evaluated to the `return`
  # function object rather than performing a call).
  print(plot, vp = vplayout(1, 1))
  printHeaderFooter(pnum)
  invisible(NULL)
}
# Stamp the grey page header (customer name, report date) and footer (credit
# line, page number) onto the current grid page.
# NOTE(review): reads the globals Customer, reportDateString and credit_title;
# reportDateString is not defined anywhere in this file -- presumably it comes
# from the workspace loaded beforehand (TODO confirm).  Also note this helper
# (and the other grid helpers) is not called by the base-graphics main section
# below.
printHeaderFooter <- function(pnum){
# Label printed in the bottom-right corner.
pageString <- paste(" Page ", pnum)
#page Header
popViewport()
grid.text(Customer, y = unit(1, "npc") - unit(2,"mm"), just=c("centre"), gp=gpar(col="grey", fontsize=10))
grid.text(reportDateString, x=unit(1,"npc"), y = unit(1, "npc") - unit(2,"mm"), just=c("right"), gp=gpar(col="grey", fontsize=10))
# Page Footer
grid.text(credit_title, x=unit(.5,"npc"), y=unit(2,"mm"),gp=gpar(col="grey", fontsize=8))
grid.text(pageString, x=unit(1,"npc"), y=unit(2,"mm"), just=c("right", "bottom"),gp=gpar(col="grey", fontsize=10))
}
# Build a grid viewport addressing cell (row = x, col = y) of the layout that
# is currently pushed on the viewport stack.
vplayout <- function(x, y) {
  cell <- viewport(layout.pos.row = x, layout.pos.col = y)
  cell
}
################################################################################################################
#                                   M A I N   S T A R T S   H E R E                                            #
################################################################################################################
# Create a palette for the colors we will use in our plots.
# NOTE(review): mypal is never referenced below (every barplot uses
# rainbow(10)); kept for compatibility with any workspace code expecting it.
mypal <- brewer.pal(10, "Dark2")
# Each "Page" section below draws up to two top-10 barplots (base graphics,
# two stacked panels per page), then writes a page header and footer with
# mtext().  The *_30d_Report data frames are expected in the workspace.
# Changes vs. original:
#   * BUG FIX: pages 2-5 wrote the footer with undefined `bptitle`, which
#     aborts the script at runtime; page 1 correctly used `credit_title`.
#     All footers now use `credit_title`.
#   * Removed the dead `vals <- 1:10` stores (immediately overwritten).
#   * Fixed the typo "PRC_30d_Repor" in the PRC plot title.
#=================================================================================================================
#
# Page 1
#
#=================================================================================================================
pnum <- 0                                   # running page counter
#
# Setup page format, plot layout
#
par(mfrow = c(2, 1), mar = c(4, 4, 2, 2), oma = c(1.5, 2, 1, 1))
# ------------------------------------------------------------------------------------------ CPU30/CPU.98th Barplot
top10 <- head(CPU_30d_Report[order(-CPU_30d_Report$CPU.98th), ], 10)
vals <- as.numeric(format(top10$CPU.98th, digits = 4, nsmall = 2))
mp <- barplot(top10$CPU.98th, names.arg = top10$host, cex.names = 0.5, ylim = c(0, 110), col = rainbow(10),
              ylab = "CPU.98th Percent")
text(mp, vals, labels = vals, pos = 3, cex = 0.8)   # value labels above bars
title(main = "Top 10 Systems by Statistic: CPU_30d_Report$CPU.98th")
rm(top10, vals, mp)
# ------------------------------------------------------------------------------------------ MEM30/MEM.Avg Barplot
top10 <- head(MEM_30d_Report[order(-MEM_30d_Report$MEM.Avg), ], 10)
vals <- as.numeric(format(top10$MEM.Avg, digits = 4, nsmall = 2))
mp <- barplot(top10$MEM.Avg, names.arg = top10$host, cex.names = 0.5, ylim = c(0, 110), col = rainbow(10),
              ylab = "MEM.Avg Percent")
text(mp, vals, labels = vals, pos = 3, cex = 0.8)
title(main = "Top 10 Systems by Statistic: MEM_30d_Report$MEM.Avg")
rm(top10, vals, mp)
#
# Page Header
#
pnum <- pnum + 1
ptitle <- paste("Top 10 Systems by Statistic Page", pnum)
print(ptitle)
mtext(ptitle, 3, line = 0.01, adj = 1.0, cex = 0.8, outer = TRUE)
#
# Page Footer
#
mtext(credit_title, 1, line = 0.01, cex = 0.5, outer = TRUE)
#=================================================================================================================
#
# Page 2
#
#=================================================================================================================
par(mfrow = c(2, 1), mar = c(4, 4, 2, 2), oma = c(1.5, 2, 1, 1))
# ------------------------------------------------------------------------------------------ INT30/INT.Avg Barplot
top10 <- head(INT_30d_Report[order(-INT_30d_Report$INT.Avg), ], 10)
vals <- as.numeric(format(top10$INT.Avg, digits = 4, nsmall = 2, scientific = TRUE))
mp <- barplot(top10$INT.Avg, names.arg = top10$host, cex.names = 0.5, ylim = c(0, 1.20 * max(top10$INT.Avg)), col = rainbow(10),
              ylab = "INT.Avg Count")
text(mp, vals, labels = vals, pos = 3, cex = 0.8)
title(main = "Top 10 Systems by Statistic: INT_30d_Report$INT.Avg")
rm(top10, vals, mp)
# ------------------------------------------------------------------------------------------ PRC30/PRC.Avg Barplot
top10 <- head(PRC_30d_Report[order(-PRC_30d_Report$PRC.Avg), ], 10)
vals <- as.numeric(format(top10$PRC.Avg, digits = 4, nsmall = 0))
mp <- barplot(top10$PRC.Avg, names.arg = top10$host, cex.names = 0.5, ylim = c(0, 1.20 * max(top10$PRC.Avg)), col = rainbow(10),
              ylab = "PRC.Avg Count")
text(mp, vals, labels = vals, pos = 3, cex = 0.8)
# FIX: title previously read "PRC_30d_Repor$PRC.Avg" (missing 't').
title(main = "Top 10 Systems by Statistic: PRC_30d_Report$PRC.Avg")
rm(top10, vals, mp)
#
# Page Header
#
pnum <- pnum + 1
ptitle <- paste("Top 10 Systems by Statistic Page", pnum)
print(ptitle)
mtext(ptitle, 3, line = 0.01, adj = 1.0, cex = 0.8, outer = TRUE)
#
# Page Footer
#
mtext(credit_title, 1, line = 0.01, cex = 0.5, outer = TRUE)
#=================================================================================================================
#
# Page 3
#
#=================================================================================================================
par(mfrow = c(2, 1), mar = c(4, 4, 2, 2), oma = c(1.5, 2, 1, 1))
# ------------------------------------------------------------------------------------------ NTI30/NTI.Avg Barplot
top10 <- head(NTI_30d_Report[order(-NTI_30d_Report$NTI.Avg), ], 10)
vals <- as.numeric(format(top10$NTI.Avg, digits = 4, nsmall = 2, scientific = TRUE))
mp <- barplot(top10$NTI.Avg, names.arg = top10$host, cex.names = 0.5, ylim = c(0, 1.20 * max(top10$NTI.Avg)), col = rainbow(10),
              ylab = "NTI.Avg Count")
text(mp, vals, labels = vals, pos = 3, cex = 0.8)
title(main = "Top 10 Systems by Statistic: NTI_30d_Report$NTI.Avg")
rm(top10, vals, mp)
# ------------------------------------------------------------------------------------------ NTO30/NTO.Avg Barplot
top10 <- head(NTO_30d_Report[order(-NTO_30d_Report$NTO.Avg), ], 10)
vals <- as.numeric(format(top10$NTO.Avg, digits = 4, nsmall = 2, scientific = TRUE))
mp <- barplot(top10$NTO.Avg, names.arg = top10$host, cex.names = 0.5, ylim = c(0, 1.20 * max(top10$NTO.Avg)), col = rainbow(10),
              ylab = "NTO.Avg Count")
text(mp, vals, labels = vals, pos = 3, cex = 0.8)
title(main = "Top 10 Systems by Statistic: NTO_30d_Report$NTO.Avg")
rm(top10, vals, mp)
#
# Page Header
#
pnum <- pnum + 1
ptitle <- paste("Top 10 Systems by Statistic Page", pnum)
print(ptitle)
mtext(ptitle, 3, line = 0.01, adj = 1.0, cex = 0.8, outer = TRUE)
#
# Page Footer
#
mtext(credit_title, 1, line = 0.01, cex = 0.5, outer = TRUE)
#=================================================================================================================
#
# Page 4
#
#=================================================================================================================
par(mfrow = c(2, 1), mar = c(4, 4, 2, 2), oma = c(1.5, 2, 1, 1))
# ------------------------------------------------------------------------------------------ RUN30/RQ.Avg Barplot
top10 <- head(RUN_30d_Report[order(-RUN_30d_Report$RQ.Avg), ], 10)
vals <- as.numeric(format(top10$RQ.Avg, digits = 4, nsmall = 2, scientific = TRUE))
mp <- barplot(top10$RQ.Avg, names.arg = top10$host, cex.names = 0.5, ylim = c(0, 1.20 * max(top10$RQ.Avg)), col = rainbow(10),
              ylab = "RQ.Avg Count")
text(mp, vals, labels = vals, pos = 3, cex = 0.8)
title(main = "Top 10 Systems by Statistic: RUN_30d_Report$RQ.Avg")
rm(top10, vals, mp)
#
# Page Header
#
pnum <- pnum + 1
ptitle <- paste("Top 10 Systems by Statistic Page", pnum)
print(ptitle)
mtext(ptitle, 3, line = 0.01, adj = 1.0, cex = 0.8, outer = TRUE)
#
# Page Footer
#
mtext(credit_title, 1, line = 0.01, cex = 0.5, outer = TRUE)
#=================================================================================================================
#
# Page 5
#
#=================================================================================================================
par(mfrow = c(2, 1), mar = c(4, 4, 2, 2), oma = c(1.5, 2, 1, 1))
# ------------------------------------------------------------------------------------------ COR30/COR.Max Barplot
top10 <- head(COR_30d_Report[order(-COR_30d_Report$COR.Max), ], 10)
vals <- as.numeric(format(top10$COR.Max, digits = 4, nsmall = 2, scientific = TRUE))
mp <- barplot(top10$COR.Max, names.arg = top10$host, cex.names = 0.5, ylim = c(0, 1.20 * max(top10$COR.Max)), col = rainbow(10),
              ylab = "COR.Max Count")
text(mp, vals, labels = vals, pos = 3, cex = 0.8)
title(main = "Top 10 Systems by Statistic: COR_30d_Report$COR.Max")
rm(top10, vals, mp)
# ------------------------------------------------------------------------------------------ COR30/COR.Avg Barplot
top10 <- head(COR_30d_Report[order(-COR_30d_Report$COR.Avg), ], 10)
vals <- as.numeric(format(top10$COR.Avg, digits = 4, nsmall = 2, scientific = TRUE))
mp <- barplot(top10$COR.Avg, names.arg = top10$host, cex.names = 0.5, ylim = c(0, 1.20 * max(top10$COR.Avg)), col = rainbow(10),
              ylab = "COR.Avg Count")
text(mp, vals, labels = vals, pos = 3, cex = 0.8)
title(main = "Top 10 Systems by Statistic: COR_30d_Report$COR.Avg")
rm(top10, vals, mp)
#
# Page Header
#
pnum <- pnum + 1
ptitle <- paste("Top 10 Systems by Statistic Page", pnum)
print(ptitle)
mtext(ptitle, 3, line = 0.01, adj = 1.0, cex = 0.8, outer = TRUE)
#
# Page Footer
#
mtext(credit_title, 1, line = 0.01, cex = 0.5, outer = TRUE)
dev.off() |
3bd31badfdf513dc03202becf10ffc500ef5d491 | 19b3810cc477ab0c049095d2cb55a7af3f321b64 | /R_visualisations.R | 2d52afaa83f8d7cc88662ab225de4a2f04c61543 | [] | no_license | avinash2692/R-visualizations | 4910a22c4995f3a2a6bdb3efae22f72f4a7e636a | 650f9afed346572ddda3a2884578b0e126d56f22 | refs/heads/master | 2021-01-18T14:05:18.052847 | 2015-06-10T19:57:40 | 2015-06-10T19:57:40 | 37,219,072 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,314 | r | R_visualisations.R | #STAT 430 : Homework 5 ----
#### Avinash B
# Packages: dplyr (wrangling), treemap/streamgraph (plots), babynames (data).
pckgs = c("dplyr","treemap","streamgraph","ggplot2","babynames")
# FIX: character.only=T -> TRUE (T is an ordinary variable and can be reassigned).
# NOTE(review): require() returns FALSE silently for missing packages -- inspect
# pkgs_loaded if plots fail.
pkgs_loaded = lapply(pckgs, require, character.only = TRUE)
data(Titanic)
d <- Titanic
# Titanic is a 4-way contingency table (Class x Sex x Age x Survived);
# flatten it to a data frame with a Freq column.
d <- as.data.frame(d)
# Exercise 1 =====
# For each grouping, compute the survival percentage and group size, then draw
# a treemap sized by population and colored by survival rate.
# A) By Sex ####
by.sex <- as.data.frame(d %>%
  group_by(Sex,Survived) %>%
  summarise(k = sum(Freq)) %>%
  mutate(survival.pct = k/sum(k), total = sum(k)) %>%
  filter(Survived == "Yes"))
treemap(by.sex,
        index=c("Sex"),
        vSize="total",
        vColor="survival.pct",
        type="value", title = "Fig 1 : Survival/population based on Sex",
        range = c(0,1))
# B) By Age ####
by.age <- as.data.frame(d %>%
  group_by(Age,Survived) %>%
  summarise(k = sum(Freq)) %>%
  mutate(survival.pct = k/sum(k), total = sum(k)) %>%
  filter(Survived == "Yes"))
treemap(by.age,
        index=c("Age"),
        vSize="total",
        vColor="survival.pct",
        type="value", title = "Fig 2 : Survival/population based on Age",
        range = c(0,1))
# C) By Sex and Class #####
by.sex.class <- as.data.frame(d %>%
  group_by(Sex,Class,Survived) %>%
  summarise(k = sum(Freq)) %>%
  mutate(survival.pct = k/sum(k), total = sum(k)) %>%
  filter(Survived == "Yes"))
treemap(by.sex.class,
        index=c("Sex","Class"),
        vSize="total",
        vColor="survival.pct",
        type="value", title = "Fig 3 : Survival/population based on Sex and Class",
        range = c(0,1),
        palette=rainbow(10, s = 1, v = 1, start = 0, end = 1/6)
        )
# D) By Age, Sex and Class #####
by.age.sex.class <- as.data.frame(d %>%
  group_by(Age,Sex,Class,Survived) %>%
  summarise(k = sum(Freq)) %>%
  mutate(survival.pct = k/sum(k), total = sum(k)) %>%
  filter(Survived == "Yes"))
treemap(by.age.sex.class,
        index=c("Age","Sex","Class"),
        vSize="total",
        vColor="survival.pct",
        type="value", title = "Fig 4 : Survival/population based on Age,Sex and Class",
        range = c(0,1),
        palette=rainbow(n = 10, s = 1, v = 1, start = 0,end = 1/6)
        )
# Exercise 2 : Baby Names Graph Streams =====
# The following code snippets are a modification of the examples in class.
# 1) Top Names ####
# Most popular name per year and sex.
babynames %>%
  group_by(year, sex) %>%
  top_n(1,n) -> top.names
top.names
streamgraph(top.names, "name", "n", "year") %>%
  sg_fill_brewer("Spectral") %>%
  sg_axis_x(tick_units = "year", tick_interval = 10, tick_format = "%Y")
# 2) Top name Female #####
babynames %>%
  filter(sex=="F",
         name %in% top.names$name) -> top.names.female
streamgraph(top.names.female, "name", "n", "year") %>%
  sg_fill_brewer("Spectral") %>%
  sg_axis_x(tick_units = "year", tick_interval = 10, tick_format = "%Y")
# 3) Top name Male #####
babynames %>%
  filter(sex=="M",
         name %in% top.names$name) -> top.names.male
streamgraph(top.names.male, "name", "n", "year") %>%
  sg_fill_brewer("Spectral") %>%
  sg_axis_x(tick_units = "year", tick_interval = 10, tick_format = "%Y")
# -------- |
0ed6500aeb015a376aa3566b52aa90033b44a364 | 1dc04dd6b686e6540b7dc8bcd402c5a7624389c4 | /man/term_install.Rd | e9a2fff0186e94c55850662803f8c5a4284604d2 | [
"MIT"
] | permissive | jeksterslabds/jeksterslabRterm | f870e87393739676e96299224dd8cafe313d80b2 | 7703a4217be4968aae64d5718b16e1fb3c41e98a | refs/heads/master | 2022-11-15T13:05:29.892668 | 2020-07-08T19:13:25 | 2020-07-08T19:13:25 | 257,536,009 | 0 | 0 | NOASSERTION | 2020-04-22T12:40:22 | 2020-04-21T08:52:20 | R | UTF-8 | R | false | true | 794 | rd | term_install.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/term_install.R
\name{term_install}
\alias{term_install}
\title{Install Terminal Tools}
\usage{
term_install(packages, ...)
}
\arguments{
\item{packages}{Character vector.
Packages to install.}
\item{...}{Arguments to pass to \code{\link[=term_brew]{term_brew()}}, or \code{\link[=term_apt]{term_apt()}}.}
}
\description{
Installs terminal tools.
Uses package managers to install packages
(\code{pacman} for Arch Linux based Linux Distributions,
\code{apt} for Debian based Linux Distributions,
\code{homebrew} for Mac OSX and
\code{chocolatey} for Windows.)
}
\details{
NOTE: \code{chocolatey} for Windows is not currently implemented.
}
\examples{
\dontrun{
term_install()
}
}
\author{
Ivan Jacob Agaloos Pesigan
}
|
30ba9c187c97424ee4a2d2ff95bf0f0125ba3dfd | 34cc9dcaba027079ae08e441f6b1ee0c46432a93 | /doc/intro-to-lau2boundaries4spain.R | 9099fc5e5f7cb33fbfd8456f667bc148edcb45d7 | [
"MIT"
] | permissive | rOpenSpain/LAU2boundaries4spain | d471d1356e5a43125b0ac93fdf14a105ef00a815 | e07fdc690186b63d1792485054d295c17a1679c7 | refs/heads/master | 2023-04-02T06:37:04.917731 | 2021-04-14T21:01:20 | 2021-04-14T21:01:20 | 125,485,567 | 3 | 1 | MIT | 2021-04-14T20:36:48 | 2018-03-16T08:19:50 | null | UTF-8 | R | false | false | 5,143 | r | intro-to-lau2boundaries4spain.R | ## ----setup, include = FALSE---------------------------------------------------
# NOTE(review): this file is machine-extracted from the package vignette by
# knitr (the "## ----" lines mark the original chunk boundaries); edit the
# vignette source, not this file.
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
## ---- echo = FALSE, message = FALSE, warning =FALSE---------------------------
#- Granada tops the list with 6 municipal boundary alterations
library(LAU2boundaries4spain)
library(knitr)
library(here)
library(tidyverse)
library(ggrepel)
library(sf)
#----------------------------------------
# Granada province (INE code 18): municipal boundaries in 2020 vs 2002.
granada_2020 <- LAU2boundaries4spain::municipios_2020 %>% filter(INECodProv == "18")
granada_2002 <- LAU2boundaries4spain::municipios_2002 %>% filter(INECodProv == "18")
granada_2002_df <- LAU2boundaries4spain::municipios_2002 %>% filter(INECodProv == "18") %>% st_set_geometry(NULL)
dif_granada <- anti_join(granada_2020, granada_2002_df, by = c("INECodMuni" = "INECodMuni")) #- the municipalities that changed between 2002 and 2020
Provincias <- Provincias
Prov_granada <- Provincias %>% filter(INECodProv == "18")
# Provinces sharing a border with Granada.
Prov_limitrofes <- Provincias [st_touches(Provincias, Prov_granada, sparse = FALSE ), ]
centroides_prov <- st_centroid(Prov_limitrofes) #- replaces the geometry with its centroid (overwritten on the next line)
centroides_prov <- cbind(Prov_limitrofes, st_coordinates(st_centroid(Prov_limitrofes$geometry))) #- now with X/Y centroid columns
#- centroids of the Granada municipalities that changed
centroides_dif <- st_centroid(dif_granada) #- replaces the geometry with its centroid (overwritten on the next line)
centroides_dif <- cbind(dif_granada, st_coordinates(st_centroid(dif_granada$geometry))) #- now with X/Y centroid columns
dif_granada_names <- dif_granada %>% st_drop_geometry()
# Map of Granada highlighting (in red) the municipalities created 2002-2020.
p <- ggplot() +
geom_sf(data = Provincias, fill = "antiquewhite", color = "grey", size = 0.2) +
geom_sf(data = granada_2002, fill = "antiquewhite", color = "black", size = 0.14) +
geom_sf(data = dif_granada, fill = "#E41A1C", color = "black", size = 0.14) +
geom_sf(data = Prov_granada, fill = NA, color = "black", size = 0.6) +
geom_text(data = centroides_prov, aes(x = X, y = Y, label = NombreProv), color = "grey", check_overlap = TRUE, size = 3) +
coord_sf(xlim = c(-4.50, -2.15), ylim = c(36.4, 38.10), expand = FALSE) +
theme(panel.grid.major = element_line(color = gray(.5), linetype = "dashed", size = 0.15), panel.background = element_rect(fill = "azure")) +
theme(text = element_text(size = 10), axis.text.x = element_text(size = 5)) +
theme(axis.text.y = element_text(size = 5)) +
geom_text_repel(data = centroides_dif, aes(x = X, y = Y, label = NombreMuni), force = 18.5, color = "black", size = 2.4, fontface = "bold") +
labs(title = "Municipios creados en el periodo 2002-2020", subtitle = "(Provincia de Granada)", x = NULL, y = NULL)
p
## -----------------------------------------------------------------------------
# The three boundary datasets exported by the package.
library(LAU2boundaries4spain)
CCAA <- CCAA #- autonomous-community (CC.AA.) geometries
Provincias <- Provincias #- province geometries
municipios_2017 <- municipios_2017 #- municipal geometries for 2017 (available years: 2002 - 2021)
## ---- fig.height= 2.8---------------------------------------------------------
library(sf)
Provincias <- Provincias
plot(Provincias, max.plot = 1)
## ---- message = FALSE---------------------------------------------------------
# Two-panel example: mainland CC.AA. with Aragon highlighted, and the
# municipalities of Teruel with Pancrudo highlighted.
library(LAU2boundaries4spain)
library(tidyverse)
library(sf)
library(patchwork)
CCAA_peninsular <- CCAA %>% filter(!NombreCCAA %in% c("Canarias", "Illes Balears", "Ciudades Autónomas de Ceuta y Melilla"))
Prov_aragon <- Provincias %>% filter(NombreCCAA == "Aragón")
muni_teruel_2017 <- municipios_2017 %>% filter(NombreProv == "Teruel")
Pancrudo <- muni_teruel_2017 %>% filter(NombreMuni == "Pancrudo")
p1 <- ggplot(data = CCAA_peninsular) + geom_sf(fill = "antiquewhite") +
geom_sf(data = Prov_aragon, color = "red", size = 0.15) +
geom_sf(data = muni_teruel_2017, color = "blue", size = 0.05) + theme(panel.background = element_rect(fill = "aliceblue")) +
labs(title = "CC.AA. españolas", subtitle = "(Aragón en gris y rojo. Municipios de Teruel en azul)")
p2 <- ggplot(data = Prov_aragon) + geom_sf() +
geom_sf(data = muni_teruel_2017, color = "black", size = 0.15, fill = "antiquewhite") +
geom_sf(data = Pancrudo, fill = "purple", size = 0.1) + theme_minimal() +
labs(title = "Municipios de Teruel ... existe!!!", subtitle = "(Pancrudo en violeta)") +
theme(axis.text = element_blank()) +
theme(panel.grid.major = element_blank()) +
theme(plot.title = element_text(size = 11))
p1 + p2
## -----------------------------------------------------------------------------
# Shift the Canary Islands (province codes 35, 38) next to the mainland so the
# whole country fits in one compact map.
library(LAU2boundaries4spain)
library(tidyverse)
library(sf)
library(patchwork)
canarias <- Provincias %>% filter(INECodProv %in% c(35,38))
peninsula <- Provincias %>% filter( !(INECodProv %in% c(35, 38)) )
# Translation vector from the Canaries' bbox corner to near the mainland's.
my_shift <- st_bbox(peninsula)[c(1,2)]- (st_bbox(canarias)[c(1,2)]) + c(-2.4, -1.1)
canarias$geometry <- canarias$geometry + my_shift
st_crs(canarias) <- st_crs(peninsula)
peninsula_y_canarias <- rbind(peninsula, canarias)
p1 <- ggplot() + geom_sf(data = Provincias)
p2 <- ggplot() + geom_sf(data = peninsula) + geom_sf(data = canarias, fill = "purple")
p1 + p2
|
3f996b98dae39c459524e1cf6018710d57887fa1 | 7b2f383b7c97a7a2ec734cb824e4bb71525d3f94 | /Kaggle_SF_submit.R | 886e919493d4eee11db0d34c6f644ef65f84bc59 | [] | no_license | FaguiCurtain/Kaggle-SF | 61341d0a1c5ba4cff5aa301f569a11b04e44bff5 | 3add8b57a0ab124d96aca2a704849a6207aa6235 | refs/heads/master | 2021-01-21T14:40:50.965126 | 2016-06-11T15:48:39 | 2016-06-11T15:48:39 | 57,424,481 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 807 | r | Kaggle_SF_submit.R | submission <- read.table("/Users/francois-guillaume.rideau/Documents/Kaggle/samplesubmission.csv", header=TRUE, sep=",",na.strings = "NA",check.names=FALSE)
# Inspect the sample-submission scaffold loaded above (columns 2:40 hold the
# 39 per-class probability columns filled in below).
head(submission)
# RANN with traindata_NN=traindata[,c("X","Y","hourslice","DayOfWeek")]
# Fill the probability columns with the nearest-neighbour model's predictions
# and write the submission file.
submission[,2:40]=predRANN_test[,1:39]
write.csv(submission,"/Users/francois-guillaume.rideau/Documents/Kaggle/submit_RANN1.csv",row.names=FALSE)
# baseline: every test observation gets the same probability vector
# (class frequencies from table3), replicated row-wise.
res = table3$Prob
# FIX: byrow=T -> byrow=TRUE (T is an ordinary variable and can be reassigned).
submission[,2:40]=matrix(rep(res,Num_RawTestObs),
                         ncol=length(res),
                         byrow=TRUE)
write.csv(submission,"/Users/francois-guillaume.rideau/Documents/Kaggle/baseline.csv",row.names=FALSE)
trucmuche=read.table("/Users/francois-guillaume.rideau/Documents/Kaggle/submit_RANN1.csv", header=TRUE, sep=",",na.strings = "NA",check.names=FALSE) |
00d6ab203aea3292354577fc95a3bc689616b1dc | 0e0fb77df671d38ab424ac14d9fc91b7fd0d1318 | /R/aprot.R | cb75fa2ae8f9907033eb85f49e89df9e28ea9e7a | [] | no_license | giraola/taxxo | 7d30f52474d6113e288dfa42297365c0e5ecd5ef | ba9a33a357b6267e11857659b28156f7540d92b1 | refs/heads/master | 2021-01-13T16:56:52.251231 | 2017-09-13T20:58:41 | 2017-09-13T20:58:41 | 77,538,782 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,365 | r | aprot.R | #' aprot
#'
#' Identifies and extracts a set of 37 archaeal universal proteins used as phylogenetic markers.
#' @param path is the full path to where genome annotations in fasta format are placed.
#' @param pattern a pattern (like '.faa') for recognizing the annotation files.
#' @param outdir a name for the output directory that will be created for results.
#' @param align takes a logical (default, TRUE) indicating if performing multiple sequence alignment.
#' @param phylogeny takes a logical (default, TRUE) indicating if building a NJ phylogeny.
#' @param proc is the number of threads used for protein searches.
#' @keywords proteins archaeal markers
#' @export
#' @examples
#' aprot(path='./prodigal_output',pattern='.faa',outdir='aprot_output',proc=2)
aprot<-function(pattern='.faa',
path='.',
outdir='aprot_output',
align=TRUE,
phylogeny=TRUE,
proc=2)
{
# Options #
options(getClass.msg=FALSE)
gw<-getwd()
os<-.getOS()
# Dependencies #
require(seqinr,quietly=T)
require(foreach,quietly=T)
require(doMC,quietly=T)
require(msa,quietly=T)
require(plyr,quietly=T)
# Internal functions #
chunker<-function(m,n){
s<-split(m,cut(seq_along(m),n,labels=F))
return(s)
}
# Select OS #
if (os=='linux'){
hmmsearch<-paste(system.file('hmmer3',package='taxxo'),'/linux/hmmsearch',sep='')
bldbcd<-paste(system.file('blast',package='taxxo'),'/linux/blastdbcmd',sep='')
mkbldb<-paste(system.file('blast',package='taxxo'),'/linux/makeblastdb',sep='')
} else if (os=='darwin'){
hmmsearch<-paste(system.file('hmmer3',package='taxxo'),'/darwin/hmmsearch',sep='')
bldbcd<-paste(system.file('blast',package='taxxo'),'/darwin/blastdbcmd',sep='')
mkbldb<-paste(system.file('blast',package='taxxo'),'/darwin/makeblastdb',sep='')
} else {
stop('ERROR: Unknown OS, unable to proceed.')
}
# List HMMs and genome annotations
phmm<-paste(system.file('exdata',package='taxxo'),'/archaeal',sep='')
flist<-gsub('//','/',list.files(path=path,pattern=pattern,full.names=T))
fnams<-list.files(path=path,pattern=pattern)
hmms<-list.files(phmm,pattern='.hmm',full.names=T)
hmm2<-list.files(phmm,pattern='.hmm')
# Find proteins
result<-matrix(ncol=length(hmms),nrow=length(flist),NA)
colnames(result)<-gsub('.hmm','',hmm2)
rownames(result)<-gsub(pattern,'',fnams)
for (f in 1:length(flist)){
genome<-gsub(pattern,'',fnams[f])
for (h in 1:length(hmms)){
model<-gsub('.hmm','',hmm2[h])
cmd<-paste(hmmsearch,
' --noali --cpu ',proc,
' -o search_tmp',
' --domtblout out_tmp',
' ',hmms[h],' ',flist[f],sep='')
}
system(cmd)
rl<-readLines('out_tmp')
rl<-rl[which(!grepl("\\#",rl))]
rl<-gsub('[ ]+',' ',rl)
if (length(rl)>0){
lst<-strsplit(rl,' ')
hit<-sapply(lst,function(x){x[1]})
pfmID<-sapply(lst,function(x){x[2]})
query<-sapply(lst,function(x){x[4]})
evalu<-as.numeric(sapply(lst,function(x){x[13]}))
score<-as.numeric(sapply(lst,function(x){x[14]}))
htab<-data.frame(Query=query,
Hit=hit,
PfamID=pfmID,
Evalue=evalu,
Score=score,
stringsAsFactors=F)
dimi<-dim(htab)[1]
if (dimi>1){
maxi<-max(htab$Score)
gene<-as.vector(htab[which(htab$Score==maxi),2])
} else if (dimi==1){
gene<-as.vector(htab[1,2])
}
result[f,h]<-gene
system('rm -rf search_tmp out_tmp')
}
}
system(paste('mkdir',outdir))
#setwd(outdir)
presence_absence_aprot<-result
save(presence_absence_aprot,file='presence_absence_aprot.Rdata')
system(paste('mv presence_absence_aprot.Rdata',outdir),ignore.stdout=T)
# Make databases
cmd2<-paste('cat ',paste(flist,collapse=' '),' > all_genomes_aprot.faa',sep='')
system(cmd2)
cmd3<-paste(mkbldb,
' -in all_genomes_aprot.faa',
' -dbtype prot -hash_index -parse_seqids -title',
' aprotdb -out aprotdb',sep='')
system(cmd3,ignore.stdout=T)
system('rm -rf all_genomes_aprot.faa')
# Extract sequences
aprots<-colnames(presence_absence_aprot)
genoms<-rownames(presence_absence_aprot)
for (a in 1:length(aprots)){
outfile<-paste(aprots[a],'.aprot.faa',sep='')
prots<-as.vector(presence_absence_aprot[,a])
for (p in 1:length(prots)){
prot<-prots[p]
if (is.na(prot)==F){
cmd<-paste(bldbcd,' -entry ',prot,' -db aprotdb >> ',outfile,sep='')
system(cmd)
} else {
cat(paste('>',genoms[p],'_absent',sep=''),
file=outfile,
sep='\n',
append=T)
}
}
}
# Align sequences
if (align==TRUE){
multi<-list.files(pattern='.aprot.faa')
for (m in multi){
out<-gsub('.faa','.ali',m)
aux<-capture.output(
alignment<-msa(inputSeqs=m,method='ClustalOmega',type='protein'))
aliconver<-msaConvert(alignment,type='seqinr::alignment')
seqs<-lapply(aliconver$seq,s2c)
nams<-gsub(' ','',gsub('>','',system(paste("grep '>' ",m,sep=''),intern=T)))
write.fasta(seqs,names=nams,file=out)
}
# Concatenate alignment
alis<-list.files(pattern='.aprot.ali')
catmat<-NULL
for (a in alis){
fasta<-read.fasta(a)
sequs<-lapply(getSequence(fasta),toupper)
smatx<-do.call(rbind,sequs)
catmat<-cbind(catmat,smatx)
catlis<-alply(catmat,1)
nams<-getName(fasta)
nams<-unlist(lapply(nams,function(x){strsplit(x,'_')[[1]][1]}))
write.fasta(catlis,names=nams,file='archaeal_proteins.ali')
}
system(paste('mv *aprot.faa',outdir))
system(paste('mv *aprot.ali',outdir))
system(paste('mv archaeal_proteins.ali',outdir))
system('rm -rf aprotdb*')
} else {
system(paste('mv *aprot.faa',outdir))
system('rm -rf aprotdb*')
}
if (align==TRUE & phylogeny==TRUE){
phydat<-msaConvert(alignment,type='phangorn::phyDat')
distan<-dist.ml(phydat,model='JTT')
tre<-NJ(distan)
write.tree(tre,file='NJ.aprot.tree.nwk')
system(paste('mv NJ.aprot.tree.nwk',outdir))
} else if (align==FALSE & phylogeny==TRUE){
stop('For tree building both "align" and "phylogeny" must be set TRUE')
}
setwd(gw)
}
|
9f06e57ad7a6916b2785dcc7f08b0124e24afc6b | 1f4a9b9632b4384cabe391ee547eb7bfd760148f | /Plot_Flux.Comp/Plot_Flux_Comp.R | fc4094de18c4c04aa83090cd17f09a4823d5fd7d | [] | no_license | LunaGeerts/Public-scripts-Luna | ed4b121111e4ea31f13c7b4a9864d3a11110ca84 | ff581f7fb2d1b4da49c9f6f2dce30c6edb1cf195 | refs/heads/master | 2023-01-10T19:51:35.957753 | 2020-07-15T09:03:45 | 2020-07-15T09:03:45 | 214,964,796 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,159 | r | Plot_Flux_Comp.R | #########################################################
#Personal use scripts to quickly plot results of a list of compared fluxes
#########################################################
##########################################################
#Needed liberaries
##########################################################
require(reshape2)
##########################################################
#CODE
##########################################################
# Per-observation squared errors of each flux-estimation method against the
# true flux, stacked into long format for plotting.
#
# Result: matrix/data.frame with columns LunaGradient, FlipperGradient,
#         FlipperDiscrete, FlipperSavGolay, Discrete.Berg and True.Flux;
#         the method labels are taken positionally from columns 2-5, 7 and 8,
#         so the column order must match the fixed order used below.
# Returns a data.frame with columns Errors (squared error / n, so summing a
# method's rows yields its mean squared error) and Method (label per row).
Error <- function(Result) {
  dat <- as.data.frame(Result)
  n_obs <- nrow(Result)
  truth <- dat$True.Flux
  # Squared deviation from the true flux, scaled by the number of rows.
  sq_err <- function(pred) (pred - truth)^2 / n_obs
  all_errors <- c(
    sq_err(dat$LunaGradient),
    sq_err(dat$FlipperGradient),
    sq_err(dat$FlipperDiscrete),
    sq_err(dat$FlipperSavGolay),
    sq_err(dat$Discrete.Berg),
    sq_err(truth)  # identically zero: the true flux against itself
  )
  # Labels must line up with the vector order above.
  method_labels <- colnames(Result)[c(2:5, 7, 8)]
  data.frame(
    Errors = all_errors,
    Method = rep(method_labels, each = length(dat[, 1]))
  )
}
# Overlay kernel density estimates of every method's flux values on top of the
# density of the true flux, using a common x/y range so all curves fit.
#
# Results : matrix/data.frame with columns LunaGradient, FlipperGradient,
#           FlipperDiscrete, FlipperSavGolay, Discrete.Berg and True.Flux.
# ...     : forwarded as the plot title. NOTE(review): `main = ...` below
#           feeds the dots straight into plot(); callers in this file pass
#           exactly one value (main = <title>) -- confirm before invoking
#           this function with anything else in `...`.
# Draws on the active graphics device; no meaningful return value.
plot.dens <- function(Results, ...){
  df.temp <- as.data.frame(Results)
  # One kernel density estimate per prediction method, plus the true flux.
  L.dens <- density( df.temp$LunaGradient)
  FG.dens <- density( df.temp$FlipperGradient)
  FD.dens <- density( df.temp$FlipperDiscrete)
  FSG.dens <- density( df.temp$FlipperSavGolay)
  B.dens <- density( df.temp$Discrete.Berg)
  True.dens<- density( df.temp$True.Flux)
  names <-c("L.dens","FG.dens","FD.dens","FSG.dens","B.dens","True.dens")
  listing <-list(L.dens,FG.dens,FD.dens,FSG.dens,B.dens,True.dens)
  names(listing)<- names
  # Common axis limits spanning every curve's support and height.
  x.range<- range ( sapply(listing, function(list){ return( c(min(list$x),max(list$x))) } ) )
  y.range<- range ( sapply(listing, function(list){ return( c(min(list$y),max(list$y))) } ) )
  # Base plot: the true-flux density as the solid reference curve (lty = 1).
  plot(True.dens,main= ...,xlim=x.range,ylim = y.range,lwd=2)
  #sapply(listing[-(length(listing))],lines,col=2) #you probably can do this with a sapply but not sure how
  # Gradient-based methods: dashed (lty = 2).
  lines(listing[["L.dens"]],lty=2,col=2,lwd=1.7)
  lines(listing[["FG.dens"]],lty=2,col=3,lwd=1.7)
  # Discrete methods: dotted (lty = 3).
  lines(listing[["FD.dens"]],lty=3,col=2,lwd=1.7)
  lines(listing[["B.dens"]],lty=3,col=3,lwd=1.7)
  # Savitzky-Golay variant: dash-dot (lty = 4).
  lines(listing[["FSG.dens"]],lty=4,col=4,lwd=1.7)
  # Legend styles must stay in sync with the lines drawn above.
  legend("topright",legend = c("True flux","Luna.Grad","Flip.Grad","Flip.Disc","Berg.disc","Flip.sav"),
         lty = c(1,2,2,3,3,4),lwd=1.7,col = c(1,2,3,2,3,4) )
}
# 2x2 diagnostic panel comparing the flux-estimation methods with the truth:
#   (1) predicted vs. true flux scatter with per-method regression lines and
#       the identity line (both axes reversed),
#   (2) boxplot of the per-observation squared errors from Error(),
#   (3) overlaid density curves via plot.dens(),
#   (4) a free-text panel describing how the data were generated.
#
# x          : result table accepted by Error() (columns ID, True.Flux and
#              one column per method).
# title.line : title for the scatter panel.
# title.box  : title for the squared-error boxplot.
# title.dens : title for the density panel.
# text.graph : text shown in the fourth panel (default is a fill-in template).
# Opens a new x11() device and draws on it; no return value.
#
# NOTE(review): requires reshape2::melt() to be attached. Also, under
# R >= 4.0 data.frame() no longer converts strings to factors, so
# levels(error.df$Method) is NULL there -- confirm the intended R version.
L.plot.func<- function(x,title.line= "line graph" ,
                       title.box= "Squared error of each method versus true flux",
                       title.dens="Density plot",
                       text.graph= ("Data generated with:\n
                       Standard deviation of \n
                       Depth of x meters \n
                       x points spread over this depth (but trimmed)")) {
  # Reshape the results to long format: one row per (observation, method).
  df<-as.data.frame(x)
  error.df<-Error(x)
  methods<-c(levels(error.df$Method))
  df.temp<- melt(df,id.vars=c("ID","True.Flux"),measure.vars = methods)
  # Drop the True.Flux "method" (it would sit exactly on the identity line).
  df.temp.2<-df.temp[!df.temp$variable=="True.Flux",]
  df.temp.2$ID<-as.factor(df.temp.2$ID)
  xrange<-range(df.temp.2$True.Flux)
  yrange<-range(df.temp.2$value)
  x11(width=210,height=120)
  par(mfrow=c(2,2))
  # Panel 1: empty frame with reversed axes, then the identity reference line.
  plot(True.Flux~1 ,data=df.temp.2,type="n",xlim=rev(xrange),ylim=rev(yrange),lwd=3,
       xlab="True.Flux",ylab="predicted flux",main=title.line)
  abline(a=0,b=1,lwd=3)
  # One point cloud + least-squares line per method; colours and line types
  # must stay in sync with the legend below.
  points(value~True.Flux ,data=df.temp.2[df.temp.2$variable=="LunaGradient",],type="p",xlim=rev(xrange),ylim=rev(yrange))
  abline(lm(value~True.Flux ,data=df.temp.2[df.temp.2$variable=="LunaGradient",]))
  points(value~True.Flux ,data=df.temp.2[df.temp.2$variable=="FlipperGradient",],type="p",col=2)
  abline(lm(value~True.Flux ,data=df.temp.2[df.temp.2$variable=="FlipperGradient",]),col=2)
  points(value~True.Flux ,data=df.temp.2[df.temp.2$variable=="FlipperDiscrete",],type="p",col=3)
  abline(lm(value~True.Flux ,data=df.temp.2[df.temp.2$variable=="FlipperDiscrete",]),col=3,lty=2)
  points(value~True.Flux ,data=df.temp.2[df.temp.2$variable=="Discrete.Berg",],type="p",col=4)
  abline(lm(value~True.Flux ,data=df.temp.2[df.temp.2$variable=="Discrete.Berg",]),col=4,lty=2)
  points(value~True.Flux ,data=df.temp.2[df.temp.2$variable=="FlipperSavGolay",],type="p",col=6)
  abline(lm(value~True.Flux ,data=df.temp.2[df.temp.2$variable=="FlipperSavGolay",]),col=6,lty=3)
  legend("bottomright",legend=c("Luna.Grad","Flip.Grad","Flip.Disc","Berg.disc","Flip.sav"),col=c(1,2,3,4,6),lty=c(1,1,2,2,3))
  # Panel 2: distribution of squared errors per method.
  boxplot(error.df$Errors~error.df$Method,ylab="Flux error",main=title.box)
  # Panel 3: density overlay (the extra main= goes through plot.dens's dots).
  plot.dens(df,main=title.dens)
  # Panel 4: borderless text-only plot; margins are removed and restored after.
  par(mar = c(0,0,0,0))
  plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n', xaxt = 'n', yaxt = 'n')
  text(x = 0.5, y = 0.5, paste(text.graph),
       cex = 1.6, col = "black")
  par(mar = c(5, 4, 4, 2) + 0.1)
  par(mfrow=c(1,1))
}
|
99269d3cde74f6b744d23fd0b56386b3e1f94180 | bace4a42aa4e1b4e11b620ada6aecc5ab19b1b34 | /R/analysis_of_posterior.R | fbaf7f5159c1843c253183f66cfdd3f596213f43 | [] | no_license | mtrachs/stepps_full_prediciton | 5eb529c362847c925e93f9f728dd49b3f5ca8bb4 | 16b80a0785d8b9e9b061ac4499e7c8a3ca59a6d9 | refs/heads/master | 2020-03-28T17:45:15.105031 | 2018-10-03T19:56:00 | 2018-10-03T19:56:00 | 148,819,416 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,184 | r | analysis_of_posterior.R | ########################################################################################################################
#
# Post-processing of STEPPS posterior draws: read the CmdStan CSV output of
# the vegetation-prediction model, reshape the draws into a composition
# array, and map the downcore predictions.
#########################################################################################################################
library(rstan)
library(RColorBrewer)
library(fields)
# NOTE(review): hard-coded home-relative working directory; the script only
# runs from this checkout layout.
setwd('~/workflow_stepps_prediction/prediction/')
help.fun.loc <- 'utils/'
data.loc <- 'data/'
plot.loc <- 'plots/'
output.loc <- 'output/'
#########################################################################################################################
# Helper functions used below (build_r_nb, plot_pred_maps, ...) are defined
# in these sourced files, not in this script.
source(paste(help.fun.loc,'pred_helper_funs.r',sep=''))
source(paste(help.fun.loc,'pred_plot_funs.r',sep=''))
# Read the sampler CSV into a stanfit object and pull the raw draws
# (per-chain array, warmup excluded).
fit <- read_stan_csv(paste(output.loc,'prediction_nd_reduced_domain_old_calibration.csv',sep=''))
post <- rstan::extract(fit, permuted=FALSE, inc_warmup=FALSE)
# Strip the "[i,j]" index part of each flat parameter name, e.g. "r[1,2]" -> "r".
var_names<- colnames(post[,1,])
par_names <- sapply(var_names, function(x) unlist(strsplit(x,'[[]'))[1])
post_dat <- list(post = post,par_names = par_names)
# Rebuild the N x T x K (sites x times x taxa) composition array from the
# flat draws; N/T/K must match the fitted model's dimensions.
r <- build_r_nb(post_dat=post_dat,N = 554,T=19,K=13)
# NOTE(review): named r_mean but computed with median -- this is the
# posterior MEDIAN per cell, kept under the old name for downstream calls.
r_mean <- apply(r$r,c(1,2),median)
saveRDS(r$r,paste(output.loc,'predicted_vegetation.RDS',sep=''))
# Loads (among others) coord.agg.final, y and K used below.
load(paste(data.loc,'test_prediction_old_calibration.rdata',sep=''))
limits <- list(xlims=range(coord.agg.final$east),ylims=range(coord.agg.final$north))
colours <- rev(brewer.pal(10,'RdYlBu'))
taxa <- colnames(y)
# Proportion breakpoints for the map colour classes and their legend labels.
breaks = c(0, 0.01, 0.05, 0.10, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 1)
breaklabels = apply(cbind(breaks[1:(length(breaks)-1)], breaks[2:length(breaks)]), 1,
                    function(r) { sprintf("%0.2f - %0.2f", r[1], r[2]) })
#
#r_mean_category <- cut(r_mean, breaks, include.lowest=TRUE, labels=FALSE)
#r_mean_category <- matrix(r_mean_category,ncol=13)
# Draw the taxon-proportion maps (one time slice every 100 years) and save
# the figure under plot.loc.
plot_pred_maps(r_mean=r_mean,
               centers = coord.agg.final,
               taxa = taxa,
               t = seq(150,1950,100),
               N = 554,
               K = K,
               T = 19,
               thresh = breaks,
               limits = limits,
               type = 'prop',
               suff = 'test_downcore',
               save_plots =TRUE ,
               fpath = plot.loc,
               height = 36,
               width = 36)
|
f0f62d94e8fdbe6c38d8b9d1d300eb3e9900ad02 | bfb4ca3eb464a7b79ef991a3ccfb832aca6cb7f2 | /R/logbook.R | d09b3442dcd47eada9d43f88b6cd8daa3af2dc46 | [] | no_license | fornasaros/hyperSpec | 82f8768201d166ebbd8f98f789a4641793e2c756 | f52ffdb180e6e9bd425dc776847f48ecf9426e50 | refs/heads/master | 2020-12-31T03:26:02.535398 | 2014-05-23T00:00:00 | 2014-05-23T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 787 | r | logbook.R | ##' Logging the processing ot a hyperSpec Object
##' Returns the log \code{data.frame} stored in slot \code{x@@log}, which
##' records the processing steps applied to the \code{hyperSpec} object.
##'
##' When option \code{log} is \code{TRUE}, hyperSpec functions that do
##' assignments create these entries automatically (see
##' \code{\link{hy.setOptions}}); entries may also be added by hand with
##' \code{\link{logentry}}.
##'
##' @param x a \code{hyperSpec} object
##' @return a \code{data.frame} containing \code{x@@log}
##' @author C. Beleites
##' @seealso \code{\link{hy.setOptions}}, \code{\link{logentry}}.
##' @export
##' @examples
##'
##' logbook (flu)
##'
logbook <- function(x) {
  # Validate before touching slots: chk.hy() checks the class,
  # validObject() runs the S4 validity method.
  chk.hy(x)
  validObject(x)
  # Identical to x@log, spelled via slot().
  slot(x, "log")
}
|
4877ec740f0569b973f79f08fa6a0de0a6a1cdde | c7a9c08588eb4c11173bba92f3958cd12410dd66 | /man/poisson.glm.mix.Rd | 9a27cee45309ce6c38526b7094e455edd0e83597 | [] | no_license | cran/poisson.glm.mix | b3fc1706efdf46f8d647844bd60f4c2453189f8f | a24cc684d6707e1a6ccc87716a4bcf447f3644a5 | refs/heads/master | 2023-09-04T09:39:40.162672 | 2023-08-19T08:10:02 | 2023-08-19T09:30:46 | 17,698,628 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,647 | rd | poisson.glm.mix.Rd | \name{poisson.glm.mix}
\alias{poisson.glm.mix}
\alias{poisson.glm.mix-package}
\docType{package}
\title{Estimation of high dimensional Poisson GLMs via EM algorithm.}
\description{
This package can be used to cluster high dimensional count data in the presence of covariates. A mixture of Poisson Generalized Linear Models (GLMs) is proposed. Conditionally on the covariates, the multivariate Poisson distribution describing each cluster is a product of independent Poisson distributions. Different parameterizations for the slopes are proposed. The case of partitioning the response variables into a set of replicates is considered. The Poisson GLM mixture is estimated via the Expectation Maximization (EM) algorithm with Newton-Raphson steps. An efficient initialization of the EM algorithm is proposed to improve parameter estimation: it is a splitting scheme combined with a Small EM strategy. The user is referred to the function \code{\link{pois.glm.mix}} for an automatic evaluation of the proposed methodology.
}
\details{
\tabular{ll}{
Package: \tab poisson.glm.mix\cr
Type: \tab Package\cr
Version: \tab 1.4\cr
Date: \tab 2023-08-19\cr}
Assume that the observed data can be written as \eqn{y = (y_{1},\ldots,y_{n})} where \eqn{y_i=\{y_{ij\ell};j = 1, \ldots,J,\ell = 1, \ldots,L_{j}\}}, \eqn{y_i\in Z_+^{d}}, \eqn{i = 1,\ldots,n}, with \eqn{d = \sum_{j=1}^{J}L_{j}} and \eqn{L_j \geq 1}, \eqn{j=1,\ldots,J}. Index \eqn{i} denotes the observation, while the vector \eqn{L=(L_1,\ldots,L_J)} defines a partition of the \eqn{d} variables into \eqn{J} blocks: the first block consists of the first \eqn{L_1} variables, the second block consists of the next \eqn{L_2} variables and so on. We will refer to \eqn{j} and \eqn{\ell} using the terms ``condition'' and ``replicate'', respectively. In addition to \eqn{y}, consider that a vector of \eqn{V} covariates is observed, denoted by \eqn{x_{i} := \{x_{iv};v=1,\ldots,V\}}, for all \eqn{i = 1, \ldots,n}. Assume now that conditional to \eqn{x_{i}}, a model indicator \eqn{m} taking values in the discrete set \eqn{\{1,2,3\}} and a positive integer \eqn{K}, the response \eqn{y_{i}}, is a realization of the corresponding random vector \deqn{Y_{i}|x_{i}, m\sim \sum_{k = 1}^{K}\pi_{k}\prod_{j=1}^{J}\prod_{\ell=1}^{L_{j}}\mathcal P(\mu_{ij\ell k;m})} where \eqn{\mathcal P} denotes the Poisson distribution. The following parameterizations for the Poisson means \eqn{\mu_{ij\ell k;m}} are considered: If \eqn{m=1} (the ``\eqn{\beta_{jk}}'' parameterization), then \deqn{\mu_{ij\ell k;m}:=\alpha_{jk}+\gamma_{j\ell}+\sum_{v=1}^{V}\beta_{jkv}x_i.} If \eqn{m=2} (the ``\eqn{\beta_{j}}'' parameterization), then \deqn{\mu_{ij\ell k;m}:=\alpha_{jk}+\gamma_{j\ell}+\sum_{v=1}^{V}\beta_{jv}x_i.} If \eqn{m=3} (the ``\eqn{\beta_{k}}'' parameterization), then \deqn{\mu_{ij\ell k;m}:=\alpha_{jk}+\gamma_{j\ell}+\sum_{v=1}^{V}\beta_{kv}x_i.} For identifiability purposes assume that \eqn{\sum_{\ell=1}^{L_j}\gamma_{j\ell}=0}, \eqn{j=1,\ldots,J}.}
\author{Papastamoulis Panagiotis
Maintainer: Papastamoulis Panagiotis <papapast@yahoo.gr>
}
\references{
Papastamoulis, P., Martin-Magniette, M. L., & Maugis-Rabusseau, C. (2016). On the estimation of mixtures of Poisson regression models with large number of components. Computational Statistics & Data Analysis, 93, 97-106.
}
\keyword{ package }
\examples{
## load a small dataset of 500 observations
data("simulated_data_15_components_bjk")
## in this example there is V = 1 covariates (x)
## and d = 6 response variables (y). The design is
## L = (3,2,1).
V <- 1
x <- array(sim.data[,1],dim=c(dim(sim.data)[1],V))
y <- sim.data[,-1]
## We will run the algorithm using parameterization
## m = 1 and the number of components in the set
## {2,3,4}.
rr<-pois.glm.mix(reference=x, response=y, L=c(3,2,1), m=1,
max.iter=1000, Kmin=2, Kmax= 4,
m1=3, m2=3, t1=3, t2=3, msplit=4, tsplit=3,mnr = 5)
# note: the user should specify larger values for Kmax, m1, m2, t1,
# t2, msplit and tsplit for a complete analysis.
# retrieve the selected models according to BIC or ICL
rr$sel.mod.icl
rr$sel.mod.bic
# retrieve the estimates according to ICL
# alpha
rr$est.sel.mod.icl$alpha
# beta
rr$est.sel.mod.icl$beta
# gamma
rr$est.sel.mod.icl$gamma
# pi
rr$est.sel.mod.icl$pi
# frequency table with estimated clusters
table(rr$est.sel.mod.icl$clust)
# histogram of the maximum conditional probabilities
hist(apply(rr$est.sel.mod.icl$tau,1,max),30)
##(the full data of 5000 observations can be loaded using
## data("simulated_data_15_components_bjk_full")
}
|
c1d1621c6baa9a10e05513252780e83c33f7bcdf | 866176711469f9dbdd375aff30116d8785af021e | /man/ped_build.Rd | 2c9d0975bf3d41379643dd406f56c3a1f627ab0d | [] | no_license | funkhou9/breedTools | 5aa9686e1cd87c61111eff17549a9847d12e4749 | db737bc2b612bda06a32f7e270328839ad377c73 | refs/heads/master | 2021-05-01T00:27:02.028991 | 2017-02-26T20:28:42 | 2017-02-26T20:28:42 | 49,148,734 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 538 | rd | ped_build.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ped_build.R
\name{ped_build}
\alias{ped_build}
\title{Constructs pedigree information for select individuals}
\usage{
ped_build(ids, ped, gmax = 1000)
}
\arguments{
\item{ids}{character vector containing individuals of interest}
\item{ped}{data.frame of full pedigree to subset from}
\item{gmax}{numeric max number of generations to go back}
}
\value{
data.frame of subsetted pedigree
}
\description{
Constructs pedigree information for select individuals
}
|
b8822d7f49f971b1549d5bef7a04c065ff75f997 | f366c79ee2557e887030dd0fd892f7692e25bf81 | /R/countPerm.R | 82d05c7a591d6fe2cf4d59cde2bd2775533938f5 | [] | no_license | SchroederFabian/CVOC | be365dd0492cfe03d8230f3e3033dc91cec598f8 | 662bdbcad7e180dfc503204fc92abe03207d5ebe | refs/heads/master | 2021-01-12T00:04:09.463130 | 2017-01-13T16:01:12 | 2017-01-13T16:01:12 | 78,667,263 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,614 | r | countPerm.R |
# Exact count (big-integer, via gmp) of the permutations consistent with the
# confusion-table configuration (tp, fn, tn, fp). The total is the product of
# the number of admissible false-positive arrangements (sum.fp) and
# false-negative arrangements (sum.fn); the per-side sums are delegated to
# addup_fp_left/right and addup_fn_left/right, which are defined elsewhere
# in this package.
#
# left   : logical; selects the ">" vs ">=" side of the threshold
#          comparisons below (presumably left- vs right-sided handling of
#          ties in the decision rule -- see the addup_* helpers).
# tp, fn : counts in the positive class (n1 = tp + fn).
# tn, fp : counts in the negative class (n0 = tn + fp).
# c0, c1 : per-class weight terms entering the threshold comparisons.
# pi0    : prior weight of class 0 (class 1 gets 1 - pi0).
# Returns a gmp::bigz integer; 0 when no admissible arrangement exists.
countPerm <- function(left, tp, fn, tn, fp, c0, c1, pi0) {
  # All threshold comparisons use values rounded to 5 decimals, presumably
  # to keep the floating-point comparisons stable -- TODO confirm.
  rnd <- 5
  n1 <- tp + fn
  n0 <- tn + fp
  n <- n0 + n1
  sum.fp <- gmp::as.bigz(0)
  sum.fn <- gmp::as.bigz(0)
  # ---- number of permutations of false positives ----
  # starting values
  level.init <- fp
  # First index k at which k*c1/n1*(1-pi0) crosses the class-0 term.
  mnpsr <- ifelse(left, min( which( round((c(1:n)*c1/n1*(1-pi0)),rnd) > round(c0/n0*pi0,rnd))),
                  min( which( round((c(1:n)*c1/n1*(1-pi0)),rnd) >= round(0/n0*pi0,rnd))))
  # Largest admissible index; max(which(...)) on an empty set yields -Inf
  # with a warning (suppressed), which the is.finite() check below turns
  # into an early "zero permutations" return.
  mxpsr <- ifelse(left, suppressWarnings( max( which( round( (fp - level.init + tn)*c0/n0*pi0 + (tp - mnpsr - c(0:n))*c1/n1*(1-pi0),rnd) >= round(fp*c0/n0*pi0 + fn*c1/n1*(1-pi0),rnd))-1)),
                  suppressWarnings( max( which( round( (fp - level.init + tn)*c0/n0*pi0 + (tp - mnpsr - c(0:n))*c1/n1*(1-pi0),rnd) > round(fp*c0/n0*pi0 + fn*c1/n1*(1-pi0),rnd))-1)))
  if (!is.finite(mxpsr)) {return(gmp::as.bigz(0))}
  start.init <- (fp + tp) - mnpsr
  stop.init <- max(level.init, tp - mnpsr + level.init - mxpsr)
  # initialize: with no false positives there is exactly one arrangement,
  # except the degenerate right-sided case tp == 0, which contributes none.
  if (level.init==0) {
    if (tp==0 & left==FALSE) {
      sum.fp <- gmp::as.bigz(0)
    } else {
      sum.fp <- gmp::as.bigz(1)
    }
  } else {
    if (left) { sum.fp <- addup_fp_left(level.init, start.init, stop.init, tp, fn, tn, fp, c0, c1, pi0)
    } else { sum.fp <- addup_fp_right(level.init, start.init, stop.init, tp, fn, tn, fp, c0, c1, pi0)
    }
  }
  # ---- number of permutations for false negatives ----
  # (mirror of the block above with the class roles swapped)
  # starting values
  level.init <- fn
  mnpsr <- ifelse(left, min( which( round( c(1:n)*c0/n0*pi0,rnd) >= round( c1/n1*(1-pi0),rnd))),
                  min( which( round( c(1:n)*c0/n0*pi0,rnd) > round( c1/n1*(1-pi0),rnd))))
  mxpsr <- ifelse(left, suppressWarnings( max( which( round( (fn - level.init + tp)*c1/n1*(1-pi0) + (tn - mnpsr - c(0:n))*c0/n0*pi0,rnd) >= round(fp*c0/n0*pi0 + fn*c1/n1*(1-pi0),rnd))-1)),
                  suppressWarnings( max( which( round( (fn - level.init + tp)*c1/n1*(1-pi0) + (tn - mnpsr - c(0:n))*c0/n0*pi0,rnd) > round(fp*c0/n0*pi0 + fn*c1/n1*(1-pi0),rnd))-1)))
  if (!is.finite(mxpsr)) {return(gmp::as.bigz(0))}
  start.init <- (fn + tn) - mnpsr
  stop.init <- max(level.init, tn - mnpsr + level.init - mxpsr)
  # initialize (same convention as for the false positives)
  if (level.init==0) {
    if (tn==0 & left==FALSE) {
      sum.fn <- gmp::as.bigz(0)
    } else {
      sum.fn <- gmp::as.bigz(1)
    }
  } else {
    if (left) { sum.fn <- addup_fn_left(level.init, start.init, stop.init, tp, fn, tn, fp, c0, c1, pi0)
    } else { sum.fn <- addup_fn_right(level.init, start.init, stop.init, tp, fn, tn, fp, c0, c1, pi0)
    }
  }
  # Total count = (fp arrangements) x (fn arrangements), exact bigz product.
  outpt <- gmp::mul.bigz(sum.fn, sum.fp)
  return(outpt)
}
|
54257cb3350d8a00b600f6fc4c824b77d3ef0b01 | 6b4d4fc90123de9fe306de142843e0fbc8555418 | /run_analysis.R | f1c3928fdafd76b03d8960a9bd232d2d2e34252f | [] | no_license | bartaelterman/datasci_getting_and_cleaning_data | 1899dfa3d2f51508b857687b0eba4e1e9e3cc446 | b3222cdb95d1fb4e7343122225fac8e3013d3e87 | refs/heads/master | 2016-09-11T10:26:05.360133 | 2014-07-15T14:35:52 | 2014-07-15T14:35:52 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,322 | r | run_analysis.R | library(plyr)
# Read feature names (features.txt: one feature name per numeric id).
featurenames = read.table("features.txt", row.names=1, col.names=c("id", "feature"))
# Create logical vector for 'mean' and 'std' feature names: a feature is
# kept iff one of its "-"-separated parts is exactly "mean()" or "std()"
# (so e.g. "meanFreq()" is excluded).
meansAndStds = sapply(featurenames$feature, function(x) {
  l = strsplit(as.character(x), "-")
  "mean()" %in% l[[1]] | "std()" %in% l[[1]]
})
fnames = featurenames$feature[meansAndStds]
# Create nice feature names: drop "()", turn "-" into ".", and collapse an
# immediately repeated word of 4+ letters (e.g. "BodyBody" -> "Body").
fnamesNoBrackets = sapply(fnames, function(x) {
  a = gsub("\\(\\)", "", x)
  b = gsub("-", ".", a)
  gsub("([a-zA-Z]{4,})\\1", "\\1", b)
})
# Read activities (activity_labels.txt: id -> label lookup used by readData).
activities = read.table("activity_labels.txt", row.names=1, col.names=c("id", "activity"))
# Load one partition ("test" or "train") of the UCI HAR dataset and return a
# data frame with the mean/std feature columns plus activity and subject.
#
# Relies on objects defined earlier in this script -- meansAndStds (logical
# column mask), fnamesNoBrackets (cleaned feature names) and activities
# (id -> label lookup) -- and on the unzipped dataset files being in the
# working directory.
#
# testOrTrain: "test" or "train"; selects the <set>/X_<set>.txt files.
# Returns: data.frame of selected features + factor column `activity` +
#          column `subject`.
readData <- function(testOrTrain) {
  # Paths to the measurement, activity-label and subject files of this set.
  measurements_file <- paste0(testOrTrain, "/X_", testOrTrain, ".txt")
  labels_file <- paste0(testOrTrain, "/y_", testOrTrain, ".txt")
  subjects_file <- paste0(testOrTrain, "/subject_", testOrTrain, ".txt")
  # Keep only the mean()/std() features and give them readable names.
  measurements <- read.table(measurements_file)[, meansAndStds]
  colnames(measurements) <- fnamesNoBrackets
  # Map the numeric activity codes onto their descriptive labels.
  activity_codes <- read.table(labels_file)
  measurements$activity <- factor(activity_codes[, 1],
                                  levels = row.names(activities),
                                  labels = activities$activity)
  # One subject id per observation.
  measurements$subject <- read.table(subjects_file)[, 1]
  measurements
}
# Read the test partition.
testdata = readData("test")
# Read the training partition.
traindata = readData("train")
# Stack both partitions into one tidy data set and write it out
# (comma-separated; write.table keeps row names by default).
tidyData = rbind(traindata, testdata)
write.table(tidyData, file="ucihar_tidy_data.txt", sep=",")
# Second data set: the mean of every feature per (subject, activity) pair.
dfsPerSubject = split(tidyData, factor(tidyData$subject))
aggDataframes = lapply(dfsPerSubject, function(df) {
  # Per-activity column means for this subject's rows.
  d = aggregate(df[fnamesNoBrackets], list(activity = df$activity), mean)
  # All rows of df belong to one subject; tag the aggregate with its id.
  d$subject = rep(df$subject[1], length(d$activity))
  d
})
# Merge the per-subject aggregates back into a single data frame.
combinedNewData = ldply(aggDataframes, data.frame)
# Drop the ".id" column that ldply attaches.
meanValues = combinedNewData[,sapply(colnames(combinedNewData), function(x) {".id" != x})]
write.table(meanValues, file="average_data.txt", sep=",")
|
141823149d255c4c8ea7cc00ff5b3a6f12e13ecd | c60643059742dc5407c7c706b53ee9351060ec04 | /R/ITN_make_plot_function.R | 7ff52c17a892ba2c1418522df116cd60514051cf | [] | no_license | MatthewSmith430/ITNr | 3cfa84c24d4be4b7db6697ad8f302c8371ed0c56 | ebd5b715210c6e036b30fc789aee0028d8430edc | refs/heads/master | 2023-04-08T18:38:57.906451 | 2023-03-31T16:01:12 | 2023-03-31T16:01:12 | 116,069,028 | 42 | 14 | null | 2021-07-09T18:42:43 | 2018-01-02T23:44:29 | R | UTF-8 | R | false | false | 2,328 | r | ITN_make_plot_function.R | #' @title Single Clean ITN Plot
#'
#' @description This function plots a single/clean ITN
#' @details Node size is twice the weighted out-degree (taken from
#'   \code{ITNcentrality}) and edge width follows the edge weight. When
#'   \code{REGION} is TRUE, nodes are coloured with the "Set1" palette after
#'   stripping the "all income levels" suffix and parentheses from the
#'   \code{regionNAME} vertex attribute.
#' @param gs International Trade Network - igraph object
#' @param LABEL Should labels be present - TRUE/FALSE
#' @param REGION Should nodes be coloured on the basis of region TRUE/FALSE
#' @export
#' @return Panel of ITN plots
#' @examples\donttest{
#' ##Load graph
#' data("ELEnet16")
#'
#' ##Otherwise download data from WITS and create an
#' ##International Trade Network using WITSclean()
#'
#' ##Plot the network - No Label, colour by region
#' ITN_plot_example<-ITN_make_plot(ELEnet16,FALSE,TRUE)
#'}
ITN_make_plot<-function(gs,LABEL,REGION){
  # ggnet2 draws `network` objects, so convert from igraph first.
  gNET<-intergraph::asNetwork(gs)
  # Region names cleaned for colouring: drop the "all income levels" suffix
  # and any leftover parentheses.
  REG<-igraph::V(gs)$regionNAME
  REG<-as.vector(REG)
  region<-gsub("all income levels", "", REG)
  regionCOLOUR<-gsub("\\(|\\)", "", region)
  # Weighted out-degree drives the node size (WO*2 in the calls below).
  CENT<-ITNcentrality(gs)
  WO<-CENT$Weighted.Out.Degree
  WO<-as.vector(WO)
  WO<-as.numeric(WO)
  # Four variants: region colouring on/off x node labels on/off; the ggnet2
  # calls differ only in node.color / label settings.
  # NOTE(review): ggplot2::guides(size = FALSE) is deprecated in
  # ggplot2 >= 3.3.4 (use guides(size = "none")) -- confirm the supported
  # ggplot2 version before modernising.
  if (REGION==TRUE){
    if (LABEL==TRUE){
      GGally::ggnet2(gNET,
                     node.size=WO*2,node.color = regionCOLOUR,color.palette = "Set1",
                     color.legend = "Region",label = TRUE,
                     label.size = 2.5,edge.size = igraph::E(gs)$weight,
                     edge.color = c("color", "grey50"),arrow.size =5 )+
        ggplot2::guides(size = FALSE)
    }else{
      GGally::ggnet2(gNET,
                     node.size=WO*2,node.color = regionCOLOUR,color.palette = "Set1",
                     color.legend = "Region",
                     edge.size = igraph::E(gs)$weight,
                     edge.color = c("color", "grey50"),arrow.size =5 )+
        ggplot2::guides(size = FALSE)
    }
  }else{
    if (LABEL==TRUE){
      GGally::ggnet2(gNET,
                     size = WO*2,node.color = "#E41A1C",
                     label = TRUE,label.size = 2.5,edge.size = igraph::E(gs)$weight,
                     edge.color = "grey50",arrow.size=5)+
        ggplot2::guides(color = FALSE, size = FALSE)
    } else{
      GGally::ggnet2(gNET,
                     size = WO*2,node.color = "#E41A1C",
                     label = FALSE,edge.size = igraph::E(gs)$weight,
                     edge.color = "grey50",arrow.size=5)+
        ggplot2::guides(color = FALSE, size = FALSE)
    }
  }
}
|
b3e72aaf31e2d4463b8d36125b5a956eb5c3afa5 | ec2b9803a923d928751c76bbf1c31227928bffc9 | /man/get.chain.Rd | 982f52983f89fab06a236bf919fa2dfb360ef33d | [] | no_license | cran/BRugs | a5106711a3f8d3fa0adb91465df235e0f27a1b18 | acafa2035e6ef39e566085026eeabf67cd1361cd | refs/heads/master | 2023-05-27T11:23:45.986896 | 2023-05-15T05:52:29 | 2023-05-15T05:52:29 | 17,677,954 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 334 | rd | get.chain.Rd | \name{getChain}
\alias{getChain}
\title{Current chain to be initialized}
\description{This function is intended for internal use only.}
\usage{
getChain()
}
\value{Number of the chain to be initialized next.}
\seealso{\code{\link{BRugs}}, \code{\link{help.WinBUGS}}}
\keyword{interface}
\keyword{internal}
\concept{chain}
|
4aec1605643b8f6dea77854a15d959c734473ac9 | f94f0132bf3b4bcae8ddb0a24220c17e8438b889 | /src/training.R | 5259e2918230dd15ba3ca3db962dc4264d86e87c | [] | no_license | EricLiuCY/Stat_306_Project | 22c6efb7001abfbef0130bd8cb304b348aa8bd61 | eceb6f6d6ffc6e782f7c4cfa5f427bc445d4f857 | refs/heads/main | 2023-08-18T07:36:00.581508 | 2021-04-14T05:04:25 | 2021-04-14T05:04:25 | 351,658,201 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,657 | r | training.R | library(arrow)
library(dplyr)
library(leaps)
library(caret)
# __________________________ FUNCTIONS ______________________________
# Mean squared prediction error of `model` on `data`.
#
# model: a fitted model accepted by predict() (an lm fit in this script).
# data : data.frame holding the predictors and the observed SalePrice.
# Returns the mean of (observed - predicted)^2.
getMSE <- function(model, data) {
  predicted <- predict(model, newdata = data)
  squared_errors <- (data$SalePrice - predicted)^2
  mean(squared_errors)
}
# Fit the selected "vanilla" linear model: SalePrice regressed on a fixed,
# hand-picked subset of the raw predictors. The subset comes from an offline
# model-selection step (presumably via leaps, which this file loads -- the
# selection itself is not shown here).
#
# data: data.frame containing SalePrice and every predictor named below.
# Returns the fitted lm object.
bestModelVanilla <- function(data) {
  model <- lm(formula = SalePrice ~ MSZoning + LotArea + Street + LandContour +
                LotConfig + LandSlope + Neighborhood + Condition1 + Condition2 +
                BldgType + HouseStyle + OverallQual + OverallCond + YearBuilt +
                YearRemodAdd + RoofStyle + RoofMatl + Exterior1st + MasVnrType +
                MasVnrArea + ExterQual + Foundation + BsmtQual + BsmtExposure +
                BsmtFinSF1 + BsmtFinSF2 + BsmtUnfSF + X1stFlrSF + X2ndFlrSF +
                BedroomAbvGr + KitchenAbvGr + KitchenQual + TotRmsAbvGrd +
                Functional + Fireplaces + GarageCars + GarageArea + GarageQual +
                GarageCond + WoodDeckSF + ScreenPorch + PoolArea + SaleType,
              data = data)
  return(model)
}
# Fit the selected linear model on the feature-engineered ("transformed")
# data: includes engineered columns (UnfinishedRatio, BedPerBath,
# BathsPerLivAbv, hasOpenPorchSF, hasScreenPorch -- assumed to exist in
# `data`; their construction is not part of this file), squared terms via
# I(.) and a set of interaction terms. The predictor set is fixed by prior
# model selection.
#
# data: data.frame containing SalePrice plus every predictor named below.
# Returns the fitted lm object.
bestModelTransformed <- function(data) {
  model <- lm(formula = SalePrice ~ MSSubClass + MSZoning + LotArea + Street +
                LotShape + LandContour + Utilities + LotConfig + LandSlope +
                Neighborhood + Condition1 + Condition2 + OverallQual +
                OverallCond + YearBuilt + YearRemodAdd + RoofMatl +
                Exterior1st + MasVnrType + MasVnrArea + ExterCond + BsmtQual +
                BsmtExposure + BsmtUnfSF + TotalBsmtSF + X2ndFlrSF + GrLivArea +
                FullBath + BedroomAbvGr + KitchenAbvGr + KitchenQual +
                Functional + Fireplaces + GarageFinish + GarageCars +
                GarageArea + GarageQual + GarageCond + PavedDrive + WoodDeckSF +
                OpenPorchSF + EnclosedPorch + X3SsnPorch + ScreenPorch +
                PoolArea + SaleCondition + UnfinishedRatio + BedPerBath +
                BathsPerLivAbv + hasOpenPorchSF + hasScreenPorch +
                I(TotalBsmtSF^2) + I(MasVnrArea^2) + I(GarageArea^2) +
                I(GrLivArea^2) + BsmtQual:UnfinishedRatio +
                MasVnrType:I(MasVnrArea^2) + BsmtQual:I(TotalBsmtSF^2) +
                BsmtExposure:I(TotalBsmtSF^2) + GarageFinish:I(GarageArea^2) +
                MSZoning:LotArea + RoofMatl:X2ndFlrSF + MSSubClass:MSZoning +
                LotArea:LotShape + LotArea:LotConfig + LotArea:LandSlope +
                Neighborhood:I(GrLivArea^2) + LotArea:Condition1 +
                LotArea:Condition2, data = data)
  return(model)
}
# k-fold cross-validated MSE of the selected model.
#
# data:        training data.frame (must contain SalePrice).
# k:           number of folds (k = nrow(data) gives leave-one-out CV).
# transformed: TRUE  -> fit bestModelTransformed() on each training split,
#              FALSE -> fit bestModelVanilla().
# Returns the MSE averaged over all k held-out folds.
#
# BUG FIX: the previous version `break`-ed out of the fold loop after the
# first iteration but still divided by k, so it returned MSE(fold 1) / k
# rather than the cross-validated average. All k folds are now evaluated.
kFoldCV <- function(data, k, transformed) {
  # Assign every row to one of k folds (integer fold index per row).
  folds <- createFolds(seq_len(nrow(data)), k = k, list = FALSE, returnTrain = FALSE)
  partedData <- lapply(seq_len(k), function(i) data[folds == i, ])

  # Fit on the k-1 remaining folds, score on the held-out fold, and average.
  avgMSE <- 0
  for (i in seq_len(k)) {
    train <- bind_rows(partedData[-i])
    holdo <- partedData[[i]]
    model <- if (transformed) bestModelTransformed(train) else bestModelVanilla(train)
    avgMSE <- avgMSE + getMSE(model, holdo)
  }
  avgMSE / k
}
# _______________________________ TRAIN SCRIPT ________________________
# Evaluate both model variants with leave-one-out CV (k = number of rows),
# re-seeding before each run so the fold assignment is reproducible, then
# fit the full-data models for inspection via summary().
set.seed(888)
vTrain <- read_parquet("data/processed/train.parquet")
vTrainMSE <- kFoldCV(vTrain, nrow(vTrain), FALSE)
set.seed(888)
tTrain <- read_parquet("data/processed/transformed_train.parquet")
tTrainMSE <- kFoldCV(tTrain, nrow(tTrain), TRUE)
# Full model on the untransformed data (all columns as predictors).
vModel <- lm(SalePrice ~ ., data = vTrain)
summary(vModel)
# Full model on the transformed data.
tModel <- lm(SalePrice ~ ., data = tTrain)
summary(tModel)
e6787c1aeb1b63f54e04e273fe8cea762d565634 | 90098066ddb7d5356b109769126d766b0afd56aa | /Earthquake_v4/ALL_FUNCTIONS/02_13_Plot_Mw_Raster.R | fcfc9862752f29697ae7cf26d5854f091968203b | [] | no_license | PeterHedley94/Earthquake_Analysis | a8a8136d557787775dea0ce832dda191da473bc0 | 4bcca87de0fbcaff70f0e58a0a25717aca0cca81 | refs/heads/master | 2020-03-20T00:30:24.628030 | 2018-06-12T09:27:40 | 2018-06-12T09:27:40 | 137,046,320 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,867 | r | 02_13_Plot_Mw_Raster.R |
# Render a map of the combined-faults moment-magnitude (Mw) raster for one
# dissipation coefficient: read the ASCII grid, keep cells with Mw > 0, clip
# to the Turkey admin-0 boundary, draw with ggplot2 and write
# 'Mw_Raster_Data.pdf' into the coefficient's output folder.
#
# Population_Data, Map : unused in this function body
#                        (NOTE(review): confirm they can be dropped).
# Dissipation_coeff    : sub-folder name under <Distribution>/FL_DISS_RASTER/.
# Distribution         : distribution folder name under workingdir2.
# Relies on the global `workingdir2` and changes the working directory while
# running; Olddir is captured AFTER the first setwd, so the function ends in
# workingdir2. Side effects only (directory creation, .RData checkpoint,
# PDF output); no return value.
plotMw_Raster2 <- function(Population_Data, Map,Dissipation_coeff,Distribution){
  setwd(workingdir2)
  # Olddir is the directory AFTER the setwd above, i.e. workingdir2.
  Olddir <- getwd()
  Newdir <- paste(getwd(),'/',Distribution,'/FL_DISS_RASTER/',Dissipation_coeff,sep = '')
  dir.create(Newdir)
  Newdir2 <- paste(getwd(),'/',Distribution,sep = '')
  Mw_Raster_file <- paste(getwd(),'/',Distribution,'/FL_DISS_RASTER/',Dissipation_coeff,'/COMBINED_FAULTS.asc',sep = '')
  # Read the ASCII grid and build spatial versions of it (WGS84 lon/lat).
  Mw_Data <- read.asc(Mw_Raster_file)
  Mw <- raster.from.asc(Mw_Data)
  Mw.sp1 <- sp.from.asc(Mw_Data, projs = CRS(as.character("+init=epsg:4326")))
  class(Mw.sp1)
  # Keep only cells with a positive magnitude value.
  Mw.sp2 <- Mw.sp1[Mw.sp1@data$z > 0,]
  Mw.sp3 <- as(Mw.sp2, "SpatialPolygonsDataFrame")
  # Turkey national boundary (shapefile TUR_adm0), reprojected to match the raster.
  setwd(paste(workingdir2,'/DATA/TUR_adm_shp/',sep = ''))
  TUR <- readOGR(".", "TUR_adm0")
  #TUR <- sp.from.asc(Mw33, projs = CRS(as.character("+init=epsg:4326")))
  TUR <- spTransform(TUR, CRSobj = CRS(proj4string(Mw.sp2)))
  summary(TUR)
  summary(Mw.sp2)
  # Spatial subset: drop raster cells outside the Turkey polygon.
  Mw.sp2 <- Mw.sp2[TUR,]
  TRy50 <- data.frame(Mw.sp2)
  # Move into the output folder for this dissipation coefficient.
  setwd(workingdir2)
  Newdir <- paste(getwd(),'/',Distribution,'/FL_DISS_RASTER/',Dissipation_coeff,sep = '')
  setwd(Newdir)
  # NOTE(review): this assignment is immediately overwritten by tidy(TUR) below.
  Tur.f = data.frame(TUR)
  #save.image('Plot.RData')
  Tur.f <- tidy(TUR)
  bTry50 <- TRy50
  # Discretise Mw to whole numbers (as character) for the manual colour scale.
  TRy50$z <- round(TRy50$z)
  TRy50$z <- as.character(TRy50$z)
  save.image('ReadytoPlot.RData')
  # One colour per integer magnitude class; the legend shows classes 4-8.
  cols <- c("9" = "firebrick4", "8" = 'firebrick3', "7" = "orangered3","6" = "orangered","5" = "darkorange", "4" = "yellow2","3" = "whitesmoke")
  graph2 <- ggplot(aes(x = s1, y = s2),data = TRy50) + geom_raster(aes(fill = z),size = 0.01) + scale_fill_manual(name = 'Mw', values = cols,breaks = c(4,5,6,7,8))
  graph2 <- graph2 + geom_path(aes(group = group, x = long, y = lat),data = Tur.f, color = 'black',size = 1) + theme_void()
  pdf('Mw_Raster_Data.pdf')
  print(graph2)
  dev.off()
  setwd(Olddir)
}
|
6a74dd63a78bd2ef983c482a5a634fe8a312b031 | f6bea793577a664d4af83c45ac12381a68927084 | /Injectivity MA(2).R | 5bd0a20c1db2437159ed3a95659a3121e1009920 | [] | no_license | NTomasetti/ABC-Project | d79608a10a631e7d429c4351c57a19a312837359 | 414562b71317c3c871cd1233984fd38c4c8d4123 | refs/heads/master | 2020-07-02T08:15:27.613115 | 2016-08-22T00:47:23 | 2016-08-22T00:47:23 | 66,228,164 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,214 | r | Injectivity MA(2).R | source("bindma2.R")
## MA(2) binding-function injectivity check.
## Builds a grid of (theta1, theta2) values, numerically differentiates the
## auxiliary binding function bind.ma2() (sourced from bindma2.R) at each grid
## point, and reports which 2x2 sub-Jacobians are positive definite over the
## whole grid.

## Step One - initialise the theta grid ##
# NOTE: `T` here is the sample size, which shadows the T/TRUE alias --
# kept for compatibility with the bind.ma2() calls below.
T <- 15000
d_t <- 2                      # number of structural parameters
d_e <- 4                      # number of auxiliary (binding) statistics
theta1 <- seq(-0.95, .95, .5)
theta2 <- seq(-0.95, .95, .5)
theta <- expand.grid(theta1, theta2)

## Step Two - generate one common draw of errors ##
eps <- rnorm(T)

## Step Three - numerical partial derivatives (central differences) ##
h <- 0.0000001
score <- array(0, dim = c(d_e, d_t, nrow(theta)))
for (i in seq_len(nrow(theta))) {
  grid1 <- c(theta[i, 1] + 0.5 * h, theta[i, 1] - 0.5 * h)
  score1 <- (bind.ma2(T, grid1[1], theta[i, 2], eps) - bind.ma2(T, grid1[2], theta[i, 2], eps)) / h
  grid2 <- c(theta[i, 2] + 0.5 * h, theta[i, 2] - 0.5 * h)
  score2 <- (bind.ma2(T, theta[i, 1], grid2[1], eps) - bind.ma2(T, theta[i, 1], grid2[2], eps)) / h
  score[, , i] <- cbind(score1, score2)
}

## Step Four - every d_t x d_t sub-matrix of the Jacobian ##
b <- combn(d_e, d_t)
mats2x2 <- array(0, dim = c(d_t, d_t, nrow(theta), ncol(b)))
for (i in seq_len(ncol(b))) {
  # dims 1-2: the 2x2 matrix; dim 3: theta grid point; dim 4: row combination
  mats2x2[, , , i] <- score[b[, i], , ]
}

## Step Five - which row combinations are positive definite everywhere? ##
# eigen() is called once per matrix (the original called it twice and relied
# on `$value` partial-matching the `values` component); "positive definite"
# here means all eigenvalues are real and strictly positive.
posdef <- apply(mats2x2, 3:4, function(x) {
  ev <- eigen(x)$values
  all(Re(ev) > 0) && all(Im(ev) == 0)
})
b[, apply(posdef, 2, all)]
|
1ef1d509c014a5f1bb0386e782492c379ae5efd6 | 4b3987493fc49d6a55b948a7cca5c0e89d801020 | /06_API_Webscraping_Workshop_Sep18/exercises_1_API.R | 6444fc5522fbd5a5b484cad66531c63fc0f27971 | [] | no_license | rladies/meetup-presentations_brussels | 4f342c3caf6a080808eeab586d34acf3003a1a05 | 5778c852463e3a897ce81baed0829fd1d2dd1413 | refs/heads/master | 2021-09-27T01:28:31.184367 | 2018-11-05T10:44:10 | 2018-11-05T10:44:10 | 113,076,048 | 2 | 5 | null | 2018-03-19T09:03:11 | 2017-12-04T17:55:36 | HTML | UTF-8 | R | false | false | 2,534 | r | exercises_1_API.R | # API exercises
# --------
# number 1
# --------
library(httr)
# url
url_ex1 <- "http://api.open-notify.org/iss-now.json"
# GET request
response <- GET(url_ex1)
#show content as json data
content <- content(response, as = "text")
#prettier
jsonlite::prettify(content)
#parsing json to R
content_jsonlite <- jsonlite::fromJSON(content)
#or much shorter - json format is autodetected and parsed
content_parsed <- content(response)
---
#------------
# API exercise 2
#------------
#Coordinates brussels
lat_bxl <- 50.85045
long_bxl <- 4.34878
#option 1: manual
url <- "http://api.open-notify.org/iss-pass.json?lat=50.85045&lon=4.34878"
#option 2: paste together
url <- paste0("http://api.open-notify.org/iss-pass.json?lat=", lat_bxl,
"&lon=", long_bxl)
#option 3: build URL
url <- modify_url(url = "http://api.open-notify.org",
path = "iss-pass.json",
query = list(lat = lat_bxl, lon = long_bxl))
#GET request
response <- GET(url)
#alternative
GET(url = "http://api.open-notify.org/iss-pass.json",
query = list(lat = lat_bxl, lon = long_bxl))
#unpack response
content <- content(response)
#modify to R
pass_times <- data.frame(
risetime = purrr::map_chr(content$response, "risetime"),
duration = purrr::map_chr(content$response, "duration"))
#------------
# API exercise 3
#------------
# API: get all books
url <- "https://www.anapioficeandfire.com/api/books"
response_books <- GET(url)
content_books <- content(response_books)
#look into the book
str(content_books, max.level = 2)
#find all book names
all_book_names <- purrr::map_chr(content_books, "name")
#find the info on all POV characters in book 1
content_book1 <- content_books[[1]]
str(content_book1, max.level = 1)
book1_pov <- content_book1$povCharacters
book1_pov <- unlist(book1_pov)
#call api for info on one character
url1 <- book1_pov[1]
response <- GET(url1)
content <- content(response)
df <- data.frame(
name = content$name,
gender = content$gender,
culture = content$culture,
born = content$born,
died = content$died)
#wrap it in a function
get_pov_info <- function(url) {
  # Fetch one Ice-and-Fire API character record and return it as a
  # single-row data frame (name, gender, culture, born, died).
  # A 1-second pause per call keeps the script polite towards the API.
  response <- GET(url)
  content <- content(response)
  # The API may omit a field entirely (parsed as NULL); map NULL/empty to NA
  # so data.frame() does not error out on a zero-length column.
  fld <- function(x) if (is.null(x) || length(x) == 0) NA_character_ else as.character(x)
  df <- data.frame(
    name = fld(content$name),
    gender = fld(content$gender),
    culture = fld(content$culture),
    born = fld(content$born),
    died = fld(content$died), stringsAsFactors = FALSE)
  Sys.sleep(1)
  df
}
# Fetch info for every POV character in book 1 -- one HTTP call per character,
# and get_pov_info sleeps 1 s per request, so ~9 characters take ~9 seconds.
all_pov_info <- purrr::map_df(book1_pov, get_pov_info)
|
b400f56e5b2155399a9dc50571cb1203a76ac91e | bc8856849b443540778cd8a99a4424538bd0e87f | /plot4.R | 3f4a5fc4b2d075d73678566f5f49529b52df1403 | [] | no_license | yykhoo88/ExData_Plotting1 | 298dbf130aaa91a3b1ae4bfe35cf035eecca1245 | d23ec6e19da63fa637c6ed12fabf197d9269ae0f | refs/heads/master | 2020-03-12T09:07:46.049029 | 2018-04-22T10:00:19 | 2018-04-22T10:00:19 | 130,545,251 | 0 | 0 | null | 2018-04-22T07:30:27 | 2018-04-22T07:30:27 | null | UTF-8 | R | false | false | 1,540 | r | plot4.R | #cleanup, change local working directory
# Exploratory Data Analysis: plot 4 -- a 2x2 panel of household power
# consumption time series restricted to 2007-02-01 and 2007-02-02.
# NOTE(review): the next three lines wipe the workspace and hard-code a local
# working directory; that is this script's original behaviour but makes it
# non-portable -- consider removing them.
rm(list=ls())
getwd()
setwd("/Dropbox/Dropbox/Codes/Rstudio")

# Download and unzip the UCI household power consumption data if not present.
filename <- "dataset.zip"
filenameUnzipped <- "household_power_consumption.txt"
if (!file.exists(filename)){
  fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileURL, filename)
}
if (!file.exists(filenameUnzipped)) {
  unzip(filename)
}

# Read the full semicolon-separated file ('?' marks missing values), keep only
# the two target dates, and build a POSIXct timestamp column for plotting.
dataFull <- read.table(filenameUnzipped, header=TRUE, na.strings="?", sep=";")
data <- dataFull[(dataFull$Date=="1/2/2007" | dataFull$Date=="2/2/2007" ), ]
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
dateTime <- paste(data$Date, data$Time)
data$DateTime <- as.POSIXct(dateTime)

# Draw the four panels on a 2x2 layout.
par(mfrow=c(2,2))
with(data, {
  # Panel 1: global active power.
  plot(Global_active_power~DateTime, type="l", ylab="Global Active Power", xlab="")
  # Panel 2: voltage.
  plot(Voltage~DateTime, type="l", ylab="Voltage", xlab="")
  # Panel 3: the three sub-metering series overlaid on one axis.
  plot(Sub_metering_1~DateTime, type="l",ylab="Energy sub metering", xlab="")
  lines(Sub_metering_2~DateTime,col='Red')
  lines(Sub_metering_3~DateTime,col='Blue')
  legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  # Panel 4: global reactive power (axis-label typo "Rective" fixed).
  plot(Global_reactive_power~DateTime, type="l", ylab="Global Reactive Power",xlab="")
})

# Export the finished panel to a 480x480 PNG.
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
66ae0ce1406988094d5e8d91a050a1a1a9e71b33 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/prioritylasso/tests/test-cvmpl.R | 28a62761c03d50d5540ad05ac8a07fbfc7a9e047 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,802 | r | test-cvmpl.R | # gaussian
cvm_pl1 <- cvm_prioritylasso(X = matrix(rnorm(50*500),50,500), Y = rnorm(50), family = "gaussian", type.measure = "mse",
blocks.list = list(list(block1=1:75,block2=76:200, block3=201:500), list(1:75, 201:500, 76:200)),
block1.penalization = TRUE, lambda.type = "lambda.min", standardize = TRUE, nfolds = 5)
cvm_pl1a <- cvm_prioritylasso(X = matrix(rnorm(50*500),50,500), Y = rnorm(50), family = "gaussian", type.measure = "mse",
blocks.list = list(list(block1=1:75,block2=76:200, block3=201:500), list(1:75, 201:500, 76:200)),
max.coef.list = list(c(10,5,7), c(10,7,3)),
block1.penalization = TRUE, lambda.type = "lambda.min", standardize = TRUE, nfolds = 5)
# binomial
cvm_pl2 <- cvm_prioritylasso(X = matrix(rnorm(50*500),50,500), Y = rbinom(n=50, size=1, prob=0.5), family = "binomial",
type.measure = "auc", blocks.list = list(list(block1=1:75,block2=76:200, block3=201:500),
list(1:75, 201:500, 76:200)),
block1.penalization = TRUE, lambda.type = "lambda.min",
standardize = TRUE, nfolds = 5)
# cox
# Simulate right-censored survival data for the Cox-family test:
# 50 observations, 300 predictors, of which the first p/10 = 30 carry signal.
n <- 50;p <- 300
nzc <- trunc(p/10)
x <- matrix(rnorm(n*p), n, p)
beta <- rnorm(nzc)
# Linear predictor uses only the nonzero coefficients (scaled down by 3).
fx <- x[, seq(nzc)]%*%beta/3
hx <- exp(fx)
# Exponential event times with hazard hx; ~30% of cases flagged as censored.
ty <- rexp(n,hx)
tcens <- rbinom(n = n,prob = .3,size = 1)
library(survival)
# Surv status is 1 - tcens, i.e. tcens == 1 marks a censored observation.
y <- Surv(ty, 1-tcens)
blocks <- list(list(block1=1:20, block2=21:200, block3=201:300), list(1:20, 201:300, 21:200))
cvm_pl3 <- cvm_prioritylasso(x, y, family = "cox", type.measure = "deviance", blocks.list = blocks,
block1.penalization = FALSE,
lambda.type = "lambda.min", standardize = TRUE, nfolds = 5)
library(testthat)
test_that("testing error messages", {
expect_that(cvm_prioritylasso(X = matrix(rnorm(50*500),50,500), Y = rnorm(50), family = "gaussian", type.measure = "mse",
blocks.list = list(list(1:75, 76:200, 201:500),list(1:75, 201:500)),
block1.penalization = TRUE, lambda.type = "lambda.min", standardize = TRUE, nfolds = 5),
throws_error("Each predictor should be included in exactly one block."))
expect_that(cvm_prioritylasso(X = matrix(rnorm(50*500),50,500), Y = rnorm(50), family = "gaussian", type.measure = "mse",
blocks.list = list(list(block1=1:75,block2=76:200, block3=201:500), list(1:75, 201:500, 76:200)),
max.coef.list = list(c(5,6,7)),
block1.penalization = TRUE, lambda.type = "lambda.min", standardize = TRUE, nfolds = 5),
throws_error("blocks.list and max.coef.list must have the same length."))
expect_that(cvm_prioritylasso(X = matrix(rnorm(50*500),50,500), Y = rnorm(50), family = "gaussian", type.measure = "mse",
blocks = list(list(block1=1:75,block2=76:200, block3=201:500), list(1:75, 76:500)),
max.coef.list = list(c(5,6,7), c(8,9,10)),
block1.penalization = TRUE, lambda.type = "lambda.min", standardize = TRUE, nfolds = 5),
throws_error("blocks.list and the entries of max.coef.list must have the same length."))
})
test_that("testing cvm_prioritylasso", {
expect_that(length(cvm_pl1$best.blocks), testthat::equals(3))
expect_that(cvm_pl2$name, matches("AUC"))
expect_that(cvm_pl3, is_a("prioritylasso"))
expect_that(cvm_pl1a$nzero[[1]] <= 10, is_true())
expect_that(sum(unlist(cvm_pl1a$nzero)) <= 22, is_true())
})
|
7c600bbd1913fbf3e6dbf83f4eb64aef76f63011 | af8592801abaa039f133dc618359e813519eb6de | /functions/danielfun.R | 558a2a63df15480a05603acd2383a6e711343fce | [] | no_license | alex-cernat/MTME-MM | 100fd72b4a07cca7043a1c299fe0b19184ad6ff7 | 378039d60c9c8bef265bc21b7d37546c821d13fc | refs/heads/master | 2020-07-30T10:36:45.073627 | 2019-09-30T08:51:02 | 2019-09-30T08:51:02 | 210,194,915 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,976 | r | danielfun.R | create_table_varcomps <- function(df_tab) {
if("label" %in% names(df_tab)) df_tab <- df_tab[, names(df_tab) != "label"]
df_tab %>%
mutate_each(funs(round(., digits = 4)), -lhs, -op, -rhs) %>%
filter(op == "~~" & lhs == rhs & # Select variances
grepl("^[TASP]", lhs) & # Select latent variables
abs(se) > 1e-5) %>% # Exclude fixed parameters
datatable(., extensions = c("FixedHeader"), filter =list(position = 'top', clear = TRUE, plain = FALSE), style = "bootstrap", fillContainer = FALSE)
}
create_table_stdized <- function(df_tab) {
  # Render standardized lavaan parameter estimates as an interactive
  # DT::datatable: rounds numeric columns to 4 d.p., then drops intercepts
  # ("~1"), variances of latent variables starting with "G", and parameters
  # whose standard error is (near) zero, i.e. fixed parameters.
  # NOTE(review): mutate_each()/funs() are deprecated dplyr verbs -- kept
  # as-is for compatibility with the package versions this project uses.
  if("label" %in% names(df_tab)) df_tab <- df_tab[, names(df_tab) != "label"]
  df_tab %>%
    mutate_each(funs(round(., digits = 4)), -lhs, -op, -rhs) %>%
    filter(op != "~1" & !(op == "~~" & grepl("^G", lhs)) &
             abs(se) > 1e-6) %>%
    datatable(., extensions = c("FixedHeader"), filter= list(position = 'top', clear = TRUE, plain = FALSE),
              style = "bootstrap", fillContainer = FALSE)
}
make_zero_to_one <- function(x) {
  # Shift a numeric vector so its (non-NA) minimum sits at zero.
  # Note: despite the name, values are not rescaled into [0, 1]; only the
  # minimum is anchored at zero -- the spread of the data is unchanged.
  shift <- min(x, na.rm = TRUE)
  x - shift
}
ev_plot <- function(ev) {
  # Bar chart of eigenvalues in their original order, coloured by sign:
  # non-negative eigenvalues "No" (green), negative ones "Yes" (red) --
  # the unnamed scale_fill_manual values follow alphabetical level order,
  # so "No" -> green and "Yes" -> red when both signs are present.
  eigenvalue_negative <- ifelse(ev >= 0, "No", "Yes")
  evdf <- data.frame(x = seq_along(ev), ev = ev)
  ggplot(evdf, aes(x, ev)) +
    geom_bar(stat = "identity", aes(fill = eigenvalue_negative)) +
    scale_y_continuous("Eigenvalue") + xlab("") +
    scale_fill_manual(values = c("green", "red"))
}
scoring_func <- function(object, data.obs) {
  # Compute regression-method factor scores for a fitted lavaan model,
  # handling arbitrary missing-data patterns pattern by pattern.
  # object:   fitted lavaan object (single group assumed; g <- 1 below).
  # data.obs: data frame/matrix of observed variables; reordered to the
  #           model's observed-variable order before use.
  # Returns a matrix of factor scores. Rows are grouped by missingness
  # pattern, so row order may differ from the input -- TODO(review): confirm
  # callers do not rely on the original row order.
  data.obs <- data.obs[, object@Data@ov.names[[1]]] %>% as.matrix
  lavmodel <- object@Model
  lavsamplestats <- object@SampleStats
  # Model-implied moments via lavaan's (unexported) internal helpers.
  Sigma.hat <- lavaan:::computeSigmaHat(lavmodel = lavmodel)
  Sigma.hat.inv <- lapply(Sigma.hat, solve)
  VETA <- lavaan:::computeVETA(lavmodel = lavmodel, lavsamplestats = lavsamplestats)
  EETA <- lavaan:::computeEETA(lavmodel = lavmodel, lavsamplestats = lavsamplestats)
  EY <- lavaan:::computeEY(lavmodel = lavmodel, lavsamplestats = lavsamplestats)
  LAMBDA <- lavaan:::computeLAMBDA(lavmodel = lavmodel, remove.dummy.lv = FALSE)
  g <- 1
  # Factor-score coefficient matrix for the regression method.
  FSC <- VETA[[g]] %*% t(LAMBDA[[g]]) %*% Sigma.hat.inv[[g]]
  # Residuals of the observed data around the model-implied means.
  RES <- sweep(data.obs, MARGIN = 2L, STATS = EY[[g]], FUN = "-")
  # Encode each row's missingness as a 0/1 string key, e.g. "1101".
  napats <- apply(RES, 2, function(x) 1*!is.na(x)) %>% apply(1, paste, collapse = "")
  napats_unique <- unique(napats)
  scores_pats <- list()
  for(ipat in seq_along(napats_unique)) {
    pat <- napats_unique[ipat]
    RES_sub <- RES[napats == pat, ,drop = FALSE]
    is_observed <- as.logical(as.numeric(strsplit(pat, "")[[1]]))
    # Restrict both residuals and score coefficients to this pattern's
    # observed columns before scoring.
    RES_sub <- RES_sub[, is_observed, drop = FALSE]
    FSC_sub <- FSC[, is_observed, drop = FALSE]
    scores_pats[[ipat]] <-
      sweep(RES_sub %*% t(FSC_sub), MARGIN = 2L, STATS = EETA[[g]], FUN = "+")
  }
  FS <- Reduce(rbind, scores_pats)
  colnames(FS) <- object@pta$vnames$lv[[1]]
  FS
}
|
67da2b87e09b20375a6360e99bb061626eeb0ee2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/photobiologyFilters/examples/clear_filters.Rd.R | c28c7e3e0fa03ed63feebebd3f1a29f997a97092 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 449 | r | clear_filters.Rd.R | library(photobiologyFilters)
### Name: clear_filters
### Title: Spectral data for filters of different 'colors'
### Aliases: clear_filters colors neutral_filters uv_filters blue_filters
### blue_green_filters green_filters yellow_filters orange_filters
### red_nir_filters heat_filters
### Keywords: datasets
### ** Examples
clear_filters
# select filters of amber, yellow, orange color ("blue absorbing")
filters.mspct[yellow_filters]
|
b138c5c6c1fafe21b89bbcf8a07a6741a6f06c36 | e9435f9fa958237832141e9b64b72f3464ac71ef | /plot1.R | 6a728278934ba27e3f330a16c6b3dc604ebf3f2b | [] | no_license | pjpjean/ExData_Plotting1 | d841c85e0a90bf3ed83bd23dc5eb84e6e46d75fc | 74f2e8f384da63e7028c2aa6387569dfba27a7a5 | refs/heads/master | 2021-01-15T10:41:45.142832 | 2014-05-11T00:30:47 | 2014-05-11T00:30:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,366 | r | plot1.R | # ---------------------------------------------------------------------
# Reading household data only from dates 2007-02-01 and 2007-02-02.
# ---------------------------------------------------------------------
cat("Reading household data...\n")
# check if file exists
filename <- "household_power_consumption.txt"
stopifnot(file.exists(filename))
# I have decided to use fread {package data.table} because is fast.
# First I read only the Date column (~ 8Mb instead of ~ 126Mb for the
# full dataset) to find out where the lines of specified dates are
# (thanks to the fact that they are contiguous). After that, I read
# a filtered dataset setting fread's skip and nrow parameters.
# It returns a data.table object though, so I convert it to data.frame,
# which I'm more familiar with.
library(data.table)
# read only the Date column
hh.dates <- fread(filename, sep=";", select="Date")
# get the range of lines to read
lines.to.read <- with(hh.dates, range(which(Date == "1/2/2007" | Date == "2/2/2007")))
household <- fread(filename, sep=";", na.strings="?",
skip=lines.to.read[1],
nrow=lines.to.read[2] - lines.to.read[1] + 1)
# get column names (because skipping lines skips the header altogether)
hh.colnames <- colnames(fread(filename, sep=";", nrow=1))
setnames(household, hh.colnames)
# just to make sure it has no wrong dates
if (!all(household$Date == "1/2/2007" | household$Date == "2/2/2007"))
household <- household[household$Date == "1/2/2007" | household$Date == "2/2/2007", ]
# convert household data.table to data.frame
# there are more efficient ways to do that, but the filtered
# dataset is not that big (only 2880 observations)
household <- as.data.frame(household)
# convert date and time columns
household$Time <- strptime(paste(household$Date, household$Time), "%d/%m/%Y %H:%M:%S")
household$Date <- as.Date(household$Date, "%d/%m/%Y")
# remove some temporary variables
rm(hh.dates)
rm(hh.colnames)
# ---------------------------------------------------------------------
# Creating plot #1
# ---------------------------------------------------------------------
cat("Creating plot 1...\n")
png("plot1.png", width=480, height=480)
hist(household$Global_active_power,
col="red",
main="Global Active Power",
xlab="Global Active Power (kilowatts)")
dev.off()
cat("Done!\n") |
069c1f23d1bb659e151b254faac62abddeee7b07 | 2ee837abeb354e7cc882a35110198f1715db93c7 | /code/ChartMockups/percentChart.R | b358df55ed0548b922309b5600956269d06ec721 | [] | no_license | timothydobbins/drug-trends-deaths | 8f8b1d2edeb8895c8891855ed693664be92cd57f | b8b509cb4b3619e95926e4a225700289348ac121 | refs/heads/master | 2020-03-26T10:09:23.759840 | 2019-07-17T00:24:21 | 2019-07-17T00:24:21 | 144,783,593 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,507 | r | percentChart.R | library(readr)
library(janitor)
library(dplyr)
library(ggplot2)
library(plotly)
# Load the transformed drug-induced deaths dataset.
df <- read_csv("data/Transformed/Deaths_Pop_CI.csv")

# Restrict to the four opioid categories and the two intents of interest,
# then compute each drug's percentage share of deaths within every
# year/intent/nature/sex/jurisdiction/age_group cell.
sub <- df %>%
  filter(drug %in% c("Exclusive illicit opioids",
                     "Exclusive pharmaceutical opioids",
                     "Heroin/Opium with pharmaceutical opioids",
                     "Unspecified opioids") &
           intent %in% c("All", "Accidental")) %>%
  select(year, drug, intent, nature, sex, jurisdiction, age_group, n) %>%
  group_by(year, intent, nature, sex, jurisdiction, age_group) %>%
  mutate(alldeaths = sum(n),
         percent = round(n/sum(n)*100, 2)) %>%
  ungroup()  # drop the grouping so later operations see a plain tibble
sub

# Helper: stacked-area chart of percent-by-drug for one intent/sex slice
# (all ages). Factored out because the original repeated this call five times.
percent_area_plot <- function(which_intent, which_sex) {
  ggplot(filter(sub, intent == which_intent & sex == which_sex & age_group == "Allages"),
         aes(x = year, y = percent, fill = drug)) +
    geom_area()
}

percent_area_plot("All", "Male") + scale_fill_brewer(palette = "PuBu")
percent_area_plot("All", "Female")
percent_area_plot("All", "All")
percent_area_plot("Accidental", "Male")
percent_area_plot("Accidental", "Female")
g <- percent_area_plot("Accidental", "All")
g
ggplotly(g, group=1)
0399af5d538d594760f0e888b6f3cde2493d6b35 | 12ef479f127f10ad72943976678e0e27622eef4c | /Demo_Simulation.R | bb71f6f011558e3a9b6643a349b3629e708daa7a | [] | no_license | bandyopd/Skewed-Matrix-variate-Graphical-Modeling | 71d28dfa1c078f805b3b9b3a3f29bc515217a3b6 | c18b832820437711a5242eb353456126dcb401d2 | refs/heads/master | 2020-03-19T10:37:16.400441 | 2018-06-19T14:50:58 | 2018-06-19T14:50:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,910 | r | Demo_Simulation.R | # use simulated data to demonstrate the codes
# set working directory the path to the directory of 'demo_files'
setwd('demo_files')
# load required library for data generation
library(plyr) # split 3D array to list of matrices
library(expm) # calculate square root of a matrix
# load all R function codes
files = list.files('Code_MVST/')
sapply(files,function(f) source(file.path('Code_MVST',f)))
#############################
### Generat Complete Data ###
#############################
load('true_para.rda')
S=42; J=2; p=5; T=7; n=50; # no spatial covariate in simulation
true$nu = 20;
X = matrix(rnorm(p*n),p,n)
true$xbeta = t(true$beta)%*%X
true$gamma = rgamma(n,true$nu/2,rate=true$nu/2)
true$z = abs(sapply(true$gamma, function(x) matrix(rnorm(S*J)/sqrt(x),S,J), simplify='array'))
true$dzv = sapply(alply(true$z,3),function(x) diag(true$eta)%*%x%*% sqrtm(true$V), simplify='array')
true$U = replicate(n,true$u) + aperm(replicate(S,true$xbeta),c(3,1,2))
true$y = mapply(function(x1,x2) t(chol(solve(true$Omega)/x1))%*%x2%*%chol(true$V), true$gamma,
alply(array(rnorm(S*J*n),c(S,J,n)),3),SIMPLIFY='array')+true$U
########################
# Generate missingness #
########################
meanop = kronecker(diag(1,T),rep(1,S/T)/(S/T))
true$miss_quant = sapply(alply(true$y,3),function(x) t(meanop)%*%x%*%true$misspara[2:3]+true$misspara[1])
true$miss_prob = apply(true$miss_quant,1:2,pnorm);
delta = 1*( matrix(runif(T*n),T,n) < true$miss_prob )
true$yMISS = aperm(replicate(2,apply(delta,2,rep,each=S/T)),c(1,3,2))
y = true$y; y[true$yMISS==1] = NA;
####################
### Run Analysis ###
####################
nu = true$nu;
N=2000; burn=1000; thin=1; update=100; ncore=8;
postsamp = mvst.info.miss(y,delta,X,missing.type='probit',nu=nu,runs=N,burn=burn,thin=thin,update=update,ncore=ncore)
|
961cad68c4a9ab8a27d176f5ccd01fea74ae02c2 | 9b29a93d4aa6679d2e9d2ecc65b324fcaa8d7328 | /MT_plots/Chapter 5 - Modification of Depth Functions/outlier_detection_BR_ET_EVT.R | 1d83ac5fc5ac2165c3e8e9f232663fea84c49a9c | [] | no_license | Gordon90s/R_codes_master_thesis | 850fded79d15404bc5c65456b0e1b78035576604 | e1ad7792c13f729babf7e45d13a169b7140bfe66 | refs/heads/master | 2021-01-01T16:02:14.626557 | 2017-08-08T15:19:03 | 2017-08-08T15:19:03 | 97,760,988 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,820 | r | outlier_detection_BR_ET_EVT.R | # MT_MAIN_outlier_detection
#=============================================================
# ------------- load MT_MAIN_most_extreme_depths_comparaison.R ---------------------
source("./MT_functions/MT_MAIN_depth_function_for_analysis.R")
print = T
BR.data <- readRDS("./MT_functions/Comparaison_data_simulations/BR_array_1000_100_100.rds")
ET.data <- readRDS("./MT_functions/Comparaison_data_simulations/extremal_t_array.rds_1000_100_1000.rds")
n <- 5
simulation.names <- c("simulation 1", "simulation 2", "simulation 3", "simulation 4", "simulation 5")
most.extremes <- 5
data.to.use.BR <- BR.data[1:1000,,1:n]
data.to.use.ET <- ET.data[1:1000,,1:n]
set.seed(5)
start.timing <- proc.time()
most.extremes.depth.BR <- vector("list", n)
most.extremes.depth.ET <- vector("list", n)
for(i in 1:n){
most.extremes.depth.BR[[i]] <- most.extremes.depth.EVT(data.to.use.BR[,,i],most.extremes)
most.extremes.depth.ET[[i]] <- most.extremes.depth.EVT(data.to.use.ET[,,i],most.extremes)
}
proc.time() - start.timing
number.of.depths <- 3
depth.names <- c("FRPDT1 mean","FRPDT1 min","FRPDT1 min EVT")
cex = 1
if(print == T){pdf("most_extremes_BR_EVT.pdf", height = 8, width = 10)}
#par(mfrow = c(5,n), mar = c(5, 4, 4, 2) + 0.1)
par(mfrow = c(number.of.depths+1,n), oma = c(0,2.2,2.2,0))
for(j in 1:n){
par(mar = c(0,0,0,0))
rainbow.plot(data.to.use.BR[,,j], col = most.extremes.depth.BR[[j]]$col, ylim = range(data.to.use.BR[,,j]), ylab="", xlab="", main="", xaxt="n", yaxt="n")
mtext(text=simulation.names[j],side=3,line=1, cex = cex)
if(j == 1) mtext(text="all data",side=2,line=1, cex = cex)
}
for(i in 1:number.of.depths){
for(j in 1:n){
rainbow.plot(most.extremes.depth.BR[[j]]$sorted.data.outlier[,,i], col = most.extremes.depth.BR[[j]]$col.outlier[i,], ylim = range(data.to.use.BR[,,j]), ylab="", xlab="", main="", xaxt="n", yaxt="n")
if(j == 1) mtext(text=depth.names[i],side=2,line=1, cex = cex)
}
}
if(print == T){dev.off()}
if(print == T){pdf("most_extremes_ET_EVT.pdf", height = 8, width = 10)}
#par(mfrow = c(5,n), mar = c(5, 4, 4, 2) + 0.1)
par(mfrow = c(number.of.depths+1,n), oma = c(0,2.2,2.2,0))
for(j in 1:n){
par(mar = c(0,0,0,0))
rainbow.plot(data.to.use.ET[,,j], col = most.extremes.depth.ET[[j]]$col, ylim = range(data.to.use.ET[,,j]), ylab="", xlab="", main="", xaxt="n", yaxt="n")
mtext(text=simulation.names[j],side=3,line=1, cex = cex)
if(j == 1) mtext(text="all data",side=2,line=1, cex = cex)
}
for(i in 1:number.of.depths){
for(j in 1:n){
rainbow.plot(most.extremes.depth.ET[[j]]$sorted.data.outlier[,,i], col = most.extremes.depth.ET[[j]]$col.outlier[i,], ylim = range(data.to.use.ET[,,j]), ylab="", xlab="", main="", xaxt="n", yaxt="n")
if(j == 1) mtext(text=depth.names[i],side=2,line=1, cex = cex)
}
}
if(print == T){dev.off()}
|
238612f6ba01b0c2b55f7d3b395d9292f05d660c | dd693c1387e8eb1afd5ffb13c7f8736f8b1fac22 | /code.R | 1b89852222950d5c22dbbbd897a353ee75818a8e | [] | no_license | jbajo09/covid19-severity | fade054b99087a71e65ca133bcd6d12134484859 | feb6b5d38327696bfc49c64238037c2ecbeec2dc | refs/heads/main | 2023-08-31T19:37:48.095188 | 2021-10-19T17:29:59 | 2021-10-19T17:29:59 | 364,891,870 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,590 | r | code.R | require('KnowSeq')
# Load dependencies with library() so a missing package fails immediately;
# require() only returns FALSE and the script would then fail later with a
# much less obvious error.
library(caret)
library(dplyr)
library(ROCR)
library(limma)
library(readxl)
library(GEOquery)
library(class)
# Global seed for reproducibility of all sampling/partitioning below.
set.seed(50)
# PREPROCESs.
# GSE156063
GSE156063 <- getGEO("GSE156063", destdir = 'GSE156063')
GSE156063_age <- round(as.numeric(GSE156063$GSE156063_series_matrix.txt.gz$`age:ch1`))
GSE156063_gender <- GSE156063$GSE156063_series_matrix.txt.gz$`gender:ch1`
GSE156063_rpm <- GSE156063$GSE156063_series_matrix.txt.gz$`sars-cov-2 pcr:ch1`
GSE156063_pcr <- GSE156063$GSE156063_series_matrix.txt.gz$`sars-cov-2 rpm:ch1`
GSE156063_labels <- GSE156063$GSE156063_series_matrix.txt.gz$`disease state:ch1` #
GSE156063_counts <- read.csv('GSE156063_swab_gene_counts.csv')
rownames <- GSE156063_counts[,1]
GSE156063_counts[,1] <- NULL
rownames(GSE156063_counts) <- rownames
Annotation_gene_GSE156063 <- getGenesAnnotation(rownames(GSE156063_counts))
Annotation_gene_GSE156063 <- Annotation_gene_GSE156063[order(Annotation_gene_GSE156063$ensembl_gene_id),]
GSE156063_expressionMatrix <- calculateGeneExpressionValues(as.matrix(GSE156063_counts), Annotation_gene_GSE156063, genesNames = TRUE) #
GSE156063_severity <- read_excel('Patient_class_inpatient_outpatient_MickKammetal.xlsx')
GSE156063_outliers <- RNAseqQA(GSE156063_expressionMatrix,toPNG = FALSE, toPDF = FALSE,toRemoval = TRUE)
GSE156063_age <- GSE156063_age[-which(colnames(GSE156063_expressionMatrix)%in%GSE156063_outliers$outliers)]
GSE156063_gender <- GSE156063_gender[-which(colnames(GSE156063_expressionMatrix)%in%GSE156063_outliers$outliers)]
GSE156063_rpm <- GSE156063_rpm[-which(colnames(GSE156063_expressionMatrix)%in%GSE156063_outliers$outliers)]
GSE156063_pcr <- GSE156063_pcr[-which(colnames(GSE156063_expressionMatrix)%in%GSE156063_outliers$outliers)]
GSE156063_labels <- GSE156063_labels[-which(colnames(GSE156063_expressionMatrix)%in%GSE156063_outliers$outliers)]
GSE156063_severity <- GSE156063_severity[-which(GSE156063_severity$CZB_ID%in%GSE156063_outliers$outliers),]
GSE156063_expressionMatrix <- GSE156063_outliers$matrix
GSE156063_severity <- GSE156063_severity[which(GSE156063_severity$Viral_status=='SARS-CoV-2'),]
GSE156063_severity <- GSE156063_severity[which(GSE156063_severity$Patient_class=='Outpatient'|GSE156063_severity$Patient_class=='Inpatient'|GSE156063_severity$Patient_class=='Emergency'),]
GSE156063_age <- GSE156063_age[which(colnames(GSE156063_expressionMatrix)%in%GSE156063_severity$CZB_ID)]
GSE156063_gender <- GSE156063_gender[which(colnames(GSE156063_expressionMatrix)%in%GSE156063_severity$CZB_ID)]
GSE156063_rpm <- GSE156063_rpm[which(colnames(GSE156063_expressionMatrix)%in%GSE156063_severity$CZB_ID)]
GSE156063_pcr <- GSE156063_pcr[which(colnames(GSE156063_expressionMatrix)%in%GSE156063_severity$CZB_ID)]
GSE156063_labels <- GSE156063_labels[which(colnames(GSE156063_expressionMatrix)%in%GSE156063_severity$CZB_ID)]
GSE156063_expressionMatrix <- GSE156063_expressionMatrix[,which(colnames(GSE156063_expressionMatrix)%in%GSE156063_severity$CZB_ID)]
GSE156063_age <- GSE156063_age[order(colnames(GSE156063_expressionMatrix))]
GSE156063_gender <- GSE156063_gender[order(colnames(GSE156063_expressionMatrix))]
GSE156063_rpm <- GSE156063_rpm[order(colnames(GSE156063_expressionMatrix))]
GSE156063_pcr <- GSE156063_pcr[order(colnames(GSE156063_expressionMatrix))]
GSE156063_expressionMatrix <- GSE156063_expressionMatrix[,order(colnames(GSE156063_expressionMatrix))]
GSE156063_severity <- GSE156063_severity[order(GSE156063_severity$CZB_ID),]
GSE156063_labels_severity <- GSE156063_severity$Patient_class
GSE156063_labels_severity[which(GSE156063_severity$ICU=='ICU')]<- 'ICU'
GSE156063_expressionMatrix <- GSE156063_expressionMatrix[,-which(GSE156063_labels_severity=='Emergency')]
GSE156063_age <- GSE156063_age[-which(GSE156063_labels_severity=='Emergency')]
GSE156063_gender <- GSE156063_gender[-which(GSE156063_labels_severity=='Emergency')]
GSE156063_rpm <- GSE156063_rpm[-which(GSE156063_labels_severity=='Emergency')]
GSE156063_pcr <- GSE156063_pcr[-which(GSE156063_labels_severity=='Emergency')]
GSE156063_labels_severity <- GSE156063_labels_severity[-which(GSE156063_labels_severity=='Emergency')]
# GSE162835
GSE162835 <- getGEO("GSE162835", destdir = 'GSE162835')
GSE162835_sup <- as.data.frame(read_excel('sup_info.xlsx'))
GSE162835_age <- GSE162835_sup$...2[3:52]
GSE162835_gender <- GSE162835_sup$...3[3:52]
GSE162835_labels <- GSE162835$GSE162835_series_matrix.txt.gz$`disease:ch1`
for (i in 1:50){
GSE162835_labels[i] <- 'SC2'
}
GSE162835_labels_severity <- GSE162835$GSE162835_series_matrix.txt.gz$`disease severity:ch1`
for (i in 1:length(GSE162835_labels_severity)){
if (GSE162835_labels_severity[i]=='Asymptomatic/Mild'){
GSE162835_labels_severity[i] <- 'Outpatient'
} else if (GSE162835_labels_severity[i]=='Moderate'){
GSE162835_labels_severity[i] <- 'Inpatient'
} else if (GSE162835_labels_severity[i]=='Severe/Critical'){
GSE162835_labels_severity[i] <- 'ICU'
}
}
GSE162835_expressionMatrix <- as.matrix(read_excel('GSE162835_COVID_GEO_processed.xlsx'))
rownames <- GSE162835_expressionMatrix[,1]
rownames(GSE162835_expressionMatrix) <- rownames
GSE162835_expressionMatrix <- GSE162835_expressionMatrix[,2:51]
GSE162835_expressionMatrix <- apply(GSE162835_expressionMatrix,2,as.numeric)
rownames(GSE162835_expressionMatrix) <- rownames
GSE162835_outliers <- RNAseqQA(GSE162835_expressionMatrix,toPNG = FALSE, toPDF = FALSE,toRemoval = TRUE)
GSE162835_labels <- GSE162835_labels[-which(colnames(GSE162835_expressionMatrix) %in% GSE162835_outliers$outliers)]
GSE162835_labels_severity <- GSE162835_labels_severity[-which(colnames(GSE162835_expressionMatrix) %in% GSE162835_outliers$outliers)]
GSE162835_expressionMatrix <- GSE162835_outliers$matrix
GSE162835_age <- GSE162835_age[-c(48,50)]
GSE162835_gender <- GSE162835_gender[-c(48,50)]
# GSE152075
GSE152075 <- getGEO("GSE152075", destdir = 'GSE152075')
GSE152075_age <- GSE152075$GSE152075_series_matrix.txt.gz$`age:ch1`
GSE152075_gender <- GSE152075$GSE152075_series_matrix.txt.gz$`gender:ch1`
GSE152075_labels <- GSE152075$GSE152075_series_matrix.txt.gz$`sars-cov-2 positivity:ch1`
for (i in 1:length(GSE152075_labels)){
if (GSE152075_labels[i] == 'pos'){
GSE152075_labels[i] <- 'SC2'
} else {
GSE152075_labels[i] <- 'Control'
}
}
GSE152075_counts <- as.matrix(read.table('GSE152075_raw_counts_GEO.txt', header =TRUE))
Annotation_gene_GSE152075 <- getGenesAnnotation(rownames(GSE152075_counts), filter = 'external_gene_name')
Annotation_gene_GSE152075 <- Annotation_gene_GSE152075[order(Annotation_gene_GSE152075$external_gene_name),]
GSE152075_counts<- GSE152075_counts[order(rownames(GSE152075_counts)),]
GSE152075_counts_1 <- GSE152075_counts[which(rownames(GSE152075_counts) %in% Annotation_gene_GSE152075[,2]),]
for (i in 1:length(rownames(GSE152075_counts_1))){
rownames(GSE152075_counts_1)[i] <- Annotation_gene_GSE152075[which(Annotation_gene_GSE152075[,2] == rownames(GSE152075_counts_1)[i])[1],1]
}
Annotation_gene_GSE152075_1 <- getGenesAnnotation(rownames(GSE152075_counts_1))
Annotation_gene_GSE152075_1 <- Annotation_gene_GSE152075_1[order(Annotation_gene_GSE152075_1$ensembl_gene_id),]
GSE152075_counts_1<- GSE152075_counts_1[order(rownames(GSE152075_counts_1)),]
GSE152075_expressionMatrix <- calculateGeneExpressionValues(GSE152075_counts_1, Annotation_gene_GSE152075_1, genesNames = TRUE) #
GSE152075_severity <- read.csv('2021-03-19_Rojas.csv')
GSE152075_outliers <- RNAseqQA(GSE152075_expressionMatrix,toPNG = FALSE, toPDF = FALSE,toRemoval = TRUE) #
GSE152075_age <- GSE152075_age[-which(colnames(GSE152075_expressionMatrix)%in%GSE152075_outliers$outliers)]
GSE152075_gender <- GSE152075_gender[-which(colnames(GSE152075_expressionMatrix)%in%GSE152075_outliers$outliers)]
GSE152075_labels <- GSE152075_labels[-which(colnames(GSE152075_expressionMatrix)%in%GSE152075_outliers$outliers)]
GSE152075_severity <- GSE152075_severity[-which(GSE152075_severity$ï..alt_name%in%GSE152075_outliers$outliers),]
GSE152075_expressionMatrix <- GSE152075_outliers$matrix
GSE152075_severity <- GSE152075_severity[which(GSE152075_severity$covid_status=='pos'),]
GSE152075_severity <- GSE152075_severity[which(GSE152075_severity$admitted.to.hospital..not.just.ED..at.time.of.initial.test.=='yes'|GSE152075_severity$admitted.to.hospital..not.just.ED..at.time.of.initial.test.=='no'),]
GSE152075_expressionMatrix_severity <- GSE152075_expressionMatrix[,which(colnames(GSE152075_expressionMatrix)%in%GSE152075_severity$ï..alt_name)]
GSE152075_expressionMatrix_severity <- cbind(GSE152075_expressionMatrix_severity,GSE152075_expressionMatrix[,which(GSE152075_labels=='Control')])
GSE152075_labels_severity <- c(GSE152075_severity$admitted.to.hospital..not.just.ED..at.time.of.initial.test., rep('Control',52))
for (i in 1:length(GSE152075_labels_severity)){
if (GSE152075_labels_severity[i]=='no'){
GSE152075_labels_severity[i]<-'Outpatient'
} else if (GSE152075_labels_severity[i]=='yes'){
GSE152075_labels_severity[i]<-'Inpatient'
}
}
GSE152075_labels_severity[which(GSE152075_severity$ICU=='yes')]<- 'ICU'
# GSE152075 52 CONTROL / 4 UCI / 5 INPATIENT / 43 OUTPATIENT
# GSE162835 3 UCI / 10 INPATIENT / 35 OUTPATIENT
# GSE156063 4 UCI / 4 INPATIENT / 52 OUTPATIENT
I1 <- intersect(rownames(GSE156063_expressionMatrix), rownames(GSE152075_expressionMatrix_severity))
I2 <- intersect(I1, rownames(GSE162835_expressionMatrix))
GSE156063_I <- GSE156063_expressionMatrix[which(rownames(GSE156063_expressionMatrix) %in% I2),]
GSE152075_I <- GSE152075_expressionMatrix_severity[which(rownames(GSE152075_expressionMatrix_severity) %in% I2),]
GSE162835_I <- GSE162835_expressionMatrix[which(rownames(GSE162835_expressionMatrix) %in% I2),]
GSE156063_I <- GSE156063_I[order(rownames(GSE156063_I)),]
GSE152075_I <- GSE152075_I[order(rownames(GSE152075_I)),]
labels <- c(GSE156063_labels_severity,GSE152075_labels_severity,GSE162835_labels_severity)
expression_matrix <- cbind(GSE156063_I,GSE152075_I,GSE162835_I)
# NORMALIZATION BETWEEN ARRAYS
# Scale-normalise samples so they are comparable across datasets
# (normalizeBetweenArrays is presumably limma's -- confirm which package is
# attached earlier in this script).
expression_matrix_norm_scale <- normalizeBetweenArrays(expression_matrix, method = 'scale')
# BATCH EFFECT
# Remove cross-dataset batch effects via surrogate variable analysis ('sva').
expression_matrix_norm_scale_fix <- batchEffectRemoval(expression_matrix_norm_scale,labels, method = 'sva')
#OUTLIERS REMOVAL
outliers_scale <- RNAseqQA(expression_matrix_norm_scale_fix,toRemoval = TRUE, toPNG = FALSE, toPDF = FALSE) #3
# Guard against the empty-outlier case: in R, `m[, -which(...)]` with a
# zero-length which() selects NO columns (m[, integer(0)]), silently emptying
# the data and the label vector. Compute the index once and branch explicitly.
out_idx_scale <- which(colnames(expression_matrix_norm_scale_fix) %in% outliers_scale$outliers)
if (length(out_idx_scale) > 0) {
expression_matrix_norm_scale_fix_out <- expression_matrix_norm_scale_fix[, -out_idx_scale]
labels_scale <- labels[-out_idx_scale]
} else {
expression_matrix_norm_scale_fix_out <- expression_matrix_norm_scale_fix
labels_scale <- labels
}
#train-test
# Stratified 80/20 split (createDataPartition is presumably caret's; it
# preserves class proportions).
Index_train_test_scale <- createDataPartition(labels_scale, p = .80, list = FALSE, times = 1)
train_labels_scale <- labels_scale[Index_train_test_scale]
test_labels_scale <- labels_scale[-Index_train_test_scale]
# The expression matrix is genes x samples, hence the column subsetting.
train_matrix_scale <- expression_matrix_norm_scale_fix_out[,Index_train_test_scale]
test_matrix_scale <- expression_matrix_norm_scale_fix_out[,-Index_train_test_scale]
#DEGS
# Differential expression on 5 resampled 80% folds of the training set; only
# genes significant in EVERY fold are kept (intersection below).
folds <-5
cvIndex_scale <- createDataPartition(train_labels_scale, p = .80, list = FALSE, times = folds)
cvResults_scale <- list()
cvDEGs_scale <- list ()
for (i in seq_len(folds)){
cvResults_scale[[i]] <- DEGsExtraction(train_matrix_scale[,cvIndex_scale[,i]], as.factor(train_labels_scale[cvIndex_scale[,i]]), lfc=1, cov=2, pvalue = 0.05, number = Inf)
cvDEGs_scale[[i]] <- rownames(cvResults_scale[[i]]$DEGsMatrix)
}
DEGs_scale <- Reduce(f='intersect', cvDEGs_scale) # lfc 1 cov 2 pvalue 0.05 #136 genes
#feature selection algorithms
# Rank the consensus DEGs by minimum-redundancy-maximum-relevance (mRMR).
gene_mrmr_scale <- featureSelection(t(train_matrix_scale),train_labels_scale,DEGs_scale, mode ='mrmr')
#k-nn
set.seed(200)
# 5-fold CV over increasing numbers of top-ranked genes, then evaluation of
# the chosen k on the held-out 20%.
knn_train_mrmr_scale <- knn_trn(t(train_matrix_scale), as.factor(train_labels_scale), names(gene_mrmr_scale), 5)
knn_test_mrmr_scale <- knn_test(t(train_matrix_scale), as.factor(train_labels_scale), t(test_matrix_scale), as.factor(test_labels_scale), names(gene_mrmr_scale), bestK = knn_train_mrmr_scale$bestK)
#validation plot
# CV performance as a function of the number of genes (first 15 shown).
plot(knn_train_mrmr_scale$accuracyInfo$meanAccuracy[1:15], type = 'l', col= 'black', ylab='Metric Performance', xlab='Genes', lwd=2, ylim = c(0.89,1), panel.first = grid(col='gray45'),cex.axis=1.2,cex.lab=1.2)
lines(knn_train_mrmr_scale$sensitivityInfo$meanSensitivity[1:15], col='blue', lwd=2, lty=2)
lines(knn_train_mrmr_scale$specificityInfo$meanSpecificity[1:15], col='#FF8B00', lwd=2, lty=4)
lines(knn_train_mrmr_scale$F1Info$meanF1[1:15], col='red', lwd=2, lty=4)
legend(x=11.9 ,y =0.9137, c('Accuracy', 'Sensitivity','Specificity','F1-Score'), lty = c(1,2,4,5), col = c('black','blue','#FF8B00','red'), cex=1.2)
#LOOCV
# Leave-one-out CV on the full outlier-free cohort using the top 4 mRMR genes.
# Style note: library(class) would be preferable to require() here.
require(class)
LOOCV <- knn.cv(t(expression_matrix_norm_scale_fix_out[which(rownames(expression_matrix_norm_scale_fix_out) %in% names(gene_mrmr_scale)[1:4]),]), cl = labels_scale, k=7, use.all = TRUE)
confusionMatrix(data = LOOCV, reference = as.factor(labels_scale))
dataPlot(confusionMatrix(data = LOOCV, reference = as.factor(labels_scale))$table, labels = labels_scale ,mode = "confusionMatrix",toPNG = FALSE, toPDF = FALSE)
# OVA knn LOOCV AUC
# One-vs-all ROC curves from leave-one-out cross-validated k-NN, one curve per
# class. NOTE(review): knn.cv returns hard class labels, not probabilities, so
# each "score" is 0/1 and every ROC curve has a single interior operating
# point -- consider prob = TRUE for a proper curve.
response <- as.factor(labels_scale)
aucs <- rep(NA, length(levels(response))) # store AUCs
legendLabels <- as.character()
colours <- c('red','blue','green','black')
par(oma = c(5, 1, 0, 1))
# Empty canvas; per-class curves are drawn inside the loop below.
plot(x=NA, y=NA, xlim=c(0,1), ylim=c(0,1),
ylab="Sensitivity",
xlab="1 - Specificity",
bty='n',
cex.axis=1.3,
cex.lab=1.3)
for (i in seq_along(levels(response))) {
cur.class <- levels(response)[i]
# Binarise the problem: current class vs everything else.
binaryTest.labels <- as.factor(labels_scale == cur.class)
binary_LOOCV <- knn.cv(t(expression_matrix_norm_scale_fix_out[which(rownames(expression_matrix_norm_scale_fix_out) %in% names(gene_mrmr_scale)[1:4]),]), cl = binaryTest.labels, k=7)
score <- binary_LOOCV
# Convert factor predictions and labels to 0/1 numerics for ROCR.
score <- as.vector(score)
score[score=='FALSE'] <- 0
score[score=='TRUE'] <- 1
binaryTest.labels <- as.vector(binaryTest.labels)
binaryTest.labels[binaryTest.labels=='FALSE'] <- 0
binaryTest.labels[binaryTest.labels=='TRUE'] <- 1
pred <- prediction(as.numeric(score), as.numeric(binaryTest.labels))
perf <- performance(pred, "tpr", "fpr")
roc.x <- unlist(perf@x.values)
roc.y <- unlist(perf@y.values)
lines(roc.y ~ roc.x, col = colours[i], lwd = 2)
# store AUC (ROC AUC, via ROCR's "auc" measure)
auc <- performance(pred, "auc")
auc <- unlist(slot(auc, "y.values"))
aucs[i] <- auc
legendLabels[i] <- paste(levels(response)[i], " AUC: ",format(round(aucs[i], 4), nsmall = 3),sep = "")
}
# BUG FIX: the AUCs above come from ROC curves (performance(pred, "auc") is
# the area under the ROC curve), not precision-recall curves; the message
# previously said "precision-recall" and was misleading.
print(paste0("Mean AUC under the ROC curve is: ", round(mean(aucs), 2)))
# Chance-level diagonal for the ROC plot.
lines(x=c(0,1), c(0,1))
# Legend entries reordered as c(1,4,3,2) -- presumably for display grouping;
# verify the ordering still matches the factor levels if classes change.
legend(x=0.61 ,y =0.305, legendLabels[c(1,4,3,2)], lty=1, ncol= 1,inset = c(0,0), col = colours, cex = 1.3,lwd=3)
#T-SNE
# t-SNE embeddings of the top 4 mRMR genes at three stages: raw merged matrix,
# after scale normalization, and after batch correction + outlier removal.
require(M3C)
tsne(expression_matrix[which(rownames(expression_matrix)%in%names(gene_mrmr_scale)[1:4]),],labels=as.factor(labels),controlscale=TRUE, scale=3, colvec = c('red','blue','green','black'))
tsne(expression_matrix_norm_scale[which(rownames(expression_matrix_norm_scale)%in%names(gene_mrmr_scale)[1:4]),],labels=as.factor(labels),controlscale=TRUE, scale=3, colvec = c('red','blue','green','black'))
tsne(expression_matrix_norm_scale_fix_out[which(rownames(expression_matrix_norm_scale_fix_out)%in%names(gene_mrmr_scale)[1:4]),],labels=as.factor(labels_scale),controlscale=TRUE, scale=3, colvec = c('red','blue','green','black'))
|
558307b8502985d02da0de8711be39df0745585e | 2253c85e1c90b54df4b69ad40b6ce9b207c76415 | /R/utils.R | 8e67918a2337da33ef3b8fbe4a1aa417fde8df85 | [
"MIT"
] | permissive | djnavarro/hugodown | a204e1709df31ac1dae81f895bf3e89191f93e39 | 168a361518f5450e498d0fa9e34eea93f0aa677d | refs/heads/master | 2023-07-02T11:17:59.870024 | 2021-07-04T23:50:13 | 2021-07-04T23:50:13 | 270,511,218 | 0 | 0 | NOASSERTION | 2020-07-10T05:29:44 | 2020-06-08T03:15:26 | R | UTF-8 | R | false | false | 1,036 | r | utils.R | first_path <- function(paths) {
# Return the first candidate path that exists on disk (file_exists is
# presumably fs::file_exists -- confirm against the package imports).
for (path in paths) {
if (file_exists(path)) {
return(path)
}
}
# None of the candidates exist: raise a structured error listing them all.
abort(c(
"Can't find any of the following candidate paths",
paths
))
}
# copies from withr
# Set, prefix, suffix or unset environment variables (copied from withr).
#
# `envs` is a named character vector; NA values mean "unset this variable".
# `action` controls how a new value combines with an existing one:
# "replace" (default), "prefix" (new value first) or "suffix" (old value
# first). Invisibly returns the previous values so callers can restore them.
set_envvar <- function(envs, action = "replace") {
  if (length(envs) == 0) {
    return()
  }
  stopifnot(is_named(envs))
  stopifnot(is.character(action), length(action) == 1)
  action <- match.arg(action, c("replace", "prefix", "suffix"))

  # If a name appears more than once, only its last entry wins.
  envs <- envs[!duplicated(names(envs), fromLast = TRUE)]

  previous <- Sys.getenv(names(envs), names = TRUE, unset = NA)
  wants_value <- !is.na(envs)
  overlap <- wants_value & !is.na(previous)

  # Combine with the pre-existing value where both are set.
  if (any(overlap)) {
    envs[overlap] <- switch(action,
      prefix = paste(envs[overlap], previous[overlap]),
      suffix = paste(previous[overlap], envs[overlap]),
      envs[overlap]
    )
  }

  if (any(wants_value)) {
    do.call("Sys.setenv", as.list(envs[wants_value]))
  }
  if (any(!wants_value)) {
    Sys.unsetenv(names(envs)[!wants_value])
  }
  invisible(previous)
}
|
ec05ec08f08fe016b5ea35f8e8a598e254e6ae43 | a7ea6a245d9bd4d8b80fbd1705b1d33f191697aa | /man/make.all.bedassle.plots.Rd | 7dc3bfcbe341b35ebe3a0ffa7175ab3eef79543d | [] | no_license | gbradburd/bedassle | 9cd49d60fc9ff28bcb02806e1f3336ba54f80423 | 42551fe501ac4cca9d7570dd7028a912672267d2 | refs/heads/master | 2022-06-03T11:45:12.335260 | 2022-05-17T15:44:54 | 2022-05-17T15:44:54 | 195,564,476 | 4 | 2 | null | null | null | null | UTF-8 | R | false | true | 2,477 | rd | make.all.bedassle.plots.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.output.R
\name{make.all.bedassle.plots}
\alias{make.all.bedassle.plots}
\title{Make output plots}
\usage{
make.all.bedassle.plots(
results.files,
data.block.file,
prefix,
chain.cols = NULL
)
}
\arguments{
\item{results.files}{A \code{vector} of the filenames (in quotes,
with the full file path) of the posterior results files
output by the different chains of a \code{BEDASSLE} run.
Can also be a single filename if user wants to visualize
only a single run.}
\item{data.block.file}{The filename (in quotes, with the full file
path) of the data.block R object (.Robj) file output by a
\code{BEDASSLE} run.}
\item{prefix}{A character vector to be prepended to all figures.}
\item{chain.cols}{A \code{vector} of colors to be used in plotting
results from different MCMC chains. There should be one
color specified for each chain. If \code{NULL}, the plots
will use the required set or subset of 12 pre-specified
colors. If there are more than 12 chains, users must supply
their own colors.}
}
\value{
This function has only invisible return values.
}
\description{
\code{make.all.bedassle.plots} makes figures from the output from a
BEDASSLE analysis.
}
\details{
This function takes the file output from a BEDASSLE analysis and
generates a number of plots for visualizing results and
diagnosing MCMC performance.
This function produces a variety of plots that can be
useful for visualizing results or diagnosing MCMC performance.
The plots made are by no means exhaustive, and users are
encouraged to make further plots, or customize these plots as they
see fit. The plots generated (as .pdf files) are:
\itemize{
\item model.fit.CIs - A plot of the sample allelic covariance
shown with the 95\% credible interval of the parametric
covariance for each entry in the matrix. Only generated
if either the \code{geoDist} or \code{envDist} arguments
in the \code{run.bedassle} function call are specified. One
plot is produced for each chain.
\item Trace plots - Plots of parameter values over the MCMC.
\itemize{
\item lpd - A plot of the log posterior probability over the MCMC.
\item nuggets - A plot of estimates of the nugget parameters
over the MCMC.
\item alpha parameters - Plots of estimates of the
various parameters (all or some of {alpha0,alphaD,alphaE,alpha2},
depending on the model specified) over the MCMC.
}
}
}
|
ca01ecee14e0a03674bf456b7ab8f79acb5f5781 | dba28fe9f7195183874b2a14d546709a272031bb | /R/ped_subgroups.R | b6ccd4eb9f0c1a23eceb3497902c42bd55e1e0b8 | [] | no_license | cran/pedtools | f7cee3bd85e75eafa30d8a4ff093747219160187 | c943d35b91832c11c181a8e2840e126fe4abf269 | refs/heads/master | 2023-09-01T08:51:40.932153 | 2023-08-24T12:30:02 | 2023-08-24T13:32:08 | 236,637,879 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,226 | r | ped_subgroups.R | #' Pedigree subgroups
#'
#' A collection of utility functions for identifying pedigree members with
#' certain properties.
#'
#' @param x A [ped()] object or a list of such.
#' @param id,ids A character (or coercible to such) with one or several ID
#' labels.
#' @param inclusive A logical indicating whether an individual should be counted
#' among his or her own ancestors/descendants
#' @param internal A logical indicating whether `id` (or `ids`) refers to the
#' internal order.
#' @param degree,removal Non-negative integers.
#' @param half A logical or NA. If TRUE (resp. FALSE), only half (resp. full)
#'   siblings/cousins/nephews/nieces are returned. If NA, both categories are
#'   included.
#'
#' @return The functions `founders`, `nonfounders`, `males`, `females`, `leaves`
#' each return a vector containing the IDs of all pedigree members with the
#' wanted property. (Recall that a founder is a member without parents in the
#' pedigree, and that a leaf is a member without children in the pedigree.)
#'
#' The functions `father`, `mother`, `cousins`, `grandparents`,
#' `nephews_nieces`, `children`, `parents`, `siblings`, `spouses`, `unrelated`,
#' each returns a vector containing the IDs of all pedigree members having the
#' specified relationship with `id`.
#'
#' The commands `ancestors(x, id)` and `descendants(x, id)` return vectors
#' containing the IDs of all ancestors (resp. descendants) of the individual
#' `id` within the pedigree `x`. If `inclusive = TRUE`, `id` is included in the
#' output, otherwise not.
#'
#' For `commonAncestors(x, ids)` and `commonDescendants(x, ids)`, the output is
#' a vector containing the IDs of common ancestors (descendants) to all of
#' `ids`.
#'
#' Finally, `descentPaths(x, ids)` returns a list of lists, containing all
#' pedigree paths descending from each individual in `ids` (by default all
#' founders).
#' @author Magnus Dehli Vigeland
#'
#' @examples
#'
#' x = ped(id = 2:9,
#' fid = c(0,0,2,0,4,4,0,2),
#' mid = c(0,0,3,0,5,5,0,8),
#' sex = c(1,2,1,2,1,2,2,2))
#'
#' spouses(x, id = 2) # 3, 8
#' children(x, 2) # 4, 9
#' descendants(x, 2) # 4, 6, 7, 9
#' siblings(x, 4) # 9 (full or half)
#' unrelated(x, 4) # 5, 8
#' father(x, 4) # 2
#' mother(x, 4) # 3
#'
#' siblings(x, 4, half = FALSE) # none
#' siblings(x, 4, half = TRUE) # 9
#'
#' leaves(x) # 6, 7, 9
#' founders(x) # 2, 3, 5, 8
#'
#' @name ped_subgroups
NULL
#' @rdname ped_subgroups
#' @export
founders = function(x, internal = FALSE) {
  # A list of pedigrees: pool the founders of each component.
  if (is.pedList(x)) {
    if (internal)
      stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
    compFounders = lapply(x, function(comp) founders(comp, internal = FALSE))
    return(unname(unlist(compFounders)))
  }
  # A founder is a member without a father (FIDX == 0) in the pedigree.
  noParents = x$FIDX == 0
  if (internal)
    which(noParents)
  else
    labels.ped(x)[noParents]
}
#' @rdname ped_subgroups
#' @export
nonfounders = function(x, internal = FALSE) {
# For a list of pedigrees, pool the nonfounders of each component
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
return(unname(unlist(lapply(x, nonfounders, internal = FALSE))))
}
# Nonfounders have a father index (and hence both parents) in the pedigree
isNF = x$FIDX > 0
if(internal) which(isNF) else labels.ped(x)[isNF]
}
#' @rdname ped_subgroups
#' @export
leaves = function(x, internal = FALSE) {
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
return(unname(unlist(lapply(x, leaves, internal = FALSE))))
}
# A leaf is anyone who appears in nobody's FIDX/MIDX; a singleton is trivially
# a leaf. Zeros in FIDX/MIDX are harmless in the negative-index expression.
lvs = if(is.singleton(x)) 1L else (1:pedsize(x))[-c(x$FIDX, x$MIDX)]
if(internal) lvs else labels.ped(x)[lvs]
}
#' @rdname ped_subgroups
#' @export
males = function(x, internal = FALSE) {
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
return(unname(unlist(lapply(x, males, internal = FALSE))))
}
# SEX == 1 encodes male
m = x$SEX == 1
if(internal) which(m) else labels.ped(x)[m]
}
#' @rdname ped_subgroups
#' @export
females = function(x, internal = FALSE) {
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
return(unname(unlist(lapply(x, females, internal = FALSE))))
}
# SEX == 2 encodes female
f = x$SEX == 2
if(internal) which(f) else labels.ped(x)[f]
}
#' @rdname ped_subgroups
#' @export
typedMembers = function(x, internal = FALSE) {
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
return(unname(unlist(lapply(x, typedMembers, internal = FALSE))))
}
nMark = nMarkers(x)
labs = x$ID
# No markers attached: nobody is typed
if(nMark == 0)
return(if(internal) integer(0) else character(0))
# Flatten all marker alleles; a member is "typed" if any allele across all
# markers is nonzero (0 encodes a missing allele)
allelematrix = unlist(x$MARKERS)
typed = .rowSums(allelematrix, m = length(labs), n = 2*nMark) > 0
# dim(allelematrix) = c(pedsize(x), 2*nMark)
# typed = rowSums(allelematrix) > 0
if(internal) which(typed) else labs[typed]
}
#' @rdname ped_subgroups
#' @export
untypedMembers = function(x, internal = FALSE) {
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
return(unname(unlist(lapply(x, untypedMembers, internal = FALSE))))
}
nMark = nMarkers(x)
labs = x$ID
# No markers attached: everyone is untyped
if(nMark == 0)
return(if(internal) seq_along(labs) else labs)
# A member is "untyped" when every allele over all markers is 0 (missing)
allelematrix = unlist(x$MARKERS)
untyped = .rowSums(allelematrix, m = length(labs), n = 2*nMark) == 0
# dim(allelematrix) = c(pedsize(x), 2*nMark)
# untyped = rowSums(allelematrix) == 0
if(internal) which(untyped) else labs[untyped]
}
#' @rdname ped_subgroups
#' @export
father = function(x, id, internal = FALSE) {
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
comp = getComponent(x, id, checkUnique = TRUE, errorIfUnknown = TRUE)
return(father(x[[comp]], id, internal = FALSE))
}
if(!internal)
id = internalID(x, id)
# For a founder, FIDX is 0; labels.ped(x)[0] then yields character(0)
fa = x$FIDX[id]
if(internal) fa else labels.ped(x)[fa]
}
#' @rdname ped_subgroups
#' @export
mother = function(x, id, internal = FALSE) {
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
comp = getComponent(x, id, checkUnique = TRUE, errorIfUnknown = TRUE)
return(mother(x[[comp]], id, internal = FALSE))
}
if(!internal)
id = internalID(x, id)
# For a founder, MIDX is 0; labels.ped(x)[0] then yields character(0)
mo = x$MIDX[id]
if(internal) mo else labels.ped(x)[mo]
}
#' @rdname ped_subgroups
#' @export
children = function(x, id, internal = FALSE) {
if(length(id) != 1)
stop2("`id` must have length 1")
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
comp = getComponent(x, id, checkUnique = TRUE, errorIfUnknown = TRUE)
return(children(x[[comp]], id, internal = FALSE))
}
if(!internal)
id = internalID(x, id)
# A child is anyone whose father index or mother index equals `id`
ch = (x$FIDX == id | x$MIDX == id)
if(internal) which(ch) else labels.ped(x)[ch]
}
#' @rdname ped_subgroups
#' @export
offspring = children
#' @rdname ped_subgroups
#' @export
spouses = function(x, id, internal = FALSE) {
if(length(id) != 1)
stop2("`id` must have length 1")
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
comp = getComponent(x, id, checkUnique = TRUE, errorIfUnknown = TRUE)
return(spouses(x[[comp]], id, internal = FALSE))
}
# Returns a vector containing all individuals sharing offspring with <id>.
if(!internal)
id = internalID(x, id)
# Dispatch on sex code (0 = unknown, 1 = male, 2 = female); unknown sex
# checks both parental roles
spous = switch(x$SEX[id] + 1,
c(x$MIDX[x$FIDX == id], x$FIDX[x$MIDX == id]), # sex = 0
x$MIDX[x$FIDX == id], # sex = 1
x$FIDX[x$MIDX == id]) # sex = 2
spous_uniq = unique.default(spous)
if(internal) spous_uniq else labels.ped(x)[spous_uniq]
}
#' @rdname ped_subgroups
#' @export
unrelated = function(x, id, internal = FALSE) {
if(length(id) != 1)
stop2("`id` must have length 1")
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
comp = getComponent(x, id, checkUnique = TRUE, errorIfUnknown = TRUE)
unr = unrelated(x[[comp]], id, internal = FALSE)
# Add indivs from all other comps (distinct components are unrelated by def.)
unr = c(unr, unname(unlist(labels(x[-comp]))))
return(unr)
}
if(!internal)
id = internalID(x, id)
# Relatives of `id` = everyone descending from some ancestor of `id`
# (both sets taken inclusively); the unrelated set is the complement
ancs = ancestors(x, id, inclusive = TRUE, internal = TRUE)
rel = lapply(ancs, function(a) descendants(x, a, inclusive = TRUE, internal = TRUE))
unrel = setdiff(1:pedsize(x), unlist(rel))
if(internal) unrel else labels.ped(x)[unrel]
}
#' @rdname ped_subgroups
#' @export
parents = function(x, id, internal = FALSE) {
  # Delegate to the component containing `id` when given a list of pedigrees.
  if (is.pedList(x)) {
    if (internal)
      stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
    comp = getComponent(x, id, checkUnique = TRUE, errorIfUnknown = TRUE)
    return(parents(x[[comp]], id, internal = FALSE))
  }
  # Father first, then mother; founders contribute index 0, which drops out
  # of the label lookup.
  idx = if (internal) id else internalID(x, id)
  parIdx = c(x$FIDX[idx], x$MIDX[idx])
  if (internal) parIdx else labels.ped(x)[parIdx]
}
#' @rdname ped_subgroups
#' @export
grandparents = function(x, id, degree = 2, internal = FALSE) {
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
comp = getComponent(x, id, checkUnique = TRUE, errorIfUnknown = TRUE)
return(grandparents(x[[comp]], id, degree = degree, internal = FALSE))
}
if(!internal)
id = internalID(x, id)
# Climb `degree` generations, doubling the index set each step. Missing
# ancestors show up as 0 in the internal result (callers such as cousins()
# filter gp > 0); the label lookup drops them automatically.
nextgen = id
for(i in seq_len(degree))
nextgen = c(x$FIDX[nextgen], x$MIDX[nextgen])
if(internal) nextgen else labels.ped(x)[nextgen]
}
#' @rdname ped_subgroups
#' @export
siblings = function(x, id, half = NA, internal = FALSE) {
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
comp = getComponent(x, id, checkUnique = TRUE, errorIfUnknown = TRUE)
return(siblings(x[[comp]], id, half = half, internal = FALSE))
}
if(!internal)
id = internalID(x, id)
fa = x$FIDX[id]
mo = x$MIDX[id]
# A founder has no parents in the pedigree, hence no detectable siblings
if(fa == 0 && mo == 0)
return(if(internal) integer(0) else character(0))
samefather = x$FIDX == fa
samemother = x$MIDX == mo
# half = TRUE: exactly one shared parent (xor); FALSE: both; NA: at least one
sib =
if(isTRUE(half)) xor(samefather, samemother) # half only
else if(isFALSE(half)) samefather & samemother # full only
else if(is.na(half)) samefather | samemother # either
sib[id] = FALSE
if(internal) which(sib) else labels.ped(x)[sib]
}
# TODO: Review this before re-export
# Cousins of the given degree/removal: climb `degree` generations, take the
# siblings of those ancestors, then descend degree + removal generations.
cousins = function(x, id, degree = 1, removal = 0, half = NA, internal = FALSE) {
if (!internal) id = internalID(x, id)
gp = grandparents(x, id, degree = degree, internal = TRUE)
# Drop 0 entries corresponding to missing ancestors
gp = gp[gp > 0]
if(length(gp) == 0)
return(if(internal) integer(0) else character(0))
uncles = unique.default(unlist(lapply(gp, function(a)
siblings(x, a, half = half, internal = TRUE))))
cous = uncles
for (i in seq_len(degree + removal))
cous = unique.default(unlist(lapply(cous, children, x = x, internal = TRUE)))
if (internal) cous else labels.ped(x)[cous]
}
#' @rdname ped_subgroups
#' @export
nephews_nieces = function(x, id, removal = 1, half = NA, internal = FALSE) {
# Nephews/nieces are degree-0 cousins with the given removal
# (removal = 1 gives children of siblings)
cousins(x, id, degree = 0, removal = removal, half = half, internal = internal)
}
#' @rdname ped_subgroups
#' @export
ancestors = function(x, id, inclusive = FALSE, internal = FALSE) {
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
comps = getComponent(x, id, checkUnique = TRUE, errorIfUnknown = TRUE)
ancList = lapply(unique.default(comps), function(co) {
idsComp = id[comps == co]
ancestors(x[[co]], idsComp, inclusive = inclusive, internal = FALSE)
})
return(unlist(ancList))
}
# climbs upwards storing parents iteratively. (Not documented: Accepts id of length > 1)
if(!internal)
id = internalID(x, id)
FIDX = x$FIDX
MIDX = x$MIDX
ancest = if(inclusive) id else integer(0)
# One generation up; zeros (missing parents) are filtered out each step
up1 = c(FIDX[id], MIDX[id])
up1 = up1[up1 > 0]
while (length(up1)) {
ancest = c(ancest, up1)
up1 = c(FIDX[up1], MIDX[up1])
up1 = up1[up1 > 0]
}
# Deduplicate (paths may rejoin in looped pedigrees) and sort
ancest = .mysortInt(unique.default(ancest))
if(internal) ancest else labels.ped(x)[ancest]
}
#' @rdname ped_subgroups
#' @export
commonAncestors = function(x, ids, inclusive = FALSE, internal = FALSE) {
if(length(ids) < 2)
stop2("Argument `ids` must have length at least 2")
# Iteratively intersect ancestor sets, stopping early once empty
anc = ancestors(x, ids[1], inclusive = inclusive, internal = internal)
for(id in ids[-1]) {
if(length(anc) == 0)
break
newanc = ancestors(x, id, inclusive = inclusive, internal = internal)
anc = .myintersect(anc, newanc)
}
anc
}
#' @rdname ped_subgroups
#' @export
descendants = function(x, id, inclusive = FALSE, internal = FALSE) {
if(is.pedList(x)) {
if(internal)
stop2("Argument `internal` cannot be TRUE when `x` is a pedlist")
comps = getComponent(x, id, checkUnique = TRUE, errorIfUnknown = TRUE)
ancList = lapply(unique.default(comps), function(co) {
idsComp = id[comps == co]
descendants(x[[co]], idsComp, inclusive = inclusive, internal = FALSE)
})
return(unlist(ancList))
}
if(!internal)
id = internalID(x, id)
FIDX = x$FIDX
MIDX = x$MIDX
desc = if(inclusive) id else integer()
# Generation-by-generation walk downwards: repeatedly collect everyone whose
# father or mother belongs to the previous generation
nextoffs = id
while(length(nextoffs)) {
nextoffs = which(FIDX %in% nextoffs | MIDX %in% nextoffs)
desc = c(desc, nextoffs)
}
# Deduplicate (branches may rejoin) and sort
desc = .mysortInt(unique.default(desc))
if(internal) desc else labels.ped(x)[desc]
}
#' @rdname ped_subgroups
#' @export
commonDescendants = function(x, ids, inclusive = FALSE, internal = FALSE) {
if(length(ids) < 2)
stop2("Argument `ids` must have length at least 2")
# Iteratively intersect descendant sets, stopping early once empty
desc = descendants(x, ids[1], inclusive = inclusive, internal = internal)
for(id in ids[-1]) {
if(length(desc) == 0)
break
newdesc = descendants(x, id, inclusive = inclusive, internal = internal)
desc = .myintersect(desc, newdesc)
}
desc
}
#' @rdname ped_subgroups
#' @export
descentPaths = function(x, ids = founders(x), internal = FALSE) {
if(!internal) {
idsInt = internalID(x, ids)
names(idsInt) = ids # ensures names on output list
labs = labels(x)
}
else
idsInt = ids
# Precompute the children of every pedigree member (internal indices)
offs = lapply(1:pedsize(x), children, x = x, internal = TRUE)
lapply(idsInt, function(id) {
# Start with the trivial path; extend each path by every child of its last
# member until no path can be extended further (all paths end in leaves)
res = list(id)
while (TRUE) {
# Children of the current endpoint of each path
newoffs = offs[vapply(res, function(path) path[length(path)], 1)]
if (length(unlist(newoffs)) == 0)
break
nextstep = lapply(seq_along(res), function(r)
if (length(newoffs[[r]]) == 0) res[r]
else lapply(newoffs[[r]], function(kid) c(res[[r]], kid)))
res = unlist(nextstep, recursive = FALSE)
}
# Translate internal indices back to ID labels if requested
if (!internal)
res = lapply(res, function(v) labs[v])
res
})
}
.descentPaths = descentPaths
|
693c4780ef7b4324ea6dac6fd7626527f884e1c7 | 6d17f3cead3800a97a190d764322a5c5383b5b62 | /ejercicio1.R | 4c786bff83f1c2b910023d0b100a6ce4b12be305 | [] | no_license | axelmora/ejercicios-svm | e472f34a04b91a9c6a37a7100de6608196c8ef36 | 289eb1ac87a040bd6dc13ebf949b9b1b1fef99b9 | refs/heads/master | 2021-01-20T07:03:21.447132 | 2017-05-01T19:07:45 | 2017-05-01T19:07:45 | 89,951,582 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,099 | r | ejercicio1.R | install.packages("e1071")
library(e1071)
# Create 11 data points, labelled 'a', 'b' or 'c' (note: one point has the
# third class 'c')
data <- seq(1,11)
classes <- c('b','b','b','b','a','a','a','a','b','b','c')
# Fit an SVM on the data: linear kernel, C-classification
mysvm <- svm (data, classes, type='C', kernel='linear')
# Predict the class of the 11 training points
pred1 <- predict (mysvm, data)
t1 <- table(pred1, classes)
# Model variant: polynomial kernel with degree 2
mysvm <- svm (data, classes, type='C', kernel='polynomial', degree=2)
pred2 <- predict (mysvm, data)
t2 <- table(pred2, classes)
# Second variant: radial kernel with gamma = 0.1
mysvm <- svm (data, classes, type='C', kernel='radial', gamma=0.1)
pred3 <- predict (mysvm, data)
t3 <- table(pred3, classes)
# Third variant: radial kernel, gamma = 0.1 and cost = 100
mysvm <- svm (data, classes, type='C', kernel='radial', gamma=0.1, cost=100)
pred4 <- predict (mysvm, data)
t4 <- table(pred4, classes)
# The confusion tables t1..t4 show how prediction accuracy on the training
# data varies with the SVM configuration
|
b8d281682f566635c3357b67334e954fcffded63 | bfed60614ab34dbf78d3c26b3006151377c42881 | /NF-downstream_analysis/bin/not_used/monocle3.R | d150367ec2963d38e45b23af41ad10c80eb6db86 | [] | no_license | alexthiery/10x_neural_plate_border | 862976d73ced5a3532e937414ef276493dc94247 | 2ad231c54882b7eecdec264969f1588a858a014a | refs/heads/master | 2023-08-10T12:34:30.329161 | 2023-08-07T13:25:36 | 2023-08-07T13:25:36 | 292,818,769 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,276 | r | monocle3.R | library(Seurat)
library(scHelper)
library(monocle3)
library(igraph)
# Load the pre-processed Seurat object produced by the upstream
# state-classification step of the pipeline.
seurat_data <- readRDS('./output/NF-downstream_analysis_stacas/filtered_seurat/seurat/state_classification/rds_files/contamination_cell_state_classification.RDS')
# monocle3 expects a 'gene_short_name' column in the gene metadata.
gene_annotation = seurat_data@assays$RNA@meta.features
gene_annotation$gene_short_name = rownames(gene_annotation)
# Build a cell_data_set from the raw RNA counts plus cell/gene metadata.
cds <- new_cell_data_set(seurat_data@assays$RNA@counts,
                         cell_metadata = seurat_data@meta.data,
                         gene_metadata = gene_annotation)
cds <- preprocess_cds(cds, num_dim = 100)
# Reuse the UMAP embedding already computed by Seurat instead of
# recomputing it in monocle3.
reducedDims(cds) <- list(UMAP = seurat_data@reductions$umap@cell.embeddings)
cds <- cluster_cells(cds)
# Carry the scHelper cell-type labels over onto the cell_data_set.
colData(cds)$scHelper_cell_type = factor(seurat_data@meta.data$scHelper_cell_type)
cds <- learn_graph(cds, use_partition = T)
# Trajectory graph coloured by cell type (legend suppressed).
plot_cells(cds,
           color_cells_by = "scHelper_cell_type",
           label_groups_by_cluster=FALSE,
           label_leaves=FALSE,
           label_branch_points=T,
           cell_size = 0.5,
           label_cell_groups=F)+NoLegend()
# NOTE(review): order_cells() without a root argument prompts the user
# interactively to pick root node(s), so this script cannot run unattended.
cds <- order_cells(cds)
# Same embedding coloured by the resulting pseudotime.
plot_cells(cds,
           color_cells_by = "pseudotime",
           label_cell_groups=FALSE,
           label_leaves=FALSE,
           label_branch_points=FALSE,
           graph_label_size=1.5)
|
f8fc2f488ce63d68edf2b6cf34e2c31b740b666d | 3d9984199a5760d9f7b935c6805a625c5f086c3f | /R/RcppExports.R | 9d5c6e7c6b9a44dea88fd4ff72e4661bf610bebb | [] | no_license | TomMayo/M3Ddevel | c8e374c6fe70f77da27472e278e2531407cc899a | 28c6f5d1d7e2939a652948d80d7dc6c134a7c62e | refs/heads/master | 2021-01-18T21:36:18.847724 | 2016-06-12T15:44:59 | 2016-06-12T15:44:59 | 48,060,871 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 724 | r | RcppExports.R | # This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
#' Finds the median
#'
#' Returns the median of a list of values with corresponding frequencies. This
#' is not intended to be called directly by the user.
#'
#' @param values A vector of the unique values that occur
#' @param freqs A vector of the number of occurrences of each value
#' @return Returns the median value of the data comprising each entry in values
#' repeated the corresponding entry in freqs number of times, as a numeric.
#' @author Tom Mayo \email{t.mayo@@ed.ac.uk}
#' @export
# Thin bridge generated by Rcpp::compileAttributes() (see the file header):
# it forwards straight to the compiled routine registered as
# 'M3Ddevel_median_freq'. Do not edit by hand -- changes belong in the C++
# source and are re-exported on the next compileAttributes() run.
median_freq <- function(values, freqs) {
    .Call('M3Ddevel_median_freq', PACKAGE = 'M3Ddevel', values, freqs)
}
|
fitCircle2D <- function(mat) {
	# Least-squares (Kasa) circle fit to a set of 2D points.
	# MATH SOURCE: www.dtcenter.org/met/users/docs/write_ups/circle_fit.pdf
	#
	# mat: an n x 2 matrix (or coercible object) of (x, y) coordinates, n >= 3.
	# Returns a list with:
	#   C -- length-2 numeric, the fitted circle centre (x, y)
	#   R -- the fitted radius

	if (!is.matrix(mat)) mat <- as.matrix(mat)
	if (ncol(mat) < 2 || nrow(mat) < 3) {
		stop("fitCircle2D() needs at least 3 points with x and y columns.",
			call. = FALSE)
	}

	# NUMBER OF POINTS
	n <- nrow(mat)

	# Centre the coordinates on their mean (the u, v system of the reference).
	xb <- mean(mat[, 1])
	yb <- mean(mat[, 2])
	u <- mat[, 1] - xb
	v <- mat[, 2] - yb

	# Sums that appear in the normal equations.
	Suu <- sum(u^2)
	Svv <- sum(v^2)
	Suv <- sum(u * v)
	Suuu <- sum(u^3)
	Svvv <- sum(v^3)
	Suvv <- sum(u * v * v)
	Svuu <- sum(v * u * u)

	# Solve the symmetric 2x2 system a %*% sol = b for the centre in the
	# (u, v) system. The original code formed solve(t(a) %*% a) %*% t(a) %*% b,
	# which is algebraically identical for this invertible system but does
	# extra work and is numerically less stable.
	a <- matrix(c(Suu, Suv, Suv, Svv), nrow = 2)
	b <- matrix(c((Suuu + Suvv) / 2, (Svvv + Svuu) / 2), nrow = 2)
	sol <- solve(a, b)

	# Translate the centre back to the original coordinate system (the
	# original computed this twice, once into an unused variable 'xy').
	center <- c(sol[1] + xb, sol[2] + yb)

	# Radius: r^2 = uc^2 + vc^2 + (Suu + Svv) / n.
	r <- sqrt(sol[1]^2 + sol[2]^2 + (Suu + Svv) / n)

	list('C' = center, 'R' = r)
}
e2257d95df7ed110a17114c75d3f25f803614a30 | e9c88d14708d65f1793d8203b813d3cdab348942 | /evalforecast/R/apply_corrections.R | 054052bac6228460685838586e7d1e484b376baf | [
"MIT"
] | permissive | brookslogan/covid-19-iif-blog-post-code | 1127d91468e407556a28b99c9f8422d705b8ffc4 | 643c4e2ffed1b7723b70cf19e62aabb852f31832 | refs/heads/main | 2023-04-09T10:31:03.842876 | 2021-04-02T20:52:46 | 2021-04-02T20:52:46 | 309,191,333 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,869 | r | apply_corrections.R | #' Apply corrections, if available, to upstream data frame
#'
#' Corrections data are collection of replacement records for the
#' original data. This means that the variables appear exactly in the
#' same order as in the original record and only the `value` of a
#' variable is potentially different. The replacement process returns
#' a new tibble by removing the matching original data, matched by the
#' variables `location`, `reference_date` and `variable_name` and
#' appending the entire corrections data at the end. Ideally, this
#' function should only make corrections that a properly versioned
#' data frame cannot account for, i.e. persistent bad data rows that
#' are likely to mess up forecasting algorithms (this has the salutory
#' effect of keeping the number of corrections small). Note that
#' `issue_date` is not accounted for; this function will have to
#' modified to account for non-`NA` `issue_date`.
#'
#' @param df the upstream data frame corresponding to the geo type
#' @param geo_type the geo_type corresponding to the upstream data
#' frame
#' @param forecast_date the forecast date as a date object to account
#' for the response variable name change that happened on
#' `2020-07-13`
#' @param log_info a boolean flag indicating whether to log
#' information on changes, default `TRUE`
#' @return a df with corrections applied if the corrections are
#' available, or same dataframe
#' @importFrom fs file_exists
#' @importFrom dplyr anti_join select
#' @importFrom logger log_info
#' @importFrom magrittr %>%
#' @export apply_corrections
#'
#' @examples
#'
#' \dontrun{
#' e <- new.env()
#' load("upstream_df_state_2020-08-30.Rdata", envir = e)
#' new_df <- apply_corrections(df = e$df, geo_type = "state", forecast_date = lubridate::ymd("2020-08-09"))
#' nrow(e$df) == nrow(new_df) # Same number of rows?
#' }
apply_corrections <- function(df, geo_type = c("county", "state"), forecast_date,
                              log_info = TRUE) {
  # Replace known-bad rows of `df` with the packaged corrections for this
  # geography level, if a corrections file ships with the package.
  # `forecast_date` is part of the documented interface but is not
  # referenced in the body below.
  geo_type <- match.arg(geo_type)

  # One RDS file per geography level lives in the package's extdata.
  corrections_file <- system.file("extdata",
                                  paste0(geo_type, "_corrections.RDS"),
                                  package = "evalforecast")

  if (!fs::file_exists(corrections_file)) {
    if (log_info) logger::log_info(sprintf("No corrections available for %s\n", geo_type))
    return(df)
  }

  if (log_info) logger::log_info(sprintf("Reading corrections file for %s\n", geo_type))
  corrections <- readRDS(corrections_file)
  if (log_info) logger::log_info(sprintf("Applying %d row replacements\n", nrow(corrections)))

  # Drop the rows being replaced (matched on location / reference_date /
  # variable_name), then append the full set of replacement records.
  df %>%
    dplyr::anti_join(y = corrections,
                     by = c("location", "reference_date", "variable_name")) %>%
    dplyr::bind_rows(corrections)
}
|
6fa06aa1a7f24c29ad7d510a32be2d82efd323d9 | d55c03b0f4a1a8a7c757ee653198d28b62f43f41 | /specificCR.R | 8b780ac93e6825e0c268fbd1ab8b0f523703b653 | [] | no_license | fataltes/herRingShiny | 58c4aa251ea9d57546622ef9c6d657b288d2ef66 | 049fa42b3a81d2224a5b7049960d7d29e912bc18 | refs/heads/master | 2021-07-24T01:03:41.327790 | 2017-10-09T05:35:29 | 2017-10-09T05:35:29 | 96,694,420 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,293 | r | specificCR.R |
selectBuilder <- function(input, output, session) {
crType <- reactive({
# If no file is selected, don't do anything
validate(need(input$crType, message = FALSE))
input$crType
})
getSelects <- reactive({
ns <- session$ns
if (crType() == 'CC') {
renderedPanel <- selectInput(ns("Propc"), dispName('Propc'),
choices = uniquePropc)
} else if (crType() == 'CCC') {
renderedPanel <- fluidRow(
column(6, selectInput(ns("Propc"),dispName('Propc'),
choices = uniquePropc)),
column(6, selectInput(ns("Fcapprop"),dispName('Fcapprop'),
choices = uniqueFcapprop))
)
} else {
renderedPanel <-
selectInput(ns("FracBmsyThreshLo"), dispName('FracBmsyThreshLo'),
choices = uniqueFracBmsyThreshLo)
}
return(renderedPanel)
})
output$crParams <- renderUI({
ns <- session$ns
getSelects()
})
output$low <- renderUI({
ns <- session$ns
if (input$crType == 'BB' | input$crType == 'BB3yr' | input$crType == 'BB5yr' | input$crType == 'BB3yrPerc' )
return (
selectInput(ns("FracBmsyThreshHi"), dispName('FracBmsyThreshHi'),
choices = uniqueFracBmsyThreshHi[
uniqueFracBmsyThreshHi$FracBmsyThreshLo == input$FracBmsyThreshLo, 'FracBmsyThreshHi'])
)
})
output$hi <- renderUI({
ns <- session$ns
if (input$crType == 'BB' | input$crType == 'BB3yr' | input$crType == 'BB5yr' | input$crType == 'BB3yrPerc' )
return (
selectInput(ns("FracFtarg"), dispName('FracFtarg'),
choices = uniqueFracFtarg[uniqueFracFtarg$FracBmsyThreshHi == input$FracBmsyThreshHi &
uniqueFracFtarg$FracBmsyThreshLo == input$FracBmsyThreshLo
,'FracFtarg'])
)
})
}
# Shiny module server: filters the global results table `allres` down to the
# rows matching the selected control rule and parameters, then renders a
# scatter plot (with inter-quartile whiskers where available) and a table.
# NOTE(review): allres, v25, v75 and dispName() are globals defined
# elsewhere in the app.
showSpecificCRResult <- function(input, output, session) {
  ns <- session$ns
  # Names of the metrics chosen for the x and y axes.
  x <- reactive({
    validate(need(input$x, message = FALSE))
    input$x
  })
  y <- reactive({
    validate(need(input$y, message = FALSE))
    input$y
  })
  # Rows of allres matching the selected CR type and its parameter values;
  # returns NULL (implicitly) until all required inputs are set.
  xy <- reactive({
    crRes = allres[allres$CR == input$crType,]
    if (input$crType == 'CC') {
      if (!is.null(input$Propc))
        selectedRes <- crRes[crRes$Propc == input$Propc,]
    } else if (input$crType == 'CCC') {
      if (!is.null(input$Propc) & !is.null(input$Fcapprop))
        selectedRes <- crRes[crRes$Propc == input$Propc &
                               crRes$Fcapprop == input$Fcapprop,]
    } else {
      if (!is.null(input$FracBmsyThreshHi) & !is.null(input$FracBmsyThreshLo) & !is.null(input$FracFtarg)) {
        selectedRes <- crRes[crRes$FracBmsyThreshHi == input$FracBmsyThreshHi &
                               crRes$FracBmsyThreshLo == input$FracBmsyThreshLo &
                               crRes$FracFtarg == input$FracFtarg,]
      }
    }
    if (exists("selectedRes")) {
      return (selectedRes)
    }
  })
  output$distPlot <- renderPlot({
    xyVal <- xy()
    # Column names holding the 25th/75th-percentile spreads for each axis
    # metric (NULL when the results table has no such columns).
    x25id = get25(x())
    x75id = get75(x())
    y25id = get25(y())
    y75id = get75(y())
    # NOTE(review): `!is.na(xyVal)` on a data frame returns a matrix, so
    # `&&` here relies on only its first element and errors on R >= 4.3;
    # consider a scalar check such as nrow()-based guards instead.
    if (!is.null(xyVal) && !is.na(xyVal) && nrow(xyVal) > 0) {
      pll <- ggplot(xyVal, aes_string(x=input$x, y=input$y, color='bias', shape='steep'))
      # Horizontal whisker from (x - Q25) to (x + Q75) at each point.
      if (!is.null(x25id) && !is.na(x25id) && x25id != "" ) {
        x1 <- xyVal[x()]-xyVal[x25id]
        y <- xyVal[y()]
        x2 <- xyVal[x()]+xyVal[x75id]
        dd <- data.frame(x1, y, x2)
        pll <- pll +
          geom_segment(aes(x=xyVal[x()]-xyVal[x25id], y=xyVal[y()], xend=xyVal[x()]+xyVal[x75id], yend=xyVal[y()]), alpha=1)
      }
      # Vertical whisker, same construction for the y metric.
      if (!is.null(y25id) && !is.na(y25id) && y25id != "" ) {
        pll <- pll +
          geom_segment(aes(x=xyVal[x()], y=xyVal[y()]-xyVal[y25id], xend=xyVal[x()], yend=xyVal[y()]+xyVal[y75id]), alpha=1)
      }
      pll +
        geom_point(size=3, alpha=1) +
        theme_classic() +
        xlab(dispName(x())) +
        ylab(dispName(y()))
      #+
      #  theme(plot.title = element_text(hjust = 0.5))
    }
  })
  # Map a metric name to its 25th-percentile column in the results table,
  # trying the "Q25<suffix>" and "<metric>_25" naming schemes found in v25.
  get25 <- function(metric) {
    if (!is.null(metric) && !is.na(metric)) {
      if (length(grep(paste("Q25",substring(metric, 4), sep=""), v25)) > 0)
        return (paste("Q25",substring(metric, 4), sep=""))
      else if (length(grep(paste(metric, "_25", sep=""), v25)) > 0)
        return (paste(metric, "_25", sep=""))
    }
    return(NULL)
  }
  # Same lookup for the 75th-percentile column names in v75.
  get75 <- function(metric) {
    if (!is.null(metric) && !is.na(metric)) {
      if (length(grep(paste("Q75",substring(metric, 4), sep=""), v75)) > 0)
        return (paste("Q75",substring(metric, 4), sep=""))
      else if (length(grep(paste(metric, "_75", sep=""), v75)) > 0)
        return (paste(metric, "_75", sep=""))
    }
    return(NULL)
  }
  # Companion table of the plotted rows: bias, steepness and both metrics.
  output$view <- renderTable({
    res <- xy()
    if (!is.null(res) && !is.na(res) && nrow(res) > 0) {
      res <- res[c("Bias", "Steep", x(), y())]
      colnames(res) <- c("Bias", "Steep", toString(dispName(x())), toString(dispName(y())))
      return (res)
    }
  })
}
79edbbf22e7eb6d4fb46ee504f0b2ca45c8950e1 | 9980ef61e7d5e8c6e6acee034230ba807b2e83e0 | /R/imd_postcode_lookup.R | 8ec4389d922c770aef3cc65504c0d19e584f1f73 | [] | no_license | northernjamie/imdr | ee6295bc3ad947baa5bb9618c64e084387054071 | 67908c73f780b7a938848c218ffb771585f4fcef | refs/heads/master | 2021-05-06T11:12:13.668272 | 2018-02-18T13:12:53 | 2018-02-18T13:12:53 | 114,245,606 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,573 | r | imd_postcode_lookup.R | #' Lookup IMD Using Postcode
#'
#' Function for looking up a column of postcodes and returning information about the
#' indices of deprivation about that area.
#'
#' Uses the SPARQL endpoint at Open Data Communities - The Ministry of Housing, Communities and Local Government in the UK.
#' Coverage - England only.
#' @title Gets English indices of deprivation ranks by postcode
#'
#' @description This package takes a dataframe containing a column of postcodes (and other information) and uses it to look up IMD ranks for that area
#'
#'
#' @param postcodes This is the dataframe containing all data
#' @param pcdcolumn This is the index of the column that contains the postcode data (1 being the 1st column in the daataframe)
#'
#' @import httr
#' @import reshape2
#'
#' @return returns a dataframe with the original data with extra columns containing deprivation ranks
#'
#' @examples new_dataframe <- imd_lookup(dataToBeMatched,3) [3 is the index of the column with the postcode in]
#'
#' @export imd_lookup
#'
imd_lookup <- function(postcodes,pcdcolumn) {
  # Set the variables for the encoded sparql endpoint and the two halves of the encoded SPARQL query
  sparql_endpoint <- "http://opendatacommunities.org/sparql?"
  sparql_part1 <- "query=PREFIX%20dcterms%3A%20%3Chttp%3A%2F%2Fpurl.org%2Fdc%2Fterms%2F%3E%0APREFIX%20owl%3A%20%3Chttp%3A%2F%2Fwww.w3.org%2F2002%2F07%2Fowl%23%3E%0APREFIX%20qb%3A%20%3Chttp%3A%2F%2Fpurl.org%2Flinked-data%2Fcube%23%3E%0APREFIX%20rdf%3A%20%3Chttp%3A%2F%2Fwww.w3.org%2F1999%2F02%2F22-rdf-syntax-ns%23%3E%0APREFIX%20rdfs%3A%20%3Chttp%3A%2F%2Fwww.w3.org%2F2000%2F01%2Frdf-schema%23%3E%0APREFIX%20skos%3A%20%3Chttp%3A%2F%2Fwww.w3.org%2F2004%2F02%2Fskos%2Fcore%23%3E%0APREFIX%20xsd%3A%20%3Chttp%3A%2F%2Fwww.w3.org%2F2001%2FXMLSchema%23%3E%0A%0ASELECT%20%3Fpostcodeuri%20%3Fpostcode%20%3Fpostcodestatus%20%3Flsoa%20%3Flsoaname%20%3Fdomain%20%3Frank%20%3Fdecile%20%3Fscore%20WHERE%20%7B%0A%20%20%7B%0A%20%20%20%20SELECT%20(%3Flsoa_uri_inner%20as%20%3Flsoa_uri)%20%3Fpostcode%20%3Fpostcodeuri%20%3Fpostcodestatus%20%3Flsoa%20%3Flsoaname%20%3Fposition%20WHERE%20%7B%0A%20%20%20%20%20%20VALUES%20(%3Fposition%20%3Fpostcodeuri)%20%7B"
  # NOTE(review): the literal below spans a physical line break, so the
  # embedded newline (and following characters) become part of the string
  # and corrupt the URL-encoded query -- verify this literal against the
  # original upstream source and rejoin it onto one line.
  sparql_part2 <- "%7DGRAPH%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fgraph%2Fgeography%2Fuk-postcodes%3E%20%7B%0A%20%20%20%20%20%20%20%20%3Fpostcodeuri%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fdef%2Fontology%2Fadmingeo%2Flsoa%3E%20%3Flsoa_uri_inner%20%3B%0A%20%20%20%20%20%20%20%20%3Chttp%3A%2F%2Fwww.w3.org%2F2000%2F01%2Frdf-schema%23label%3E%20%3Fpostcode%20%3B%0A%20%20%20%20%20%20%20%20%3Chttp%3A%2F%2Fwww.w3.org%2F2004%2F02%2Fskos%2Fcore%23note%3E%20%3Fpostcodestatus%20.%0A%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20GRAPH%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fgraph%2Fons-geography-administration%3E%20%7B%0A%20%20%20%20%20%20%20%20%3Flsoa_uri_inner%20%3Chttp%3A%2F%2Fwww.w3.org%2F2004%2F02%2Fskos%2Fcore%23notation%3E%20%3Flsoa%20%3B%0A%20%20%20%20%20%20%20%20rdfs%3Alabel%20%3Flsoaname%20.%0A%20%20%20%20%20%20%7D%0A%20%20%20%20%20%20%7D%0A%20%20%7D%0A%20%20GRAPH%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fgraph%2Fsocietal-wellbeing%2Fimd%2Findices%3E%20%7B%0A%20%20%20%20%3Frank_obs%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fdef%2Fontology%2Fgeography%2FrefArea%3E%20%3Flsoa_uri%20%3B%0A%20%20%20%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fdef%2Fontology%2Fcommunities%2Fsocietal_wellbeing%2Fimd%2Findices%3E%20%3Fdomain_uri%20%3B%0A%20%20%20%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fdef%2Fontology%2Fcommunities%2Fsocietal_wellbeing%2Fimd%2FrankObs%3E%20%3Frank%20.%0A%20%20%20%20%3Fdecile_obs%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fdef%2Fontology%2Fgeography%2FrefArea%3E%20%3Flsoa_uri%20%3B%0A%20%20%20%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fdef%2Fontology%2Fcommunities%2Fsocietal_wellbeing%2Fimd%2Findices%3E%20%3Fdomain_uri%20%3B%0A%20%20%20%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fdef%2Fontology%2Fcommunities%2Fsocietal_wellbeing%2Fimd%2FdecObs%3E%20%3Fdecile%20.%0A%20%20%20%20%3Fscore_obs%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fdef%2Fontology%2Fgeography%2FrefArea%3E%20%3Flsoa_uri%20%3B%0A%20%20%20%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fd
ef%2Fontology%2Fcommunities%2Fsocietal_wellbeing%2Fimd%2Findices%3E%20%3Fdomain_uri%20%3B%0A%20%20%20%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fdef%2Fontology%2Fcommunities%2Fsocietal_wellbeing%2Fimd%2FscoreObs%3E%20%3Fscore%20.%0A%20%20%7D%0A%20%20%3Fdomain_uri%20rdfs%3Alabel%20%3Fdomain%20.%0A%20%20%7D%20ORDER%20BY(%3Fposition)"
  whole_values_statement <- ""
  # Strip all whitespace from the postcode column so "AB1 2CD" matches the
  # endpoint's postcode-unit URIs.
  postcodes$strippedpcd <- gsub('\\s+', '', postcodes[[pcdcolumn]])
  # Build one VALUES tuple per row. NOTE(review): 1:nrow(...) misbehaves
  # for an empty data frame (use seq_len), and the string is grown one
  # paste at a time -- building a vector and using paste(collapse=) would
  # avoid quadratic behaviour on large inputs.
  for (row in 1:nrow(postcodes)) {
    cur_values_statement <- paste(whole_values_statement,"(",row,"%20%3Chttp%3A%2F%2Fopendatacommunities.org%2Fid%2Fgeography%2Fpostcode%2Fpostcodeunit%2F",as.character(postcodes$strippedpcd)[row],"%3E)%0A",sep="")
    whole_values_statement <- cur_values_statement
  }
  # Assemble the full encoded query and POST it, asking for CSV back.
  query <- paste(sparql_part1,whole_values_statement,sparql_part2,sep="")
  results <- POST(url = sparql_endpoint,
                  add_headers(Accept = "text/csv"),
                  body = query)
  results <- content(results,"parsed")
  # Pivot long (postcode, lsoa, domain, rank) rows to one column per domain.
  results_dcasted <- reshape2::dcast(results, postcode + lsoa ~ domain, value.var = "rank", fun.aggregate = max, na.rm = TRUE)
  results_dcasted$postcode <- gsub('\\s+', '', results_dcasted$postcode)
  # Left-join back onto the caller's data so unmatched postcodes are kept.
  merged <- merge(x=postcodes, y = results_dcasted, by.x="strippedpcd", by.y = "postcode", all.x = TRUE)
  return(merged)
}
# End -------------------------------------------------------------------------------------------------------
|
9a41f22750ee0491250cc2d68b2d7cf48684e2c2 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/Countr/inst/examples/example-data-fertility.R | 2df42ca85777d3b8a9a3047354ea5947d7086edf | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,440 | r | example-data-fertility.R | ## make sure the countr pkg is loaded first, then load the fertility data
data(fertility)
## specify the model as in McShane(2008)
form <- children ~ german + years_school + voc_train + university + religion +
    year_birth + rural + age_marriage
## fit the weibull model
## (dePril convolution for the count probabilities; Hessian requested so
## standard errors are available from the fit)
wei <- renewalCount(formula = form, data = fertility, dist = "weibull",
                    computeHessian = TRUE, weiMethod = "conv_dePril",
                    control = renewal.control(trace = 0,
                                              method = "nlminb"),
                    convPars = list(convMethod = "dePril")
                    )
## Poisson GLM baseline with the same linear predictor.
pois <- glm(formula = form, data = fertility, family = poisson())
## compare residuals: nothing much you can deduce
## (side-by-side QQ plots of Pearson residuals, shared y range)
par(mfrow = c(1, 2))
res_wei <- residuals(wei, type = "pearson")
qqnorm(res_wei, ylim = range(res_wei), main = "Weibull Renewal Model")
qqline(res_wei, ylim = range(res_wei))
grid()
qqnorm(residuals(pois), ylim = range(res_wei), main = "GLM Poisson")
qqline(residuals(pois), ylim = range(res_wei))
grid()
## comparing expected and predicted frequencies
## inspired from Cameron (2013) Chapter 5.3.4
breaks_ <- c(0:5, 7, 9)
pears <- compareToGLM(poisson_model = pois,
                      breaks = breaks_, weibull = wei)
frequency_plot(pears$Counts, pears$Actual,
               dplyr::select(pears, contains("_predicted"))
               )
## run the formal chi-sq test gof
test <- chiSq_gof(wei, breaks = breaks_)
|
4a2830483ac4fa359d41e12faddd2fda3de5afbd | f249db6d617a9e49bd0f6c651c381856810a232a | /tests/testthat/test-utils-ui.R | e14098a3aad6a5cba755e0aed3c8b14ce68b34b1 | [] | no_license | cran/googlesheets4 | 043cd153df6d4c0e2644ab063a20fcd67ede336c | 6c30c0777e26beb747a1a54559931587aa21d1e4 | refs/heads/master | 2023-06-23T09:54:39.732668 | 2023-06-11T03:00:02 | 2023-06-11T03:00:02 | 236,607,618 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 693 | r | test-utils-ui.R | test_that("gs4_quiet() falls back to NA if googlesheets4_quiet is unset", {
withr::with_options(
list(googlesheets4_quiet = NULL),
expect_true(is.na(gs4_quiet()))
)
})
# gs4_abort() must signal a condition carrying the package-wide error class
# plus any extra class supplied by the caller, so handlers can catch either.
test_that("gs4_abort() throws classed condition", {
  expect_error(gs4_abort("oops"), class = "googlesheets4_error")
  expect_gs4_error(gs4_abort("oops"))
  expect_gs4_error(gs4_abort("oops", class = "googlesheets4_foo"))
  expect_error(
    gs4_abort("oops", class = "googlesheets4_foo"),
    class = "googlesheets4_foo"
  )
})
# The unsupported-conversion error message is pinned via snapshot; the
# dummy object only needs a multi-class attribute to exercise the wording.
test_that("abort_unsupported_conversion() works", {
  x <- structure(1, class = c("a", "b", "c"))
  expect_snapshot_error(
    abort_unsupported_conversion(x, "target_class")
  )
})
|
69156e134274d353c0f322bd20280afafeb927a6 | 41038c49343ee37cb91348fc007fc88457a1ab9d | /3.4.2 Removing Colinear Variables.R | 495c1b69aa475abaeb31539304db0947b8007989 | [] | no_license | ahmeduncc/Data-Statistics-and-Data-Mining | 460577cd52690bf7c9bab99d0794d864ab745126 | bbca512c856489654a411f0173ec7b59956c882d | refs/heads/master | 2021-06-13T15:08:08.448070 | 2017-04-04T17:38:25 | 2017-04-04T17:38:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,759 | r | 3.4.2 Removing Colinear Variables.R | # See https://beckmw.wordpress.com/2013/02/05/collinearity-and-stepwise-vif-selection/
# http://blog.minitab.com/blog/understanding-statistics/handling-multicollinearity-in-regression-analysis
# NOTE(review): require() only warns when a package is missing; library()
# would fail loudly here.
require(MASS)
require(clusterGeneration)
#create fifteen 'explanatory' variables with 200
# observations each
set.seed(2)
num.vars<-15
num.obs<-200
cov.mat<-genPositiveDefMat(num.vars,covMethod="unifcorrmat")$Sigma
rand.vars<-mvrnorm(num.obs,rep(0,num.vars),Sigma=cov.mat)
#
# Now we create our response variable as a linear combination
# of the explanatory variables. First, we create a vector
# for the parameters describing the relationship of the
# response variable with the explanatory variables. Then,
# we use some matrix algebra and a randomly distributed
# error term to create the response variable. This is the
# standard form for a linear regression model.
parms<-runif(num.vars,-10,10)
y<-rand.vars %*% matrix(parms) + rnorm(num.obs,sd=20)
# We would expect a regression model to indicate each
# of the fifteen explanatory variables are significantly
# related to the response variable, since we know the true
# relationship of y with each of the variables. However,
# our explanatory variables are correlated. What happens
# when we create the model?
lm.dat <-data.frame(y,rand.vars)
form.in<-paste('y ~',paste(names(lm.dat)[-1],collapse='+'))
mod1<-lm(form.in,data=lm.dat)
summary(mod1)
x <- rand.vars
# remove collinear variables and recreate lm
# NOTE(review): vif_func() is defined further down this file, so source the
# whole file before running these lines top-to-bottom.
terms_to_keep <- vif_func(in_frame=x,thresh=5,trace=T)
form.in <- paste('y ~',paste(terms_to_keep,collapse='+'))
mod2<-lm(form.in,data=lm.dat)
summary(mod2)
# vif_func ----------------------------------------------------------------
# Stepwise backwards elimination of collinear variables by variance
# inflation factor (VIF).
#
# in_frame: data frame (or object coercible to one) of candidate
#           explanatory variables.
# thresh:   VIF threshold; elimination stops once every remaining
#           variable's VIF is below it (default 10).
# trace:    print the VIF table and the variable removed at each round.
# ...:      passed through to lm().
#
# Returns the names of the variables that survive elimination.
vif_func <- function(in_frame, thresh = 10, trace = TRUE, ...) {
  # VIF() comes from fmsb; fail fast with a clear message instead of the
  # original require() call, which only warned and failed later.
  if (!requireNamespace("fmsb", quietly = TRUE)) {
    stop("vif_func() requires the 'fmsb' package.", call. = FALSE)
  }
  # is.data.frame() handles subclasses and, unlike class(x) != 'data.frame',
  # does not produce a length-2 condition for matrices on R >= 4.0.
  if (!is.data.frame(in_frame)) in_frame <- data.frame(in_frame)

  # VIF of every column: regress each variable on all of the others.
  compute_vifs <- function(dat) {
    var_names <- names(dat)
    vapply(
      var_names,
      function(val) {
        regressors <- var_names[var_names != val]
        form_in <- formula(paste(val, "~", paste(regressors, collapse = "+")))
        fmsb::VIF(lm(form_in, data = dat, ...))
      },
      numeric(1)
    )
  }
  # Print one round of VIF values in the original prmatrix() layout.
  print_vifs <- function(vifs) {
    m <- cbind(names(vifs), as.character(vifs))
    prmatrix(m, collab = c("var", "vif"), rowlab = rep("", nrow(m)), quote = FALSE)
    cat("\n")
  }

  vif_init <- compute_vifs(in_frame)
  vif_max <- max(vif_init)
  if (vif_max < thresh) {
    if (isTRUE(trace)) {
      print_vifs(vif_init)
      cat(paste("All variables have VIF < ", thresh, ", max VIF ",
                round(vif_max, 2), sep = ""), "\n\n")
    }
    return(names(in_frame))
  }

  # Backwards selection: drop the worst offender until all VIFs < thresh.
  in_dat <- in_frame
  while (vif_max >= thresh) {
    vif_vals <- compute_vifs(in_dat)
    # which.max() keeps the VIFs numeric; the original compared a character
    # matrix column against a numeric maximum, relying on round-trip string
    # formatting to match.
    max_col <- which.max(vif_vals)
    vif_max <- vif_vals[[max_col]]
    if (vif_max < thresh) break
    if (isTRUE(trace)) {
      print_vifs(vif_vals)
      cat("removed: ", names(vif_vals)[max_col], vif_max, "\n\n")
      flush.console()
    }
    # drop = FALSE keeps a one-column result a data frame; the original
    # collapsed it to a vector, breaking names() on the next iteration.
    in_dat <- in_dat[, names(in_dat) != names(vif_vals)[max_col], drop = FALSE]
  }
  names(in_dat)
}
|
01945fcec6cc27b6b0848b372686e573e14b15e6 | e2cf03cc7bf8f207d389b3a0a4149de0ba3c7c33 | /covid_chiba_deaths_v2.R | 531c72567f36b051cd2fb7e744f8a6af03d5e186 | [] | no_license | fusion0202/RScript | 1abb4c2d73ba3db35cb734697c273a66ff593f3f | 842775a6b0e589aa924ed8f6ddf32b08a25c7008 | refs/heads/master | 2021-12-10T09:10:15.888686 | 2021-11-26T09:23:40 | 2021-11-26T09:23:40 | 1,852,999 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,239 | r | covid_chiba_deaths_v2.R | library(ggplot2)
library(ggthemes)
library(scales)
library(openxlsx)
library(tidyverse)
# Skeleton calendar: one row per day for two years from 2020-02-01; the
# daily death/case counts are joined onto it below.
mdf <- data.frame(date = seq(as.Date("2020-02-01"), by = "day", length.out = 365 * 2))
#
# Deaths
#
URL1 <- "https://raw.githubusercontent.com/fusion0202/RScript/master/covid_chiba_deaths.csv"
d <- read.csv(URL1)
# Tally deaths per reported date.
df <- data.frame(table(d$Date))
# NOTE(review): the first row of the tally is dropped here -- presumably a
# blank/header artefact in the CSV; confirm against the source file.
df <- df[-1,]
colnames(df) <- c("date", "deaths")
# Dates arrive as Japanese "YYYY年MM月DD日" strings; replace the kanji
# markers to obtain ISO "YYYY-MM-DD" before parsing with as.Date().
df %>%
  mutate(date = str_replace(date, "日", "")) %>%
  mutate(date = str_replace(date, "月", "-")) %>%
  mutate(date = str_replace(date, "年", "-")) %>%
  mutate(date = as.Date(date)) -> df2
mdf <- left_join(mdf, df2, by = c("date" = "date"))
#
# Cases
#
URL2 <- "https://www.pref.chiba.lg.jp/shippei/press/2019/documents/1103kansensya.xlsx"
# Sheet 1: case dates taken from column 8, skipping the first four rows.
s1 <- read.xlsx(URL2, sheet = 1)
s1 <- s1[-c(1:4), 8]
# Excel stores dates as serial numbers with origin 1899-12-30.
s1 <- as.Date(as.numeric(s1), origin = "1899-12-30")
s1 <- s1[!is.na(s1)]
d1 <- as.data.frame(table(s1))
d1$s1 <- as.Date(as.vector(d1$s1))
# Sheet 2: dates in column 7, skipping one header row.
s2 <- read.xlsx(URL2, sheet = 2)
s2 <- s2[-1, 7]
s2 <- as.Date(as.numeric(s2), origin = "1899-12-30")
s2 <- s2[!is.na(s2)]
d2 <- as.data.frame(table(s2))
d2$s2 <- as.Date(as.vector(d2$s2))
# Combine the two sheets' daily tallies; dates missing from sheet 2 get 0.
dt <- left_join(d1, d2, by = c("s1" = "s2"))
dt$Freq.y[which(is.na(dt$Freq.y))] <- 0
dt$cases <- dt$Freq.x + dt$Freq.y
dt <- dt[,-c(2,3)]
mdf <- left_join(mdf, dt, by = c("date" = "s1"))
# Calendar days with no report are zero-filled for both series.
mdf$deaths[which(is.na(mdf$deaths))] <- 0
mdf$cases[which(is.na(mdf$cases))] <- 0
#
# trim dataframe:mdf
#
# Restrict to the plotting window (Feb 2020 through Dec 2021).
ddf <- mdf[mdf$date >= as.Date("2020-02-01"),]
ddf <- ddf[ddf$date <= as.Date("2021-12-01"),]
#
# plot deaths
#
# Monthly tick marks across the 23-month window.
datebreaks <- c(seq(as.Date("2020-02-01"), by = "month", length.out = 23))
mtitle1 <- paste0('Daily New Deaths in Chiba, from ',
                  df2$date[1], ' to ', df2$date[length(df2$date)])
# One vertical segment per day (lollipop-style daily counts).
g <- ggplot(data = ddf)
g <- g + geom_segment(aes(x = date, y = 0, xend = date, yend = deaths),
                      color = "blue", size = 0.5, alpha = 0.5)
g <- g + theme_light()
g <- g + scale_x_date(breaks = datebreaks, labels = date_format("%m/%d"))
g <- g + labs(title = mtitle1,
              x = "Day",
              y = "Deaths",
              caption = paste("Data Source: ", URL1))
g <- g + theme(panel.grid.minor = element_blank(),
               plot.title = element_text(size = rel(1.4)),
               axis.title = element_text(size = rel(1.2)),
               axis.text = element_text(size = rel(1.0)))
print(g)
#
# plot cases
#
mtitle2 <- paste0('Daily new confirmed cases in Chiba, from ', min(dt$s1), ' to ',
                  max(dt$s1))
# Same segment style for confirmed cases, with a fixed 0-2000 y axis.
g <- ggplot()
g <- g + geom_segment(data = ddf, aes(x = date, y = 0, xend = date, yend = cases),
                      color = "darkorange", size = 0.5, alpha = 0.8)
g <- g + theme_light()
g <- g + scale_x_date(breaks = datebreaks, labels = date_format("%m/%d"))
g <- g + scale_y_continuous(limits = c(0, 2000), breaks = seq(0, 2000, by = 500))
g <- g + labs(title = mtitle2,
              x = "Day",
              y = "Positive Confirmed",
              caption = paste("Data Source: ", URL2) )
g <- g + theme(panel.grid.minor = element_blank(),
               plot.title = element_text(size = rel(1.4)),
               axis.title = element_text(size = rel(1.2)),
               axis.text = element_text(size = rel(1.0)))
print(g)
|
483b6839ee767f32fa913621da76b3ec81f8a5fa | f40094eefe2f20d87522264f16b727a208fdfa4e | /Biol300Demo.R | 070e5aa16a753ac573ddf26b6c28af0c9ebe0fc0 | [] | no_license | rylanmcc/rootdata | 5ef503b5ccd47b68452b2e95c1b43b7015222467 | d1d94d679d1e2191c2638fcb14f4227fd65455bb | refs/heads/master | 2022-11-06T05:10:44.717320 | 2020-06-25T15:54:58 | 2020-06-25T15:54:58 | 259,134,194 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,051 | r | Biol300Demo.R | ### Root Study R-Script - Angert Lab
### Directed Studies by Rylan McCallum
### Simplified for Biol 300 2-way ANOVA

# Attach the analysis packages (ggplot2 ships with tidyverse but the
# script loads it explicitly as well).
library(tidyverse)
library(ggplot2)

# Raw root measurements, one row per plant.
dat <- read.csv("RootData_Biol300.csv")

# Quick look at the distribution of the response variable.
hist(dat$total_root_length)

# Exploratory graphing ---------------------------------------------------

# Total root length by region.
p1 <- ggplot(dat, aes(Region, total_root_length)) +
  geom_boxplot() +
  labs(x = "Region", y = "Total Root Length")
p1 + theme_classic()

# Total root length by treatment.
p2 <- ggplot(dat, aes(Treatment, total_root_length)) +
  geom_boxplot() +
  labs(x = "Treatment", y = "Total Root Length")
p2 + theme_classic()

# Do regions differ in plasticity across treatments?
p3 <- ggplot(dat, aes(Region, total_root_length, colour = Treatment)) +
  geom_boxplot() +
  labs(x = "Region", y = "Total Root Length")
p3 + theme_classic()

# Two-way ANOVA with a Region x Treatment interaction ---------------------
mod.2way <- lm(total_root_length ~ Region * Treatment, data = dat)
summary(mod.2way)
|
9e2b319b7a76e405a311c2b699a03168bc612395 | 9219ee8b398e5c891a99d87ecbf8e86dccdd4d26 | /qplot.r | f07c4d964ee9b3e4f8b58ac5af4d4091d7e965a1 | [] | no_license | stuti24m/R-Basics | 7752ac1aa0b4ab7a094739a79519ad593906db8b | 8e39090a162cf64538a5d1e7caba62a7e6b79cc9 | refs/heads/master | 2022-11-22T19:42:23.166736 | 2020-07-13T15:39:29 | 2020-07-13T15:39:29 | 278,013,680 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 139 | r | qplot.r |
library(ggplot2)
# Open the help page for the built-in diamonds data set.
?diamonds
# quick plot
# Scatter of price vs carat, coloured by clarity, one facet per clarity
# level. NOTE(review): qplot() is deprecated in recent ggplot2 releases;
# prefer ggplot() + geom_point() + facet_grid() for new code.
qplot(data = diamonds, carat, price, colour=clarity,
      facets =.~clarity)
|
e5b3e91d93d64ecac271edae4e22e903ca69d9fb | b25934d0200b909e9be4e0759ab6f1f88d659087 | /Discrepancy/R/discrepancy.R | 640db2e9918893fda14757b81192877280589a0e | [] | no_license | shahar-siegman/old_projects | 8a94d67f2fd6f6f861fc903ac246b4d468395998 | 411756f7ae03e798a8376a41ce1ec57b9eda520f | refs/heads/master | 2021-10-19T04:59:14.538249 | 2015-12-22T15:18:26 | 2015-12-22T15:18:26 | 171,240,632 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,057 | r | discrepancy.R | library("RMySQL")
# Load the placement/impression export and normalise the date column.
plcmnt_chain_date <- read.csv("data_300_placements.txt")
plcmnt_chain_date$Date <- as.Date(plcmnt_chain_date$Date)

# Distinct placement ids to look up in the layouts table.
plcmnt_id_vec <- unique(plcmnt_chain_date$placementId)

# SECURITY NOTE(review): database credentials are hard-coded in source
# control; they should come from environment variables or an untracked
# config file.
con <- dbConnect(MySQL(), user="komoona", password="nvlvnjfwe",
                 dbname="komoona_db", host="komoonadevdb.cesnzoem9yag.us-east-1.rds.amazonaws.com")

# Fetch the layout id and website (tag_url) for one placement/layout id.
# NOTE(review): the id is pasted into the SQL unescaped -- acceptable only
# while the input CSV is trusted.
f <- function(y) {
  dbGetQuery(con,paste("SELECT layoutid, tag_url FROM kmn_layouts WHERE layoutid= '",y,"'",sep="")) # e.g. select layoutid, tag_url from kmn_layouts where layoutid="0044a86227b3126f9d03c3615712d6b5";
}
data <- lapply(plcmnt_id_vec, f)

# Stack the per-placement results into one data frame. This replaces the
# original seq(2, length(data)) loop, which counted backwards (2, 1) when
# fewer than two placements were returned and grew the frame one rbind at
# a time.
data_df <- do.call(rbind, data)

# on.exit() only registers handlers inside a function, so the original
# top-level on.exit(dbDisconnect(con)) never ran; disconnect explicitly.
dbDisconnect(con)

# next steps (the unfinished cbind/aggregate line is commented out so the
# script parses):
# cbind(aggregate(plcmnt_chain_date$Impressions,plcmnt_chain_date,sum),
# 1. aggregate placement_chain_date by placement_id using aggregate
# 2. fetch the tag_url corresponding to each placement_id and add it as a column to the df.
|
b330169c2622f9ce0391641e044bf02f97f2f8ee | 728cd257953b215978c76b696ee9de95058ee040 | /npdi.gas.R | 362eaa1746005b5eab1f1b5f9088babc787da821 | [] | no_license | AVKorytin/Tax-Revenue-Prediction | 2e01176f2a3ca4f8f3ea862b7e6822eed665f7fe | 3a5b6c33b0fa9256a44b1b72e3f9a784e4c9069d | refs/heads/master | 2016-09-14T04:05:47.613469 | 2016-05-10T15:36:11 | 2016-05-10T15:36:11 | 58,470,742 | 0 | 0 | null | null | null | null | WINDOWS-1251 | R | false | false | 1,955 | r | npdi.gas.R | # Settings
# npdi.gas.R -- fit an ARIMA regression of Russian gas-extraction tax (NDPI)
# revenue on gas output and the oil price, with two-axis diagnostic plots.

# Settings
#path = "C:/Users/Administrator/Dropbox/IPython/TaxesForecast/! НДПИ/"
path = "./"
setwd(path)
library(forecast)
library(tseries)

# Plot x1 against the left axis and x2 against a second axis on the right.
# `ylab` labels the left (x1) axis; `title` labels the right (x2) axis.
twoyaxisplot <- function(x1, x2, title=NULL, ylab=NULL){
  par(mar=c(5,4,4,5)+.1)
  plot(x1, col="blue3", xlab="", ylab=ylab); grid()
  par(new=TRUE)
  plot(x2, xlab="", ylab="", col="green3", plot.type="single", yaxt='n', ann=FALSE)
  axis(4)
  mtext(title, side=4, line=3)
}

# Read data: quarterly series starting 2008 Q1.
data = read.csv("data.gas.csv", sep = ";")
gas = ts(data[,"gas"], freq=4, start=c(2008, 1))
ndpi = ts(data[,"ndpi"], freq=4, start=c(2008, 1))
oil = ts(data[,"oil"], freq=4, start=c(2008, 1))

twoyaxisplot(gas, ndpi)
# BUG FIX: in the next two calls the title/ylab strings were swapped -- the
# left (x1) axis carried the right-hand series' label and vice versa. They
# now follow the convention used by twoyaxisplot() and the other calls in
# this project: 3rd argument labels x2 (right axis), 4th labels x1 (left).
twoyaxisplot(ndpi/1000, oil, "Цена нефти, руб./барр.", "НДПИ на газ, млрд. руб.")
legend("topleft", legend=c("НДПИ на газ", "Цена нефти"), col=c("blue3", "green3"), lwd=c(2, 2), lty=c(1,1), cex=0.8, bty="n")
twoyaxisplot(ndpi/1000, gas/1000, "Добыча газа, трлн. куб. м", "НДПИ на газ, млрд. руб.")
legend("topleft", legend=c("НДПИ на газ", "Добыча газа"), col=c("blue3", "green3"), lwd=c(2, 2), lty=c(1,1), cex=0.8, bty="n")

# Regression with ARIMA errors: NDPI explained by gas output and oil price.
fit <- auto.arima(ndpi, xreg=cbind(gas, oil), stepwise=FALSE, approximation=FALSE)
fit
ts.plot(fit$residuals)
ts.plot(ndpi/1000, fit$fitted/1000, col=c("blue3", "green3"), xlab="", ylab="НДПИ на газ, млрд. руб."); grid()
legend("topleft", legend=c("actual", "fitted"), col=c("blue3", "green3"), lwd=c(2, 2), lty=c(1,1), cex=0.8, bty="n")

# BUG FIX: the following lines referenced objects (esn, fot, gdp, defl) that
# are never defined in this script -- apparently copied from a related
# payroll-tax (ЕСН) script -- and crashed at runtime. Kept for reference but
# disabled.
#twoyaxisplot(esn/defl, fot/defl, "ФОТ, млрд. руб.", "ЕСН, млрд. руб.")
#legend("topleft", legend=c("ЕСН", "ФОТ"), col=c("blue3", "green3"), lwd=c(2, 2), lty=c(1,1), cex=0.8, bty="n")
#twoyaxisplot(esn/defl, gdp/defl, "ВВП, млрд. руб.", "ЕСН, млрд. руб.")
#legend("topleft", legend=c("ЕСН", "ВВП"), col=c("blue3", "green3"), lwd=c(2, 2), lty=c(1,1), cex=0.8, bty="n")
fb00273ee83d8fb9b5fc547ed6d1c8759e07fc5e | a59b4bd2769af0a269be375c9e0e4b5ce716a915 | /MatchupScript.R | 36da23ce11f7c2aa8d52fa2a62e7bcaa12c62fd5 | [] | no_license | Henryjean/Matchups | 995a4e8a3dae4c5f1f29a2bfa51712f4e91461de | 185ab1281b920bfb2dd4adb1ee75b1e9bfef733f | refs/heads/master | 2020-06-07T07:46:50.981999 | 2019-06-20T18:17:41 | 2019-06-20T18:17:41 | 192,964,849 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,484 | r | MatchupScript.R | #Load packages
library(tidyverse)
library(nbastatR)
library(jsonlite)
library(data.table)
library(extrafont)
#set theme
theme_owen <- function () {
theme_minimal(base_size=10, base_family="Gill Sans MT") %+replace%
theme(
panel.grid.minor = element_blank(),
plot.background = element_rect(fill = 'floralwhite', color = 'floralwhite')
)
}
get_data <- function(playerName, season, possessionMinimum) {
players <- nba_players()
id <- players$idPlayer[players$namePlayer==playerName]
url <- paste0("https://stats.nba.com/stats/leagueseasonmatchups?DateFrom=&DateTo=&LeagueID=00&OffPlayerID=", id, "&Outcome=&PORound=0&PerMode=Totals&Season=", season, "&SeasonType=Regular+Season")
json_data <- fromJSON(paste(readLines(url), collapse=""))
df.reg <- do.call(rbind.data.frame, json_data[["resultSets"]][["rowSet"]])
col.names <- json_data[["resultSets"]][["headers"]][[1]]
colnames(df.reg) <- col.names
df.reg$Season <- "Regular Season"
url <- paste0("https://stats.nba.com/stats/leagueseasonmatchups?DateFrom=&DateTo=&LeagueID=00&OffPlayerID=", id, "&Outcome=&PORound=0&PerMode=Totals&Season=", season, "&SeasonType=Playoffs")
json_data <- fromJSON(paste(readLines(url), collapse=""))
df.po <- do.call(rbind.data.frame, json_data[["resultSets"]][["rowSet"]])
col.names <- json_data[["resultSets"]][["headers"]][[1]]
colnames(df.po) <- col.names
df.po$Season <- "Playoffs"
df <- rbind(df.po, df.reg)
df$POSS <- as.numeric(as.character(df$POSS))
df$PLAYER_PTS <- as.numeric(as.character(df$PLAYER_PTS))
df$TEAM_PTS <- as.numeric(as.character(df$TEAM_PTS))
df$TOV <- as.numeric(as.character(df$TOV))
df$DEF_PLAYER_NAME <- as.character(df$DEF_PLAYER_NAME)
df <- df %>% group_by(OFF_PLAYER_NAME, DEF_PLAYER_NAME) %>%
summarise(total.poss = sum(POSS), total.player_pts = sum(PLAYER_PTS), total.team_pts = sum(TEAM_PTS)) %>%
mutate(pts.per.100 = (total.player_pts / total.poss) *100,
team.pts.per.100 = (total.team_pts / total.poss) * 100) %>%
filter(total.poss >= possessionMinimum) %>%
select(OFF_PLAYER_NAME, DEF_PLAYER_NAME, total.poss, total.player_pts, pts.per.100, total.team_pts, team.pts.per.100)
df <- df %>%
ungroup() %>%
arrange(pts.per.100) %>%
mutate(DEF_PLAYER_NAME = factor(DEF_PLAYER_NAME, unique(DEF_PLAYER_NAME)))
return(df)
}
# Pull the matchup data and chart points-per-100 by defender. Keeping the
# query inputs in variables lets the subtitle report the values actually used.
player_season <- "2018-19"
poss_min <- 100
matchupData <- get_data("Stephen Curry", player_season, poss_min)

#Make a basic chart
matchupData %>%
  ggplot(aes(x = DEF_PLAYER_NAME, y = pts.per.100, size = total.poss, fill = pts.per.100)) +
  geom_point(alpha = .75, shape = 21, color = 'black') +
  coord_flip() +
  theme_owen() +
  labs(size = "Total Possessions",
       title = paste0(matchupData$OFF_PLAYER_NAME, "'s Points Per 100 Possessions When Guarded By ___"),
       # BUG FIX: the subtitle previously printed the literal placeholder
       # "[Minimum]" and a hard-coded "(2017-2019)" date range; both now come
       # from the values passed to get_data().
       subtitle = paste0("Among players that guarded ", word(matchupData$OFF_PLAYER_NAME, -1),
                         " at least ", poss_min, " possessions (", player_season, ")"),
       y = "Points Per 100 Possessions",
       x = "") +
  scale_fill_gradient2(guide=FALSE, low = ("#0571b0"), mid = "white",
                       high = ("#ca0020"), midpoint = mean(matchupData$pts.per.100)) +
  theme(plot.title = element_text(face = 'bold', size = 9, hjust = 0.5)) +
  theme(plot.subtitle = element_text(size = 8, hjust = 0.5)) +
  theme(plot.margin=unit(c(.75,.25,.5,0),"cm")) +
  theme(legend.position=c(0.15, 0.84), legend.background = element_rect(fill="floralwhite"))
|
58c05f1e6f7fd9ca82079a33f75302885facc5cc | cc29b6c355435e6a63cd30bd1b3901d73381dc96 | /man/clean.Rd | 61d902020d5c050c2289cd575c6844c18a02828f | [] | no_license | jimsforks/subtools | ebd718edbf8374b9175c67e9bf3dcec0ea78cae7 | dd4a4254b3e300b78ac2bebd281e177ca04b6060 | refs/heads/master | 2022-03-18T09:14:10.753045 | 2019-11-29T15:00:57 | 2019-11-29T15:00:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,029 | rd | clean.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_subtitles.R
\name{clean_tags}
\alias{clean_tags}
\alias{clean_captions}
\alias{clean_patterns}
\title{Clean subtitles}
\usage{
clean_tags(x, format = "srt", clean.empty = TRUE)
clean_captions(x, clean.empty = TRUE)
clean_patterns(x, pattern, clean.empty = TRUE)
}
\arguments{
\item{x}{a \code{subtitles} or \code{multisubtitles} object.}
\item{format}{the original format of the \code{subtitles} objects.}
\item{clean.empty}{logical. Should remaining empty lines ("") be deleted after cleaning?}
\item{pattern}{a character string containing a regular expression to be matched and cleaned.}
}
\value{
A \code{subtitles} or \code{multisubtitles} object.
}
\description{
Functions to clean subtitles. \code{clean_tags} cleans formatting tags.
\code{clean_captions} cleans closed captions, i.e. all text enclosed in parentheses or square brackets.
\code{clean_patterns} provides a more general and flexible cleaning based on regular expressions.
}
|
e41b388f41a0e5ad9a8181fdc465ff2179d5d67c | 8d8d1d24986dce6b8a56ed8bcb71ada4b4eeb2bd | /man/surfersc.Rd | 35be66271c3fc7502adf250f7b459deaf34f9d50 | [
"MIT"
] | permissive | schochastics/networkdata | edaed94b788dcd925f55ae07f8a2d8b58d45ae8e | 535987d074d35206b6804e9c90dbfa4b50768632 | refs/heads/master | 2023-01-07T07:20:41.475574 | 2023-01-05T18:54:17 | 2023-01-05T18:54:17 | 226,346,857 | 142 | 17 | null | null | null | null | UTF-8 | R | false | true | 1,573 | rd | surfersc.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-freeman.R
\docType{data}
\name{surfersc}
\alias{surfersc}
\title{Windsurfers (Closeness)}
\format{
igraph object
}
\source{
http://moreno.ss.uci.edu/data.html#beach
}
\usage{
surfersc
}
\description{
This was a study of windsurfers on a beach in southern California during the fall of 1986. The windsurfing community was fairly clearly divided into at least two sub-communities. Members of each community seemed, to some degree, to limit their interaction to fellow group members. Contacts between members of the two groups occurred, but these were less frequent. Observations of 43 individuals were made for 31 days. All interpersonal contacts among collections of these individuals were recorded (see \link{surfersb}). Then all 43 individuals were interviewed following the end of observation. Data on each individual's perception of social affiliations were collected.
The perceptual data were generated by asking each subject to perform a sequence of card sorting tasks that assigned an index of the perceived closeness of every individual on the beach to each of the other individuals.
}
\references{
L. C. Freeman, S. C. Freeman and A. G. Michaelson "On Human Social Intelligence." \emph{Journal of Social and Biological Structures}, 11, 1988, 415-425.
L. C. Freeman, S. C. Freeman and A. G. Michaelson "How Humans See Social Groups: A Test of the Sailer-Gaulin Models." \emph{Journal of Quantitative Anthropology}, 1, 1989, 229-238.
}
\seealso{
\link{surfersb}
}
\keyword{datasets}
|
70b2c2ada428f2f450083d85b607c645269e6454 | 205a269537cc4bfbc526da048db8d185e1e678c9 | /man/girafe_css.Rd | 4fba36e38179e01fcd35e232c9bc5edf0b36ec40 | [] | no_license | davidgohel/ggiraph | bca2fc5c61ef7cbeecc0a0d067f7479822117ab0 | b3ce2998b57d8c8b63055499925fd9fe99f4d1a7 | refs/heads/master | 2023-09-03T00:06:10.817100 | 2023-08-30T14:35:40 | 2023-08-30T14:35:40 | 40,061,589 | 735 | 86 | null | 2023-09-03T09:50:54 | 2015-08-01T22:17:06 | R | UTF-8 | R | false | true | 1,093 | rd | girafe_css.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_css.R
\name{girafe_css}
\alias{girafe_css}
\title{CSS creation helper}
\usage{
girafe_css(
css,
text = NULL,
point = NULL,
line = NULL,
area = NULL,
image = NULL
)
}
\arguments{
\item{css}{The generic css style}
\item{text}{Override style for text elements (svg:text)}
\item{point}{Override style for point elements (svg:circle)}
\item{line}{Override style for line elements (svg:line, svg:polyline)}
\item{area}{Override style for area elements (svg:rect, svg:polygon, svg:path)}
\item{image}{Override style for image elements (svg:image)}
}
\value{
css as scalar character
}
\description{
It allows specifying individual styles for various SVG elements.
}
\examples{
library(ggiraph)
girafe_css(
css = "fill:orange;stroke:gray;",
text = "stroke:none; font-size: larger",
line = "fill:none",
area = "stroke-width:3px",
point = "stroke-width:3px",
image = "outline:2px red"
)
}
\seealso{
\code{\link[=girafe_css_bicolor]{girafe_css_bicolor()}}, \code{\link[=girafe]{girafe()}}
}
|
dac1c10cc347f9e51638b412b65555bd89a85066 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/splusTimeDate/examples/holnrwkd.Rd.R | 2a9befdaf4fe6a8f6241bc48a6c948f820782630 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 232 | r | holnrwkd.Rd.R | library(splusTimeDate)
### Name: holiday.nearest.weekday
### Title: Holiday Generating Functions
### Aliases: holiday.nearest.weekday
### Keywords: chron
### ** Examples

# Shift each Christmas date in 1994-2005 to the nearest weekday (i.e. the
# observed-holiday date when 25 December falls on a weekend).
holiday.nearest.weekday(holiday.Christmas(1994:2005))
|
15287699f5f2e06681675ceb2e7950138f3d05ff | 0c071eb08b13a0e1d44b5eb76fa413f5758a5443 | /01_Scripts/01_data_wrangling_functions.R | 2d8d9036039cc7534672f8dff195fda66c016fb8 | [] | no_license | chrisselig/ClusteringStates | 8847f3f80c700bf7dfca04317e0c88b57eb46606 | d73848aa4a16484ce74386408565e654d1d32a68 | refs/heads/master | 2020-06-12T20:38:17.306777 | 2019-06-29T15:12:34 | 2019-06-29T15:12:34 | 194,418,471 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,368 | r | 01_data_wrangling_functions.R | # Script is used to create functions for data wrangling ----
# Create the arrests_tbl ----
arrests_tbl_function <- function(data = arrests_raw_tbl){
arrests_tbl <- arrests_raw_tbl %>%
select(state,Murder:Assault,Rape,UrbanPop) %>%
rename(
murder = Murder,
assault = Assault,
rape = Rape,
urbanpop = UrbanPop
) %>%
gather(key = 'variable', value = 'value',murder:urbanpop)
return(arrests_tbl)
}
# Create the arrests_stats_function ----
arrests_stats_function <- function(data = arrests_tbl){
arrests_stats_tbl <- data %>%
#gather(key = 'variable', value = 'value',murder:urbanpop) %>%
group_by(variable) %>%
summarize(
num = n(),
mean = mean(value),
median = median(value),
variance = var(value),
std = sd(value),
min = min(value),
max = max(value),
iqr = IQR(value),
skewness = skew(value),
kurtosis = kurtosi(value)
) %>%
mutate(
mean_label = str_glue("Mean: {mean}"),
median_label = str_glue("Median: {median}")
)
return(arrests_stats_tbl)
}
# Exploratory Data Analysis ----
# Boxplot for outlier detection ----
# Hierarchical Cluster Analysis ----
|
ecb2029042f896a861533d6aed85ba8e2b550a80 | d3d0c8fc2af05f1ba3ed6d29b9abaaad80b0ca33 | /R/pi.object.R | 9b1def20b9dd1151b7dc510d60ec9a8ccaa399b6 | [] | no_license | natehawk2/NateUtils | 2e5e16cb244f52e8ebf53b9ee751bb33550cc20d | 5e4991c35e5ee5d9479b23763183a2aafc026b8b | refs/heads/main | 2023-01-13T10:06:18.617234 | 2020-11-13T16:23:43 | 2020-11-13T16:23:43 | 309,730,938 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 987 | r | pi.object.R | #' pi.object
#' @param df Dataframe of tweets; the loop below reads the columns
#'   clean.emoji.text, created_at, user_id and lang.
#' @return Character vector (one element per emitted line) holding a
#'   JSON-like "contentItems" payload suitable for an IBM Personality
#'   Insights request.
#' @import utils
#' @export
# NOTE(review): the construction below is unusual -- each cat() prints to
# stdout and returns NULL; wrapping them in as.character(...) merely forces
# them to be evaluated in order, and capture.output() collects everything
# they printed. The trailing cat(sqj); cat(")") append a stray quote and
# parenthesis after the closing "]}" -- confirm against the consumer of this
# payload whether that is intentional. The created_at and user_id values are
# emitted unquoted/partially quoted, so the output is not strict JSON.
pi.object <- function(df){
  # Building blocks for the hand-rolled JSON: double quote, space, comma,
  # single quote.
  content <- "content"
  qj <- "\""
  space <- " "
  comma <- ","
  sqj <- "\'"
  capture.output(as.character(
    cat("{"),
    #cat("\n"),
    cat(qj,"contentItems", qj , ":", " [", sep = ""),
    # One JSON object per row, comma-separated after the first.
    for(i in 1:nrow(df)){
      if(i !=1){
        cat(",")
      }
      cat("{")
      #cat("\n")
      cat(qj,content,qj,": ", qj, as.character(df$clean.emoji.text[i]), qj, comma, sep = "")
      #cat("\n")
      cat(qj, "contenttype", qj, ":", space, qj, "text/plain", qj, comma, sep = "")
      #cat("\n")
      cat(qj, "created", qj, ": ", df$created_at[i], comma, sep = "" )
      #cat("\n")
      cat(qj, "id", qj, ": ",qj, df$user_id[i], qj, comma, sep = "")
      #cat("\n")
      cat(qj, "language", qj, ": ", qj, df$lang[i], qj, sep = "")
      #cat("\n")
      cat("}")
    },
    cat("]}"),
    cat(sqj),
    cat(")")
  ))
}
|
f4617ede81de3cae930d3cf77774f46855077e02 | 5ab2f745bef6ed1c6bb7f8bd08a94296cb611833 | /man/DiffExp.Rd | 91f507599bccbd24493168292c4e4a7a0475fd79 | [
"MIT"
] | permissive | XQBai/CCNMF | f447426c6269921d110c7bd247a170ff4a9e5d88 | ac503d7dac205d4a275a4b809ddc22fb2aef258b | refs/heads/master | 2023-05-26T01:19:26.462686 | 2023-05-24T19:17:44 | 2023-05-24T19:17:44 | 237,326,870 | 10 | 2 | null | null | null | null | UTF-8 | R | false | true | 716 | rd | DiffExp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analysis.R
\name{DiffExp}
\alias{DiffExp}
\title{Differential expression for different clusters in scRNA-seq data
Select the differential genes among clusters by t test}
\usage{
DiffExp(Data, label)
}
\arguments{
\item{Data}{the scRNA-seq matrix}
\item{label}{the cluster labels of the input data}
}
\value{
P, the p-values matrix: the rows correspond to the clusters, the columns correspond to the genes.
DEgenes, the differential genes for each cluster: each row contains the DE genes for one cluster.
}
\description{
Differential expression for different clusters in scRNA-seq data.
Select the differential genes among clusters by t test.
}
|
52b78263621409c44e817f2425119c4375fa0002 | f5e66f86b5e89c21a54cd8a095de5752018a549e | /R/Kendall_Lag.R | bb939a96f85e03c14b2d3110361210d476ca2ee9 | [] | no_license | rjaneUCF/MultiHazard | 5279e4efc869e8745818fe62b37ce1d35534114c | 334b0e1aa6bd5f4dbd68876221fad3c499e691a2 | refs/heads/master | 2023-07-29T15:04:55.523801 | 2023-07-11T20:46:42 | 2023-07-11T20:46:42 | 239,541,679 | 11 | 4 | null | null | null | null | UTF-8 | R | false | false | 5,949 | r | Kendall_Lag.R | #' Kendall's tau correlation coefficient between pairs of variables over a range of lags
#'
#' Kendall's tau correlation coefficient between pairs of up to three variables over a range of lags
#'
#' @param Data A data frame with 3 columns, containing concurrent observations of three time series.
#' @param Lags Integer vector giving the lags over which to calculate coefficient. Default is a vector from \code{-6} to \code{6}.
#' @param Plot Logical; whether to show plot of Kendall's coefficient vs lag. Default is \code{TRUE}.
#' @param GAP Numeric vector of length one. Length of y-axis above and below max and min Kendall's tau values.
#' @return List comprising Kendall's tau coefficients between the variables pairs composing columns of Data with the specified lags applied to the second named variable \code{Values} and the p-values \code{Test} when testing the null hypothesis H_0: tau=0 i.e. there is no correlation between a pair of variables. Plot of the coefficient with a filled point of hypothesis test (p-value<0.05). Lag applied to variable named second in the legend.
#' @seealso \code{\link{Dataframe_Combine}}
#' @export
#' @examples
#' Kendall_Lag(Data=S20.Detrend.df,GAP=0.1)
Kendall_Lag<-function(Data,Lags=seq(-6,6,1),PLOT=TRUE,GAP=0.1){
Lag<-function(x,k){
if(k>0){
return(c(rep(NA,k),x)[1:length(x)])
} else{
return(c(x[(-k+1):length(x)],rep(NA,-k)))
}
}
correlation<-function(Data_Cor,lag){
for(i in 2:(ncol(Data_Cor))){
Data_Cor[,i]<-Lag(Data_Cor[,i],-lag[i-1])
}
Data_Cor<-na.omit(Data_Cor)
return(cor(Data_Cor[2:ncol(Data_Cor)],method = "kendall")[which(lower.tri(cor(Data_Cor[2:ncol(Data_Cor)]))==T)])
}
n<-ncol(Data)-1
if(n==3){
Var1_Var2<-numeric(length(Lags))
Var2_Var3<-numeric(length(Lags))
Var1_Var3<-numeric(length(Lags))
for(i in 1:length(Lags)){
Var1_Var2[i]<-correlation(Data_Cor=Data,c(Lags[i],0,0))[1]
Var2_Var3[i]<-correlation(Data_Cor=Data,c(0,0,Lags[i]))[2]
Var1_Var3[i]<-correlation(Data_Cor=Data,c(0,0,Lags[i]))[3]
}
correlation.test<-function(Data_Cor,lag){
for(i in 2:(ncol(Data_Cor))){
Data_Cor[,i]<-Lag(Data_Cor[,i],-lag[i-1])
}
Data_Cor<-na.omit(Data_Cor)
return(cor.test(Data_Cor[,2],Data_Cor[,3],method = "kendall")$p.value)
}
Var1_Var2_Test<-numeric(length(Lags))
Var2_Var3_Test<-numeric(length(Lags))
Var1_Var3_Test<-numeric(length(Lags))
for(i in 1:length(Lags)){
Var1_Var2_Test[i]<-correlation.test(Data_Cor=Data[,-4],c(Lags[i],0))
Var2_Var3_Test[i]<-correlation.test(Data_Cor=Data[,-3],c(0,Lags[i]))
Var1_Var3_Test[i]<-correlation.test(Data_Cor=Data[,-2],c(0,Lags[i]))
}
if(PLOT==TRUE){
yx<-max(c(Var1_Var2,Var2_Var3,Var1_Var3))-min(c(Var1_Var2,Var2_Var3,Var1_Var3))
plot(Lags,Var1_Var2,ylim=c(min(c(Var1_Var2,Var2_Var3,Var1_Var3))-GAP*yx,max(c(Var1_Var2,Var2_Var3,Var1_Var3)))+GAP*yx,type='l',xlab="Lag (days)",ylab=expression(paste("Kendall's "*tau*' coefficient')),cex.lab=1.65,cex.axis=1.65,lwd=2.5)
abline(h=0,lty=2)
lines(Lags,Var2_Var3,col=2)
lines(Lags,Var1_Var3,col=3)
points(Lags,Var1_Var2,pch=ifelse(Var1_Var2_Test<0.05,16,21),bg=ifelse(Var1_Var2_Test<0.05,1,"White"),cex=1.5)
points(Lags,Var2_Var3,pch=ifelse(Var2_Var3_Test<0.05,16,21),col=2,bg=ifelse(Var2_Var3_Test<0.05,2,"White"),cex=1.5)
points(Lags,Var1_Var3,pch=ifelse(Var1_Var3_Test<0.05,16,21),col=3,bg=ifelse(Var1_Var3_Test<0.05,3,"White"),cex=1.5)
legend("topright",c(paste(colnames(Data)[2],"_",colnames(Data)[3],sep=""),
paste(colnames(Data)[3],"_",colnames(Data)[4],sep=""),
paste(colnames(Data)[2],"_",colnames(Data)[4],sep="")),
bty="n",lwd=2.5,col=c(1,2,3))
}
Value<-list()
Value[[paste(names(Data)[2],'_',names(Data)[3],sep="")]]= Var1_Var2
Value[[paste(names(Data)[3],'_',names(Data)[4],sep="")]]= Var2_Var3
Value[[paste(names(Data)[2],'_',names(Data)[4],sep="")]]= Var1_Var3
Test<-list()
Test[[paste(names(Data)[2],'_',names(Data)[3],'_Test',sep="")]]= Var1_Var2_Test
Test[[paste(names(Data)[3],'_',names(Data)[4],'_Test',sep="")]]= Var2_Var3_Test
Test[[paste(names(Data)[2],'_',names(Data)[4],'_Test',sep="")]]= Var1_Var3_Test
}
if(n==2){
Var1_Var2 <- numeric(length(Lags))
for (i in 1:length(Lags)) {
Var1_Var2[i] <- correlation(Data_Cor = Data, c(Lags[i],0))[1]
}
correlation.test <- function(Data_Cor, lag) {
for (i in 2:(ncol(Data_Cor))) {
Data_Cor[, i] <- Lag(Data_Cor[, i], -lag[i - 1])
}
Data_Cor <- na.omit(Data_Cor)
return(cor.test(Data_Cor[, 2], Data_Cor[, 3], method = "kendall")$p.value)
}
Var1_Var2_Test <- numeric(length(Lags))
for (i in 1:length(Lags)) {
Var1_Var2_Test[i] <- correlation.test(Data_Cor = Data[,-4], c(Lags[i], 0))
}
if (PLOT == TRUE) {
yx <- max(c(Var1_Var2)) - min(c(Var1_Var2))
plot(Lags, Var1_Var2, ylim = c(min(c(Var1_Var2)) - GAP * yx, max(c(Var1_Var2)) + GAP * yx), type = "l", xlab = "Lag (days)",
ylab = expression(paste("Kendall's " * tau * " coefficient")),
cex.lab = 1.65, cex.axis = 1.65, lwd = 2.5)
abline(h = 0, lty = 2)
points(Lags, Var1_Var2, pch = ifelse(Var1_Var2_Test < 0.05, 16, 21), bg = ifelse(Var1_Var2_Test < 0.05,1, "White"), cex = 1.5)
legend("topright", c(paste(colnames(Data)[2], "_", colnames(Data)[3],sep = "")), bty = "n", lwd = 2.5, col = c(1, 2, 3))
}
Value<-list()
Value[[paste(names(Data)[2],'_',names(Data)[3],sep="")]]= Var1_Var2
Test<-list()
Test[[paste(names(Data)[2],'_',names(Data)[3],'_Test',sep="")]]= Var1_Var2_Test
}
res<-list("Value" = Value,"Test" = Test)
return(res)
}
|
cb09fc2c1a119b84814d910137198119fd1c4924 | c4773c9870ce6f02b90cf6c814fa81ad5a7a200c | /R/physical.R | b5324b124b3b8557371aecaae04798d73e709e13 | [] | no_license | RL31/30DayChartChallenge | f3664cd73d45b0f177bfa4cda57fb903bdfa4a10 | d6d7aa017a1e303ef69e6188e5c190f317c0102b | refs/heads/main | 2023-04-06T03:26:46.322866 | 2021-04-19T13:26:03 | 2021-04-19T13:26:03 | 354,943,510 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 2,122 | r | physical.R | install.packages(c("httr","jsonlite","directlabels"))
library(jsonlite)
library(httr)
library(directlabels)
library(tidyverse)
library(extrafont)
library(viridis)
loadfonts(device="pdf")
parametres<-tribble(
~nom,~lat1,~long1,~lat2,~long2,
"Séponet",44.846361 , 4.173152,44.846376 , 4.179418,
"Sépoux",44.838313, 4.176908,44.837156,4.189482,
"Taupernas",44.864356 , 4.16399, 44.854804 , 4.167595,
"Montfol",44.847806,4.154291,44.84306,4.164462,
"Gerbier",44.844612,4.216325,44.842847,4.224114
)
# Query the IGN "elevationLine" API for SAMP sample points along the segment
# (LAT1, LONG1) -> (LAT2, LONG2) and return the flattened response as a data
# frame with a point index (`id`) and the peak name (`nom`) attached.
# NOTE(review): the presence of the `elevations.z` column used downstream
# depends on the API's JSON response shape -- confirm against the ALTI docs.
# The function returns the value of the final assignment (invisibly).
profil_alti<-function(NOM,LAT1,LAT2,LONG1,LONG2,SAMP=30){
  sommet<-GET(
    paste0("https://wxs.ign.fr/choisirgeoportail/alti/rest/elevationLine.json?sampling=",
           SAMP,"&lon=",LONG1,"|",LONG2,"&lat=",LAT1,"|",LAT2,"&indent=true")) %>%
    content("text") %>%
    fromJSON(flatten = TRUE) %>%
    as.data.frame() %>%
    mutate(id=row_number(),
           nom=NOM)
}
# Fetch one 30-point profile per transect row and stack them into a single
# data frame (argument order matches profil_alti's signature).
profils <- pmap_df(list(parametres$nom,
                        parametres$lat1,
                        parametres$lat2,
                        parametres$long1,
                        parametres$long2,
                        30),
                   profil_alti)

# One line per peak, coloured with the viridis palette and labelled at the
# right-hand end of each line via directlabels.
ggplot(data=profils,
       aes(x=id,y=elevations.z,group=nom,color=nom))+
  geom_line(size=1.5,alpha=.7)+
  geom_dl(aes(label = nom), method = list(dl.trans(x = x + 0.2), "last.points", cex = 0.8)) +
  scale_color_viridis(discrete = TRUE,option = "viridis")+
  labs(title="Des sommets presque normaux",
       subtitle="Profils altimétriques des sucs du plateau ardéchois",
       x="",
       y="Altitude (m)",
       caption="Source: IGN, API ALTI\nTraitements et erreurs: @Re_Mi_La")+
  # clip="off" lets the direct labels extend past the plot panel.
  coord_cartesian(clip="off")+
  theme_minimal()+
  theme(
    text = element_text(family = "Calibri"),
    plot.caption = element_text(size=8,face="italic" ),
    plot.title = element_text(hjust=0,face="bold",size=17, color=viridis_pal()(1) ),
    plot.subtitle = element_text(hjust=0, color=viridis_pal()(1)),
    plot.margin = margin(1,1,1,1,unit = "cm"),
    panel.grid = element_blank(),
    axis.text.x = element_blank()
  )+
  guides(color="none")
|
756740a4fbc0bbf9a052639bc93de6fd98e9a569 | 9744a36f5ea7115ec422468bd065d2a09ec8099d | /tests/testthat.R | ae6eb7e9c15e69d28b8061050aefede0285331d9 | [] | no_license | JWiley/multilevelTools | 75554ad502e09bf686180c875f6b82c44cb3fecd | 40e5d3b9859ba1053189ea63628233cdcb41dd88 | refs/heads/main | 2023-03-01T06:15:16.620585 | 2023-02-14T23:10:31 | 2023-02-14T23:10:31 | 227,281,974 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 99 | r | testthat.R | library(testthat)
library(multilevelTools)
library(JWileymisc)
test_check("multilevelTools")
|
aef9831ec80c73fb5934a1b1a221691337f5af9f | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.customer.engagement/man/connect_create_contact_flow_module.Rd | 5a9c351b353d434fc23825488325fee33e08d931 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,534 | rd | connect_create_contact_flow_module.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/connect_operations.R
\name{connect_create_contact_flow_module}
\alias{connect_create_contact_flow_module}
\title{Creates a flow module for the specified Amazon Connect instance}
\usage{
connect_create_contact_flow_module(
InstanceId,
Name,
Description = NULL,
Content,
Tags = NULL,
ClientToken = NULL
)
}
\arguments{
\item{InstanceId}{[required] The identifier of the Amazon Connect instance. You can \href{https://docs.aws.amazon.com/connect/latest/adminguide/find-instance-arn.html}{find the instance ID}
in the Amazon Resource Name (ARN) of the instance.}
\item{Name}{[required] The name of the flow module.}
\item{Description}{The description of the flow module.}
\item{Content}{[required] The content of the flow module.}
\item{Tags}{The tags used to organize, track, or control access for this resource.
For example, \{ "tags": \{"key1":"value1", "key2":"value2"\} \}.}
\item{ClientToken}{A unique, case-sensitive identifier that you provide to ensure the
idempotency of the request. If not provided, the Amazon Web Services SDK
populates this field. For more information about idempotency, see
\href{https://aws.amazon.com/builders-library/making-retries-safe-with-idempotent-APIs/}{Making retries safe with idempotent APIs}.}
}
\description{
Creates a flow module for the specified Amazon Connect instance.
See \url{https://www.paws-r-sdk.com/docs/connect_create_contact_flow_module/} for full documentation.
}
\keyword{internal}
|
c7ac4ca3a959fc9aebdc901b67524a4bcbdb1329 | b570e60c0609c5d9eeb04a99e9d4a8c58b039717 | /paper_supp_files/my_affy_tool.R | 1f0aa54905725de3ec5503a720075cf12a5d95fa | [] | no_license | nturaga/bioc-galaxy-integration | 836087c81934e3e833f6be87471aed3b99fb0144 | 4627dc20cce1cf45909b084804db126f6dba2af9 | refs/heads/master | 2021-01-24T22:02:19.347806 | 2016-10-28T19:45:52 | 2016-10-28T19:45:52 | 55,623,322 | 9 | 9 | null | 2016-10-27T13:25:19 | 2016-04-06T16:42:40 | R | UTF-8 | R | false | false | 1,268 | r | my_affy_tool.R | ## How to run tool
# Rscript my_affy_tool.R --input input.CEL --output output.txt
#
# Galaxy tool wrapper: reads an Affymetrix CEL file, normalises it with RMA
# and writes the resulting expression matrix to a tab-separated text file.

# Set up R error handling to go to stderr
options(show.error.messages = F, error = function(){cat(geterrmessage(),file=stderr());q("no", 1, F)})

# Avoid crashing Galaxy with an UTF8 error on German LC settings
loc <- Sys.setlocale("LC_MESSAGES", "en_US.UTF-8")

# Import required libraries
library("getopt")
library("affy")

# BUG FIX: this option was previously misspelled "stringAsfactors", which
# silently set a nonexistent option and left stringsAsFactors at its default.
options(stringsAsFactors = FALSE, useFancyQuotes = FALSE)

# Get options using the spec as defined by the enclosed list.
# getopt() reads commandArgs(TRUE) itself, so the previously unused
# `args <- commandArgs(trailingOnly = TRUE)` assignment has been removed.
# Spec columns: long name, short flag, argument flag (2 = required value),
# value type.
option_specification = matrix(c(
'input', 'i', 2, 'character',
'output', 'o', 2, 'character'
), byrow=TRUE, ncol=4);

# Parse options (note: this rebinds the name `options` as a plain list,
# shadowing base::options for the remainder of the script)
options = getopt(option_specification);

# Print options to stderr
# Useful for debugging
#cat("\n input file: ",options$input)
#cat("\n output file: ",options$output)

# Read in data
inputfile <- as.character(options$input)
data <- ReadAffy(filenames = inputfile)

# Create ExpressionSet object using RMA
eset <- rma(data)

# Output expression values
write.exprs(eset, file = options$output)

cat("\n Successfully generating expression values from CEL file. \n")
|
c966406cceb06e97a10c8893be32cc6c14206b63 | 348ee19bc3b203fffae9f51b4e9ca8f3337bfdd5 | /R/is-started.R | 58b91c75a4d4abe2ebe888b328426d267b5edcfe | [
"MIT"
] | permissive | poissonconsulting/hmstimer | cc160318ebb0a54dbc227767bc7a36d6677f3488 | 2c88234eec969b82b1d8e826954bad20a6a5029e | refs/heads/main | 2022-09-27T07:20:10.863266 | 2022-09-21T14:28:16 | 2022-09-21T14:28:16 | 186,044,769 | 3 | 0 | NOASSERTION | 2022-09-16T23:02:54 | 2019-05-10T19:38:48 | R | UTF-8 | R | false | false | 427 | r | is-started.R | #' Is hms Timer Started
#'
#' Checks whether a [hms_timer()] is currently running, which is
#' signalled by the timer carrying an attribute named start.
#' @inheritParams params
#' @return A flag (TRUE or FALSE).
#' @family start_stop
#' @export
#' @examples
#' tmr <- tmr_timer(start = TRUE)
#' print(tmr_is_started(tmr))
#' tmr <- tmr_stop(tmr)
#' print(tmr_is_started(tmr))
tmr_is_started <- function(x) {
  chk_x(x)
  start_time <- attr(x, "start")
  !is.null(start_time)
}
|
0de289ef9017e87b5a2808db880226678188bf38 | 9dba985428dc786b3e8d3db5fbab770add8dbe8b | /dataStructures.R | 1d0229455fea0380ebe0888f2f0a690031e0ffa1 | [] | no_license | arunatma/LearnR | 8475cbd89b22afe0d85e48508debd47f5e4c1b50 | f4d1ee2cc1f52c1721d62b8e6a5b9e542ea3a262 | refs/heads/master | 2021-01-25T03:54:41.611352 | 2015-12-09T18:27:06 | 2015-12-09T18:27:06 | 13,169,038 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,625 | r | dataStructures.R | ###############################################################################
# data_structures.r
# - Explains basic data structures in R
# - Scalars, Vectors, Matrix, Data Frame, List
#
# Author : Arunram A
# Creation : 26th Sep 2013
#
# Revision History:
#
# NOTE(review): this is a tutorial script; several statements below
# intentionally raise errors or warnings (marked "# Error" or noted in
# the surrounding comments) to demonstrate R's behaviour.
###############################################################################
# Create a scalar object var_sample and assigns value 1 to it
var_sample = 1
# A scalar can contain numbers or characters
var1 = "Rlanguage"
# A scalar can contain a complex number too
var2 = 5+3i
# A vector contains a set of values
vec_1 = c(1,2,3,4,5) # Produces a vector using the combine function "c"
print(vec_1) # Prints to console, the value of object vec_1
# A vector can also be created using the vector command
vec_2 = vector(mode="numeric", length=5) # Create and Initialize
print(vec_2) # [1] 0 0 0 0 0 (a vector with 5 zeros)
vec_2 # Performs the same task as print(...)
vec_3 = vector(mode="character", length=5) # Create and Initialize
print(vec_3) # [1] "" "" "" "" "" (5 empty strings)
vec_4 = vector(mode="logical", length=5) # Create and Initialize
print(vec_4) # [1] FALSE FALSE FALSE FALSE FALSE
vec_4 = vector(mode="complex", length=3) # Create and Initialize
print(vec_4) # [1] 0+0i 0+0i 0+0i
# A vector will be able to store values of identical datatypes only;
# mixed inputs are coerced to the most general type (character > numeric > logical)
# The following command assigns strings "a", "1" and "FALSE" to vec_a
vec_a = c("a", 1, FALSE)
vec_a
# The following command assigns numbers 0 and 100 to vec_a (FALSE coerces to 0)
vec_a = c(FALSE, 100)
vec_a
# The following command assigns strings "TRUE", "a" and "100" to vec_a
vec_a = c(TRUE, "a", 100)
vec_a
#------------------------------------------------------------------------------
# Creation of vectors using other commands
# seq(start, end, step) Command
seq(1, 100, 3) # Generates a vector with values 1, 4, 7
seq(from=1, to=100, by=3) # same as the above
seq(1, 100, length=25) # Generates 25 values 1, 5.125, 9.250, ..., 100
seq(25) # Generates the first 25 integers
y = seq(1, 100, 3)
x = seq(along = y) # Generates a sequence of numbers equal to length(y)
x = seq(length(y)) # Same as above
# : Colon operator
1:25 # This can be assigned to an object vec_5 = 1:25
5:10 # This can also be written as seq(6) + 4
7:11
# Both seq function and : operator can be used only with double(real) datatypes
# rep(. , n) repeats . n times - used with char, real, logical and complex
rep(1, 5) # Generates 1 1 1 1 1
rep("a", 3) # Generates "a" "a" "a"
rep(3+4i, 2) # Generates 3+4i 3+4i
#------------------------------------------------------------------------------
# Matrix - Stores values in rows and columns together
# Very similar to vector - all elements in a matrix will have the same datatype
matrix(1:16, nrow=8, ncol=2) # will have 1:8 in col1 and 9:16 in col2
# Generates 8 rows with values (1,2), (3,4), (5,6) ...
matrix(1:16, nrow=8, ncol=2, byrow=TRUE)
matrix(1:16, 8, 2, byrow=TRUE) # Same as above
matrix(1:16, 8, 2, TRUE) # Same as above (positional byrow argument)
matrix(seq(16), 8, 2, TRUE) # Same as above
# Matrix can also be created using cbind and rbind functions
# Column Bind
cbind(1:8, 9:16) # Same as matrix(1:16, nrow=8, ncol=2)
# Row Bind
rbind(1:8, 9:16) # Same as matrix(1:16, nrow=2, ncol=8, byrow=TRUE)
vec_1 = c("l", "e", "a", "r", "n")
vec_2 = c("R", "c", "o", "d", "e")
char_mat = cbind(vec_1, vec_2) # Creates matrix with 2 columns, 5 char. each
char_mat # Display the matrix
vec_3 = c("q", "u", "i", "c", "k")
char_mat = cbind(char_mat, vec_3) # Combines a matrix with a column
char_mat # Display the matrix
# Can this character matrix be combined with integer values?
char_mat = cbind(char_mat, 1:5) # Combines the matrix with vector 1:5
# 1 to 5 are converted to strings "1" "2" "3" "4" "5" and added as a column
char_mat # Display the matrix
# Special Cases
# Repeats the vector to fill up size of matrix. Gets no warning.
matrix(1:5, 5, 2)
# Repeats fill up size of matrix - to possible extent. Gets warning.
matrix(1:5, 7, 2)
# Not enough space to fill even once - gets the same warning as above
matrix(1:5, 2, 2)
#------------------------------------------------------------------------------
# Array - Similar to a matrix, but with more than 2 dimensions
# Creates a 4x5x5 dimension array and fills up numbers from 1 to 100
array(1:100, dim=c(4,5,5))
# The following command stores all elements as strings
array(c(1:98,"a","b"), dim=c(4,5,5))
# Each dimension can be given a name using "dimnames" (a list)
dim_list = list(c("D11","D12"),c("D21","D22"), c("D31","D32"))
array(1:8, dim=c(2,2,2), dimnames=dim_list)
# If the product of dim is less than length, the rem. elements are ignored.
array(1:100, dim=c(4,5,4)) # 81:100 is ignored
# If the product of dimensions is more than length, the elements get reused.
array(1:100, dim=c(4,5,6)) # 1:20 is reused again
# An existing vector can also be converted in an array
a = 1:12
dim(a) = c(2,2,3) # Now 'a' is converted to an array 2x2x3
# Here if the product of dimensions do not equal the length, throws error
dim(a) = c(2,2,2) # Error
dim(a) = c(2,2,4) # Error
# attributes - gets various attributes associated with an R object
attributes(a)
a = 1:12
dim(a) = c(2,2,3)
class(a) # Returns the class of the R object 'a' - Here "array"
dim(a) = c(2,6)
class(a) # Now the class is changed to "matrix"
dim(a) = c(2,2,3) # Let us keep this as array itself.
#------------------------------------------------------------------------------
# Data Frame - Similar to matrix - but can contain multiple datatypes
L3 = LETTERS[1:3]
# sample(L3, 10, replace=TRUE) generates a sample with size=10 from L3
char_sample = sample(L3, 10, replace=TRUE)
# Creates data frame with three columns x, y and fac
d = data.frame(x=seq(2,20,2), y=1:10, fac=char_sample)
# Data frame creation without column names. R assigns appropriate names
d = data.frame(seq(2,20,2), 1:10, char_sample)
# Data frame creation with row names
rowchars = paste("Row",1:10,sep="")
d = data.frame(x=seq(2,20,2), y=1:10, fac=char_sample, row.names=rowchars)
# The same functionality can also be obtained using
row.names(d) = rowchars # row names to existing data frame.
# All columns should be of same length in a data frame
# The following creation statement generates an error because sizes mismatch
d = data.frame(x=1:10, y=1:11, fac=char_sample, row.names=rowchars)
# attributes - gets various attributes associated with an R object
attributes(d)
names(d) # Contains the column names
row.names(d) # Contains the row names
names(d) = c("Col_1", "Col_2", "Col_3") # Column names are changed for df. d
# Accessing Elements
d[,1] # Gives all elements of 1st Column as a vector
d[2,] # Gives all elements of 2nd row as a vector
d$Col_1 # The column can also be accessed in this way
d[2,3] # Access the 2nd Row, 3rd element
#------------------------------------------------------------------------------
# List - The most comprehensive data structure in R.
# Contains any data type of any length.
l = list(c(1:10), c(3,5,7,11), c(1, 1, 2, 3, 5, 8), c("s", "a", "l", "e", "m"))
# Access using double square brackets
l[[1]] # To access the 1st element of the list - here c(1:10)
# d and a are the data frame and array as defined above.
advlist = list(l, d, a) # advlist contains a list, data frame and an array
advlist[[1]][[4]] # Gives the vector "s" "a" "l" "e" "m"
advlist[[2]] # is a data frame
advlist[[2]]$Col_3 # Access the Col_3 of the dataframe
advlist = list(list=l, df=d, array=a) # Gives names to each element
# Note: list, df, array are base function/type names, but here act as mere names
# Now the elements can be accessed using the names.
advlist$list # Same as advlist[[1]]
advlist$df # Same as advlist[[2]]
names(advlist) # Gives "list", "df", "array"
# The names can also be assigned in this fashion
advlist = list(l, d, a) # advlist contains a list, data frame and an array
names(advlist) = c("list", "df", "array")
|
3fcf7cea467f4b5f47f0f2d98cdc5fdcba8b4299 | 120fe4aea288aa38b8d8596b6deb4bc35a3f5850 | /R/C3RuBPCaboxy.R | 563023b3312f512e4e68b0e9a85702de903cf306 | [] | no_license | zhouhaoran06/C3C4OptPhotosynthesis- | 9eb01609e16d8fdf3e35d32840e95be5baae385d | 74a788a75cd54a45f973858bf3d0cfc9d5ce7d1d | refs/heads/master | 2020-03-18T05:13:53.903546 | 2018-05-21T22:25:00 | 2018-05-21T22:25:00 | 134,331,577 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 3,265 | r | C3RuBPCaboxy.R | #' Calculate C3 RuBP carboxylation photosynthesis rate
#'
#' Calculate C3 RuBP carboxylation photosynthesis rate using the Farquhar photosynthesis model for the C3 pathway (Farquhar GD, Von Caemmerer S, Berry JA, 1980, Planta) with optimality conditions for stomatal resistance and energy allocation to the leaf. The use of the function is as follows: C3RuBPCarboxy(Vcmax25,J25,Ca,Tleaf,Phis,VPD,StartV). The package "rootSolve" should be installed before using the function.
#' @param Vcmax25 Maximal velocity of Rubisco carboxylation at 25°C (μmol m-2 s-1).
#' @param J25 Electron transport at a specific light intensity at 25°C. The value should be adjusted at different light intensities (μmol m-2 s-1). NOTE(review): not used in this (RuBP-carboxylation-limited) function body; presumably kept for interface symmetry with the electron-transport version — confirm.
#' @param Ca CO2 concentration in the atmosphere (ppm).
#' @param Tleaf Leaf temperature (oC).
#' @param Phis Soil water potential (Mpa).
#' @param VPD Vapour pressure deficit (kPa).
#' @param StartV A vector that gives the start points for the estimation (c(A,rs,fl)). A is photosynthesis rate (μmol m-2 s-1), rs is stomatal resistance (μmol-1 m2 s1, the reciprocal of stomatal conductance) and fl is the energy allocation ratio of leaf to the total plant.
#'
#' @return The numerical solution produced by multiroot in the package "rootSolve".
#' @return Model$root: A vector with the solution c(A,rs,fl).
#'
#' @export
C3RuBPCarboxy <- function(Vcmax25,J25,Ca,Tleaf,Phis,VPD,StartV){
  #Temperature adjustment for Vcmax,K(Kc and Ko) and gammastar from 25°C to Tleaf
  # (Arrhenius-style exp() terms; 0.008314 is the gas constant in kJ mol-1 K-1)
  Vcmax0<-Vcmax25*exp(26.37-65.33/0.008314/(273+Tleaf))
  k<-302*exp(32.06-79.43/0.008314/(273+Tleaf))*(1+210/(256*exp(14.68-36.38/0.008314/(273+Tleaf))))
  gammastar<-36*exp(15.27-37.83/0.008314/(273+Tleaf))
  # rm: mesophyll resistance term, presumably — TODO confirm against model derivation
  rm<-(1+exp((1.4*(273+Tleaf)-437.4)/0.008314/(273+Tleaf)))/exp(18.81-49.6/0.008314/(273+Tleaf))
  #Define other constants
  Delta<-VPD*0.01*1.6 #change the unit
  dv<-2
  bv<-3.8
  kc<-0.001044
  rho<-18.3
  #Define the model: x = c(A, rs, fl). F1-F3 are the three simultaneous
  #optimality conditions solved for their common root.
  Model<-function(x){
    # Scaled leaf water potential derived from transpiration and soil potential.
    PhistoV <- -(-(x[3]*Delta/(1-x[3])/kc/x[2]/rho)+Phis)/dv
    F1 <- x[1]^2*(-rm-x[2])-Ca*exp(-PhistoV^bv)*Vcmax0+x[1]*(Ca+exp(-PhistoV^bv)*rm*Vcmax0+exp(-PhistoV^bv)*x[2]*Vcmax0)+x[1]*k+exp(-PhistoV^bv)*Vcmax0*gammastar
    F2 <- x[1]^2-x[1]*exp(-PhistoV^bv)*Vcmax0+bv*Ca*exp(-PhistoV^bv)*x[3]*Vcmax0*Delta*(PhistoV^(-1+bv))/dv/(1-x[3])/kc/(x[2]^2)/rho-x[1]*bv*exp(-PhistoV^bv)*x[3]*rm*Vcmax0*Delta*(PhistoV^(-1+bv))/dv/(1-x[3])/kc/(x[2]^2)/rho-x[1]*bv*exp(-PhistoV^bv)*x[3]*Vcmax0*Delta*(PhistoV^(-1+bv))/dv/(1-x[3])/kc/x[2]/rho-bv*exp(-PhistoV^bv)*x[3]*Vcmax0*gammastar*Delta*(PhistoV^(-1+bv))/dv/(1-x[3])/kc/(x[2]^2)/rho
    F3 <- x[1]*(Ca+k-2*x[1]*rm-2*x[1]*x[2]+exp(-PhistoV^bv)*rm*Vcmax0+exp(-PhistoV^bv)*x[2]*Vcmax0)+x[3]*(1/dv*bv*Ca*exp(-PhistoV^bv)*Vcmax0*(-Delta/(1-x[3])/kc/x[2]/rho-x[3]*Delta/(1-x[3])^2/kc/x[2]/rho)*(PhistoV^(-1+bv))-1/dv*bv*x[1]*exp(-PhistoV^bv)*rm*Vcmax0*(-Delta/(1-x[3])/kc/x[2]/rho-x[3]*Delta/(1-x[3])^2/kc/x[2]/rho)*(PhistoV^(-1+bv))-1/dv*bv*x[1]*exp(-PhistoV^bv)*Vcmax0*x[2]*(-Delta/(1-x[3])/kc/x[2]/rho-x[3]*Delta/(1-x[3])^2/kc/x[2]/rho)*(PhistoV^(-1+bv))-1/dv*bv*exp(-PhistoV^bv)*Vcmax0*gammastar*(-Delta/(1-x[3])/kc/x[2]/rho-x[3]*Delta/(1-x[3])^2/kc/x[2]/rho)*(PhistoV^(-1+bv)))
    c(F1=F1,F2=F2,F3=F3)
  }
  # Solve the system numerically from the user-supplied start values.
  ss<-multiroot(f=Model,start=StartV)
  return(ss)
}
|
1f9e2d64f094499fe5ff9e729eb3c72b48ef2aa4 | b67efc385e4d9757ef654e0b2d2f7393bb4e1f0d | /inst/examples/example-pattern.R | f5fda2d3a33b8fcdc6c4210cc949ae379707c48b | [] | no_license | cran/surveydata | e095b07fa8457d1ef2e9087e74e404eb5cc5a2a6 | f8e430ad3ae63e94798ed6c7a3b0b69d5a95c69a | refs/heads/master | 2023-03-16T04:05:17.109698 | 2023-03-12T17:00:02 | 2023-03-12T17:00:02 | 17,700,241 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 441 | r | example-pattern.R | # Extract the pattern from membersurvey
oldptn <- pattern(membersurvey)
oldptn
# The pattern is used to extract columns
names(membersurvey)
grep("Q20", names(membersurvey), value=TRUE)
head(membersurvey["Q20"])
head(membersurvey["Q20_other"])
# Define a new pattern
pattern(membersurvey) <- list(sep="_", exclude="")
head(membersurvey["Q20"])
# Reset original pattern
pattern(membersurvey) <- oldptn
rm(oldptn)
|
c1c40f0697669ae5f3be2c353cb04a735ffa7737 | a8fd5e3c0fff11859ba09fedabc8b07d25f96dc2 | /man/col2transp.Rd | 7a871c6fb27131e422b495d4339ecaac9911b1f8 | [] | no_license | lauratboyer/ltb.utils | c4444841936a173eb98c42b743b7d2e8a2c7f6e4 | 50fbeab0ffd77ca565c33dccd3b7c671151155b9 | refs/heads/master | 2021-04-30T16:37:51.321598 | 2017-05-13T02:16:01 | 2017-05-13T02:16:01 | 80,087,908 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 459 | rd | col2transp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/col2transp.r
\name{col2transp}
\alias{col2transp}
\title{col2transp}
\usage{
col2transp(col, tlev = 0.5)
}
\arguments{
\item{col}{colour vector to add transparency to}
\item{tlev}{transparency level desired, higher level means more transparent}
}
\description{
This function returns a transparent version of the input colour vector
}
\examples{
col2transp("red", tlev = 0.7)
}
\keyword{graphics}
|
0669c981bc7895090b63763546723f1bff52b6af | 9bdef83f28b070321ba27709d2c7ec028474b5c3 | /R/nlp.R | acefdfeb9525b7f05af15cc748ef186c655741f6 | [] | no_license | antagomir/scripts | 8e39ce00521792aca1a8169bfda0fc744d78c285 | c0833f15c9ae35b1fd8b215e050d51475862846f | refs/heads/master | 2023-08-10T13:33:30.093782 | 2023-05-29T08:19:56 | 2023-05-29T08:19:56 | 7,307,443 | 10 | 15 | null | 2023-07-19T12:36:45 | 2012-12-24T13:17:03 | HTML | UTF-8 | R | false | false | 1,851 | r | nlp.R | # Natural Language Processing in R
# http://lincolnmullen.com/projects/dh-r/nlp.html
# Walkthrough: sentence/word/entity annotation with the NLP/openNLP stack.
library(rJava)
library("NLP")
library("openNLP")
library("RWeka")
library("qdap")
library(magrittr)
# The English openNLP models live on a non-CRAN repository; install on demand.
# require() is used deliberately here for its TRUE/FALSE return value.
if(!require("openNLPmodels.en")) {
install.packages("openNLPmodels.en",
repos = "http://datacube.wu.ac.at/",
type = "source")
}
# Example test: read the sample biography and collapse it into one string.
bio <- readLines("dh-methods-in-r/data/nlp/anb-jarena-lee.txt")
bio <- paste(bio, collapse = " ")
print(bio)
# Format required for NLP
bio <- as.String(bio)
# Annotate words and sentences (the sentence annotator must run first).
word_ann <- Maxent_Word_Token_Annotator()
sent_ann <- Maxent_Sent_Token_Annotator()
bio_annotations <- annotate(bio, list(sent_ann, word_ann))
bio_doc <- AnnotatedPlainTextDocument(bio, bio_annotations)
# Get sentences and words
sents(bio_doc) %>% head(2)
words(bio_doc) %>% head(10)
#OpenNLP can find dates, locations, money, organizations, percentages,
#people, and times. (Acceptable values are "date", "location", "money",
#"organization", "percentage", "person", "misc")
person_ann <- Maxent_Entity_Annotator(kind = "person")
location_ann <- Maxent_Entity_Annotator(kind = "location")
organization_ann <- Maxent_Entity_Annotator(kind = "organization")
pipeline <- list(sent_ann,
word_ann,
person_ann,
location_ann,
organization_ann)
bio_annotations <- annotate(bio, pipeline)
bio_doc <- AnnotatedPlainTextDocument(bio, bio_annotations)
# Extract entities from an AnnotatedPlainTextDocument.
# If `kind` is supplied, return only spans whose feature "kind" matches;
# otherwise return every annotation of type "entity".
entities <- function(doc, kind) {
  s <- doc$content
  # annotations(doc) returns a list; the first element holds the span set.
  a <- annotations(doc)[[1]]
  if(hasArg(kind)) {
    # Word/sentence annotations have no "kind" feature, so sapply may yield
    # NULLs (a list result); the == comparison still selects the matches.
    k <- sapply(a$features, `[[`, "kind")
    s[a[k == kind]]
  } else {
    s[a[a$type == "entity"]]
  }
}
entities(bio_doc, kind = "person")
entities(bio_doc, kind = "location")
entities(bio_doc, kind = "organization")
|
e1557f57317bda0aa4b9c958f5d97d254e4f8dd1 | 67853b49895b7f4286e0932212d24c509fe08f9e | /man/select_degree.Rd | 7aed9e6d8c234c0fdc3fde2d8ee7810935db4e43 | [] | no_license | sere3s/takos | c2b44cd5319f1d8df60f06fda5a704905ae19cec | 002527891952b99044c577a9f0393f3057502e3b | refs/heads/master | 2021-07-24T12:16:44.269021 | 2021-07-15T15:32:59 | 2021-07-15T15:32:59 | 147,636,829 | 3 | 2 | null | 2021-05-21T09:54:31 | 2018-09-06T07:45:14 | R | UTF-8 | R | false | true | 512 | rd | select_degree.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/selectDegree.r
\name{select_degree}
\alias{select_degree}
\title{Select thermogram data at chosen degrees of crystallinity}
\usage{
select_degree(mat, degree = seq(0.01, 0.99, by = 0.01))
}
\arguments{
\item{mat}{matrix of all the thermograms, checked using the function mat.check}
\item{degree}{selected degrees of crystallinity at which to perform the analysis}
}
\value{
"DT" built with the values of mat according to the specified degrees
}
\description{
Builds a data table ("DT") from the values of \code{mat} at the specified degrees of crystallinity.
}
|
d63dfbf6e0de121fa74c90737856c967b831d5ea | 05f59cd61540ce4c9961d91377cff9008b78c009 | /man/generate_train_test_split.Rd | 948794233dc26712f42fb2c7399bee75622c87b7 | [
"MIT"
] | permissive | rzgross/uRbanmatching | e3abddab1946e2f84c32378820585b96abe4840d | 356b3d1ac10d72e3c5a20de08903f137c2c22965 | refs/heads/master | 2023-04-12T21:20:24.938362 | 2021-05-09T19:04:41 | 2021-05-09T19:04:41 | 355,984,522 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 553 | rd | generate_train_test_split.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generate_train_test_split.R
\name{generate_train_test_split}
\alias{generate_train_test_split}
\title{generate_train_test_split}
\usage{
generate_train_test_split(match_list, train_fraction = 0.7)
}
\arguments{
\item{match_list}{\code{match_list} entry.}
\item{train_fraction}{Fraction (between 0 and 1) to use for training data (and the rest for test).}
}
\description{
Creates an \code{index_list} from a \code{match_list}, splitting according to \code{train_fraction}.
}
|
661f7fbf5760fc6494f5cb65c10075df4c3b8b17 | 666442d86a618b4f0b5160255c13019185d10264 | /tests/testthat/test-1-tomarkdown.R | 72ba61c52f799d5bd62d571c493f1527e0be4c88 | [
"MIT"
] | permissive | jimsforks/testdown | a776faaeb4d22728a1c6f91f21fccb726acddb01 | d111da2ef8a6d122e720b02d5b19290da372791a | refs/heads/master | 2023-03-17T07:21:18.095907 | 2021-03-04T18:20:16 | 2021-03-04T18:20:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,090 | r | test-1-tomarkdown.R | withr::with_dir(pkg, {
test_that("testdown works", {
# browser()
# oldir <- setwd(pkg)
res <- test_down(
project_name = "testthat testdown",
author = "Colin Fay",
pkg = pkg,
open = FALSE
)
#' @description Checking that the report is created
expect_path_exists(
dirname(res)
)
# setwd(oldir)
})
test_that("Files exist", {
# oldir <- setwd(pkg)
#' @description Checking that style.css is created
expect_file_exists("tests/testdown/style.css")
#' @description Checking that _bookdown.yml is created
expect_file_exists("tests/testdown/_bookdown.yml")
#' @description Checking that _output.yml is created
expect_file_exists("tests/testdown/_output.yml")
# setwd(oldir)
})
test_that("Configs are correct", {
#browser()
# oldir <- setwd(pkg)
font_m <- yaml::read_yaml(
"tests/testdown/_bookdown.yml"
)
#' @description The config filename should be the name of the package
expect_equal(
font_m$book_filename,
basename(pkg)
)
#' @description Merged files should be deleted
expect_true(
font_m$delete_merged_file
)
#' @description chapter_name should be "Test File "
expect_equal(
font_m$language$ui$chapter_name,
"Test File "
)
font_m <- yaml::read_yaml(
"tests/testdown/_output.yml"
)
#' @description toc_depth should stay 1
expect_equal(
font_m$`bookdown::gitbook`$toc_depth,
1
)
#' @description css should be style.css
expect_equal(
font_m$`bookdown::gitbook`$css,
"style.css"
)
#' @description toc$before should contain '{testdown} report'
expect_match(fixed = TRUE,
font_m$`bookdown::gitbook`$config$toc$before,
"{testdown} report"
)
#' @description toc$after should contain link to the {testdown}
expect_match(fixed = TRUE,
font_m$`bookdown::gitbook`$config$toc$after,
"https://github.com/ThinkR-open/testdown"
)
# setwd(oldir)
})
})
|
afdac8292a2e5ea1c49856f4e76f6cb3ab367d44 | 93550a01d89c03b1c7fd7d71c183c1c0abc64a25 | /CompareFiles.R | 0da77ba59cf58c9fdaa6872aad495eae0f940299 | [] | no_license | genepi-freiburg/ckdgen-annotation | c363685a5f00c0a2ef837caac40808af9ef320ff | 3ae43e8fe86b4a3d9ff94d198d133898402f31a7 | refs/heads/master | 2021-05-23T05:42:40.066723 | 2017-12-08T09:29:54 | 2017-12-08T09:29:54 | 94,869,971 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,128 | r | CompareFiles.R | args = commandArgs(trailingOnly=TRUE)
# Validate command-line arguments: two input files plus the names of the
# chromosome and position columns in each.
if (length(args) != 6) {
  print("Usage: inFile1 chrColName posColName inFile2 chrColName posColName")
  stop()
}
# Read both tab-separated tables. `header` is spelled out (the original
# relied on partial matching of "h") and TRUE replaces the reassignable T.
d1 = read.table(args[1], header = TRUE, sep = "\t")
d2 = read.table(args[4], header = TRUE, sep = "\t")
# Reduce each table to a two-column (CHR, POS) frame for comparison.
d1x = data.frame(CHR = as.factor(d1[, args[2]]),
                 POS = as.numeric(d1[, args[3]]))
d2x = data.frame(CHR = as.factor(d2[, args[5]]),
                 POS = as.numeric(d2[, args[6]]))
print("dimensions file 1")
dim(d1x)
#summary(d1x)
print("dimensions file 2")
dim(d2x)
#summary(d2x)
# For each (CHR, POS) row of `a`, find the row of `b` on the same
# chromosome whose position is closest, and return those rows of `b`
# augmented with DELTA (absolute distance) and POS_ORIG (the query
# position). Returns NULL for an empty `a` (the original `1:nrow(a)`
# loop mis-iterated over c(1, 0) in that case).
compare_sets = function (a, b) {
  nearest = lapply(seq_len(nrow(a)), function(i) {
    chr = a[i, "CHR"]
    pos = a[i, "POS"]
    # Candidate rows on the same chromosome.
    b1 = subset(b, b$CHR == chr)
    b1$DELTA = abs(b1$POS - pos)
    b1$POS_ORIG = pos
    # Keep only the single closest candidate.
    b1[order(b1$DELTA), ][1, ]
  })
  # Bind once at the end instead of growing a data frame inside the loop
  # (avoids quadratic copying); also stops shadowing base::c with `c`.
  do.call(rbind, nearest)
}
print("compare 1 to 2")
d12 = compare_sets(d1x, d2x)
print(d12)
print("delta < 100000")
print(length(which(d12$DELTA < 100000)))
print("delta == 0")
print(length(which(d12$DELTA == 0)))
print("compare 2 to 1")
d21 = compare_sets(d2x, d1x)
print(d21)
print("delta < 100000")
print(length(which(d21$DELTA < 100000)))
print("deltta == 0")
print(length(which(d21$DELTA == 0)))
|
9082b12a062d0a5f165f3c1a7b9204fe07ab138a | 4b80c97c193bec524a24c1df7aba3af037660db8 | /R/plot_bland_altman.R | 9587a3d21546ce37802a28c1a0febe720b408f6c | [] | no_license | cran/lvmisc | 39ce176be2c08be16b5d09cbf8feba08c5a6747a | 738fa1a1f295a508764102c7bdda46c9945b2a92 | refs/heads/master | 2023-04-01T01:52:11.303753 | 2021-04-05T14:20:02 | 2021-04-05T14:20:02 | 341,248,797 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,183 | r | plot_bland_altman.R | #' Create a Bland-Altman plot
#'
#' Draw the agreement plot of Bland & Altman (1986): per-observation
#' differences plotted against means, with horizontal lines at the bias
#' and at the lower/upper limits of agreement.
#'
#' @param x An object of class \code{lvmisc_cv} or an object containing a model.
#' @param ... Additional arguments to be passed to \code{ggplot2::aes()}.
#'
#' @return A \code{ggplot} object.
#'
#' @references \itemize{
#'   \item Bland, J.M. & Altman, D.G. (1986). Statistical methods for assessing
#'   agreement between two methods of clinical measurement.
#'   Lancet, 8(1), 307-10. \doi{https://doi.org/10.1016/S0140-6736(86)90837-8}
#' }
#'
#' @export
#'
#' @examples
#' mtcars <- tibble::as_tibble(mtcars, rownames = "car")
#' m <- stats::lm(disp ~ mpg, mtcars)
#' cv <- loo_cv(m, mtcars, car)
#' plot_bland_altman(cv, colour = as.factor(am))
plot_bland_altman <- function(x, ...) {
  components <- model_data(x)
  ba_data <- components$model_data
  ba_bias <- components$bias
  loa_lower <- components$loa$lower
  loa_upper <- components$loa$upper
  # Scatter of mean vs. difference; extra aesthetics are forwarded via `...`.
  scatter <- ggplot2::ggplot(ba_data) +
    ggplot2::geom_point(ggplot2::aes(x = mean, y = diff, ...))
  # Overlay the bias (solid) and limits of agreement (dashed) reference lines.
  scatter +
    ggplot2::geom_hline(yintercept = ba_bias) +
    ggplot2::geom_hline(yintercept = loa_lower, linetype = "longdash") +
    ggplot2::geom_hline(yintercept = loa_upper, linetype = "longdash")
}
# S3 generic: extract Bland-Altman plotting data (per-observation means and
# differences, the bias, and the limits of agreement) from a model or
# cross-validation object. Methods exist for lvmisc_cv, lm and lmerMod.
model_data <- function(x) {
  UseMethod("model_data")
}
# Fallback method: any class without a dedicated `model_data` method gets
# a classed "no method for class" error pointing users at the issue tracker.
model_data.default <- function(x) {
  msg <- glue::glue(
    "If you would like it to be implemented, please file an issue at \\
    https://github.com/verasls/lvmisc/issues."
  )
  abort_no_method_for_class("model_data", class(x), msg)
}
# Bland-Altman components from a cross-validated model (class `lvmisc_cv`).
# Compares the `.actual` and `.predicted` columns produced by the
# cross-validation routine; returns the input augmented with `mean`/`diff`
# columns plus the bias and limits of agreement.
model_data.lvmisc_cv <- function(x) {
  check_args_model_data(x)
  # Per-observation mean and difference: the two Bland-Altman axes.
  mean <- (x[[".actual"]] + x[[".predicted"]]) / 2
  diff <- x[[".actual"]] - x[[".predicted"]]
  # bias() and loa() are lvmisc helpers defined elsewhere in the package.
  bias <- bias(x[[".actual"]], x[[".predicted"]], na.rm = TRUE)
  loa <- loa(x[[".actual"]], x[[".predicted"]], na.rm = TRUE)
  list(
    model_data = cbind(x, mean, diff),
    bias = bias, loa = loa
  )
}
# Bland-Altman components for a fitted `lm`: observed response taken from
# the stored model frame, predictions from stats::predict(); returns the
# plotting tibble plus bias and limits of agreement.
model_data.lm <- function(x) {
  check_args_model_data(x)
  response <- as.character(rlang::f_lhs(stats::formula(x)))
  observed <- x$model[[response]]
  predictions <- stats::predict(x)
  list(
    model_data = tibble::tibble(
      mean = (observed + predictions) / 2,
      diff = observed - predictions
    ),
    bias = bias(observed, predictions, na.rm = TRUE),
    loa = loa(observed, predictions, na.rm = TRUE)
  )
}
# Bland-Altman components for a fitted lme4 `merMod`: same recipe as the
# lm method, except the observed response comes from stats::model.frame().
model_data.lmerMod <- function(x) {
  check_args_model_data(x)
  response <- as.character(rlang::f_lhs(stats::formula(x)))
  observed <- stats::model.frame(x)[[response]]
  predictions <- stats::predict(x)
  list(
    model_data = tibble::tibble(
      mean = (observed + predictions) / 2,
      diff = observed - predictions
    ),
    bias = bias(observed, predictions, na.rm = TRUE),
    loa = loa(observed, predictions, na.rm = TRUE)
  )
}
# Argument guard shared by the model_data methods: objects with a
# multi-class vector that is not an lvmisc_cv get a classed error listing
# the unsupported classes.
check_args_model_data <- function(x) {
  cls <- class(x)
  unsupported <- !("lvmisc_cv" %in% cls) && length(cls) > 1
  if (unsupported) {
    classes <- cls[!(cls %in% c("lm", "lmerMod"))]
    msg <- glue::glue(
      "If you would like it to be implemented, please file an issue at \\
      https://github.com/verasls/lvmisc/issues."
    )
    abort_no_method_for_class("model_data", classes, msg)
  }
}
|
e92e4422d3afd15dccdfe6ecea2a548cfc7d30bd | 1c75a24c37c82eb57a1e94d483ab241289b9c352 | /Shmueli resources/RCode_by_chapter/Ch9_0.R | 11f38ff79c3433ad16884bad361f5fa80b1ac8f5 | [] | no_license | IBSBigData/BUS212_Spring2018_Pub | e7e6bf250d56f2ae35ead1de792472deaa22aacf | 09b30d86b79b5e05c6188957b2877a22aebf9b82 | refs/heads/master | 2021-05-13T14:31:28.905257 | 2018-08-29T19:48:09 | 2018-08-29T19:48:09 | 116,742,943 | 2 | 31 | null | null | null | null | UTF-8 | R | false | false | 4,017 | r | Ch9_0.R | #### Figure 9.7
# Classification-tree, random-forest and boosting examples for Chapter 9.
# NOTE(review): expects RidingMowers.csv / UniversalBank.csv in the working
# directory; confusionMatrix() is a caret function, but caret is only
# attached in the Table 9.5 section below — presumably it is already loaded
# when the earlier sections are run interactively (confirm).
library(rpart)
library(rpart.plot)
mower.df <- read.csv("RidingMowers.csv")
# use rpart() to run a classification tree.
# define rpart.control() in rpart() to determine the depth of the tree.
class.tree <- rpart(Ownership ~ ., data = mower.df,
    control = rpart.control(maxdepth = 2), method = "class")
## plot tree
# use prp() to plot the tree. You can control plotting parameters such as color, shape,
# and information displayed (which and where).
prp(class.tree, type = 1, extra = 1, split.font = 1, varlen = -10)
#### Figure 9.9
library(rpart)
library(rpart.plot)
bank.df <- read.csv("UniversalBank.csv")
bank.df <- bank.df[ , -c(1, 5)]  # Drop ID and zip code columns.
# partition: 60% training, the rest validation.
set.seed(1)
train.index <- sample(c(1:dim(bank.df)[1]), dim(bank.df)[1]*0.6)
train.df <- bank.df[train.index, ]
valid.df <- bank.df[-train.index, ]
# classification tree
default.ct <- rpart(Personal.Loan ~ ., data = train.df, method = "class")
# plot tree
prp(default.ct, type = 1, extra = 1, under = TRUE, split.font = 1, varlen = -10)
#### Figure 9.10
# Fully grown tree: cp = 0 and minsplit = 1 allow splitting down to pure leaves.
deeper.ct <- rpart(Personal.Loan ~ ., data = train.df, method = "class", cp = 0, minsplit = 1)
# count number of leaves
length(deeper.ct$frame$var[deeper.ct$frame$var == "<leaf>"])
# plot tree
prp(deeper.ct, type = 1, extra = 1, under = TRUE, split.font = 1, varlen = -10,
    box.col=ifelse(deeper.ct$frame$var == "<leaf>", 'gray', 'white'))
#### Table 9.3
# classify records in the validation data.
# set argument type = "class" in predict() to generate predicted class membership.
default.ct.point.pred.train <- predict(default.ct,train.df,type = "class")
# generate confusion matrix for training data
confusionMatrix(default.ct.point.pred.train, train.df$Personal.Loan)
### repeat the code for the validation set, and the deeper tree
#### Table 9.4
# argument xval refers to the number of folds to use in rpart's built-in
# cross-validation procedure
# argument cp sets the smallest value for the complexity parameter.
cv.ct <- rpart(Personal.Loan ~ ., data = train.df, method = "class",
    cp = 0.00001, minsplit = 5, xval = 5)
# use printcp() to print the table.
printcp(cv.ct)
#### Figure 9.12
# prune by lower cp: pick the cp with the smallest cross-validation error.
pruned.ct <- prune(cv.ct,
    cp = cv.ct$cptable[which.min(cv.ct$cptable[,"xerror"]),"CP"])
length(pruned.ct$frame$var[pruned.ct$frame$var == "<leaf>"])
prp(pruned.ct, type = 1, extra = 1, split.font = 1, varlen = -10)
#### Figure 9.13
set.seed(1)
cv.ct <- rpart(Personal.Loan ~ ., data = train.df, method = "class", cp = 0.00001, minsplit = 1, xval = 5) # minsplit is the minimum number of observations in a node for a split to be attempted. xval is number K of folds in a K-fold cross-validation.
printcp(cv.ct) # Print out the cp table of cross-validation errors. The R-squared for a regression tree is 1 minus rel error. xerror (or relative cross-validation error where "x" stands for "cross") is a scaled version of overall average of the 5 out-of-sample errors across the 5 folds.
pruned.ct <- prune(cv.ct, cp = 0.0154639)
prp(pruned.ct, type = 1, extra = 1, under = TRUE, split.font = 1, varlen = -10,
    box.col=ifelse(pruned.ct$frame$var == "<leaf>", 'gray', 'white'))
####Figure 9.15
library(randomForest)
## random forest
rf <- randomForest(as.factor(Personal.Loan) ~ ., data = train.df, ntree = 500,
    mtry = 4, nodesize = 5, importance = TRUE)
## variable importance plot
varImpPlot(rf, type = 1)
## confusion matrix
rf.pred <- predict(rf, valid.df)
confusionMatrix(rf.pred, valid.df$Personal.Loan)
#### Table 9.5
library(adabag)
library(rpart)
library(caret)
# boosting() requires a factor outcome.
train.df$Personal.Loan <- as.factor(train.df$Personal.Loan)
set.seed(1)
boost <- boosting(Personal.Loan ~ ., data = train.df)
pred <- predict(boost, valid.df)
confusionMatrix(pred$class, valid.df$Personal.Loan)
|
676de2baca89995279a74bb65e89148f13a39b96 | 4b6f1e3b54c0473e159b411ff6766201ac6d3b08 | /genscen_titecrmmcLIK.R | e930088e00bf190ba5d88e84ec2c3cc65c87cff0 | [] | no_license | moreno-ursino/TITECRMMC | 2a6bb0baf893804ecea48c696882bd1af9cc27e0 | 736fabc0b1c5b063a1504485f138b71304483823 | refs/heads/master | 2021-07-02T06:46:51.803467 | 2017-09-20T15:52:59 | 2017-09-20T15:52:59 | 104,068,197 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 41,965 | r | genscen_titecrmmcLIK.R | #################################################################################################
##
## First part: generating scenario's clinical trials
## Second part: run clinical trial using TITECRMMC-likelihood (probit and empirical)
##
################################################################################################
## accrual -> exponential distribution
## visits -> at fixed time
library(markovchain)
Nsim = 2000 # number of simulations
J = 6 # number of cycles
n = 40 # number of patients
obswin = 6 # days
rate = 6 # rate for accrual
K=5 # number of doses
PI <-matrix( c( 0.892, 0.078, 0.03, 0.86, 0.09, 0.05, 0.83, 0.10, 0.07, 0.79, 0.12, 0.09, 0.74, 0.14, 0.12,
0, 0.872, 0.128, 0, 0.87, 0.13, 0, 0.84, 0.16, 0, 0.82, 0.18, 0, 0.80, 0.20,
0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1),
byrow=T, nrow=3 )
accrual = "fixed"
#accrual = "poisson"
SED = 32
# generating dataset
for (tr in 1:Nsim){
data_complete = NULL
set.seed(SED)
if (accrual=="fixed") {
T_entrance = c(0,rep(obswin/rate,n-1))
} else T_entrance = c(0,rexp(n-1,rate/obswin)) # time of accrual for each patients
T_entrance = cumsum(T_entrance)#round(cumsum(T_entrance))
for (m in 1:n){ # for each patient
times <- cumsum(c(T_entrance[m]+1,rep(1,J-1)))
datap <- cbind(rep(m,J),1:J,times) # number of patients and cycle number
for (d in 1:K){
patient <- new("markovchain", states = as.character(1:3),
transitionMatrix = PI[,(3*d-2):(3*d)],
name = "Patmarkov")
grades=rmarkovchain(n = J, object = patient, t0 = "1")
datap= cbind(datap,as.numeric(grades))
}
data_complete= rbind(data_complete,datap)
}
dimnames(data_complete)[[2]] <- c("patient","cycle","time",paste('dose_',1:K,sep=''))
SED = SED + tr
eval(parse(text = paste("data",tr, " <- list(data_complete=data_complete, T_entrance=T_entrance)", sep="")))
}
save.image()
SED = 400
# generating dataset
accrual="poisson"
for (tr in 1:Nsim){
set.seed(SED)
if (accrual=="fixed") {
T_entrance = c(0,rep(obswin/rate,n-1))
} else T_entrance = c(0,rexp(n-1,rate/obswin)) # time of accrual for each patients
T_entrance = cumsum(T_entrance)#round(cumsum(T_entrance))
SED = SED + tr
eval(parse(text = paste("data",tr, "$T_entrance2 <- T_entrance", sep="")))
}
save.image()
#######################################################################################
# likelihood function PROBIT
# Build a probit-model skeleton: standardized dose labels and prior toxicity
# probabilities for a CRM design (Lee & Cheung style indifference-interval
# calibration).
#
# delta  : half-width of the indifference interval used to space the doses.
# nu     : index of the prior MTD (dose whose prior toxicity prob equals target).
# K      : total number of dose levels (1 <= nu <= K).
# target : targeted toxicity probability at dose nu.
# a      : fixed intercept of the probit working model (default 1).
#
# Returns a list:
#   d : K x 2 matrix (dose index, standardized dose label), labels rounded to 5 dp.
#   s : K x 2 matrix (dose index, prior toxicity probability), rounded to 3 dp,
#       with s[nu, 2] == target by construction.
skeleton <- function(delta, nu, K , target, a=1)
{
  d <- matrix(NA, nrow = K, ncol = 2)
  s <- matrix(NA, nrow = K, ncol = 2)
  # Anchor dose nu so that 1 - pnorm(-(a + d[nu, 2])) == target exactly.
  d[nu, 1] <- nu
  d[nu, 2] <- -(a + qnorm(1 - target))
  # Doses above nu. Guarded: without the guard, (nu+1):K runs backwards when
  # nu == K and indexes out of bounds.
  if (nu < K) {
    for (l in (nu + 1):K) {
      d[l, 1] <- l
      d[l, 2] <- ((qnorm(1 - target - delta) + a) * d[l - 1, 2]) / (qnorm(1 - target + delta) + a)
    }
  }
  # Doses below nu. Guarded: without the guard, 0:(nu-2) runs backwards when
  # nu == 1 and overwrites the anchor label d[1, 2].
  if (nu > 1) {
    for (l in 0:(nu - 2)) {
      d[nu - (l + 1), 1] <- (nu - (l + 1))
      d[nu - (l + 1), 2] <- ((qnorm(1 - target + delta) + a) * d[nu - l, 2]) / (qnorm(1 - target - delta) + a)
    }
  }
  # Probabilities are computed from the unrounded labels; labels are rounded
  # only afterwards (order matters for reproducing published skeletons).
  s[, 1] <- d[, 1]
  s[, 2] <- round(1 - pnorm(-(a + d[, 2])), 3)
  d <- round(d, 5)
  return(list(d = d, s = s))
}
# Probit working model: skel1 = prior toxicity probabilities (used as the
# dfcrm::titecrm skeleton in stage 2), doselabel = standardized dose labels
# fed to the 3-category likelihood in stage 3. Prior MTD at level 2 of 5.
skel1 <- skeleton(0.04,2,5,0.25)$s[,2]
doselabel <- skeleton(0.04,2,5,0.25)$d[,2]
# Negative weighted log-likelihood of the 3-category probit TITE model.
#
# b      : c(slope, shift); P(grade >= 2) = pnorm(alpha0 + b[1]*x),
#          P(grade == 3) = pnorm(alpha0 + b[1]*x - b[2]).
# x1p    : standardized dose labels, one per patient.
# z1p    : observed grade per patient (1, 2 or 3).
# w1p    : TITE weight applied to the grade >= 2 probability.
# w2p    : TITE weight applied to the grade == 3 probability.
# alpha0 : fixed intercept.
#
# Returns -log-likelihood (to be minimized by optim).
ltite3bis <- function(b, x1p, z1p, w1p, w2p, alpha0) {
  floor_p <- 2^(-1074)  # smallest positive double; keeps log() finite
  loglik <- 0
  for (i in seq_along(x1p)) {
    p1 <- pnorm(alpha0 + b[1] * x1p[i])           # P(grade >= 2)
    p2 <- pnorm(alpha0 + b[1] * x1p[i] - b[2])    # P(grade == 3)
    loglik <- loglik +
      ((z1p[i] == 1) * log(max(1 - w1p[i] * p1, floor_p)) +
       (z1p[i] == 2) * log(max(w1p[i] * p1 - w2p[i] * p2, floor_p)) +
       (z1p[i] == 3) * log(max(w2p[i] * p2, floor_p)))
  }
  -loglik
}
# One TITE-CRM-MC (probit) model update: fit the 3-category likelihood by
# maximum likelihood and return the next recommended dose level.
#
# x         : dose levels assigned so far (indices into doselabel).
# doselabel : standardized dose labels of the working model.
# y         : observed grade per patient (1, 2 or 3).
# follow    : follow-up time per patient (in cycles).
# alpha0    : fixed intercept of the probit model.
# Tmax      : full observation window (number of cycles).
# target1   : target probability for grade >= 2.
# target2   : target probability for grade == 3.
#
# Returns list(newdose, p1tox, p2tox): recommended level and the two fitted
# toxicity-probability curves.
titecrmmc <- function(x, doselabel, y, follow, alpha0, Tmax, target1, target2) {
  dose_val <- doselabel[x]
  # TITE weights: proportion of the window observed; a patient whose event is
  # already observed gets full weight on the corresponding component.
  w1p <- follow / Tmax
  w2p <- w1p
  w1p[y == 2] <- 1   # grade-2 event observed
  w2p[y == 3] <- 1   # grade-3 event observed
  fit <- optim(c(1, 1), ltite3bis, method = "L-BFGS-B",
               lower = rep(0.1, 2), upper = rep(Inf, 2),
               x1p = dose_val, z1p = y, w1p = w1p, w2p = w2p, alpha0 = alpha0)
  est <- fit$par
  p1tox <- pnorm(alpha0 + est[1] * doselabel)            # fitted P(grade >= 2)
  p2tox <- pnorm(alpha0 + est[1] * doselabel - est[2])   # fitted P(grade == 3)
  # Closest dose to each target (first index on ties); recommend the more
  # conservative of the two.
  pick1 <- which.min(abs(p1tox - target1))
  pick2 <- which.min(abs(p2tox - target2))
  list(newdose = min(pick1, pick2), p1tox = p1tox, p2tox = p2tox)
}
######################################################################################################
# run simulation for probit T fixed
#
# Trial simulation: probit working model, deterministic ("fixed") accrual.
# Each simulated trial proceeds in up to three phases:
#   stage 1 - rule-based escalation while only one grade has been observed;
#   stage 2 - binary TITE-CRM (dfcrm::titecrm) once two distinct grades exist;
#   stage 3 - 3-category TITE-CRM-MC (titecrmmc) once all three grades have
#             been observed on completed follow-ups.
library(dfcrm)
# J cycles
# K doses
# n sample size
#invlogit <- function(x) exp(x)/(1-exp(x))
cohort = 3
cohort2 = 1
prior = skel1 #c(0.05,0.1,0.2,0.3)
alpha0=1
target1 = 0.5
target2 = 0.25
TR = Nsim
MTD_titecrmmc <- NULL
doses_titecrmmc <- NULL
tox_titecrmmc <- NULL
xtox_titecrmmc <- NULL
ntite <- NULL
for (tr in 1:TR){
# x doses assigned
# y toxicities
# Datasets were stored as global objects data1, data2, ... hence eval(parse()).
eval(parse(text = paste("tox <- data",tr,"$data_complete", sep="")))
eval(parse(text = paste("T_entrance <- data",tr,"$T_entrance", sep="")))
tox <- data.frame(tox)
T_entrance <- c(T_entrance,Inf)
MTD=-1
ntitetr=0
x <- rep(1,cohort) # first dose to the first cohort
M = n/cohort # number of total cohorts
# data_reg = data_complete[1:(cohort*J),1:4]
# y <- tox[cbind(1:length(x),x)]
time = T_entrance[cohort+1] # time of entrance of the first patient of second cohort
n_fin = 0 # number of patient with complete follow up
data_reg_c <- NULL # building the dataset for TITE: complete, with patients who ended the followup
data_reg_i <- NULL # during follow up
# Collect (patient, follow-up, grade) rows observable at 'time' for the first
# cohort. pos = number of cycles already seen; the max(pos, 0.1) guard keeps
# max() away from an empty which() result when no visit has occurred yet.
for (l in 1:cohort){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
#dimnames(data_reg_c)[[2]] <- c("patient", "follow", "grade")
#dimnames(data_reg_i)[[2]] <- c("patient", "follow", "grade")
data_reg <- rbind(data_reg_c,data_reg_i)
# check if we can apply TITECRM or not
# Stage selection: all three grades seen on completed follow-ups -> go
# straight to the model stage; one single grade seen -> stage 1; otherwise
# (two distinct grades) -> stage 2.
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage1 <- stage2 <- FALSE
} else {
if ( length(unique(data_reg[,3]))==1 ) {
stage1 <- TRUE
stage2 <- FALSE
} else {
stage1 <- FALSE
stage2 <- TRUE
}
}
pat=cohort
# Stage 1: rule-based cohort escalation. All grade 3 -> stop trial (MTD=0);
# all grade 2 -> repeat the current dose; otherwise escalate by one level.
if (stage1) {
for(i in 2:M) {
if ( sum(data_reg[,3]==3)==length(x) ) {
MTD=0
break
} else {
if ( sum(data_reg[,3]==2)==length(x) ) {
x <- c(x,rep(x[length(x)], cohort))
} else {
x <- c(x,rep(min(max(x)+1,K), cohort))
}
}
# dose for the cohort
time = T_entrance[i*cohort+1] # time of entrance of the first of the next cohort
pat=pat+cohort
data_reg_i <- NULL # reset the temporary dataset
for (l in (n_fin+1):(i*cohort)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage2 <- FALSE
ntitetr = length(x)
break
} else {
if ( (any(data_reg[,3]==2) & any(data_reg[,3]==3)) |
(any(data_reg[,3]==1) & any(data_reg[,3]==2)) |
(any(data_reg[,3]==1) & any(data_reg[,3]==3)) ) {
stage2 <- TRUE
break
}
}
}
}
# Trial stopped for toxicity: record placeholder rows.
# NOTE(review): rep(.,40) hard-codes n = 40.
if (MTD==0) {
MTD_titecrmmc <- c(MTD_titecrmmc,MTD)
doses_titecrmmc <- rbind(doses_titecrmmc,rep(0,40))
tox_titecrmmc <- rbind(tox_titecrmmc,rep(1,40))
ntite <- c(ntite,0)
} else {
# Patient-by-patient inclusion (cohort2 == 1). In stage 2 the relevant
# binary endpoint is grade==3 if any grade 3 was seen, else grade==2.
# NOTE(review): i*cohort2 indexing below equals i only because cohort2 == 1.
for (i in seq(pat+1,n,1)){
if (stage2) {
if (any(data_reg[,3]==3)) {
grad = 3
target_reg = target2
} else {
grad = 2
target_reg = target1
}
# data_reg <- rbind(data_reg_c,data_reg_i) # preparation of data for regression
y <- rep(0, dim(data_reg)[1])
postox <- which(data_reg[,3]==grad )
y[postox] <- rep(1, length(postox))
results <- titecrm(prior, target_reg, y, x, followup=data_reg[,2], obswin=J)
newdose <- min(results$mtd, max(x)+1, K)
# check on the skipping dose
x <- c(x,rep(newdose,cohort2))
time = T_entrance[i*cohort2+1] # first patient of the next cohort
data_reg_i <- NULL
for (l in (n_fin+1):(i*cohort2)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage2 <- FALSE
ntitetr = length(x)
}
} else {
##### here stage 3
results <- titecrmmc(x,doselabel,y=data_reg[,3],follow=data_reg[,2],alpha0,Tmax=J,target1,target2)
newdose <- min(results$newdose, max(x)+1, K)
# check on the skipping dose
x <- c(x,rep(newdose,cohort2))
time = T_entrance[i*cohort2+1] # first patient of the next cohort
data_reg_i <- NULL
for (l in (n_fin+1):(i*cohort2)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
}
}
##### analysis of complete data
# Final recommendation once all n patients are included, with the same
# model that was active at the end of the trial.
if (stage2) {
if (any(data_reg[,3]==3)) {
grad = 3
target_reg = target2
} else {
grad = 2
target_reg = target1
}
# data_reg <- rbind(data_reg_c,data_reg_i) # preparation of data for regression
y <- rep(0, dim(data_reg)[1])
postox <- which(data_reg[,3]==grad )
y[postox] <- rep(1, length(postox))
results <- titecrm(prior, target_reg, y, x, followup=data_reg[,2], obswin=J)
MTD <- min(results$mtd, max(x)+1, K)
ntitetr=0
} else {
results <- titecrmmc(x,doselabel,y=data_reg[,3],follow=data_reg[,2],alpha0,Tmax=J,target1,target2)
MTD <- min(results$newdose, max(x)+1, K)
}
MTD_titecrmmc <- c(MTD_titecrmmc,MTD)
doses_titecrmmc <- rbind(doses_titecrmmc,x)
tox_titecrmmc <- rbind(tox_titecrmmc,data_reg[,3])
ntite <- c(ntite,ntitetr)
}
}
library(xtable)
# Store and export results for the "fixed accrual / probit model" setting.
resultsTfixprobscen1 <- list(MTD_titecrmmc = MTD_titecrmmc, doses_titecrmmc = doses_titecrmmc, tox_titecrmmc = tox_titecrmmc,
ntite=ntite)
write.table(MTD_titecrmmc, file="scenario1fixprob.txt")
write.table(ntite, file="scenario1fixprobntite.txt")
save.image()
###############################################################################################################
# for poisson accrual
#
# Same three-phase trial simulation as above (probit working model), but with
# Poisson accrual: entrance times come from T_entrance2 and the per-cycle
# visit times are rebuilt below.
library(dfcrm)
cohort = 3
cohort2 = 1
prior = skel1
alpha0=1
target1 = 0.5
target2 = 0.25
TR = Nsim
MTD_titecrmmc <- NULL
doses_titecrmmc <- NULL
tox_titecrmmc <- NULL
xtox_titecrmmc <- NULL
ntite <- NULL
for (tr in 1:TR){
# x doses assigned
# y toxicities
eval(parse(text = paste("tox <- data",tr,"$data_complete", sep="")))
# eval(parse(text = paste("T_entrance <- data",tr,"$T_entrance", sep="")))
################################## for pois
# Rebuild the visit times from the Poisson accrual entrance times
# (T_entrance2); the grade trajectories of data<tr> are reused unchanged.
eval(parse(text = paste("T_entrance <- data",tr,"$T_entrance2", sep="")))
tpois <- NULL
for (poiss in 1:length(T_entrance) ){
times <- cumsum(c(T_entrance[poiss]+1,rep(1,J-1)))
tpois <- c(tpois,times)
}
tox <- data.frame(tox)
tox$time <- tpois
#########################################################################
tox <- data.frame(tox)
T_entrance <- c(T_entrance,Inf)
MTD=-1
ntitetr=0
x <- rep(1,cohort) # first dose to the first cohort
M = n/cohort # number of total cohorts
# data_reg = data_complete[1:(cohort*J),1:4]
# y <- tox[cbind(1:length(x),x)]
time = T_entrance[cohort+1] # time of entrance of the first patient of second cohort
n_fin = 0 # number of patient with complete follow up
data_reg_c <- NULL # building the dataset for TITE: complete, with patients who ended the followup
data_reg_i <- NULL # during follow up
# Collect (patient, follow-up, grade) rows observable at 'time'; the
# max(pos, 0.1) guard handles patients with no visit yet.
for (l in 1:cohort){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
#dimnames(data_reg_c)[[2]] <- c("patient", "follow", "grade")
#dimnames(data_reg_i)[[2]] <- c("patient", "follow", "grade")
data_reg <- rbind(data_reg_c,data_reg_i)
# check if we can apply TITECRM or not
# Stage selection: all grades seen on completed follow-ups -> model stage;
# one grade only -> stage 1; two grades -> stage 2.
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage1 <- stage2 <- FALSE
} else {
if ( length(unique(data_reg[,3]))==1 ) {
stage1 <- TRUE
stage2 <- FALSE
} else {
stage1 <- FALSE
stage2 <- TRUE
}
}
pat=cohort
# Stage 1: rule-based escalation (all grade 3 -> stop; all grade 2 -> stay;
# otherwise escalate one level).
if (stage1) {
for(i in 2:M) {
if ( sum(data_reg[,3]==3)==length(x) ) {
MTD=0
break
} else {
if ( sum(data_reg[,3]==2)==length(x) ) {
x <- c(x,rep(x[length(x)], cohort))
} else {
x <- c(x,rep(min(max(x)+1,K), cohort))
}
}
# dose for the cohort
time = T_entrance[i*cohort+1] # time of entrance of the first of the next cohort
pat=pat+cohort
data_reg_i <- NULL # reset the temporary dataset
for (l in (n_fin+1):(i*cohort)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage2 <- FALSE
ntitetr = length(x)
break
} else {
if ( (any(data_reg[,3]==2) & any(data_reg[,3]==3)) |
(any(data_reg[,3]==1) & any(data_reg[,3]==2)) |
(any(data_reg[,3]==1) & any(data_reg[,3]==3)) ) {
stage2 <- TRUE
break
}
}
}
}
# Trial stopped for toxicity. NOTE(review): rep(.,40) hard-codes n = 40.
if (MTD==0) {
MTD_titecrmmc <- c(MTD_titecrmmc,MTD)
doses_titecrmmc <- rbind(doses_titecrmmc,rep(0,40))
tox_titecrmmc <- rbind(tox_titecrmmc,rep(1,40))
ntite <- c(ntite,0)
} else {
# Patient-by-patient inclusion; i*cohort2 equals i because cohort2 == 1.
for (i in seq(pat+1,n,1)){
if (stage2) {
if (any(data_reg[,3]==3)) {
grad = 3
target_reg = target2
} else {
grad = 2
target_reg = target1
}
# data_reg <- rbind(data_reg_c,data_reg_i) # preparation of data for regression
y <- rep(0, dim(data_reg)[1])
postox <- which(data_reg[,3]==grad )
y[postox] <- rep(1, length(postox))
results <- titecrm(prior, target_reg, y, x, followup=data_reg[,2], obswin=J)
newdose <- min(results$mtd, max(x)+1, K)
# check on the skipping dose
x <- c(x,rep(newdose,cohort2))
time = T_entrance[i*cohort2+1] # first patient of the next cohort
data_reg_i <- NULL
for (l in (n_fin+1):(i*cohort2)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage2 <- FALSE
ntitetr = length(x)
}
} else {
##### here stage 3
results <- titecrmmc(x,doselabel,y=data_reg[,3],follow=data_reg[,2],alpha0,Tmax=J,target1,target2)
newdose <- min(results$newdose, max(x)+1, K)
# check on the skipping dose
x <- c(x,rep(newdose,cohort2))
time = T_entrance[i*cohort2+1] # first patient of the next cohort
data_reg_i <- NULL
for (l in (n_fin+1):(i*cohort2)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
}
}
##### analysis of complete data
# Final recommendation with the model active at the end of the trial.
if (stage2) {
if (any(data_reg[,3]==3)) {
grad = 3
target_reg = target2
} else {
grad = 2
target_reg = target1
}
# data_reg <- rbind(data_reg_c,data_reg_i) # preparation of data for regression
y <- rep(0, dim(data_reg)[1])
postox <- which(data_reg[,3]==grad )
y[postox] <- rep(1, length(postox))
results <- titecrm(prior, target_reg, y, x, followup=data_reg[,2], obswin=J)
MTD <- min(results$mtd, max(x)+1, K)
ntitetr=0
} else {
results <- titecrmmc(x,doselabel,y=data_reg[,3],follow=data_reg[,2],alpha0,Tmax=J,target1,target2)
MTD <- min(results$newdose, max(x)+1, K)
}
MTD_titecrmmc <- c(MTD_titecrmmc,MTD)
doses_titecrmmc <- rbind(doses_titecrmmc,x)
tox_titecrmmc <- rbind(tox_titecrmmc,data_reg[,3])
ntite <- c(ntite,ntitetr)
}
}
library(xtable)
# Store and export results for the "Poisson accrual / probit model" setting.
resultsTpoisprobscen1 <- list(MTD_titecrmmc = MTD_titecrmmc, doses_titecrmmc = doses_titecrmmc, tox_titecrmmc = tox_titecrmmc,
ntite=ntite)
write.table(MTD_titecrmmc, file="scenario1poisprob.txt")
write.table(ntite, file="scenario1poisprobntite.txt")
save.image()
#######################################################################################################
#######################################################################################################
#######################################################################################################
# Likelihood for empiric model
#the skeleton assumes that F(d,beta)=d^(beta))
# Build an empiric ("power") model skeleton: dose labels / prior toxicity
# probabilities for the working model F(d, beta) = d^beta. For this model the
# labels and the probabilities coincide.
#
# delta  : half-width of the indifference interval used to space the doses.
# nu     : index of the prior MTD (1 <= nu <= K).
# K      : total number of dose levels.
# target : targeted toxicity probability at dose nu.
# a      : unused; kept for interface compatibility with skeleton().
#
# Returns a list:
#   d : K x 2 matrix (dose index, dose label), labels rounded to 5 dp.
#   s : K x 2 matrix (dose index, prior toxicity probability), unrounded,
#       with s[nu, 2] == target by construction.
skeleton_empiric <- function(delta, nu, K , target, a=1)
{
  d <- matrix(NA, nrow = K, ncol = 2)
  s <- matrix(NA, nrow = K, ncol = 2)
  # Anchor: under the power model the label at the prior MTD is the target.
  d[nu, 1] <- nu
  d[nu, 2] <- target #-(a+qnorm(1-target))
  # Doses above nu. Guarded: without the guard, (nu+1):K runs backwards when
  # nu == K and indexes out of bounds.
  if (nu < K) {
    for (l in (nu + 1):K) {
      d[l, 1] <- l
      d[l, 2] <- exp((log(target + delta) * log(d[l - 1, 2])) / log(target - delta))
    }
  }
  # Doses below nu. Guarded: without the guard, 0:(nu-2) runs backwards when
  # nu == 1 and overwrites the anchor label d[1, 2].
  if (nu > 1) {
    for (l in 0:(nu - 2)) {
      d[nu - (l + 1), 1] <- (nu - (l + 1))
      d[nu - (l + 1), 2] <- exp((log(target - delta) * log(d[nu - l, 2])) / log(target + delta))
    }
  }
  # s copies the unrounded labels; d is rounded only afterwards.
  s[, 1] <- d[, 1]
  s[, 2] <- d[, 2] #round(1-pnorm(-(a+d[,2])), 3)
  d <- round(d, 5)
  return(list(d = d, s = s))
}
# Empiric working model: labels equal prior probabilities, so skel1 (titecrm
# skeleton) and doselabel (stage-3 regression labels) are the same vector.
skel1 <- doselabel <- skeleton_empiric(0.06,2,5,0.25)$d[,2]
################################) Emipiric working model
# Negative weighted log-likelihood of the 3-category empiric (power) TITE
# model: P(grade >= 2) = x^b[1], P(grade == 3) = x^(b[1]+b[2]).
#
# b   : c(power for grade >= 2, extra power for grade == 3), both positive.
# x1p : dose labels in (0, 1), one per patient.
# z1p : observed grade per patient (1, 2 or 3).
# w1p : TITE weight on the grade >= 2 probability.
# w2p : TITE weight on the grade == 3 probability.
#
# Returns -log-likelihood (to be minimized by optim).
ltite3empiric <- function(b, x1p, z1p, w1p, w2p) {
  floor_p <- 2^(-1074)  # smallest positive double; keeps log() finite
  loglik <- 0
  for (i in seq_along(x1p)) {
    p1 <- x1p[i]^b[1]             # P(grade >= 2)
    p2 <- x1p[i]^(b[1] + b[2])    # P(grade == 3)
    loglik <- loglik +
      ((z1p[i] == 1) * log(max(1 - w1p[i] * p1, floor_p)) +
       (z1p[i] == 2) * log(max(w1p[i] * p1 - w2p[i] * p2, floor_p)) +
       (z1p[i] == 3) * log(max(w2p[i] * p2, floor_p)))
  }
  -loglik
}
# One TITE-CRM-MC (empiric/power) model update: fit the 3-category likelihood
# by maximum likelihood and return the next recommended dose level.
#
# x         : dose levels assigned so far (indices into doselabel).
# doselabel : dose labels in (0, 1) of the power working model.
# y         : observed grade per patient (1, 2 or 3).
# follow    : follow-up time per patient (in cycles).
# alpha0    : unused by the empiric model; kept for interface compatibility
#             with the probit version of titecrmmc.
# Tmax      : full observation window (number of cycles).
# target1   : target probability for grade >= 2.
# target2   : target probability for grade == 3.
#
# Returns list(newdose, p1tox, p2tox).
titecrmmc <- function(x, doselabel, y, follow, alpha0, Tmax, target1, target2) {
  dose_val <- doselabel[x]
  # TITE weights: observed fraction of the window; full weight once the
  # corresponding event has been observed.
  w1p <- follow / Tmax
  w2p <- w1p
  w1p[y == 2] <- 1   # grade-2 event observed
  w2p[y == 3] <- 1   # grade-3 event observed
  fit <- optim(c(1, 1), ltite3empiric, method = "L-BFGS-B",
               lower = rep(0.1, 2), upper = rep(Inf, 2),
               x1p = dose_val, z1p = y, w1p = w1p, w2p = w2p)
  est <- fit$par
  p1tox <- doselabel^est[1]                # fitted P(grade >= 2)
  p2tox <- doselabel^(est[1] + est[2])     # fitted P(grade == 3)
  # Closest dose to each target (first index on ties); recommend the more
  # conservative of the two.
  pick1 <- which.min(abs(p1tox - target1))
  pick2 <- which.min(abs(p2tox - target2))
  list(newdose = min(pick1, pick2), p1tox = p1tox, p2tox = p2tox)
}
####################################################################################################
### starting simulations T fixed
#
# Same three-phase trial simulation, now with the empiric (power) working
# model and fixed accrual. Uses the empiric skel1/doselabel and the empiric
# titecrmmc defined just above.
library(dfcrm)
# J cycles
# K doses
# n sample size
#invlogit <- function(x) exp(x)/(1-exp(x))
cohort = 3
cohort2 = 1
prior = skel1 #c(0.05,0.1,0.2,0.3)
alpha0=1
target1 = 0.5
target2 = 0.25
TR = Nsim
MTD_titecrmmc <- NULL
doses_titecrmmc <- NULL
tox_titecrmmc <- NULL
xtox_titecrmmc <- NULL
ntite <- NULL
for (tr in 1:TR){
# x doses assigned
# y toxicities
eval(parse(text = paste("tox <- data",tr,"$data_complete", sep="")))
eval(parse(text = paste("T_entrance <- data",tr,"$T_entrance", sep="")))
tox <- data.frame(tox)
T_entrance <- c(T_entrance,Inf)
MTD=-1
ntitetr=0
x <- rep(1,cohort) # first dose to the first cohort
M = n/cohort # number of total cohorts
# data_reg = data_complete[1:(cohort*J),1:4]
# y <- tox[cbind(1:length(x),x)]
time = T_entrance[cohort+1] # time of entrance of the first patient of second cohort
n_fin = 0 # number of patient with complete follow up
data_reg_c <- NULL # building the dataset for TITE: complete, with patients who ended the followup
data_reg_i <- NULL # during follow up
# Collect (patient, follow-up, grade) rows observable at 'time'; the
# max(pos, 0.1) guard handles patients with no visit yet.
for (l in 1:cohort){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
#dimnames(data_reg_c)[[2]] <- c("patient", "follow", "grade")
#dimnames(data_reg_i)[[2]] <- c("patient", "follow", "grade")
data_reg <- rbind(data_reg_c,data_reg_i)
# check if we can apply TITECRM or not
# Stage selection: all grades seen on completed follow-ups -> model stage;
# one grade only -> stage 1; two grades -> stage 2.
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage1 <- stage2 <- FALSE
} else {
if ( length(unique(data_reg[,3]))==1 ) {
stage1 <- TRUE
stage2 <- FALSE
} else {
stage1 <- FALSE
stage2 <- TRUE
}
}
pat=cohort
# Stage 1: rule-based escalation (all grade 3 -> stop; all grade 2 -> stay;
# otherwise escalate one level).
if (stage1) {
for(i in 2:M) {
if ( sum(data_reg[,3]==3)==length(x) ) {
MTD=0
break
} else {
if ( sum(data_reg[,3]==2)==length(x) ) {
x <- c(x,rep(x[length(x)], cohort))
} else {
x <- c(x,rep(min(max(x)+1,K), cohort))
}
}
# dose for the cohort
time = T_entrance[i*cohort+1] # time of entrance of the first of the next cohort
pat=pat+cohort
data_reg_i <- NULL # reset the temporary dataset
for (l in (n_fin+1):(i*cohort)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage2 <- FALSE
ntitetr = length(x)
break
} else {
if ( (any(data_reg[,3]==2) & any(data_reg[,3]==3)) |
(any(data_reg[,3]==1) & any(data_reg[,3]==2)) |
(any(data_reg[,3]==1) & any(data_reg[,3]==3)) ) {
stage2 <- TRUE
break
}
}
}
}
# Trial stopped for toxicity. NOTE(review): rep(.,40) hard-codes n = 40.
if (MTD==0) {
MTD_titecrmmc <- c(MTD_titecrmmc,MTD)
doses_titecrmmc <- rbind(doses_titecrmmc,rep(0,40))
tox_titecrmmc <- rbind(tox_titecrmmc,rep(1,40))
ntite <- c(ntite,0)
} else {
# Patient-by-patient inclusion; i*cohort2 equals i because cohort2 == 1.
for (i in seq(pat+1,n,1)){
if (stage2) {
if (any(data_reg[,3]==3)) {
grad = 3
target_reg = target2
} else {
grad = 2
target_reg = target1
}
# data_reg <- rbind(data_reg_c,data_reg_i) # preparation of data for regression
y <- rep(0, dim(data_reg)[1])
postox <- which(data_reg[,3]==grad )
y[postox] <- rep(1, length(postox))
results <- titecrm(prior, target_reg, y, x, followup=data_reg[,2], obswin=J)
newdose <- min(results$mtd, max(x)+1, K)
# check on the skipping dose
x <- c(x,rep(newdose,cohort2))
time = T_entrance[i*cohort2+1] # first patient of the next cohort
data_reg_i <- NULL
for (l in (n_fin+1):(i*cohort2)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage2 <- FALSE
ntitetr = length(x)
}
} else {
##### here stage 3
results <- titecrmmc(x,doselabel,y=data_reg[,3],follow=data_reg[,2],alpha0,Tmax=J,target1,target2)
newdose <- min(results$newdose, max(x)+1, K)
# check on the skipping dose
x <- c(x,rep(newdose,cohort2))
time = T_entrance[i*cohort2+1] # first patient of the next cohort
data_reg_i <- NULL
for (l in (n_fin+1):(i*cohort2)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
}
}
##### analysis of complete data
# Final recommendation with the model active at the end of the trial.
if (stage2) {
if (any(data_reg[,3]==3)) {
grad = 3
target_reg = target2
} else {
grad = 2
target_reg = target1
}
# data_reg <- rbind(data_reg_c,data_reg_i) # preparation of data for regression
y <- rep(0, dim(data_reg)[1])
postox <- which(data_reg[,3]==grad )
y[postox] <- rep(1, length(postox))
results <- titecrm(prior, target_reg, y, x, followup=data_reg[,2], obswin=J)
MTD <- min(results$mtd, max(x)+1, K)
ntitetr=0
} else {
results <- titecrmmc(x,doselabel,y=data_reg[,3],follow=data_reg[,2],alpha0,Tmax=J,target1,target2)
MTD <- min(results$newdose, max(x)+1, K)
}
MTD_titecrmmc <- c(MTD_titecrmmc,MTD)
doses_titecrmmc <- rbind(doses_titecrmmc,x)
tox_titecrmmc <- rbind(tox_titecrmmc,data_reg[,3])
ntite <- c(ntite,ntitetr)
}
}
library(xtable)
# Store and export results for the "fixed accrual / empiric model" setting.
resultsTfixempscen1 <- list(MTD_titecrmmc = MTD_titecrmmc, doses_titecrmmc = doses_titecrmmc, tox_titecrmmc = tox_titecrmmc,
ntite=ntite)
write.table(MTD_titecrmmc, file="scenario1fixemp.txt")
write.table(ntite, file="scenario1fixempntite.txt")
save.image()
######################## pois accrual #####################################################################
#
# Empiric (power) working model with Poisson accrual: entrance times come
# from T_entrance2 and per-cycle visit times are rebuilt below.
library(dfcrm)
# J cycles
# K doses
# n sample size
#invlogit <- function(x) exp(x)/(1-exp(x))
cohort = 3
cohort2 = 1
prior = skel1 #c(0.05,0.1,0.2,0.3)
alpha0=1
target1 = 0.5
target2 = 0.25
TR = Nsim
MTD_titecrmmc <- NULL
doses_titecrmmc <- NULL
tox_titecrmmc <- NULL
xtox_titecrmmc <- NULL
ntite <- NULL
for (tr in 1:TR){
# x doses assigned
# y toxicities
eval(parse(text = paste("tox <- data",tr,"$data_complete", sep="")))
# eval(parse(text = paste("T_entrance <- data",tr,"$T_entrance", sep="")))
################################## for pois
# Rebuild the visit times from the Poisson accrual entrance times
# (T_entrance2); the grade trajectories of data<tr> are reused unchanged.
eval(parse(text = paste("T_entrance <- data",tr,"$T_entrance2", sep="")))
tpois <- NULL
for (poiss in 1:length(T_entrance) ){
times <- cumsum(c(T_entrance[poiss]+1,rep(1,J-1)))
tpois <- c(tpois,times)
}
tox <- data.frame(tox)
tox$time <- tpois
#########################################################################
tox <- data.frame(tox)
T_entrance <- c(T_entrance,Inf)
MTD=-1
ntitetr=0
x <- rep(1,cohort) # first dose to the first cohort
M = n/cohort # number of total cohorts
# data_reg = data_complete[1:(cohort*J),1:4]
# y <- tox[cbind(1:length(x),x)]
time = T_entrance[cohort+1] # time of entrance of the first patient of second cohort
n_fin = 0 # number of patient with complete follow up
data_reg_c <- NULL # building the dataset for TITE: complete, with patients who ended the followup
data_reg_i <- NULL # during follow up
# Collect (patient, follow-up, grade) rows observable at 'time'; the
# max(pos, 0.1) guard handles patients with no visit yet.
for (l in 1:cohort){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
#dimnames(data_reg_c)[[2]] <- c("patient", "follow", "grade")
#dimnames(data_reg_i)[[2]] <- c("patient", "follow", "grade")
data_reg <- rbind(data_reg_c,data_reg_i)
# check if we can apply TITECRM or not
# Stage selection: all grades seen on completed follow-ups -> model stage;
# one grade only -> stage 1; two grades -> stage 2.
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage1 <- stage2 <- FALSE
} else {
if ( length(unique(data_reg[,3]))==1 ) {
stage1 <- TRUE
stage2 <- FALSE
} else {
stage1 <- FALSE
stage2 <- TRUE
}
}
pat=cohort
# Stage 1: rule-based escalation (all grade 3 -> stop; all grade 2 -> stay;
# otherwise escalate one level).
if (stage1) {
for(i in 2:M) {
if ( sum(data_reg[,3]==3)==length(x) ) {
MTD=0
break
} else {
if ( sum(data_reg[,3]==2)==length(x) ) {
x <- c(x,rep(x[length(x)], cohort))
} else {
x <- c(x,rep(min(max(x)+1,K), cohort))
}
}
# dose for the cohort
time = T_entrance[i*cohort+1] # time of entrance of the first of the next cohort
pat=pat+cohort
data_reg_i <- NULL # reset the temporary dataset
for (l in (n_fin+1):(i*cohort)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage2 <- FALSE
ntitetr = length(x)
break
} else {
if ( (any(data_reg[,3]==2) & any(data_reg[,3]==3)) |
(any(data_reg[,3]==1) & any(data_reg[,3]==2)) |
(any(data_reg[,3]==1) & any(data_reg[,3]==3)) ) {
stage2 <- TRUE
break
}
}
}
}
# Trial stopped for toxicity. NOTE(review): rep(.,40) hard-codes n = 40.
if (MTD==0) {
MTD_titecrmmc <- c(MTD_titecrmmc,MTD)
doses_titecrmmc <- rbind(doses_titecrmmc,rep(0,40))
tox_titecrmmc <- rbind(tox_titecrmmc,rep(1,40))
ntite <- c(ntite,0)
} else {
# Patient-by-patient inclusion; i*cohort2 equals i because cohort2 == 1.
for (i in seq(pat+1,n,1)){
if (stage2) {
if (any(data_reg[,3]==3)) {
grad = 3
target_reg = target2
} else {
grad = 2
target_reg = target1
}
# data_reg <- rbind(data_reg_c,data_reg_i) # preparation of data for regression
y <- rep(0, dim(data_reg)[1])
postox <- which(data_reg[,3]==grad )
y[postox] <- rep(1, length(postox))
results <- titecrm(prior, target_reg, y, x, followup=data_reg[,2], obswin=J)
newdose <- min(results$mtd, max(x)+1, K)
# check on the skipping dose
x <- c(x,rep(newdose,cohort2))
time = T_entrance[i*cohort2+1] # first patient of the next cohort
data_reg_i <- NULL
for (l in (n_fin+1):(i*cohort2)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
if (any( data_reg[,3]==1) & any(data_reg[which(data_reg[,3]==1),2]==J) &
any(data_reg[,3]==2) & any(data_reg[which(data_reg[,3]==2),2]==J) &
any(data_reg[,3]==3) & any(data_reg[which(data_reg[,3]==3),2]==J)) {
stage2 <- FALSE
ntitetr = length(x)
}
} else {
##### here stage 3
results <- titecrmmc(x,doselabel,y=data_reg[,3],follow=data_reg[,2],alpha0,Tmax=J,target1,target2)
newdose <- min(results$newdose, max(x)+1, K)
# check on the skipping dose
x <- c(x,rep(newdose,cohort2))
time = T_entrance[i*cohort2+1] # first patient of the next cohort
data_reg_i <- NULL
for (l in (n_fin+1):(i*cohort2)){
pos <- which( tox$time[((l-1)*J+1):(l*J)] <= time )
pos <- max(pos,0.1)
if (pos == J) {
follow = J
grade = tox[l*J, (x[l]+3)]
n_fin = l
data_reg_c <- rbind( data_reg_c, c(l,follow,grade))
}
else {
follow = pos
grade = tox[((l-1)*J+ceiling(pos)), (x[l]+3)]
data_reg_i <- rbind( data_reg_i, c(l,follow,grade))
}
}
data_reg <- rbind(data_reg_c,data_reg_i)
}
}
##### analysis of complete data
# Final recommendation with the model active at the end of the trial.
if (stage2) {
if (any(data_reg[,3]==3)) {
grad = 3
target_reg = target2
} else {
grad = 2
target_reg = target1
}
# data_reg <- rbind(data_reg_c,data_reg_i) # preparation of data for regression
y <- rep(0, dim(data_reg)[1])
postox <- which(data_reg[,3]==grad )
y[postox] <- rep(1, length(postox))
results <- titecrm(prior, target_reg, y, x, followup=data_reg[,2], obswin=J)
MTD <- min(results$mtd, max(x)+1, K)
ntitetr=0
} else {
results <- titecrmmc(x,doselabel,y=data_reg[,3],follow=data_reg[,2],alpha0,Tmax=J,target1,target2)
MTD <- min(results$newdose, max(x)+1, K)
}
MTD_titecrmmc <- c(MTD_titecrmmc,MTD)
doses_titecrmmc <- rbind(doses_titecrmmc,x)
tox_titecrmmc <- rbind(tox_titecrmmc,data_reg[,3])
ntite <- c(ntite,ntitetr)
}
}
library(xtable)
# Store and export results for the "Poisson accrual / empiric model" setting.
resultsTpoisempscen1 <- list(MTD_titecrmmc = MTD_titecrmmc, doses_titecrmmc = doses_titecrmmc, tox_titecrmmc = tox_titecrmmc,
ntite=ntite)
write.table(MTD_titecrmmc, file="scenario1poisemp.txt")
write.table(ntite, file="scenario1poisempntite.txt")
save.image()
|
6ebd461176a3270140051611e2c76d28e394930c | 989126aab750905787fd6bfddbd4f16fdbaec5e4 | /man/Indian.Rd | 90d8bfefb85b5b856ab92177bd9a630197eda0b6 | [] | no_license | johnsonjc6/BSDA | 86447864cfaaf6aa4ce6191898d1fe2cc5824fd0 | 563b15d83ec66d5be94a491d883a18179f6fc5f6 | refs/heads/master | 2020-12-11T07:26:16.236991 | 2016-04-25T23:13:40 | 2016-04-25T23:13:40 | 55,450,475 | 0 | 0 | null | 2016-04-04T22:38:42 | 2016-04-04T22:38:41 | R | UTF-8 | R | false | true | 1,082 | rd | Indian.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Indian}
\alias{Indian}
\title{Educational attainment versus per capita income and poverty rate for
American indians living on reservations}
\format{A data frame with 10 observations on the following 4 variables.
\describe{
\item{Reserv}{a factor with levels \code{Blackfeet}
\code{Fort Apache} \code{Gila River} \code{Hopi} \code{Navajo} \code{Papago}
\code{Pine Ridge} \code{Rosebud} \code{San Carlos} \code{Zuni Pueblo}}
\item{highsch}{a numeric vector}
\item{income}{a numeric vector}
\item{poverty}{a numeric vector}
}}
\description{
Data for Exercise 2.95
}
\examples{
str(Indian)
attach(Indian)
par(mfrow=c(1,2))
plot(highsch,income,xlab="Percent High School Graduates", ylab="Per capita income")
plot(highsch,poverty,xlab="Percent High School Graduates", ylab="Poverty rate")
par(mfrow=c(1,1))
cor(cbind(highsch,income,poverty))
detach(Indian)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\keyword{datasets}
|
5c57863466430810c89d49a359c3e04399ac5c86 | e855a5b2db6fa230fc9cf4527c9076f4295ad779 | /scripts/getdata.R | 8c83aa81ae0c6522ce1d5a9832600a31a2b78832 | [
"MIT"
getdata <- function() {
  ### getdata.R
  ##
  ## Imports and cleans World Bank indicator data for all variables we aim to
  ## examine, then consolidates everything into one wide data frame with one
  ## row per country-year and one column per indicator.
  ##
  ## Returns: a reshape::cast data frame (Country_Name + Year ~ Indicator_Code).
  library(reshape)

  ## All raw files come from the World Bank Indicators database and share the
  ## same layout: 4 metadata lines, a header row, then one row per country.
  ## The list order matters only for row order before melting.
  indicator_files <- c(
    age_dependency_old        = "data/API_SP.POP.DPND.OL_DS2_en_csv_v2_10184911.csv",
    age_dependency_young      = "data/API_SP.POP.DPND.YG_DS2_en_csv_v2_10199819.csv",
    age_dependency_ratio      = "data/API_SP.POP.DPND_DS2_en_csv_v2_10188478.csv",
    GDP                       = "data/API_NY.GDP.MKTP.CD_DS2_en_csv_v2_10203569.csv",
    health_expenditure_capita = "data/API_SH.XPD.EHEX.PC.CD_DS2_en_csv_v2_10203894.csv",
    health_expenditure        = "data/API_SH.XPD.CHEX.GD.ZS_DS2_en_csv_v2_10183765.csv",
    household_consumption     = "data/API_NE.CON.PRVT.CD_DS2_en_csv_v2_10184156.csv",
    GINI                      = "data/API_SI.POV.GINI_DS2_en_csv_v2_10181010.csv",
    life_expectency_birth     = "data/API_SP.DYN.LE00.IN_DS2_en_csv_v2_10181296.csv",
    total_popuation           = "data/API_SP.POP.TOTL_DS2_en_csv_v2_10203548.csv"
  )

  ## Generic column names shared by every indicator file; the year columns
  ## 1960:2017 are coerced to character names by c().
  header_names <- c("Country_Name", "Country_Code", "Indicator_Name",
                    "Indicator_Code", 1960:2017)

  ## Country metadata, used to drop aggregate rows (regions, income groups),
  ## which carry an empty Region field in the World Bank metadata file.
  country_code <- read.csv("data/Metadata_Country.csv", sep = ",",
                           header = TRUE, skip = 0)
  colnames(country_code)[1] <- "CountryCode"  # first column name varies by export
  country_code <- country_code[!(country_code$Region == ""), ]

  ## Read one indicator file and normalize it: drop the empty trailing column
  ## (caused by a trailing comma in the raw CSVs), apply the shared header
  ## names, and keep only real countries.
  clean_one <- function(path) {
    d <- read.csv(path, sep = ",", header = TRUE, skip = 4)
    d[length(d)] <- NULL
    colnames(d) <- header_names
    d[d$Country_Code %in% country_code$CountryCode, ]
  }

  ## Read, clean, and stack all indicators at once -- avoids the O(n^2)
  ## rbind()-in-a-loop and the assign()/get() metaprogramming of before.
  all_inds <- do.call(rbind, lapply(indicator_files, clean_one))

  ## Long format (one row per country-indicator-year), then wide:
  ## one row per country-year, one column per indicator code.
  all_inds_melted <- melt(all_inds,
                          id = c("Country_Name", "Country_Code",
                                 "Indicator_Name", "Indicator_Code"),
                          variable_name = "Year")
  cast(all_inds_melted, Country_Name + Year ~ Indicator_Code)
}
e7252bee5116a7a9ded7b55d1af68b73eea6a5ed | 893e5cd93fb10d65ea087050d46b9d5d130d102e | /R/cluster_vcov.r | 40af32098a414589e8d4d09ccd8fe6d6b04d1c91 | [] | no_license | kendonB/multiwayvcov | 30a9c1a40a6b454836739405f8b0c7e28d9437b6 | a7ff0079c6731c4842dcf6b78656dcef347cc3ca | refs/heads/master | 2021-01-15T17:37:22.055091 | 2015-09-08T23:53:42 | 2015-09-08T23:53:42 | 41,392,443 | 0 | 0 | null | 2015-08-25T23:08:59 | 2015-08-25T23:08:57 | R | UTF-8 | R | false | false | 11,263 | r | cluster_vcov.r | ######################################################################
#' @title Multi-way standard error clustering
#'
#' @description Return a multi-way cluster-robust variance-covariance matrix
#'
#' @param model The estimated model, usually an \code{lm} or \code{glm} class object
#' @param cluster A \code{vector}, \code{matrix}, or \code{data.frame} of cluster variables,
#' where each column is a separate variable. If the vector \code{1:nrow(data)}
#' is used, the function effectively produces a regular
#' heteroskedasticity-robust matrix.
#' @param parallel Scalar or list. If a list, use the list as a list
#' of connected processing cores/clusters. A scalar indicates no
#' parallelization. See the \bold{parallel} package.
#' @param use_white Logical or \code{NULL}. See description below.
#' @param df_correction Logical or \code{vector}. \code{TRUE} computes degrees
#' of freedom corrections, \code{FALSE} uses no corrections. A vector of length
#' \eqn{2^D - 1} will directly set the degrees of freedom corrections.
#' @param leverage Integer. EXPERIMENTAL Uses Mackinnon-White HC3-style leverage
#' adjustments. Known to work in the non-clustering case,
#' e.g., it reproduces HC3 if \code{df_correction=FALSE}. Set to 3 for HC3-style
#' and 2 for HC2-style leverage adjustments.
#' @param debug Logical. Print internal values useful for debugging to
#' the console.
#' @param force_posdef Logical. Force the eigenvalues of the variance-covariance
#' matrix to be positive.
#'
#' @keywords clustering multi-way robust standard errors
#'
#' @details
#' This function implements multi-way clustering using the method
#' suggested by Cameron, Gelbach, & Miller (2011), which involves
#' clustering on \eqn{2^D - 1} dimensional combinations, e.g.,
#' if we're cluster on firm and year, then we compute for firm,
#' year, and firm-year. Variance-covariance matrices with an odd
#' number of cluster variables are added, and those with an even
#' number are subtracted.
#'
#' The cluster variable(s) are specified by passing the entire variable(s)
#' to cluster (\code{cbind()}'ed as necessary). The cluster variables should
#' be of the same number of rows as the original data set; observations
#' omitted or excluded in the model estimation will be handled accordingly.
#'
#' Ma (2014) suggests using the White (1980) variance-covariance matrix
#' as the final, subtracted matrix when the union of the clustering
#' dimensions U results in a single observation per group in U;
#' e.g., if clustering by firm and year, there is only one observation
#' per firm-year, we subtract the White (1980) HC0 variance-covariance
#' from the sum of the firm and year vcov matrices. This is detected
#' automatically (if \code{use_white = NULL}), but you can force this one way
#' or the other by setting \code{use_white = TRUE} or \code{FALSE}.
#'
#' Some authors suggest avoiding degrees of freedom corrections with
#' multi-way clustering. By default, the function uses corrections
#' identical to Petersen (2009) corrections. Passing a numerical
#' vector to \code{df_correction} (of length \eqn{2^D - 1}) will override
#' the default, and setting \code{df_correction = FALSE} will use no correction.
#'
#' Cameron, Gelbach, & Miller (2011) further suggest a method for forcing
#' the variance-covariance matrix to be positive semidefinite by correcting
#' the eigenvalues of the matrix. To use this method, set \code{force_posdef = TRUE}.
#' Do not use this method unless absolutely necessary! The eigen/spectral
#' decomposition used is not ideal numerically, and may introduce small
#' errors or deviations. If \code{force_posdef = TRUE}, the correction is applied
#' regardless of whether it's necessary.
#'
#' The defaults deliberately match the Stata default output for one-way and
#' Mitchell Petersen's two-way Stata code results. To match the
#' SAS default output (obtained using the class & repeated subject
#' statements, see Arellano (1987)) simply turn off the degrees of freedom correction.
#'
#' Parallelization is available via the \bold{parallel} package by passing
#' the "cluster" list (usually called \code{cl}) to the parallel argument.
#'
#' @return a \eqn{k} x \eqn{k} variance-covariance matrix of type 'matrix'
#'
#' @export
#' @author Nathaniel Graham \email{npgraham1@@gmail.com}
#'
#' @references
#' Arellano, M. (1987). PRACTITIONERS' CORNER: Computing Robust Standard Errors for
#' Within-groups Estimators. Oxford bulletin of Economics and Statistics, 49(4), 431-434.
#'
#' Cameron, A. C., Gelbach, J. B., & Miller, D. L. (2011). Robust inference with multiway
#' clustering. Journal of Business & Economic Statistics, 29(2).
#'
#' Ma, Mark (Shuai), Are We Really Doing What We Think We Are Doing? A Note on Finite-Sample
#' Estimates of Two-Way Cluster-Robust Standard Errors (April 9, 2014).
#'
#' MacKinnon, J. G., & White, H. (1985). Some heteroskedasticity-consistent covariance matrix
#' estimators with improved finite sample properties. Journal of Econometrics, 29(3), 305-325.
#'
#' Petersen, M. A. (2009). Estimating standard errors in finance panel data sets: Comparing
#' approaches. Review of financial studies, 22(1), 435-480.
#'
#' White, H. (1980). A heteroskedasticity-consistent covariance matrix estimator and a direct
#' test for heteroskedasticity. Econometrica: Journal of the Econometric Society, 817-838.
#'
#' @importFrom sandwich estfun meatHC sandwich
#' @importFrom parallel clusterExport parApply
#'
#' @examples
#' library(lmtest)
#' data(petersen)
#' m1 <- lm(y ~ x, data = petersen)
#'
#' # Cluster by firm
#' vcov_firm <- cluster.vcov(m1, petersen$firmid)
#' coeftest(m1, vcov_firm)
#'
#' # Cluster by year
#' vcov_year <- cluster.vcov(m1, petersen$year)
#' coeftest(m1, vcov_year)
#'
#' # Double cluster by firm and year
#' vcov_both <- cluster.vcov(m1, cbind(petersen$firmid, petersen$year))
#' coeftest(m1, vcov_both)
#'
#' # Replicate Mahmood Arai's double cluster by firm and year
#' vcov_both <- cluster.vcov(m1, cbind(petersen$firmid, petersen$year), use_white = FALSE)
#' coeftest(m1, vcov_both)
#'
#' # For comparison, produce White HC0 VCOV the hard way
#' vcov_hc0 <- cluster.vcov(m1, 1:nrow(petersen), df_correction = FALSE)
#' coeftest(m1, vcov_hc0)
#'
#' # Produce White HC1 VCOV the hard way
#' vcov_hc1 <- cluster.vcov(m1, 1:nrow(petersen), df_correction = TRUE)
#' coeftest(m1, vcov_hc1)
#'
#' # Produce White HC2 VCOV the hard way
#' vcov_hc2 <- cluster.vcov(m1, 1:nrow(petersen), df_correction = FALSE, leverage = 2)
#' coeftest(m1, vcov_hc2)
#'
#' # Produce White HC3 VCOV the hard way
#' vcov_hc3 <- cluster.vcov(m1, 1:nrow(petersen), df_correction = FALSE, leverage = 3)
#' coeftest(m1, vcov_hc3)
#'
#' # Go multicore using the parallel package
#' \dontrun{
#' require(parallel)
#' cl <- makeCluster(4)
#' vcov_both <- cluster.vcov(m1, cbind(petersen$firmid, petersen$year), parallel = cl)
#' stopCluster(cl)
#' coeftest(m1, vcov_both)
#' }
cluster.vcov <- function(model, cluster, parallel = FALSE, use_white = NULL,
                         df_correction = TRUE, leverage = FALSE, force_posdef = FALSE,
                         debug = FALSE) {
  # Multi-way cluster-robust VCOV (Cameron, Gelbach & Miller 2011): build the
  # "meat" for every combination of clustering dimensions, adding matrices
  # from an odd number of dimensions and subtracting the even ones.
  cluster <- as.data.frame(cluster)
  cluster_dims <- ncol(cluster)
  
  # total cluster combinations, 2^D - 1
  tcc <- 2^cluster_dims - 1
  # all cluster combinations (singletons first, then pairs, triples, ...)
  acc <- list()
  for(i in seq_len(cluster_dims)) {
    acc <- append(acc, combn(seq_len(cluster_dims), i, simplify = FALSE))
  }
  if(debug) print(acc)
  # Sign: +1 for an odd number of dimensions, -1 for an even number
  vcov_sign <- vapply(acc, function(i) (-1)^(length(i) + 1), numeric(1))
  # Drop the original (singleton) cluster vars from the combinations list
  acc <- acc[-seq_len(cluster_dims)]
  if(debug) print(acc)
  
  # Handle omitted or excluded observations: align the cluster variables with
  # the rows used in estimation. With na.exclude, estfun() keeps the dropped
  # rows, so they must be removed here too; with na.omit they are already gone.
  # inherits() replaces `class(x) == "..."` (na.action objects can carry more
  # than one class, and length-> 1 conditions error in modern R).
  if(!is.null(model$na.action)) {
    if(inherits(model$na.action, "exclude")) {
      cluster <- cluster[-model$na.action,]
      esttmp <- estfun(model)[-model$na.action,]
    } else if(inherits(model$na.action, "omit")) {
      cluster <- cluster[-model$na.action,]
      esttmp <- estfun(model)
    } else {
      # Unknown na.action class: fall back to the raw estimating functions.
      # (Previously esttmp was left undefined and the function failed later
      # with an unhelpful error.)
      esttmp <- estfun(model)
    }
    cluster <- as.data.frame(cluster)  # subsetting can drop the data.frame class
  } else {
    esttmp <- estfun(model)
  }
  
  if(debug) print(class(cluster))
  
  # Build the interaction cluster variable for every multi-dimension combo
  if(cluster_dims > 1) {
    for(i in acc) {
      cluster <- cbind(cluster, Reduce(paste0, cluster[,i]))
    }
  }
  
  # Per-combination bookkeeping: M = number of clusters, N = observations,
  # K = model rank; used for the small-sample corrections below.
  df <- data.frame(M = integer(tcc),
                   N = integer(tcc),
                   K = integer(tcc))
  for(i in seq_len(tcc)) {
    df[i, "M"] <- length(unique(cluster[,i]))
    df[i, "N"] <- length(cluster[,i])
    df[i, "K"] <- model$rank
  }
  
  # isTRUE() rather than `df_correction == TRUE`: the documented usage allows
  # a numeric vector here, and a vector on the left of == inside if() raises
  # a "condition has length > 1" error in R >= 4.2.
  if(isTRUE(df_correction)) {
    # Petersen (2009)-style correction, matching the Stata default
    df$dfc <- (df$M / (df$M - 1)) * ((df$N - 1) / (df$N - df$K))
  } else if(length(df_correction) > 1) {
    df$dfc <- df_correction  # user-supplied vector, one entry per combination
  } else {
    df$dfc <- 1              # no correction (SAS-style)
  }
  
  # Ma (2014): if the union of all dimensions yields one observation per
  # group, use the White HC0 meat as the final (subtracted) matrix.
  if(is.null(use_white)) {
    if(cluster_dims > 1 && df[tcc, "M"] == prod(df[-tcc, "M"])) {
      use_white <- TRUE
    } else {
      use_white <- FALSE
    }
  }
  if(use_white) {
    df <- df[-tcc,]
    tcc <- tcc - 1
  }
  
  if(debug) {
    print(acc)
    print(paste("Original Cluster Dimensions", cluster_dims))
    print(paste("Theoretical Cluster Combinations", tcc))
    print(paste("Use White VCOV for final matrix?", use_white))
  }
  
  # EXPERIMENTAL MacKinnon-White HC2/HC3-style leverage adjustment of scores
  if(leverage) {
    if("x" %in% names(model)) {
      X <- model$x
    } else {
      X <- model.matrix(model)
    }
    ixtx <- solve(crossprod(X))
    h <- 1 - vapply(seq_len(df[1, "N"]),
                    function(i) X[i,] %*% ixtx %*% as.matrix(X[i,]), numeric(1))
    if(leverage == 3) {
      esttmp <- esttmp / h
    } else if(leverage == 2) {
      esttmp <- esttmp / sqrt(h)
    }
  }
  
  # Meat for each combination: cross-product of scores summed within clusters
  uj <- list()
  if(length(parallel) > 1) {
    clusterExport(parallel, varlist = c("cluster", "model"), envir = environment())
  }
  for(i in seq_len(tcc)) {
    cluster[,i] <- factor(cluster[,i])
    if(length(parallel) > 1) {
      uj[[i]] <- crossprod(parApply(parallel, esttmp, 2,
                                    function(x) tapply(x, cluster[,i], sum)))
    } else {
      uj[[i]] <- crossprod(apply(esttmp, 2, function(x) tapply(x, cluster[,i], sum)))
    }
  }
  
  if(debug) {
    print(df)
    print(uj)
    print(vcov_sign)
  }
  
  # Signed, df-corrected meat matrices; optionally the White HC0 meat last
  vcov_matrices <- list()
  for(i in seq_len(tcc)) {
    vcov_matrices[[i]] <- vcov_sign[i] * df[i, "dfc"] * (uj[[i]] / df[i, "N"])
  }
  if(use_white) {
    # vcov_sign still has its original length, so tcc + 1 indexes the sign of
    # the union combination (this replaces the old `i <- i + 1` loop leak).
    vcov_matrices[[tcc + 1]] <- vcov_sign[tcc + 1] * meatHC(model, type = "HC0")
  }
  if(debug) {
    print(vcov_matrices)
  }
  
  vcov_matrix <- sandwich(model, meat. = Reduce('+', vcov_matrices))
  
  # Optionally force positive semidefiniteness by zeroing negative eigenvalues
  # (CGM 2011). The spectral decomposition can introduce tiny numerical error,
  # so this is applied only on request.
  if(force_posdef) {
    decomp <- eigen(vcov_matrix, symmetric = TRUE)
    if(debug) print(decomp$values)
    pos_eigens <- pmax(decomp$values, rep.int(0, length(decomp$values)))
    vcov_matrix <- decomp$vectors %*% diag(pos_eigens) %*% t(decomp$vectors)
  }
  return(vcov_matrix)
}
|
55b953c796f7ff3807cdd14d8faedfc55939dd8f | e32b67f35e2972f6c1f50b322471ca89292d8aa9 | /MadModules/lib/rlib/bcp_detector.R | 573d5211379d5a10e102bf502318f28a13212420 | [
"Apache-2.0"
] | permissive | HappyFaceGoettingen/HappyFace-MadMask | f6d6d4ccb5d7c859e1b0fa342c0b047d07bc7f1e | adb50ff87d68d5f6f31e51f8e5289620abacdf81 | refs/heads/master | 2018-09-20T16:49:43.396891 | 2018-07-26T12:42:04 | 2018-07-26T12:42:04 | 118,455,364 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,697 | r | bcp_detector.R | require(rimage)
require(bcp)
require(entropy)
require(xts)
##-----------------------------------------------------------
##
## Functions
##
##-----------------------------------------------------------
check.jpeg <- function(filename){
  ## Validate a JPEG by probing its header through rimage's C routine; a
  ## negative 'ret' slot signals an unreadable or corrupt file.
  probe <- .C("get_imagesize_of_JPEG_file", as.character(filename),
              width = integer(1), height = integer(1), depth = integer(1),
              ret = integer(1), PACKAGE = "rimage")
  if (probe$ret < 0) {
    message("Jpeg file error [", filename, "]")
    return(FALSE)
  }
  TRUE
}
check.robj <- function(filename){
  ## Peek at the first bytes of a (possibly gzip-compressed) serialized R
  ## object file. readChar() returns a zero-length vector when the connection
  ## is immediately exhausted, i.e. the file is zero bytes long.
  con <- gzfile(filename)
  on.exit(close(con))
  magic <- readChar(con, 5L, useBytes = TRUE)
  if (length(magic) == 0) {
    message("empty (zero-byte) input file [", filename, "]")
    return(FALSE)
  }
  TRUE
}
img.to.color.histgram <- function(img0){
  ## Summarise pixel intensities (assumed to lie in [0, 1] -- TODO confirm
  ## for rimage objects) as a density histogram over 100 equal-width bins.
  bin.edges <- seq(0, 1, by = 0.01)
  hist(img0, breaks = bin.edges, plot = FALSE)$density
}
white.histgram <- function(hist0){
  ## Reference histogram of an all-white image, the same length as the input:
  ## all mass in the last (brightest) bin, with tiny positive values elsewhere
  ## so a KL divergence against it stays finite.
  ref <- rep.int(10^-10, length(hist0))
  ref[length(ref)] <- 100
  ref
}
calc.information.gain <- function(x0, x1){
  ## Information gain between two frequency vectors: the plug-in estimate of
  ## the KL divergence KL(x0 || x1) from the 'entropy' package.
  KL.plugin(x0, x1)
}
calc.bcp <- function(ds){
  ## Bayesian change-point analysis of the series via the 'bcp' package;
  ## callers read the $posterior.prob component of the returned object.
  bcp(ds)
}
generate.plot.analysis <- function(plot.df, filename, width, height){
  ## Render 'plot.df' (any object with a plot() method) to a PNG file of the
  ## given pixel dimensions.
  ##
  ## Fix: the device is now closed via on.exit(), so it is not leaked if
  ## plot() fails partway (previously dev.off() ran only on the success path).
  ## Returns the filename invisibly; callers use this for its side effect.
  message("Plotting [", filename, "] ...")
  png(filename = filename, width = width, height = height)
  on.exit(dev.off(), add = TRUE)
  plot(plot.df)
  invisible(filename)
}
run.bcp.detector <- function(){
  ## Main pipeline: score every captured JPEG against an all-white reference
  ## (KL divergence of colour histograms), run Bayesian change-point analysis
  ## on the resulting series, cache the results, and plot the bcp fit.
  ## Relies on globals defined elsewhere: output.dir, file.prefix,
  ## robj.detector, date.ids, capture.dir, WIDTH, HEIGHT, and the helper
  ## str.concat -- TODO confirm these are set before this is called.
  plot.analysis.file <- str.concat(output.dir, "/", file.prefix, ".png")
  ## loop over image files
  ## Restore the cached results (info.gain.df etc.) if a readable cache exists.
  if (file.exists(robj.detector)){
    if (check.robj(robj.detector)) load(file=robj.detector)
  } else {
    message("[", robj.detector, "] does not exist!")
  }
  info.gain <- c()
  rownames.info.gain <- c()
  for (date.id in date.ids){
      file <- str.concat(capture.dir, "/", date.id, "/", file.prefix, ".jpg")
      ## skip captures that are missing or not valid JPEGs
      if (!check.jpeg(file)) next
      found <- FALSE
      ## reading cached robj: reuse the score if this date was already scored.
      ## NOTE(review): exists() would also match an info.gain.df found anywhere
      ## on the search path, not only one restored by load() above.
      if (exists("info.gain.df")){
        a.value <- info.gain.df[date.id, "info.gain"]
        if ((!is.null(a.value)) && (!is.na(a.value))) {
          info.gain <- append(info.gain, info.gain.df[date.id, "info.gain"])
          found <- TRUE
        }
      }
      ## generating information gain: KL divergence between this image's
      ## colour histogram and the all-white reference histogram
      if (!found){
        message("Reading [", file, "] ...")
        img <- read.jpeg(file)
        color.hist0 <- img.to.color.histgram(img)
        color.hist1 <- white.histgram(color.hist0)
        info.gain <- append(info.gain, calc.information.gain(color.hist0, color.hist1))
      }
      rownames.info.gain <- append(rownames.info.gain, date.id)
      gc();gc()
  }
  ## Detecting a status change point; info.gain is NULL when no image was usable
  if (is.null(info.gain)){
    bcpobj <- NULL
    bcp.posterior.prob <- c(0)
    latest.bcp.pp <- 0
  } else {
    bcpobj <- calc.bcp(info.gain)
    bcp.posterior.prob <- bcpobj$posterior.prob[!is.na(bcpobj$posterior.prob)]
    latest.bcp.pp <- bcp.posterior.prob[length(bcp.posterior.prob)]
  }
  ## making df (one row per processed date; this becomes the cache)
  info.gain.df <- data.frame(info.gain)
  rownames(info.gain.df) <- rownames.info.gain
  ## saving robj
  message("Saving [", robj.detector, "] ...")
  save(file=robj.detector, info.gain.df, bcpobj, latest.bcp.pp, bcp.posterior.prob)
  ##----------------------------------------
  ## Generating plots
  ##----------------------------------------
  generate.plot.analysis(bcpobj, plot.analysis.file, WIDTH, HEIGHT)
}
## Start: run the full detection pipeline as soon as this script is sourced
run.bcp.detector()
|
b70ff12c40bfc93a43ef721df68b83ba6300a4fe | ad243eefefbd85b5cb94d8b4125ce7b92d0d4786 | /Sleep timing and duration.R | 636d0fa892a65a2ce35a6605e3f7620152a8a5e1 | [] | no_license | khrisyu9/atus | a178efc8e3a0742479ed24390b5ba081b699ffe2 | fa3dafcc88ba315c1f68aa1b7934191f12837b49 | refs/heads/master | 2023-02-09T08:52:07.230451 | 2020-12-27T04:42:30 | 2020-12-27T04:42:30 | 310,483,056 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,504 | r | Sleep timing and duration.R | library(dplyr)
library(tidyverse)
library(ggplot2)
library(data.table)
library(GGally)
library(gridExtra)
library(grid)
library(latex2exp)
library(weights)
library(scales)
library(stats)
library(knitr)
library(plotly)  # provides ggplotly(), used in the plotting loops below
# library(SDMTools)
options(scipen=999)
##############################################################################
# Original time in r: as.numeric(as.POSIXct("1969-12-31 19:00:00"))
# today's 00:00:00 -- as.numeric(as.POSIXct(paste(Sys.Date(), "00:00:00")))
## One pass per ATUS survey year: read that year's activity log, stitch
## together fragmented primary-sleep episodes (two merge passes), keep only
## the activities overlapping the 2 hours before sleep onset, and write a
## per-year CSV. Paths are hard-coded to a local Windows drive.
for (s.year in 2003:2018) {
  file.name = sprintf("D:/galit/7th Report_selection compare 2 and act 2 hours bef/actlog_galit.%s.csv",s.year)
  ## start/stop were stored as seconds relative to 19:00; shift them onto the
  ## POSIXct clock used throughout. primary.sleep.number marks the row number
  ## (tuactivity_n) of each respondent's last-flagged primary-sleep episode,
  ## and is 0 on every other row.
  g.actlog = data.table(read.csv(file.name) %>%
                          mutate(start = as.POSIXct(start) +
                                   as.numeric(as.POSIXct(strptime("19:00:00",format = "%H:%M:%S"))),
                                 stop = as.POSIXct(stop) +
                                   as.numeric(as.POSIXct(strptime("19:00:00",format = "%H:%M:%S"))))) %>%
    filter(keep == 1) %>%
    group_by(tucaseid) %>%
    mutate(primary.sleep.number = ifelse(primary.sleep == "primary sleep", tuactivity_n, 0),
           primary.sleep.number = ifelse(primary.sleep.number == max(primary.sleep.number),
                                         primary.sleep.number, 0)) %>%
    select(tucaseid, tuactivity_n, trcode, start, stop, last,
           primary.sleep, keep, primary.sleep.number)
  ## Respondents whose primary sleep is NOT their last activity are candidates
  ## for merging later sleep fragments into it; everyone else passes through.
  gid = unique((g.actlog %>% filter(primary.sleep == "primary sleep", last == 0))$tucaseid)
  g.actlog2 = g.actlog %>% filter(!(tucaseid %in% gid)) %>% ungroup()
  ## First merge pass: for each candidate, collapse the sleep rows
  ## (trcode 10101) at/after the current primary sleep that begin within two
  ## hours of its end into a single new primary-sleep episode; the original
  ## fragments are kept but demoted ("old primary sleep"/"other sleep",
  ## keep = 0), and non-sleep rows on either side are carried over unchanged.
  for (id in gid) {
    data = g.actlog %>%
      filter(tucaseid == id) %>% ungroup()
    data0 = data %>%
      group_by(tucaseid) %>%
      filter(tuactivity_n >= max(primary.sleep.number)) %>%
      filter(start < min(stop)+2*60*60, trcode == 10101)
    if(nrow(data0)>1){
      data0 = data0 %>% mutate(primary.sleep = ifelse(primary.sleep=="primary sleep",
                                                      "old primary sleep",
                                                      "other sleep"),
                               primary.sleep.number = 0, keep = 0) %>%
        ungroup()
      new_sleep = data.table(tucaseid = id, tuactivity_n = max(data0$tuactivity_n),
                             trcode = 10101, start = data0$start[1],
                             stop = data0$stop[nrow(data0)], last = max(data0$last),
                             primary.sleep = "primary sleep", keep = 1,
                             primary.sleep.number = max(data0$tuactivity_n))
      result = data %>%
        group_by(tucaseid) %>%
        filter(tuactivity_n < new_sleep$primary.sleep.number, primary.sleep == "other sleep") %>%
        ungroup()
      result = rbindlist(list(result, new_sleep))
      result = rbindlist(list(result, data0))
      result = rbindlist(list(result, data %>%
                                group_by(tucaseid) %>%
                                filter(tuactivity_n > max(new_sleep$primary.sleep.number)) %>%
                                ungroup()))
      g.actlog2 = g.actlog2 %>%
        rbind(result)
    } else{
      g.actlog2 = g.actlog2 %>%
        rbind(data)
    }
  }
  ## Second merge pass: identical logic, applied once more to respondents
  ## whose merged primary sleep is still not their last activity.
  gid2 = unique((g.actlog2 %>% filter(tucaseid %in% gid,
                                      primary.sleep == "primary sleep",
                                      last == 0))$tucaseid)
  g.actlog3 = g.actlog2 %>% filter(!(tucaseid %in% gid2)) %>% ungroup()
  for (id in gid2) {
    data = g.actlog2 %>%
      filter(tucaseid == id) %>% ungroup()
    data0 = data %>%
      group_by(tucaseid) %>%
      filter(tuactivity_n >= max(primary.sleep.number)) %>%
      filter(start < min(stop)+2*60*60, trcode == 10101)
    if(nrow(data0)>1){
      data0 = data0 %>% mutate(primary.sleep = ifelse(primary.sleep=="primary sleep",
                                                      "old primary sleep",
                                                      "other sleep"),
                               primary.sleep.number = 0, keep = 0) %>%
        ungroup()
      new_sleep = data.table(tucaseid = id, tuactivity_n = max(data0$tuactivity_n),
                             trcode = 10101, start = data0$start[1],
                             stop = data0$stop[nrow(data0)], last = max(data0$last),
                             primary.sleep = "primary sleep", keep = 1,
                             primary.sleep.number = max(data0$tuactivity_n))
      result = data %>%
        group_by(tucaseid) %>%
        filter(tuactivity_n < new_sleep$primary.sleep.number, primary.sleep == "other sleep") %>%
        ungroup()
      result = rbindlist(list(result, new_sleep))
      result = rbindlist(list(result, data0))
      result = rbindlist(list(result, data %>%
                                group_by(tucaseid) %>%
                                filter(tuactivity_n > max(new_sleep$primary.sleep.number)) %>%
                                ungroup()))
      g.actlog3 = g.actlog3 %>%
        rbind(result)
    } else{
      g.actlog3 = g.actlog3 %>%
        rbind(data)
    }
  }
  # 9289 - 2018 people included in the dataset
  # 20084 - 2003 people included in the dataset
  ## For every respondent with a primary sleep, keep only the rows that
  ## overlap the 2-hour window immediately before sleep onset (rows starting
  ## no later than, and ending within 2 hours of, the primary-sleep start).
  g.primary.id = unique((g.actlog3%>%filter(primary.sleep == "primary sleep"))$tucaseid)
  primary.start = g.actlog3 %>%
    group_by(tucaseid) %>%
    mutate(primary.sleep.number = ifelse(primary.sleep == "primary sleep", tuactivity_n, 0),
           primary.sleep.number = ifelse(primary.sleep.number == max(primary.sleep.number),
                                         primary.sleep.number, 0)) %>%
    filter(primary.sleep.number>0) %>%
    mutate(primary.start = start) %>%
    select(tucaseid, primary.start)
  g.actlog0 = g.actlog3 %>%
    group_by(tucaseid) %>%
    mutate(primary.sleep.number = ifelse(primary.sleep == "primary sleep", tuactivity_n, 0),
           primary.sleep.number = ifelse(primary.sleep.number == max(primary.sleep.number),
                                         primary.sleep.number, 0),
           primary.sleep.start = ifelse(primary.sleep == "primary sleep", start, 0)) %>%
    inner_join(primary.start, by = "tucaseid") %>%
    filter(start <= primary.start) %>%
    filter(stop > primary.start - 2*60*60) %>%
    select(tucaseid, tuactivity_n, trcode, start, stop, last,
           primary.sleep, primary.sleep.number) %>%
    arrange(tucaseid,tuactivity_n) %>%
    ungroup()
  ## NOTE(review): `a` is computed here but never used afterwards.
  a = g.actlog0 %>% filter(tucaseid %in% gid2)
  ## Clip each respondent's first retained row so the window starts exactly
  ## two hours before sleep onset. Assumes at least two retained rows per id,
  ## with the second-to-last row's stop at the sleep start -- TODO confirm.
  for (id in g.primary.id) {
    g.actlog0[g.actlog0$tucaseid == id,]$start[1] = g.actlog0[g.actlog0$tucaseid == id,]$stop[
      nrow(g.actlog0[g.actlog0$tucaseid == id,])-1]-2*60*60
  }
  ## Convert times back to seconds relative to 19:00 and write the year's file.
  file.name = sprintf("D:/galit/7th Report_selection compare 2 and act 2 hours bef/act_2_and_primary_sleep.Galit.%s.csv",s.year)
  write.csv(g.actlog0 %>%
              mutate(start = start -
                       as.numeric(as.POSIXct(strptime("19:00:00",format = "%H:%M:%S"))),
                     stop = stop -
                       as.numeric(as.POSIXct(strptime("19:00:00",format = "%H:%M:%S")))) %>%
              arrange(tucaseid, tuactivity_n),file.name)
}
###############################################################################
################## Calculate the Weighted Mean of each Act ####################
###############################################################################
## Parallel extraction (one survey year per worker) of every respondent's
## primary-sleep record -- its start time and duration -- joined with basic
## demographics. foreach row-binds the per-year results into one data frame.
library(doParallel)
library(foreach)
# Setup cores cluster using doParallel
ncores = 4 # detectCores() - 1
cl = makeCluster(ncores)
registerDoParallel(cl)
sleep.timing.duration =
  foreach(
    s.year = 2003:2018, .combine = "rbind") %dopar% {
    options(scipen=999)
    ## Workers start with a clean session, so packages are attached per worker.
    library(survey)
    library(dplyr)
    library(tidyverse)
    library(ggplot2)
    library(data.table)
    # Original time in r: as.numeric(as.POSIXct("1969-12-31 19:00:00"))
    # today's 00:00:00 -- as.numeric(as.POSIXct(paste(Sys.Date(), "00:00:00")))
    file = sprintf("d:/galit/%s",paste0(s.year,"atus"))
    ## NOTE(review): setwd() inside a worker changes that worker's process
    ## state for the rest of the session.
    setwd("D:/galit/3rd Report_act 2 hours bef")
    act_code = read.csv("code_activity.csv")
    ## NOTE(review): codes/activities are read here but never used below.
    codes = act_code[,1]
    activities = act_code[,2]
    setwd(file)
    ## Demographics plus diary-day type: age is bucketed into 5 bands
    ## (<=22, 23-30, 31-50, 51-64, 65+) and holidays override weekday/weekend.
    actsum = read.csv("actsum.csv") %>%
      select(tucaseid, teage, tesex, tudiaryday, trholiday, t010101, tufinlwgt) %>%
      mutate(days = ifelse(tudiaryday %in% c("Sunday", "Monday", "Tuesday",
                                             "Wednesday", "Thursday"), "Weekday", "Weekend"),
             days = ifelse(trholiday == "Diary day was a holiday", "Holiday", days),
             age = ifelse(teage>22,2,1),
             age = ifelse(teage>30,3,age),
             age = ifelse(teage>50,4,age),
             age = ifelse(teage>64,5,age)) %>%
      select(tucaseid, tesex, teage, age, days, tufinlwgt)
    ## NOTE(review): this reads the "act_2_bef_sleep" export, not the
    ## "act_2_and_primary_sleep" files written by the loop above -- confirm
    ## which export is intended.
    file = sprintf("D:/galit/7th Report_selection compare 2 and act 2 hours bef/act_2_bef_sleep.Galit.%s",
                   paste0(s.year,".csv"))
    actlog = read.csv(file) %>%
      mutate(start = as.POSIXct(start) +
               as.numeric(as.POSIXct(strptime("19:00:00",format = "%H:%M:%S"))),
             stop = as.POSIXct(stop) +
               as.numeric(as.POSIXct(strptime("19:00:00",format = "%H:%M:%S"))))
    ###############################################################################
    ###################### Activities 2 hours before bedtime ######################
    ###############################################################################
    ## One row per respondent: the latest-starting primary-sleep episode, its
    ## duration in minutes, and the survey year. The value of this last
    ## assignment is what foreach() collects and row-binds across years.
    sleep.timing.duration0 = actlog %>%
      filter(primary.sleep == "primary sleep") %>%
      inner_join(actsum, by = "tucaseid") %>%
      mutate(duration = as.numeric(stop - start, units = "mins")) %>%
      select(tucaseid, tesex, teage, age, days, trcode, duration, start, stop) %>%
      group_by(tucaseid) %>%
      filter(start == max(start)) %>%
      mutate(year = s.year) %>%
      ungroup()
  }
stopCluster(cl)
## The commented write.csv below was used once to cache the combined result;
## the script now reloads that cache instead of recomputing, restoring the
## POSIXct start times (CSV round-trips them as seconds relative to 19:00).
# write.csv(sleep.timing.duration%>%
#             mutate(start = start -
#                      as.numeric(as.POSIXct(strptime("19:00:00",format = "%H:%M:%S")))) %>%
#             arrange(tucaseid),
#           "D:/galit/9th Report_sleep timing and duration/sleep.timing.duration.csv")
sleep.timing.duration = read.csv("D:/galit/9th Report_sleep timing and duration/sleep.timing.duration.csv") %>%
  mutate(start = as.POSIXct(start) +
           as.numeric(as.POSIXct(strptime("19:00:00",format = "%H:%M:%S"))))
# Histogram: Sleep Duration in different year
for (s.year in 2003:2018) {
p1 = sleep.timing.duration %>% filter(year == s.year) %>%
ggplot(aes(x = duration/60)) +
geom_histogram(stat = "bin", binwidth = 0.5, color = "black", fill = "steelblue") +
scale_x_continuous(breaks = seq(0, 24, 1)) +
labs(x = "Duration (hours)", y = "Number of People",
title = paste0("Histogram: Sleep Duration in ", s.year)) +
theme(plot.title = element_text(hjust = 0.5))
ggplotly(p1)
}
# Histogram: Sleep Timing in different year
for (s.year in 2003:2018) {
p2 = sleep.timing.duration %>% filter(year == s.year) %>%
ggplot(aes(x = start)) +
geom_histogram(stat = "bin", binwidth = 0.5*60*60, color = "black", fill = "steelblue") +
labs(x = "Sleep Timing", y = "Number of People",
title = paste0("Histogram: Sleep Timing in ", s.year)) +
scale_x_datetime(breaks = date_breaks("1 hour"),
date_minor_breaks = "1 hour",
labels = date_format("%H:%M:%S")) +
theme(plot.title = element_text(hjust = 0.5))
ggplotly(p2)
}
|
be59c1c2fac904409935fcbb171bf01bdb299a0d | 24a3984ba7d969cfa33e6a0280f4a25ccc641c68 | /man/remove_jira_credentials.Rd | 476da4b01ecc29b9485a75e83a1b78c1d487b464 | [
"MIT"
] | permissive | amishakov/JirAgileR | eeafffd1e7696748e773d6eec0e6029eebeeccbd | 7626a419f8f9e19aa6d73bb65e7a5c1c7c4da26e | refs/heads/master | 2023-05-17T07:29:26.773829 | 2021-06-08T19:12:22 | 2021-06-08T19:12:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 702 | rd | remove_jira_credentials.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exports.R
\name{remove_jira_credentials}
\alias{remove_jira_credentials}
\title{Removes previously saved JIRA credentials}
\usage{
remove_jira_credentials(verbose = FALSE)
}
\arguments{
\item{verbose}{Optional parameter to remove previously saved parameters}
}
\value{
Returns a \code{data.frame} with the saved JIRA credentials
}
\description{
Removes the JIRA credentials, that have been previously
saved into the environment under the JIRAGILER_PAT variable through
the \code{save_jira_credentials()} function.
}
\examples{
\dontrun{
save_jira_credentials(domain="https://bugreports.qt.io")
remove_jira_credentials()
}
}
|
3fca22b941acc171433a1359874e64ca1f2e2a4b | 73b7cfcdc5e5de2709dfa7c329b0502a846c6f9b | /src/05load_io.R | 56a5f3791eada5b40b287593e5cc7a868e415770 | [] | no_license | akanarek/garber_vp | 67049c052adcabe3ed2848eac27e2b36b4a702f5 | d319f42129253d7cc9ec325811839853d61e4f81 | refs/heads/master | 2021-04-15T14:19:02.070249 | 2018-01-31T16:56:58 | 2018-01-31T16:56:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,877 | r | 05load_io.R | #load output files
#CONTROL
# Load the simulation output array for each exposure scenario.  Each .RData
# file is expected to define the matching tdarray_* object; dim() is printed
# as a quick sanity check (days, outputs, simulations).
load(paste0(vpdir_out_control, "tdarray_control.RData"))
dim(tdarray_control)
#FOLIAR
load(paste0(vpdir_out_foliar, "tdarray_foliar.RData"))
dim(tdarray_foliar)
#SEED
load(paste0(vpdir_out_seed, "tdarray_seed.RData"))
dim(tdarray_seed)
#SOIL
load(paste0(vpdir_out_soil, "tdarray_soil.RData"))
dim(tdarray_soil)
#TIME
# timearray gives the simulation time axis; nrows is its length.
load(paste0(vpdir_output, "timearray.RData"))
nrows <- length(timearray)
#rownames(tdarray)
#days, outputs, simulations
#read input files
#CONTROL
indata_control <- read.csv(file = paste0(vpdir_in_control, "inputdata_control.csv"), header = TRUE)
#cut out column "X" (row-name column written by a previous write.csv)
# Guarded: with the original `indata_control[, -which(...)]`, a missing "X"
# column would yield -integer(0) and silently drop EVERY column.
del_col <- which(colnames(indata_control) == "X")
if (length(del_col) > 0) {
  inputdata_control <- indata_control[, -del_col]
} else {
  inputdata_control <- indata_control
}
#FOLIAR
# For the treatment scenarios, keep only numeric columns and drop the first
# (row-id) column.
indata_foliar <- read.csv(file = paste0(vpdir_in_foliar, "inputdata_foliar.csv"), header = TRUE)
inputdata_foliar <- indata_foliar %>% select_if(is.numeric) %>% select(-1)
indata_seed <- read.csv(file = paste0(vpdir_in_seed, "inputdata_seed.csv"), header = TRUE)
inputdata_seed <- indata_seed %>% select_if(is.numeric) %>% select(-1)
indata_soil <- read.csv(file = paste0(vpdir_in_soil, "inputdata_soil.csv"), header = TRUE)
inputdata_soil <- indata_soil %>% select_if(is.numeric) %>% select(-1)
#extract input vectors from dataframe: promote each column to a variable of
#the same name in the calling environment.  seq_along() is safe for a
#zero-column frame, unlike 1:length(...), which would yield c(1, 0).
for (i in seq_along(inputdata_control)) {assign(names(inputdata_control)[i], inputdata_control[[i]])}
for (i in seq_along(inputdata_foliar)) {assign(names(inputdata_foliar)[i], inputdata_foliar[[i]])}
for (i in seq_along(inputdata_seed)) {assign(names(inputdata_seed)[i], inputdata_seed[[i]])}
for (i in seq_along(inputdata_soil)) {assign(names(inputdata_soil)[i], inputdata_soil[[i]])}
#convert dataframe to list
#linputdata <- as.list(inputdata)
#withdraw miteimmtype from list
#listinput<- as.list(linputdata[c(1:5,7:16)])
|
3af9d6cd680ab49be04983944caec39366644ab8 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/registry/examples/registry.Rd.R | 90742e14cbd3cbe535fdd88a7d684c5030f8e174 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 570 | r | registry.Rd.R | library(registry)
### Name: registry
### Title: Registry creator
### Aliases: registry
### Keywords: data
### ** Examples
R <- registry()
R$set_field("X", type = TRUE)
R$set_field("Y", type = "character")
R$set_field("index", type = "character", is_key = TRUE,
index_FUN = match_partial_ignorecase)
R$set_field("index2", type = "integer", is_key = TRUE)
R$set_entry(X = TRUE, Y = "bla", index = "test", index2 = 1L)
R$set_entry(X = FALSE, Y = "foo", index = c("test", "bar"), index2 = 2L)
R$get_entries("test")
R[["test", 1]]
R["test"]
R[["test"]]
|
5b4b7e8c6426d27ee603bbb19c51e203c537c5fd | 371289feb46d718787561fae2092e4686ca96843 | /intermediate_files/nfl_538_team_codes.R | 4d5212cf0df261494c358d9a36dc3daabbdc0b73 | [] | no_license | SDS410-Spring2020/NFL | 75f732882443987ce8f02e9945a0fab82eb7c7e8 | 7c084b2c914235afacdc5c10ca3a5faf20c2389c | refs/heads/master | 2022-08-22T03:03:29.587450 | 2020-05-27T17:32:10 | 2020-05-27T17:32:10 | 237,068,115 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,098 | r | nfl_538_team_codes.R | #editing team codes in 538 data to match the team codes in our data
#reading in nfl elo data from FiveThirtyEight
# nfl_elo_latest.csv holds the current-season games; nfl_elo.csv is the
# full historical Elo file.  Paths are relative to the project root.
nfl_elo_2019 <- read.csv("nfl_elo_latest.csv")
nfl_elo <- read.csv("nfl_elo.csv")
#wrangle nfl elo data
# Keep 2010+ seasons, parse the game date, and rename 538's team1/team2 to
# our home/visitor club-code column names.
# NOTE(review): season >= "2010" compares as character if season was read
# as character -- confirm season is numeric in the raw file.
nfl_elo <- nfl_elo %>%
  filter(season >= "2010") %>%
  mutate(date = as.Date(date, "%Y-%m-%d")) %>%
  rename(HomeClubCode = team1,
         VisitorClubCode = team2)
# Distinct 538 club codes, taken from the home-team column only.
# NOTE(review): assumes every team appears at least once as home -- confirm.
fivethirtyeight_nfl_codes <- distinct(nfl_elo, HomeClubCode)
#reading in scraped data of team codes from nfl website
club_codes <- read.csv("nfl_club_codes.csv")
#wrangle scraped data
# Scraped columns: "NFL" holds the club code, "X" the team label.
club_codes <- club_codes %>%
  rename(code = NFL,
         team = X)
#reading in the two nfl data frames
games <- read.csv("data/games_Smith.csv")
pbp<- read.csv("data/pbp_Smith.csv")
#create merged df
# Inner join keeps only plays whose GameKey also exists in the games table.
combined_df <- games %>%
  inner_join(pbp, by="GameKey")
#add a thursday column
# Indicator: 1 for Thursday games, 0 otherwise.
fulldata_with_thurs <- combined_df %>%
  mutate(is_thurs = ifelse(Game_Day == "Thursday", 1, 0))
#create df of team codes from our data
unique_mydata_codes <- distinct(fulldata_with_thurs, PossessionTeam)
#adding id column to scraped data and unique 538 data
club_codes <- tibble::rowid_to_column(club_codes, "ID")
fivethirtyeight_nfl_codes <- tibble::rowid_to_column(fivethirtyeight_nfl_codes, "ID")
#create tibble
# Helper vectors retained because later sections build their own tibbles
# from them.
vector1 <- seq(1, 34, 1)
vector2 <- seq(1, 34, 1)
vector3 <- seq(1, 34, 1)
# Cross-reference every club code in our play-by-play data against the
# scraped NFL list and the FiveThirtyEight codes (1 = present, 0 = missing).
#
# BUG FIX: the original loop wrote each row at `index` *before* incrementing
# it from 0.  Assignment at index 0 is a silent no-op in R, so the first
# club was dropped and the final row kept its seq() placeholder.  Building
# the columns with vectorized %in% tests is correct and removes the loop
# (and its debug print()) entirely.
our_codes <- as.character(unique_mydata_codes$PossessionTeam)
m <- tibble(
  mydata = our_codes,
  in_known_code = ifelse(our_codes %in% club_codes$code, 1, 0),
  in_538 = ifelse(our_codes %in% fivethirtyeight_nfl_codes$HomeClubCode, 1, 0)
)
# Cross-check from the FiveThirtyEight side: is each 538 code present in
# our data and in the scraped NFL list?
#
# BUG FIX: the original loop's row index started at 0 and was incremented
# after the assignments; index 0 is a silent no-op in R, so the first code
# was lost and the last row was never filled.  Vectorized %in% tests avoid
# the manual indexing entirely.
elo_codes <- as.character(fivethirtyeight_nfl_codes$HomeClubCode)
five38 <- tibble(
  five38_data = elo_codes,
  in_mydata = ifelse(elo_codes %in% unique_mydata_codes$PossessionTeam, 1, 0),
  in_known_code = ifelse(elo_codes %in% club_codes$code, 1, 0)
)
# Cross-check from the scraped-NFL-list side: is each scraped code present
# in our data and in the FiveThirtyEight codes?
#
# BUG FIXES relative to the original loop:
#  * the row index started at 0 (silent no-op assignment in R), dropping
#    the first code and leaving the last row unfilled;
#  * the in_538 column was assigned `in_known_code` -- a stale variable
#    left over from the previous loop -- instead of the freshly computed
#    538 membership flag.
scraped_codes <- as.character(club_codes$code)
known_code <- tibble(
  known_code_data = scraped_codes,
  in_mydata = ifelse(scraped_codes %in% unique_mydata_codes$PossessionTeam, 1, 0),
  in_538 = ifelse(scraped_codes %in% fivethirtyeight_nfl_codes$HomeClubCode, 1, 0)
)
# Rows whose code fails to appear in at least one of the other two sources.
missing_codes_in_mydata <- filter(m, in_known_code == 0 | in_538 == 0)
missing_codes_in_known_code <- filter(known_code, in_mydata == 0 | in_538 == 0)
missing_codes_in_538 <- filter(five38, in_mydata == 0 | in_known_code == 0)
#changing code names in 538
# Map 538's club codes onto the codes used in our play-by-play data.  Most
# teams are a straight one-to-one recode; "LAC" is split by season because
# the Chargers changed city.
# NOTE(review): LAC maps to "SD" for seasons >= 2017 and to "LA" for
# seasons <= 2016 -- the Chargers played in San Diego through 2016, so
# confirm this split matches the codes actually used in our data.
new_nfl_elo <- nfl_elo %>%
  mutate(
    HomeClubCode =
      case_when(
        as.character(HomeClubCode) == "HOU" ~ "HST",
        as.character(HomeClubCode) == "LAR" ~ "SL",
        as.character(HomeClubCode) == "WSH" ~ "WAS",
        as.character(HomeClubCode) == "CLE" ~ "CLV",
        as.character(HomeClubCode) == "BAL" ~ "BLT",
        as.character(HomeClubCode) == "ARI" ~ "ARZ",
        as.character(HomeClubCode) == "LAC" & as.character(season) >= "2017" ~ "SD",
        as.character(HomeClubCode) == "LAC" & as.character(season) <= "2016" ~ "LA",
        TRUE ~ as.character(HomeClubCode)),
    # Same mapping applied to the visiting team's code.
    VisitorClubCode =
      case_when(
        as.character(VisitorClubCode) == "HOU" ~ "HST",
        as.character(VisitorClubCode) == "LAR" ~ "SL",
        as.character(VisitorClubCode) == "WSH" ~ "WAS",
        as.character(VisitorClubCode) == "CLE" ~ "CLV",
        as.character(VisitorClubCode) == "BAL" ~ "BLT",
        as.character(VisitorClubCode) == "ARI" ~ "ARZ",
        as.character(VisitorClubCode) == "LAC" & as.character(season) >= "2017" ~ "SD",
        as.character(VisitorClubCode) == "LAC" & as.character(season) <= "2016" ~ "LA",
        TRUE ~ as.character(VisitorClubCode)))
#check if we have changed all the codes in 538 correctly
fivethirtyeight_nfl_codes <- distinct(new_nfl_elo, HomeClubCode)
# Re-run the 538-side cross-check on the recoded data; after the recode
# every code should appear in both other sources.
#
# BUG FIX: vectorized rebuild of a loop whose row index started at 0 (a
# silent no-op assignment in R), which dropped the first code and never
# filled the final row.  The unused vector1/vector2/vector3 placeholders
# are no longer needed here.
recoded_codes <- as.character(fivethirtyeight_nfl_codes$HomeClubCode)
five38 <- tibble(
  five38_data = recoded_codes,
  in_mydata = ifelse(recoded_codes %in% unique_mydata_codes$PossessionTeam, 1, 0),
  in_known_code = ifelse(recoded_codes %in% club_codes$code, 1, 0)
)
#Woohoo! |
734ef59943e1712b46c26b3231e719fae66c9bcb | c3c7f2a54c4f56e8f6fdc0b34ce8915fe460d96f | /man/get_banocc_output.Rd | 487f60e97e24492d1178bd631ca934b5ce7ee330 | [
"MIT"
] | permissive | biobakery/banocc | 620b6dde0e7f6d41864b0aa411b6e0db1cf282a6 | e619c80fe49f13d50fefb6f021d672f84235b7ea | refs/heads/master | 2023-08-22T18:26:58.587398 | 2023-08-04T15:53:41 | 2023-08-04T15:53:41 | 70,067,537 | 6 | 2 | null | null | null | null | UTF-8 | R | false | true | 2,708 | rd | get_banocc_output.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_banocc_output.R
\name{get_banocc_output}
\alias{get_banocc_output}
\title{Takes a model fit from BAnOCC, evaluates convergence and generates
appropriate convergence metrics and inference}
\usage{
get_banocc_output(banoccfit, conf_alpha = 0.05, get_min_width = FALSE,
calc_snc = TRUE, eval_convergence = TRUE, verbose = FALSE,
num_level = 0)
}
\arguments{
\item{banoccfit}{Either a \code{stanfit} object (the \code{Fit} element
returned by \code{run_banocc}), or the list returned by a call to
\code{run_banocc}.}
\item{conf_alpha}{The percentage of the posterior density outside the
credible interval. That is, a \code{1-conf_alpha} * 100\% credible
interval will be returned.}
\item{get_min_width}{A boolean value: should the minimum CI width that
includes zero be calculated?}
\item{calc_snc}{Boolean: should the scaled neighborhood criterion be
calculated?}
\item{eval_convergence}{Boolean: if `TRUE`, convergence will be evaluated
using the Rhat statistic, and the fit output (estimates, credible
intervals, etc.) will be missing if this statistic does not indicate
convergence.}
\item{verbose}{Print informative statements as the function executes?}
\item{num_level}{The number of indentations to add to the output when
\code{verbose = TRUE}.}
}
\value{
Returns a named list with the following elements:
\describe{
\item{\emph{CI}}{The \code{1-conf_alpha} * 100\% credible intervals}
\item{\emph{Estimates.median}}{The correlation estimates, which are the
marginal posterior medians}
\item{\emph{Min.width}}{Only present if the \code{get_min_width}
argument is \code{TRUE}. The minimum CI width that includes zero for
each correlation.}
\item{\emph{SNC}}{Only present if the \code{calc_snc} argument is
\code{TRUE}. The scaled neighborhood criterion for each correlation.}
\item{\emph{Fit}}{The \code{stanfit} object returned by the call to
\code{run_banocc}.}
\item{\emph{Data}}{Only present if the \code{banoccfit} argument is
specified as the output of a call to \code{run_banocc}. It will be
missing if \code{banoccfit} is specified as a \code{stanfit} object.}
}
}
\description{
Takes a model fit from BAnOCC, evaluates convergence and generates
appropriate convergence metrics and inference
}
\examples{
data(compositions_null)
\dontrun{
compiled_banocc_model <- rstan::stan_model(model_code=banocc_model)
b_fit <- run_banocc(C=compositions_null,
compiled_banocc_model=compiled_banocc_model)
b_output <- get_banocc_output(banoccfit=b_fit)
}
}
\seealso{
\code{vignette("banocc-vignette")} for more examples.
}
|
ea199ea85b76b419662e57fba7b965c2d0d14b6a | 6765bbb6278f5e971c4a6d528a95753610b1d158 | /ilab part C data prep.R | 634afaf55a693fa817cbc39e2779d6aa955762d7 | [] | no_license | CorinnaMM/ilab | e11c2d00f915b7a73b603823fc94f1d826c0131b | 180dca7a0bb7a9c62b7dae9db09ba59aedc1f862 | refs/heads/master | 2020-04-02T01:31:31.407654 | 2018-10-20T09:36:34 | 2018-10-20T09:36:34 | 153,859,900 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,181 | r | ilab part C data prep.R | ####
# ilab part C data prep
####
# Prepares HTS trip data and fits a Poisson mixed model (lme4) of daily
# trip counts.  Exploratory script with hard-coded local file paths.
library(lme4)
library(magrittr)
# ======= Person table
fnl_p_review_df <- readRDS("/Users/Corinna/Desktop/corinna - temp/income_imp_review16/fnl_p_review_df.rds")
# ======= trip table
trip <- read.csv("/Users/Corinna/Desktop/corinna - temp/iLAb project/ilab data/hts_hts_trp_odmatrix.csv")
## simple poisson regression
# (superseded both-wave version of the aggregation, kept for reference)
# trips_for_model <- trip %>% dplyr::mutate(count = 1, PERS_ID = paste0(SAMPLE_NO,PERSON_NO), HH_ID = SAMPLE_NO,
#                             WAVE = dplyr::if_else(WAVE == 2016, 0,1), month = lubridate::month(as.Date(TRAVEL_DATE,
#                                                   format = "%d/%b/%y"))) %>%
#   dplyr::select(WAVE, month, TRAVEL_DATE_NUMBER, HH_ID, PERS_ID, HH_SA2_NAME, CURRENT_MODE_DESC_9, O_SA2_NAME, D_SA2_NAME, count) %>% #AGE,
#   dplyr::group_by(WAVE, month, TRAVEL_DATE_NUMBER, HH_ID, PERS_ID, HH_SA2_NAME, CURRENT_MODE_DESC_9, O_SA2_NAME, D_SA2_NAME)%>% #AGE,
#   dplyr::summarise(TC = sum(count)
# Daily trip counts (TC) per person/mode/home-SA2 for the 2016 wave, with
# weights (DAYFACTOR, PPEX) and person age/sex joined on by PERS_ID.
# NOTE(review): the left_join on PERS_ID against the full `trip` table is
# many-to-many (trip has one row per stop), so rows are duplicated here --
# confirm this is intended before using the weights.
trips_for_model <- trip %>% dplyr::filter(WAVE ==2016) %>% dplyr::mutate(count = 1, PERS_ID = paste0(SAMPLE_NO,PERSON_NO), HH_ID = SAMPLE_NO,
                             year = dplyr::if_else(lubridate::year(as.Date(TRAVEL_DATE,
                               format = "%d/%b/%y")) == 2016, 0,1), month = lubridate::month(as.Date(TRAVEL_DATE,
                                                   format = "%d/%b/%y"))) %>%
  dplyr::select(year,month, TRAVEL_DATE, TRAVEL_DATE_NUMBER, HH_ID, PERS_ID, CURRENT_MODE_DESC_9,HH_SA2_NAME, count) %>%
  dplyr::group_by(year, month,TRAVEL_DATE, TRAVEL_DATE_NUMBER, HH_ID, PERS_ID, CURRENT_MODE_DESC_9, HH_SA2_NAME)%>%
  dplyr::summarise(TC = sum(count)) %>%
  dplyr::left_join(dplyr::select(dplyr::mutate(trip,
                                               PERS_ID = paste0(SAMPLE_NO,PERSON_NO)),
                                 PERS_ID,DAYFACTOR,PPEX), by = "PERS_ID") %>%
  dplyr::left_join(dplyr::select(dplyr::mutate(fnl_p_review_df,
                                               PERS_ID = paste0(SAMPLE_NO,PERSON_NO)),
                                 PERS_ID,AGE, SEX_CODE), by = "PERS_ID") %>% dplyr::filter(!(is.na(AGE))) # need to update person table to include 2017 age/sex
## add on household and are factors
# Read the ABS SA2 boundary shapefile and keep only the polygons that occur
# as home SA2s in the trip data.
hhsa2 <- rgdal::readOGR("/Users/Corinna/Desktop/corinna - temp/1270055001_sa2_2016_aust_shape", layer = "SA2_2016_AUST")
hts_sa2_map <- subset(hhsa2, SA2_5DIG16 %in% trips_for_model$HH_SA2_NAME)
plot(hts_sa2_map)
# Compute adjacency objects
# NOTE(review): nbsa3/grasa3 are built from the same SA2 map as
# nbsa2/grasa2 (not an SA3 layer) -- looks like a copy-paste leftover.
nbsa2 <- spdep::poly2nb(hts_sa2_map)
grasa2 <- spdep::nb2gra(hts_sa2_map)
nbsa3 <- spdep::poly2nb(hts_sa2_map)
grasa3 <- spdep::nb2gra(hts_sa2_map)
# Fit a Poisson glmer
# Random intercepts by mode, person and home SA2, plus a random
# TRAVEL_DATE_NUMBER slope (no random intercept) by household.
# NOTE(review): the "-1" inside (TRAVEL_DATE_NUMBER-1) in the fixed part
# also suppresses the model's fixed intercept -- confirm that is intended.
model1poisson <- glmer(TC ~ AGE + (TRAVEL_DATE_NUMBER-1) + (1|CURRENT_MODE_DESC_9) + (1|PERS_ID) +
                         (TRAVEL_DATE_NUMBER-1|HH_ID) + (1|HH_SA2_NAME), family = 'poisson', data = trips_for_model
) # weights = (DAYFACTOR*PPEX/7)
plot(model1poisson)
# Attach fitted values for plotting.
# NOTE(review): predict() on a merMod returns the link (log) scale unless
# type = "response" is supplied -- confirm which scale the plots expect.
trips_for_model$tripPredictHHsa <- predict(model1poisson, trips_for_model)
# Observed (black) vs fitted (blue) counts against the expansion weight.
ggplot() + theme_minimal() +
  geom_point(data =trips_for_model,
             aes(x = (DAYFACTOR*PPEX/7), y = TC)) +
  geom_point(data = trips_for_model,
             aes(x = (DAYFACTOR*PPEX/7), y = tripPredictHHsa),
             color = 'blue', alpha = 0.5)
# Observed vs fitted by age, faceted by travel mode.
ggplot() + theme_minimal() +
  geom_point(data =trips_for_model,
             aes(x = AGE, y = TC, alpha = (DAYFACTOR*PPEX/7))) +
  geom_point(data = trips_for_model,
             aes(x = AGE, y = tripPredictHHsa),
             color = 'blue', alpha = 0.5) + facet_wrap(~CURRENT_MODE_DESC_9) +geom_hline(yintercept =0)
summary(model1poisson)
# extractAndPlot(model1poisson)
# Age goes before year
# modelOut <- glmer(count ~ age + year + (year|county), family = 'poisson',
#                   data = ILdata)
# summary(modelOut)
# Alias so the tutorial-style extraction code below runs on our model.
modelOut <- model1poisson
# Extract out fixed effects
plot(fixef(modelOut))
# Extract out random effects
plot(ranef(modelOut))
# Run code to see one method for plotting the data
ggplot(data = trips_for_model, aes(x = (TRAVEL_DATE_NUMBER-1), y = TC, group = HH_SA2_NAME, alpha = (DAYFACTOR*PPEX/7))) +
  geom_point() + facet_grid((month-1) ~ . ) +
  stat_smooth( method = 'glm',
               method.args = list( family = "poisson"), se = FALSE,
               alpha = 0.5) +
  theme_minimal()
# Run the paired-test like before
# NOTE(review): `y` and `treat` are not defined anywhere in this script --
# the two calls below look copied from course material and will error.
t.test(y[treat == "before"], y[treat == "after"], paired = TRUE)
# Run a repeated-measures ANOVA
anova(lmer(y ~ treat + (1|x)))
### fit a separate model for each origin destination combination
# NOTE(review): dlply() needs plyr and display() needs arm (neither is
# loaded here), and lmm.data/school/class/extro/open/agree/social are not
# defined in this script -- placeholder code to be adapted to the HTS data.
modellist <- dlply(lmm.data, .(school, class), function(x) glm(extro ~ open +
                                                                 agree + social, data = x))
display(modellist[[1]])
## let's make variables that count the number of trips by mode for modelling purposes
str(trip)
table(is.na(trip$CURRENT_MODE_DESC))
unique(trip$CURRENT_MODE_DESC)
# One row per person/travel-day, with one trip-count column per 9-level
# mode (tidyr::spread fills modes with no trips with 0).
trip_dm <- trip %>% dplyr::select(WAVE, SAMPLE_NO, PERSON_NO, STOP_NO, CURRENT_MODE_DESC_9, TRAVEL_DATE) %>%
  dplyr::mutate(count = 1) %>% dplyr::group_by(WAVE, SAMPLE_NO, PERSON_NO, TRAVEL_DATE, CURRENT_MODE_DESC_9) %>%
  dplyr::summarise(count = sum(count)) %>% dplyr::ungroup() %>% tidyr::spread("CURRENT_MODE_DESC_9","count", fill = 0)
# Keep every person (all.x) even if they recorded no trips; their mode
# columns come through as NA and are zeroed below via tot_trip.
person_trip_dm <- merge(fnl_p_review_df, trip_dm, by = c("WAVE","SAMPLE_NO", "PERSON_NO"), all.x = T)
names(person_trip_dm)
# Total trips across all modes; persons with no trip rows sum to NA,
# which is recoded to 0.
person_trip_dm <- person_trip_dm %>% dplyr::mutate(tot_trip =
                                                     (Bicycle +Bus +Ferry+ Other +Taxi +
                                                        Train + `Vehicle driver` + `Vehicle passenger` +
                                                        Walking)) %>% dplyr::mutate(tot_trip = dplyr::if_else(
                                                          is.na(tot_trip),0,tot_trip))
|
25e1ec814e7b5983759cbefc1be93977aada9222 | de8a72d48bd61f94fb81712408822aa2c0fef16d | /cachematrix.R | eabc3a995c2496d9d36c811b6d1a29fc3953e5c8 | [] | no_license | parscran/ProgrammingAssignment2 | 63a2f0ccbc8a7fd7be10d82b3a6c6714c7e04748 | 28c7e910e43c65334c5061ce7f19bcf31cd8a74f | refs/heads/master | 2021-01-19T07:07:30.283762 | 2015-01-21T08:04:26 | 2015-01-21T08:04:26 | 29,571,563 | 0 | 0 | null | 2015-01-21T04:38:29 | 2015-01-21T04:38:28 | null | UTF-8 | R | false | false | 2,719 | r | cachematrix.R | ##
## Coursera - Data Science Specialization : 2. R Programming
## Week 3 : Programming Assignment 2
##
## Author :
## parscran (parscran@gmail.com)
##
## This programming assignment is about writing an R function that is able to
## cache potentially time-consuming computations like a matrix inversion.
## There may be some benefit to caching the inverse of a matrix rather than
## computing it repeatedly.
##
## Function
## - Name :
## makeCacheMatrix
##  - Description :
## This creates a special "matrix" object that can cache its inverse,
## which is really a list containing a function to :
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse for the matrix
## 4. get the value of the inverse for the matrix
##
## Construct a cache-aware "matrix" wrapper.
##
## Returns a list of four accessor closures sharing the enclosing
## environment, so the stored matrix and its cached inverse persist
## between calls:
##   setMtx(y)       -- replace the stored matrix and drop any cached inverse
##   getMtx()        -- return the stored matrix
##   setInv(inverse) -- store a computed inverse
##   getInv()        -- return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(x = matrix()) {
    # Cached inverse; NULL until setInv() stores a value.
    cached_inverse <- NULL
    list(
        setMtx = function(y) {
            # A new matrix invalidates the previously cached inverse.
            x <<- y
            cached_inverse <<- NULL
        },
        getMtx = function() x,
        setInv = function(inverse) cached_inverse <<- inverse,
        getInv = function() cached_inverse
    )
}
## Function
## - Name :
## cacheSolve
## - Description :
## This computes the inverse of the special "matrix" returned by
## 'makeCacheMatrix' above. If the inverse has already been calculated,
## then this function should retrieve the inverse from the cache.
##
## Return the inverse of the special "matrix" created by makeCacheMatrix.
##
## On a cache miss the inverse is computed with solve() and stored via
## x$setInv(); on a cache hit the stored value is returned directly and a
## message is printed.  Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
    cached <- x$getInv()
    if (is.null(cached)) {
        # Cache miss: compute, remember, then return the inverse.
        cached <- solve(x$getMtx(), ...)
        x$setInv(cached)
    } else {
        # Cache hit: skip the computation entirely.
        message("Getting cached data")
    }
    cached
}
|
5d0475e8b590bb5c5393b491144ad3af4a95e4bd | 788ec189c5c9d78b1e8a702a47821ca42adab401 | /man/get_locale.Rd | 532f012280a9256376f9e8cfb7a85561db7167c7 | [] | no_license | conjugateprior/rjca | e50b5c85513907a01fb96fff4879d982594c9261 | 76ac58e45684e638b337723156804ad42b41a31e | refs/heads/master | 2016-09-06T20:18:55.315629 | 2015-05-29T12:30:09 | 2015-05-29T12:30:09 | 35,891,350 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 316 | rd | get_locale.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/rjca.R
\name{get_locale}
\alias{get_locale}
\title{Determine default locale}
\usage{
get_locale()
}
\value{
ISO string representation of a locale
}
\description{
Determines default locale by parsing the LC_COLLATE locale value
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.