blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
da1c32131ca39777aae69fcae686ac3e954c1f2f | e92f5c95c8c17f4b2dc8d754fd212fd4fcc4c8b4 | /data-raw/oraciones.R | 6580a98f31a8ed8acf92a59583c20cc194dae5dd | [
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | cienciadedatos/datos | de1cab523ce46ed1b1d86dbb4f947ba9e58ff432 | 6008b75bfc68e2e8332fe92c37f5ec59361fa4f4 | refs/heads/main | 2023-07-20T12:01:18.032448 | 2023-07-17T12:50:37 | 2023-07-17T15:10:53 | 140,963,726 | 37 | 34 | CC0-1.0 | 2023-07-17T12:52:14 | 2018-07-14T17:07:44 | R | UTF-8 | R | false | false | 3,200 | r | oraciones.R | # From `dput(datos::oraciones)` (245b4af)
# Googling "latin small letter a with acute" and so on
# a "\u00e1"
# e "\u00e9"
# E "\u00c9"
# i "\u00ed"
# o "\u00f3"
# u "\u00fa"
#
# n \u00f1
oraciones <- c(
"Las casas est\u00e1n construidas de ladrillos de arcilla roja.",
"La caja fue arrojada al lado del cami\u00f3n estacionado.",
"El domingo es la mejor parte de la semana.",
"Agrega a la cuenta de la tienda hasta el \u00faltimo centavo.",
"Nueve hombres fueron contratados para excavar las ruinas.",
"Pega la hoja en el fondo azul oscuro.",
"Instalaron azulejos verdes en la cocina.",
"Si arrojo la taza azul al suelo se romper\u00e1.",
"Dos peces azules nadaban en el tanque.",
"El ancho camino brillaba bajo el calor del sol.",
"Una voluta de nube flotaba en el aire azul.",
"Las hojas se vuelven de color marr\u00f3n y amarillo en el oto\u00f1o.",
"\U00c9l orden\u00f3 tarta de melocot\u00f3n con helado.", # FIXME
"La mancha en el papel secante fue hecha por la tinta verde.",
"Hab\u00eda barro salpicado en la parte delantera de su camisa blanca.",
"El coj\u00edn del sof\u00e1 es de color rojo y de peso ligero.",
"El cielo de la ma\u00f1ana era claro y azul brillante.",
"El m\u00e9dico lo cur\u00f3 con estas dos pastillas.",
"Pod\u00edan re\u00edr a pesar de que estaban tristes.",
"El tercer acto era aburrido y cans\u00f3 a los actores.",
"Una grulla azul es una ave zancuda y alta.",
"Los cables expuestos deben mantenerse cubiertos.",
"El choque ocurri\u00f3 cerca del banco en la calle principal.",
"La l\u00e1mpara brillaba con una llama verde y continua.",
"El pr\u00edncipe orden\u00f3 que le cortaran la cabeza.",
"La planta creci\u00f3 grande y verde en la ventana.",
"El lazo p\u00farpura ten\u00eda diez a\u00f1os.",
"Recu\u00e9state y rel\u00e1jate en la hierba fresca y verde.",
"El lago brillaba bajo el sol c\u00e1lido y rojo.",
"Marca el lugar con un cartel pintado de rojo.",
"El humo sal\u00eda de cada grieta.",
"La cubierta del sof\u00e1 y las cortinas de la sala eran azules.",
"Ofreci\u00f3 evidencia a trav\u00e9s de tres gr\u00e1ficos.",
"Un hombre con un su\u00e9ter azul se sent\u00f3 en el escritorio.",
"El sorbo de t\u00e9 revive a su amigo cansado.",
"Una gruesa capa de pintura negra cubr\u00eda todo.",
"Dibuja el gr\u00e1fico con l\u00edneas negras gruesas.",
"La peque\u00f1a l\u00e1mpara de ne\u00f3n de color rojo se apag\u00f3.",
"Pinta los encajes en la pared de color verde opaco.",
"Despi\u00e9rtate y lev\u00e1ntate, camina hacia el verde exterior.",
"La luz verde en la caja marr\u00f3n parpadeaba.",
"Puso su \u00faltimo cartucho en la pistola y dispar\u00f3.",
"El carnero asust\u00f3 a los ni\u00f1os de la escuela.",
"Corta una delgada l\u00e1mina de la almohadilla amarilla.",
"Llovieron centavos de todos lados.",
"El cielo en el oeste se ti\u00f1e de rojo anaranjado.",
"El granizo repiqueteaba en la hierba marr\u00f3n quemada.",
"La gran manzana roja cay\u00f3 al suelo.",
"El olor de la primavera hace que los corazones j\u00f3venes salten.",
"Cada palabra y cada frase que habla es cierta."
)
use_data(oraciones, overwrite = TRUE)
|
1e0cc38d24e84ef9d55d6db4bad81026c02387ac | d8abe01116e043544cbb198ad054c7969da022eb | /readyData.R | f1791f959466e85fc1d2382137a3c3f727e775ae | [] | no_license | rpsoft/POPMI | 0b9478f1dc3f647c240acfb4280bf75f76e9e261 | 6acfa0ecf9aeb40357c40317a0afe899b86bd764 | refs/heads/master | 2023-01-21T22:54:24.921825 | 2020-12-04T23:42:46 | 2020-12-04T23:42:46 | 318,381,158 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,600 | r | readyData.R | library(tidyverse)
library(plumber)
library(future)
options(future.globals.maxSize= 891289600)
joint_probs <- readRDS("data/joint/joint_probs.rds")
joint_cvd_pop <- read_csv("data/pops/simPop2.cvd.cvd.500.14nov20.csv")
joint_mi_pop <- read_csv("data/pops/simPop2.mi.mi.500.12nov20.csv")
cvd = readRDS(file = 'data/cond/cond_prob_cvd.rds')
mi = readRDS(file = 'data/cond/cond_prob_mi.rds')
cond_probs <- list(
cvd = cvd,
mi = mi
)
weighted_risk_cvd <- read_csv("data/joint/weighted_risk_cvd.csv")
weighted_risk_mi <- read_csv("data/joint/weighted_risk_mi.csv")
# weighted_risk_cvd %>% filter( year_cat = "(1989,1994]") %>% arrange(event_1,event_2,event_3,event_4 ) %>% View
initials <- function ( terms ){
paste0(unlist(lapply ( terms, function (x){
substring(x, 1, 1)
})), collapse = "")
}
filterWeightedMSMData <- function( source, year_cat_in ){
if ( source == "mi" ){
source_data = weighted_risk_cvd
} else {
source_data = weighted_risk_mi
}
source_data <- source_data %>% filter(year_cat == year_cat_in) %>%
mutate( wr = round(wr*100,3) ) %>%
mutate( probability = wr ) %>%
mutate( event_0 = source ) %>%
mutate( event_id = "fourth_evnt") %>%
mutate( event_id = ifelse(is.na(event_4), "thrd_evnt",event_id)) %>%
mutate( event_id = ifelse(is.na(event_3), "scnd_evnt",event_id)) %>%
mutate( event_id = ifelse(is.na(event_2), "frst_evnt",event_id))
source_data <- source_data %>% select( colnames(source_data) %>% sort() )
source_data <- source_data %>% rowwise %>% mutate( eventSequence = paste0( c(event_0, event_1, event_2, event_3, event_4), collapse = ",")) %>% mutate ( eventSequence = str_replace_all(eventSequence,",NA", ""))
first_ev <- source_data %>% filter( event_id == "frst_evnt") %>% mutate( source = event_0, target=paste0("(",initials(c(event_0)),") ",event_1))
second_ev <- source_data %>% filter( event_id == "scnd_evnt") %>% mutate( source = paste0("(",initials(c(event_0)),") ",event_1), target= paste0("(",initials(c(event_0,event_1)),") ",event_2))
third_ev <- source_data %>% filter( event_id == "thrd_evnt") %>% mutate( source = paste0("(",initials(c(event_0,event_1)),") ",event_2), target=paste0("(",initials(c(event_0,event_1,event_2)),") ",event_3))
fourth_ev <- source_data %>% filter( event_id == "fourth_evnt") %>% mutate( source = paste0("(",initials(c(event_0,event_1,event_2)),") ",event_3), target=paste0("(",initials(c(event_0,event_1,event_2,event_3)),") ",event_4)) #paste0("(4) ",event_4))
all_transitions <- first_ev %>% rbind(second_ev) %>%
rbind(third_ev) %>% rbind(fourth_ev)
filtered <- source_data %>%
select(eventSequence,event_0:event_4,wr) %>%
arrange(desc(event_1),desc(event_2),desc(event_3),desc(event_4)) %>%
rowwise %>% mutate( ss = str_split(eventSequence, ",") )
root <- unlist(lapply(filtered$ss, function(x){
ss <- x[!is.na(x)]
unlist(paste0(x[1:(length(x)-1)], collapse = ","))
}))
values <- unlist(lapply(root, function(x) {
sum((filtered %>% filter( str_detect(eventSequence,x) ))$wr)
}))
transitions <- (tibble ( eventSequence = root, eventSequence2 = root, values = values) %>% distinct) %>% separate(eventSequence2, c( "event_0", "event_1", "event_2", "event_3", "event_4"), ",")
transitions <- transitions %>% mutate( year_cat = year_cat_in, wr = values, probability = values, event_id="aggr")
transitions <- transitions %>% mutate( event_id = "fourth_evnt") %>%
mutate( event_id = ifelse(is.na(event_4), "thrd_evnt",event_id)) %>%
mutate( event_id = ifelse(is.na(event_3), "scnd_evnt",event_id)) %>%
mutate( event_id = ifelse(is.na(event_2), "frst_evnt",event_id)) %>% rowwise()
first_ev <- transitions %>% filter( event_id == "frst_evnt") %>% mutate( source = event_0, target=paste0("(",initials(c(event_0)),") ",event_1)) %>% ungroup()
second_ev <- transitions %>% filter( event_id == "scnd_evnt") %>% mutate( source = paste0("(",initials(c(event_0)),") ",event_1), target= paste0("(",initials(c(event_0,event_1)),") ",event_2)) %>% ungroup()
third_ev <- transitions %>% filter( event_id == "thrd_evnt") %>% mutate( source = paste0("(",initials(c(event_0,event_1)),") ",event_2), target=paste0("(",initials(c(event_0,event_1,event_2)),") ",event_3)) %>% ungroup()
fourth_ev <- transitions %>% filter( event_id == "fourth_evnt") %>% mutate( source = paste0("(",initials(c(event_0,event_1,event_2)),") ",event_3), target=paste0("(",initials(c(event_0,event_1,event_2,event_3)),") ",event_4)) %>% ungroup()
aggr_transitions <- first_ev %>% rbind(second_ev) %>%
rbind(third_ev) %>% rbind(fourth_ev)
aggr_transitions <- aggr_transitions %>% select(all_transitions %>% colnames)
all_transitions <- all_transitions %>% rbind (aggr_transitions) %>% ungroup
all_transitions <- all_transitions %>%
arrange(desc(event_0),desc(event_1),desc(event_2),desc(event_3),desc(event_4)) %>%
mutate( target = ifelse(is.na(event_1), source , target), source = ifelse(is.na(event_1), NA , source), event_id = ifelse(is.na(event_1), "root_evnt" , event_id) ) %>%
distinct
}
filterMSMData <- function( source, sex_in, simd, hf_in, cerebro_or_mi, year_cat_in, age_in ){
if ( source == "mi" ){
source_data = cond_probs$mi
source_pop = joint_mi_pop
ids <- (source_pop %>% filter( sex == sex_in ) %>%
filter(simd_2009 == simd) %>%
filter(cerebrovasc == cerebro_or_mi) %>%
filter(hf == hf_in) %>%
filter(year_cat == year_cat_in))
} else {
source_data = cond_probs$cvd
source_pop = joint_cvd_pop
ids <- (source_pop %>% filter( sex == sex_in ) %>%
filter(simd_2009 == simd) %>%
filter(mi == cerebro_or_mi) %>%
filter(hf == hf_in) %>%
filter(year_cat == year_cat_in))
}
sel_id <- (ids %>% mutate( dff = abs(age-age_in) ) %>% arrange(dff))$id[1]
pdata <- source_data %>% ungroup %>%
filter(pid == sel_id) %>% mutate( event_0 = source) #%>% filter(event_1 == "bleeding")
pdata <- pdata %>% mutate( event_1 = as.character(event_1)) %>%
mutate( event_2 = as.character(event_2)) %>%
mutate( event_3 = as.character(event_3)) %>%
mutate( event_4 = as.character(event_4))
pdata <- pdata %>% mutate( event_1 = as.character(ifelse( is.na(event_1),"",event_1 ) )) %>%
mutate( event_2 = as.character(ifelse( is.na(event_2),"",event_2 ) )) %>%
mutate( event_3 = as.character(ifelse( is.na(event_3),"",event_3 ) )) %>%
mutate( event_4 = as.character(ifelse( is.na(event_4),"",event_4 ) ))
pdata <- pdata %>% mutate( path = paste0(event_0,"_",event_1,"_",event_2,"_",event_3,"_",event_4))
pdata <- pdata %>% mutate( path = str_replace(path,"_*$", "") )
first_ev <- pdata %>% filter( event_id == "frst_evnt") %>% mutate( source = event_0, target=paste0(event_0,"_",event_1))
second_ev <- pdata %>% filter( event_id == "scnd_evnt") %>% mutate( source = paste0(event_0,"_",event_1), target=paste0(event_0,"_",event_1,"_",event_2))
third_ev <- pdata %>% filter( event_id == "thrd_evnt") %>% mutate( source = paste0(event_0,"_",event_1,"_",event_2), target=paste0(event_0,"_",event_1,"_",event_2,"_",event_3))
fourth_ev <- pdata %>% filter( event_id == "fourth_evnt") %>% mutate( source = paste0(event_0,"_",event_1,"_",event_2,"_",event_3), target=paste0(event_0,"_",event_1,"_",event_2,"_",event_3,"_",event_4))
all_transitions <- first_ev %>% rbind(second_ev) %>% rbind(third_ev) %>% rbind(fourth_ev) %>% mutate( source = str_replace(source,"_*$", ""), target = str_replace(target,"_*$", "") )
all_transitions <- all_transitions %>% filter ( cond_prob != 1)
labels <- c(all_transitions$source, all_transitions$target) %>% unique()
all_transitions_n <- all_transitions %>% rowwise() %>% mutate( source_n = which(labels == source)[1], target_n = which(labels == target)[1] )
labels <- tibble(target=labels) %>% left_join(all_transitions_n %>% select(target,cond_prob)) %>%
mutate( ss = str_split(target,"_") ) %>% rowwise %>% mutate( labels = paste0(ss[length(ss)], ifelse(is.na(cond_prob) , "" ,paste0(" (", round(cond_prob*100,2),"%)"))) )
all_transitions_n <- all_transitions_n %>% select( all_transitions_n %>% colnames() %>% sort() )
data <- list(
data = all_transitions_n,
labels = labels$labels
)
# browser()
return(data)
}
# filterMSMData("mi", 1, 3, 1, 0, "(1989,1994]", 50) $data %>% View
|
ed4ed99c003d7bf798d16dfb4c64aff6e06123da | f78c451bc1d7892e6684f92413b20515fddc8936 | /R/get.adjacency.norm.R | e40e1d41d392acf1abf74fbc7a473793e724fc03 | [] | no_license | alexjcornish/DiseaseCellTypes | 3336f0bd93406ddad021c1ee2d6c467cc834080f | ea5d62898549eacf653f3c388d86ceb4af05eef0 | refs/heads/master | 2021-01-19T08:46:26.242215 | 2015-09-02T09:03:56 | 2015-09-02T09:03:56 | 27,534,586 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,049 | r | get.adjacency.norm.R | get.adjacency.norm <- function(
g,
edge.attr
) {
# compute a column-normalised adjacency matrix to be used to compute the RWR distance
# function outputs a row for each vertex in g and a column for each vertex in g
# a sparse matrix is always output
# g should be an undirected igraph object and edge.attr an edge attribute containing weights
# if edge.attr is NULL, then it is assumed that all edge weights equal 1
# in order to speed up the function, numbers rather than names are used to refer to vertices
# this function is designed to be as fast as possible, and therefore inputs are not checked
weights <- if (is.null(edge.attr)) 1 else get.edge.attribute(g, edge.attr)
el <- cbind(get.edgelist(g, names=F), weights) # el: edgelist with weights
el <- rbind(el, el[el[, 1] != el[, 2], c(2,1,3)])
weights.colsum <- sapply(split(el[, 3], el[, 1]), sum)
el[, 3] <- el[, 3] / weights.colsum[el[, 1]]
sparseMatrix(dims=rep(vcount(g), 2), i=el[, 2], j=el[, 1], x=el[, 3], symmetric=FALSE)
}
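# Example usage (a minimal sketch, not part of the original function): assumes the
# igraph and Matrix packages are loaded, which supply get.edge.attribute(),
# get.edgelist(), vcount() and sparseMatrix() used above; the toy graph is made up.
# library(igraph); library(Matrix)
# g <- erdos.renyi.game(10, p.or.m = 0.3)
# W <- get.adjacency.norm(g, edge.attr = NULL)
# colSums(W) # every column with at least one incident edge sums to 1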
|
e3bc4c84f106e0a4d317f6a1a70da5aa5b4fe702 | 364c91054852403d0f124b7349e030b4bda407a1 | /dev/matchingAlgo/correlation.R | b3d18f5135e9cb448129fa8c5e4df982fd25f9ec | [] | no_license | sanathkumarbs/gradscout | d54e5ff7b6388dc3240c80fda5a130123e910735 | 9a8aca269aa10ceaadff5aa8d30064d7dc97e15b | refs/heads/master | 2021-01-11T19:05:11.470698 | 2017-05-31T05:25:26 | 2017-05-31T05:25:26 | 79,312,168 | 2 | 1 | null | 2017-04-06T01:33:21 | 2017-01-18T06:51:58 | HTML | UTF-8 | R | false | false | 432 | r | correlation.R | library(data.table) #loading data.table library in my R workspace
library(ggplot2) #loading ggplot2 library in my R workspace
library(corrplot) #loading corrplot library in my R workspace
GS <- data.table(University.Quantitative.Data) #loading dataset University.Quantitative.Data
summary(GS) #summarizing the dataframe
gs <- cor(GS) #generating the correlation values
corrplot(gs, method="number") #plotting the correlation matrix with numeric values |
2de49ffb9505c322c6599d4b8e24e54dc46fd5fc | af656e348cf17aaaa54ee2ec3fe83b5e7a927b39 | /Challenge_3/makeAdmixturePlot.R | 604ef5db538446daf0dd7e8e7688ae45fbd9e54d | [] | no_license | sivasubramanics/Julich_PhD | 891322a13c9d2c8e010bb386a814f56f2f4d2efd | bfb04e13ad5416ff9cfddabe36cea248b64fd68d | refs/heads/master | 2023-04-05T21:42:58.635800 | 2021-04-11T19:51:10 | 2021-04-11T19:51:10 | 356,939,155 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,565 | r | makeAdmixturePlot.R | #! /usr/bin/env Rscript
# title :makeAdmixturePlot.R
# description :This script will generate Plot for Admixture Qmatrix.
# author :c.s.sivasubramani@gmail.com
# date :11042021
# version :0.1
# usage :Rscript makeAdmixturePlot.R
# notes : This script expects additional Utility source code for processing.
# ==============================================================================
# Load the POPS utilities
source("POPSutilities.r")
# Reading Admixture Q matrix
Qmatrix = read.table("maize.3.Q")
# Reading geo coord file (2 columns separated with comma)
coord = read.table("Maize.coord")
# Output PNG File
png("AdmixtureClusters.png")
# Initial geo plotting
plot(coord, pch = 19, xlab="Longitude", ylab = "Latitude")
map(add = T, interior = F, col = "grey80")
asc.raster = "RasterMaps/North_America.asc"
# Defining the grid for ancestry
grid=createGridFromAsciiRaster(asc.raster)
constraints=getConstraintsFromAsciiRaster(asc.raster,cell_value_min=0)
show.key = function(cluster=1,colorGradientsList=lColorGradients){
ncolors=length(colorGradientsList[[cluster]])
barplot(matrix(rep(1/10,10)),col=colorGradientsList[[cluster]][(ncolors-9):ncolors],main=paste("Cluster",cluster))}
layout(matrix(c(rep(1,6),2,3,4), 3, 3, byrow = FALSE), widths=c(3,1), respect = F)
par(ps = 22)
maps(matrix = Qmatrix, coord, grid, constraints, method = "max", main = "Ancestry coefficients", xlab = "Longitude", ylab = "Latitude")
par(ps = 16)
# Adding Cluster legends
for(k in 1:3){show.key(k)}
dev.off()
|
f7b12813a994b9e550ca8db3bd4fcab7e0c1d0cb | 4a0426ecc2d49c81f990d3ddc34b401f2893694f | /R/grad_descent.R | cedd78cc517b3924fc95039af1d5a152b38ccb35 | [] | no_license | Yannuo10/bis557 | 2d90922e5a05b9d3b5dae4e88cbfc666a670c867 | 913885913db2691871102904f2fcb3b24664a9f0 | refs/heads/master | 2023-02-01T05:43:09.501516 | 2020-12-19T05:05:45 | 2020-12-19T05:05:45 | 296,765,930 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 976 | r | grad_descent.R | #' @title grad_descent() function
#' @description Another way to fit a linear model: gradient descent for ordinary least squares.
#' @param form a formula;
#' @param data a data frame used for the function;
#' @param contrasts a list of contrasts for factor variables
#' @param itr number of iterations
#' @examples
#' data(iris)
#' fit_linear_model <- grad_descent(Sepal.Length ~ ., iris, contrasts = list(Species = "contr.sum"))
#' @export
grad_descent <- function(form, data, contrasts = NULL, itr = 1e6){
df <- model.frame(form, data)
if (is.null(contrasts)) {
X <- model.matrix(form, df)
}
else (X <- model.matrix(form, df, contrasts.arg=contrasts))
y_name <- as.character(form)[2]
Y <- matrix(df[, y_name], ncol=1)
beta <- matrix(rep(1, length(colnames(X))), nrow=length(colnames(X)))
L <- 0.0001 # learning rate (step size)
for (i in 1:itr){
pd <- (-2)* t(X) %*% Y + 2 * t(X) %*% X %*% beta # gradient of ||Y - X beta||^2 with respect to beta
beta <- beta - L * pd
}
ret <- list(coefficients=beta)
ret
}
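# Sanity check (a sketch, assuming itr is large enough for convergence): the
# estimates should closely match ordinary least squares, e.g.
# fit_gd <- grad_descent(Sepal.Length ~ ., iris, contrasts = list(Species = "contr.sum"))
# fit_lm <- lm(Sepal.Length ~ ., iris, contrasts = list(Species = "contr.sum"))
# cbind(gd = drop(fit_gd$coefficients), lm = coef(fit_lm))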
|
94d1163f0aca3a00ef6a67019fba111375e3a30a | d57bfd5bbefab86d21ed46b4e15f1d489c61bcbc | /man/shypo.Rd | 85cf2b4cd036976b9601be4e12c852bdec068766 | [] | no_license | cran/smovie | 553d3d6441a762a4b538699c47d028ad4e5c995a | 9a05f94188335a1b79a98bdfa4011bbcbc3033e8 | refs/heads/master | 2021-11-24T16:55:00.478591 | 2021-10-31T04:30:02 | 2021-10-31T04:30:02 | 123,954,735 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,538 | rd | shypo.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simple_hypotheses.R
\name{shypo}
\alias{shypo}
\title{Testing simple hypotheses}
\usage{
shypo(
mu0 = 0,
sd = 6,
eff = sd,
n = 10,
a = mu0 + eff/2,
target_alpha = 0.05,
target_beta = 0.1,
panel_plot = TRUE,
hscale = NA,
vscale = hscale,
delta_n = 1,
delta_a = sd/(10 * sqrt(n)),
delta_eff = sd,
delta_mu0 = 1,
delta_sd = 1
)
}
\arguments{
\item{mu0}{A numeric scalar. The value of \eqn{\mu} under the null
hypothesis H0 with which to start the movie.}
\item{sd}{A positive numeric scalar. The (common) standard deviation
\eqn{\sigma} of the normal distributions of the data under the two
hypotheses.}
\item{eff}{A numeric scalar. The \emph{effect size}. The amount by which
the value of \eqn{\mu} under the alternative hypothesis is greater than
the value \code{mu0} under the null hypothesis.
That is, \code{mu1} = \code{eff} + \code{mu0}.
\code{eff} must be non-negative.}
\item{n}{A positive integer scalar. The sample size with which to start
the movie.}
\item{a}{A numeric scalar. The critical value of the test with which to
start the movie. H0 is rejected if the sample mean is greater than
\code{a}.}
\item{target_alpha}{A numeric scalar in (0,1). The target value of the
type I error to be achieved by setting \code{a} and/or \code{n}
if the user asks for this using a radio button.}
\item{target_beta}{A numeric scalar in (0,1). The target value of the
type II error to be achieved by setting \code{a} and/or \code{n}
if the user asks for this using a radio button.}
\item{panel_plot}{A logical parameter that determines whether the plot
is placed inside the panel (\code{TRUE}) or in the standard graphics
window (\code{FALSE}). If the plot is to be placed inside the panel
then the tkrplot library is required.}
\item{hscale, vscale}{Numeric scalars. Scaling parameters for the size
of the plot when \code{panel_plot = TRUE}. The default values are 1.4 on
Unix platforms and 2 on Windows platforms.}
\item{delta_mu0, delta_eff, delta_a, delta_n, delta_sd}{Numeric scalars. The
respective amounts by which the values of \code{mu0, eff, a, n} and
\code{sd} are increased (or decreased) after one click of the + (or -)
button in the parameter window.}
}
\value{
Nothing is returned, only the animation is produced.
}
\description{
A movie to illustrate statistical concepts involved in the testing
of one simple hypothesis against another. The example used is a
random sample from a normal distribution whose variance is assumed
to be known. The simple hypotheses relate to the value of the mean
\eqn{\mu}.
}
\details{
The movie is based on two plots.
The top plot shows the (normal)
probability density functions of the sample mean under the null
hypothesis H0 (mean \code{mu0}) and the alternative hypothesis H1
(mean \code{mu1}, where \code{mu1} > \code{mu0}), with the values
of \code{mu0} and \code{mu1} indicated by vertical dashed lines.
H0 is rejected if the sample mean exceeds the critical value \code{a},
which is indicated by a vertical black line.
The bottom plot shows how the probabilities of making a type I or type II
error (alpha and beta respectively) depend on the value of \code{a},
by plotting these probabilities against \code{a}.
A parameter window enables the user to change the values of \code{n},
\code{a}, \code{mu0}, \code{eff} = \code{mu1} - \code{mu0} or \code{sd}
by clicking the +/- buttons.
Radio buttons can be used either to:
\itemize{
\item{}{set \code{a} to achieve the target type I error probability
\code{target_alpha}, based on the current value of \code{n};}
\item{}{set \code{a} and (integer) \code{n} to achieve (or better) the
respective target type I and type II error probabilities of
\code{target_alpha} and \code{target_beta}.}
}
If \code{eff = 0} then a plot will be produced even though this case is
not practically meaningful. In the "set a and n to achieve target alpha
and beta" case, the plot will be the same as the case "set a and n by
hand" case.
}
\examples{
# 1. Change a (for fixed n) to achieve alpha = 0.05
# 2. Change a and n to achieve alpha <= 0.05 and beta <= 0.1
shypo(mu0 = 0, eff = 5, n = 16, a = 2.3, delta_a = 0.01)
}
\seealso{
\code{\link{movies}}: a user-friendly menu panel.
\code{\link{smovie}}: general information about smovie.
}
|
e89f0df18a1a456eb3ca87c09a2e7a877630a555 | 958cdec963f4e7ed6f2e7f264613809898ce2971 | /scripts/make_mutspec.R | 826152b13e0bc7ce8e8d8846fadba16361dd6a07 | [
"MIT"
] | permissive | merckey/mc3_icgc_variant_pipeline | 642f671a2a0e6d0d4fe93170bfff4d6a7014de17 | b01eb4b9c678467e29b899c2ed602d37d41575dc | refs/heads/master | 2020-11-24T01:22:25.745657 | 2019-10-22T17:56:49 | 2019-10-22T17:56:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,861 | r | make_mutspec.R | library(data.table)
library(UpSetR)
library(dplyr)
library(ggplot2)
library(reshape2)
library(RColorBrewer)
library(scales)
library(gplots)
#DEVELOPMENT ONLY######
#args <- c("processed_data/exome.broadbed.gaf.maf","processed_data/genome_broadbed.gaf.maf", "figures/mc3_mutspec.pdf","figures/pcawg_mutspec.pdf","processed_data/mutspecNotes.txt")
args = commandArgs(trailingOnly=TRUE)
if (length(args)!=6) {
stop("Full.tsv and output should be supplied (input file).n", call.=FALSE)
}
mc3 <- fread("processed_data/exome.broadbed.gaf.maf")
mc3 <- mc3[which(!grepl("oxog",mc3$V105)),]
pcawg <- fread("processed_data/genome_broadbed.gaf.maf",header=F,sep="\t",na.strings="NA",colClasses=list(character=c(17,33,35)))
smap = fread("/diskmnt/Projects/ICGC_MC3/ID_Mapping/MC3_PCAWG_Matched.ids.v3.txt")
ctypes = fread("/diskmnt/Projects/ICGC_MC3/ID_Mapping/PCA.sample.cancer.txt",header=F)
#Deal with PCAWG
pmaf <- merge(pcawg,smap,by.x="V12",by.y="tcga_pcawg_aliquot_id")
pmaf$char12 = substr(pmaf$mc3_exome_barcode,1,12)
pmafc <- merge(pmaf,ctypes,by.x="char12",by.y="V1")
mutspsamp <- pmafc %>% group_by(char12) %>% tally()
pmafc$tstv <- paste(pmafc$V7,">",pmafc$V9,sep="")
nonhypers <- mutspsamp[which(mutspsamp$n < 1500),]$char12
pmafc <- pmafc[which(pmafc$char12 %in% nonhypers),]
#DEAL with MC3
mc3$char12 = substr(mc3$V12,1,12)
mc3c <- merge(mc3,ctypes,by.x="char12",by.y="V1")
mutspsamp <- mc3c %>% group_by(V12) %>% tally()
mc3c$tstv <- paste(mc3c$V7,">",mc3c$V9,sep="")
mc3c <- mc3c[which(mc3c$char12 %in% nonhypers),]
#The the tally that I need.
good = c("A>C","A>G","A>T","C>A","C>G","C>T")
mytstv <- pmafc[which(pmafc$tstv %in% good),]
pcawg_tstv <- data.frame(mytstv %>% group_by(V2.y,tstv) %>% tally())
mytstv <- mc3c[which(mc3c$tstv %in% good),]
mc3_tstv <- data.frame(mytstv %>% group_by(V2.y,tstv) %>% tally())
totest <- merge(mc3_tstv,pcawg_tstv,by=c("V2.y","tstv"))
cancers <- unique(totest$V2.y)
#Cancer type level.
OUT <- NULL
TOPLOT <- NULL
for(i in cancers){
CAN <- totest[which(totest$V2.y == i),]
CAN$V2.y <- NULL
row.names(CAN) = CAN$tstv
CAN$tstv <- NULL
tCAN <- t(CAN)
cnt <- rowSums(tCAN)
tCAN[2,] = tCAN[2,]*(cnt[1]/cnt[2]) #this scales the counts to make sure that differences in shape are not due to sample count differences.
out <- chisq.test(tCAN)
# contrib <- 100*out$residuals^2/out$statistic
contrib <- 100*out$residuals^2
pf <- data.frame("Cancer"=i,t(contrib[1,]))
TOPLOT <- rbind(TOPLOT,pf)
df <- data.frame("Cancer"=i,"stat"=out$statistic,"pval"=out$p.value)
OUT <- rbind(OUT,df)
}
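# Illustration of the scaling step above (a made-up toy example, not project data):
# scaling row 2 to the total of row 1 before the chi-square test ensures that only
# differences in the *shape* of the two spectra contribute to the statistic.
# m <- rbind(c(30, 70), c(200, 300))
# m[2,] <- m[2,] * (sum(m[1,]) / sum(m[2,]))
# chisq.test(m)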
row.names(TOPLOT) <- TOPLOT$Cancer
TOPLOT$Cancer <- NULL
pdf("figures/balloon.mutSpec.pdf",height=6,width=6,useDingbats=F)
balloonplot(t(as.table(as.matrix(TOPLOT))), main ="ChiSq residuals^2 contribution", xlab ="", ylab="",label = FALSE, show.margins = FALSE)
dev.off()
#Look into a couple of cancer types to get numbers
i = "KICH"
##############################################################################################################
#Now I want to do this with the unique calls
#Don't forget to remove hypermutators
full <- fread("/diskmnt/Projects/ICGC_MC3/mc3_icgc_variant_pipeline/output/full_cleaned.tsv",sep="\t", header=TRUE, na.strings="NA",colClasses=list(character=c(128,135,139)))
ctypes = fread("/diskmnt/Projects/ICGC_MC3/ID_Mapping/PCA.sample.cancer.txt",header=F)
full$match = ifelse(!is.na(full$Chromosome) & !is.na(full$"Chromosome:1"),1,0)
full$PCAWG_only = ifelse(is.na(full$Chromosome) & !is.na(full$"Chromosome:1"),1,0)
full$MC3_only = ifelse(is.na(full$"Chromosome:1") & !is.na(full$Chromosome),1,0)
fullm <- full[which(full$MC3_only == 1 | full$match == 1),]
fullp <- full[which(full$PCAWG_only == 1 | full$match == 1),]
#DEAL with PCAWG
fullp$char12 = substr(fullp$mc3_exome_barcode,1,12)
pull <- merge(fullp, ctypes, by.x="char12",by.y="V1")
mutspsamp <- pull %>% group_by(char12) %>% tally()
nonhypers <- mutspsamp[which(mutspsamp$n < 1500),]$char12 #This represents the 95th percentile
pull$tstv <- paste(pull$"Reference_Allele:1",">",pull$"Tumor_Seq_Allele2:1",sep="")
pull <- pull[which(pull$char12 %in% nonhypers),]
#DEAL with MC3
fullm$char12 = substr(fullm$mc3_exome_barcode,1,12)
mull <- merge(fullm, ctypes, by.x="char12",by.y="V1")
mutspsamp <- mull %>% group_by(char12) %>% tally()
mull$tstv <- paste(mull$"Reference_Allele",">",mull$"Tumor_Seq_Allele2",sep="")
mull <- mull[which(mull$char12 %in% nonhypers),]
good = c("A>C","A>G","A>T","C>A","C>G","C>T")
#get counts for these
pull_tt = pull[which(pull$tstv %in% good),]
pcnt = data.frame(pull_tt %>% group_by(V2,tstv) %>% tally())
mull_tt = mull[which(mull$tstv %in% good),]
mcnt = data.frame(mull_tt %>% group_by(V2,tstv) %>% tally())
#Pull together
totull <- merge(mcnt,pcnt,by=c("V2","tstv"),all=T)
totull[is.na(totull)] <- 0
#Cancer type level.
cancers <- unique(totull$V2)
OUTULL <- NULL
TOPLOTULL <- NULL
for(i in cancers){
CAN <- totull[which(totull$V2 == i),]
CAN$V2 <- NULL
row.names(CAN) = CAN$tstv
CAN$tstv <- NULL
tCAN <- t(CAN)
cnt <- rowSums(tCAN)
tCAN[2,] = tCAN[2,]*(cnt[1]/cnt[2]) #this scales the counts to make sure that differences in shape are not due to sample count differences.
out <- chisq.test(tCAN)
# contrib <- 100*out$residuals^2/out$statistic
contrib <- 100*out$residuals^2
pf <- data.frame("Cancer"=i,t(contrib[1,]))
TOPLOTULL <- rbind(TOPLOTULL,pf)
df <- data.frame("Cancer"=i,"stat"=out$statistic,"pval"=out$p.value)
OUTULL <- rbind(OUTULL,df)
}
# SOMETHING IS GOING WRONG HERE! I DON'T KNOW WHAT...
#####################################################################################################################
# It also looks like there may be some problematic cancer types based on the information above:
# COAD, KICH, LIHC, LUAD, LUSC, OV, READ, STAD
#PCAWG sample level
mytstv <- pmafc[which(pmafc$tstv %in% good),]
pcawg_tstv_s <- data.frame(mytstv %>% group_by(char12,V2.y,tstv) %>% tally())
#MC3 samples level
mytstv <- mc3c[which(mc3c$tstv %in% good),]
mc3_tstv_s <- data.frame(mytstv %>% group_by(char12,V2.y,tstv) %>% tally())
totest_s <- merge(mc3_tstv_s,pcawg_tstv_s,by=c("char12","V2.y","tstv"),all=T)
totest_s[is.na(totest_s)] <- 0
samps <- unique(totest_s$char12)
SAMPS = NULL
for(i in samps){
S <- totest_s[which(totest_s$char12 == i),]
row.names(S) = S$tstv
code = unique(paste(S$char12,"_",S$V2.y,sep=""))
S$V2.y <- NULL
S$tstv <- NULL
S$char12 <- NULL
tS <- t(S)
print(code)
print(tS)
out <- chisq.test(tS)
df <- data.frame("ID"=code,"stat"=out$statistic,"pval"=out$p.value)
SAMPS <- rbind(SAMPS,df)
}
#This is a list of significant differences and the individual level,
samps = c("TCGA-A6-2683","TCGA-AF-2689","TCGA-BL-A13J","TCGA-AG-3885","TCGA-49-4486","TCGA-44-2659","TCGA-KN-8418")
|
00d30285617f3f666aa35dc8918582da19576195 | 2444432aa75e2a6340420a44ac91d88f204537ce | /man/gibbs_sampler.Rd | f97544ef340bfd57e2faa8ad56d9bf2684dc0a46 | [] | no_license | guhjy/bcfbma | 03bbe6aafdce9cff39722f817b90c9b9b0116e4b | fe7911143a30c49d919d7048f7646856ec8a8f82 | refs/heads/master | 2020-08-08T14:48:06.976746 | 2019-10-07T11:27:19 | 2019-10-07T11:27:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 537 | rd | gibbs_sampler.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{gibbs_sampler}
\alias{gibbs_sampler}
\title{Obtain draws from gibbs sampler}
\usage{
gibbs_sampler(overall_sum_trees_mu, overall_sum_trees_tau,
overall_sum_mat_mu, overall_sum_mat_tau, y, BIC_weights, num_iter,
burnin, num_obs, a_mu, a_tau, sigma, mu_mu_mu, mu_mu_tau, nu, lambda,
resids_mu, resids_tau, z, test_data, test_pihat, z_test, include_pi2,
num_propscores, num_test_obs)
}
\description{
Obtain draws from gibbs sampler
}
|
6d3fa7c034391717ca15faecb51e42e8a30ff9c0 | 7f2f9299ced47b047fd38491c5a49992afd3fd0e | /run_analysis.R | 2a5e484988b262c2162cc5e73f581c474b07269e | [] | no_license | hamish222/Coursera-Course-3 | edf381b5492d59e65578eb9f17bd91e22b4eaf55 | e811968f641732935c117146c54c40bb142ec49d | refs/heads/master | 2021-01-10T11:33:53.029442 | 2016-02-29T02:46:07 | 2016-02-29T02:46:07 | 52,757,817 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,580 | r | run_analysis.R | ## Set the working directory.
## setwd("C:\\Users\\Hamish\\Documents\\Educational Materials\\Coursera Data Science\\Course 3\\")
## Use source("run_analysis.R") at the command line to run this script .
## Available at https://github.com/hamish222/Coursera-Course-3/tree/master.
## Part 1
# Load test data (note: the UCI HAR data files contain no header row).
testdata <- read.table("UCI HAR Dataset\\test\\X_test.txt",header=FALSE)
# Load training data.
traindata <- read.table("UCI HAR Dataset\\train\\X_train.txt",header=FALSE)
# Load and extract the variable names for the columns in testdata and traindata.
variableNames <- read.table("UCI HAR Dataset\\features.txt",header=FALSE)
variableNames <- variableNames[,2]
# Limit attention to those variables that are means or standard deviations.
variableIndices <- c(grep("mean",variableNames),grep("std",variableNames))
variableNames <- variableNames[variableIndices]
# Extract the corresponding data from testdata and traindata.
testdata <- testdata[,variableIndices]
traindata <- traindata[,variableIndices]
# Re-name the variables for the test and training data sets.
names(testdata) <- variableNames
names(traindata) <- variableNames
# Combine the avg/std data from the test and training data.
alldata <- rbind(testdata,traindata)
# Load and combine the activity data for the test and training data.
testactivities <- read.table("UCI HAR Dataset\\test\\y_test.txt",header=FALSE)
trainactivities <- read.table("UCI HAR Dataset\\train\\y_train.txt",header=FALSE)
names(testactivities) <- "activity"
names(trainactivities) <- "activity"
activities <- rbind(testactivities ,trainactivities)
# Load and combine the subject data for the test and training data.
testsubjects <- read.table("UCI HAR Dataset\\test\\subject_test.txt",header=FALSE)
trainsubjects <- read.table("UCI HAR Dataset\\train\\subject_train.txt",header=FALSE)
names(testsubjects) <- "subject"
names(trainsubjects) <- "subject"
subjects <- rbind(testsubjects,trainsubjects)
# Prepend the subject and activity columns to alldata.
alldata <- cbind(subjects, activities, alldata)
# Write the new data to a file.
write.table(alldata,"Course3ProjectCombinedData.txt", row.names=FALSE)
## Part 2
# Use aggregate to compute the means for each activity and subject.
summary <- aggregate(alldata, by=list(alldata$activity,alldata$subject), FUN=mean,na.rm=TRUE)
# Remove the two columns that aggregate introduces.
summary$Group.1 <- NULL
summary$Group.2 <- NULL
# Write the summary means to a file.
write.table(summary,"Course3ProjectSummaryData.txt", row.names=FALSE)
|
b0da84074d860f45426abe91edd87de46a8a0e58 | 73cbe254a53b69f07c32721a28be92970f25f758 | /load_project_data.R | c4b38b09ea745906e28e20bfecf91c20df30d092 | [] | no_license | pradhyu/ExData_Plotting1 | cd4d207696bf6d0acba2684dfd99f4034752270d | 3cdf115c040e86e5c8349af1e8cf4cead033028e | refs/heads/master | 2020-12-31T03:03:58.626195 | 2016-05-08T15:05:51 | 2016-05-08T15:05:51 | 56,355,812 | 0 | 0 | null | 2016-04-15T23:43:10 | 2016-04-15T23:43:10 | null | UTF-8 | R | false | false | 1,365 | r | load_project_data.R | # current working directory should be the repo which has the R files for this project
library(httr)
data.Url<- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
# create data directory if it doesn't exists.
data.folder <- "data"
if(!file.exists(data.folder)){
dir.create(data.folder)
}
data.zip <- paste(getwd(), "/", data.folder,"/household_power_consumption.zip", sep = "")
if(!file.exists(data.zip)){
download.file(data.Url, data.zip, method="curl", mode="wb")
}
data.txt<- paste(getwd(),"/", data.folder, "/household_power_consumption.txt", sep = "")
if(!file.exists(data.txt)){
unzip(data.zip, list = FALSE, overwrite = FALSE, exdir = data.folder)
}
data.summaryFile<- paste(getwd(), "/", data.folder, "/data_summary.rds", sep = "")
if(!file.exists(data.summaryFile)){
data.loaded <- read.table(data.txt, header=TRUE, sep=";", colClasses=c("character", "character", rep("numeric",7)), na="?")
data.loaded$Time <- strptime(paste(data.loaded$Date, data.loaded$Time), "%d/%m/%Y %H:%M:%S")
data.loaded$Date <- as.Date(data.loaded$Date, "%d/%m/%Y")
data.dateRange <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
data.loaded <- subset(data.loaded, Date %in% data.dateRange)
saveRDS(data.loaded, data.summaryFile)
} else {
data <- "data/data_summary.rds"
data.loaded <- readRDS(data)
}
|
6e790314905189f5143c15c6120e87aa836a7219 | f010762b3f997669ba84738fcec95e8cb5b23f60 | /allele_freqs/distance_from_H.r | ee1cdbc6f3931c5f6f8d6f38174553fedf5f17b1 | [] | no_license | nriddiford/Investigating-structural-variation-in-cancer-genomes | 7f9d26ebfad3449adee1fe6ac40ef24ca7b133b3 | 51d208f8b5e245027b31aa1e6aad4e61815e4dcb | refs/heads/master | 2021-09-05T10:17:07.740646 | 2018-01-26T10:14:21 | 2018-01-26T10:14:21 | 72,754,078 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,740 | r | distance_from_H.r | library(plotly)
library(ggplot2)
library(scales)
#setwd("/Users/Nick/iCloud/Desktop/snp_data/data")
setwd("/Users/Nick_curie/Desktop/script_test/snp_data")
path = getwd()
file.names <- dir(path, pattern =".txt")
for(i in 1:length(file.names)){
cat("Processing", file.names[i])
snps<-read.delim(file.names[i], header = T)
parts<-strsplit(file.names[i], '[.]')[[1]]
sample<-parts[1]
p <- ggplot(data = snps, aes(x = position, y = freq, colour = type ), show.legend = FALSE)
p <- p + stat_smooth(aes(fill = factor(type)), size = 0.25, alpha = 0.15, show.legend = FALSE)
p <- p + geom_line()
p <- p + scale_y_continuous(breaks = seq(0, 100, by = 25))
p <- p + facet_wrap(~chrom, scale="free_x")
p <- p + ggtitle( paste( sample ) )
outfile <- paste(sample, '_', "allele_freqs", '.pdf', sep='')
ggsave(outfile, plot = p, scale = 0.9)
}
library(plotly)
library(ggplot2)
library(scales)
setwd("/Users/Nick_curie/Desktop/script_test/snp_data")
file<-'A512R21.dist_from_h.txt'
snps<-read.delim(file, header = T)
parts<-strsplit(file, '[.]')[[1]]
sample<-parts[1]
#
chrom2plot<-"2R"
#
chromosome<-subset(snps, snps$chrom == chrom2plot )
# single chrom
# ggplot
p <- ggplot(data = chromosome, aes(x = position, y = dist_ratio), show.legend = FALSE)
p <- p + stat_smooth(aes(fill = dist_ratio), size = 0.25, alpha = 0.15, show.legend = FALSE)
p <- p + geom_point(aes(colour = -dist_ratio, text = paste("count: ", count )), show.legend = FALSE)
# g <- g + scale_y_continuous(breaks = seq(0, 50, by = 10))
p <- p + ggtitle( paste( sample, chrom2plot ) )
p
# # plotly
g <- ggplot(data = chromosome, aes(x = position, y = dist_ratio), show.legend = FALSE)
g <- g + stat_smooth(aes(fill = dist_ratio), size = 0.25, alpha = 0.15, show.legend = FALSE)
g <- g + geom_point(aes(colour = -dist_ratio, text = paste("count: ", count )), show.legend = FALSE)
# g <- g + scale_y_continuous(breaks = seq(0, 50, by = 10))
g <- g + ggtitle( paste( sample, chrom2plot ) )
ggplotly(g)
# all chroms
p <- ggplot(data = snps, aes(x = position, y = dist_ratio), show.legend = FALSE)
p <- p + stat_smooth(aes(fill = dist_ratio), size = 0.25, alpha = 0.15, show.legend = FALSE)
p <- p + geom_point(aes(colour = -dist_ratio, text = paste("count: ", count )), show.legend = FALSE)
p <- p + facet_wrap(~chrom, scale="free_x")
p <- p + ggtitle( paste( sample ) )
p
# plotly
g <- ggplot(data = snps, aes(x = position, y = freq, colour = type ), show.legend = FALSE)
g <- g + stat_smooth(aes(fill = factor(type)), size = 0.25, alpha = 0.15, show.legend = FALSE)
g <- g + geom_line()
g <- g + scale_y_continuous(breaks = seq(0, 100, by = 25))
g <- g + facet_wrap(~chrom, scale="free_x")
g <- g + ggtitle( paste( sample ) )
ggplotly(p)
p
p
|
1e1b43fe43275698afc6902cbf8f097c02842797 | 37c17847af9fff4d45272b81560cdbb71f7a9135 | /Kurstag4_rskript.R | c0dd462a09ac56dfbbdba688eb3e96181f2bea0e | [] | no_license | LSBurchardt/Kurs-Statistiksoftware_R | 1df617adee82a603e4fa3f73131b80f5ddd93b6c | 6afa75454caef92d620c6cf2c8bbf13dbe2789c1 | refs/heads/master | 2020-05-24T10:09:24.695575 | 2020-05-04T14:15:04 | 2020-05-04T14:15:04 | 187,221,640 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,990 | r | Kurstag4_rskript.R | #################################################################
#Statistics Software R
#Lecturer: Lara S. Burchardt, l.s.burchardt@gmx.de
#
#24.05/25.05
#21.06/22.06
#
################################################################
#Course day 4
# 00: Load packages ---------------------------------------------------------------
library(tidyverse) #includes for example ggplot2, dplyr
library(psych)
library(cluster)
library(NbClust)
library(plotly)
library(nycflights13)
library(shiny)
# 01: Read in data --------------------------------------------------------------
#Load the "Erstis" dataset
#Load the "sub.df.timeseries.data" dataset (available on Stine)
# 02: Fix the errors---------------------------------------------------------------
# Run the code and try to understand and fix (where possible) the errors.
#1
data<-mtcars
plot(data$mpg, Data$qsec)
#2
data.2 <- iris
plot(Sepal.Length,Sepal.Width)
#3
mean(iris$Species)
#4
M <- cor(mtcars)
corrplot(M, method = "circles")
#5
output <- vector[] #an empty vector is created
for x in c(1:10) # the sequence is defined
output[x] <- x + (x+1) # what should be done?
#6
ggplot(erstis, (x= Alter, y= zuf.inh.1))+
geom_point(color= "skyblue", shape = "square")
geom_smooth(method = 'l m', se = FALSE)
#7
erstis$alter[193] <- 78
#8
mean.column <- mean(erstis[1:191,57], na.rm = TRUE)
#9
mean <- mean(erstis$alter)
if (mean > 12) {print("c is greater than 12")}
#10
letters.mat <- matrix(NA, nrow = 5, ncol = 5)
for (m in 1:6) {
for (n in 1:5){letters.mat[m,n] <- (letters[n])}
}
print(letters.mat)
# 03: Text Mining
library(tm)
library(pdftools)
library(wordcloud)
file.location <- "Wahlprogramme/fdp.pdf"
txt <- pdf_text(file.location)
#Create the corpus
txt_corpus <- Corpus(VectorSource(txt))
# Clean up the corpus
txt_corpus <- tm_map(txt_corpus, tolower) #everything converted to lowercase
txt_corpus <- tm_map(txt_corpus, removePunctuation) #punctuation removed
txt_corpus <- tm_map(txt_corpus, stripWhitespace) # strip extra whitespace
#Remove stop words
txt_corpus <- tm_map(txt_corpus, removeWords, stopwords("de"))
# Inspect the corpus content
txt_corpus$content
# Create the document-term matrix
dtm <- DocumentTermMatrix(txt_corpus)
dtm <- as.matrix(dtm)
number.occurences <- colSums(dtm)
number.occurences <- sort(number.occurences, decreasing = TRUE)
#plot WordCloud
wordcloud(head(names(number.occurences),30), head(number.occurences, 30), scale = c(2,1))
# Based on the resulting word cloud, create a vector of additional stop words and remove those as well.
# Re-run the analysis and generate a new word cloud (a sketch follows below).
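# A minimal sketch for the exercise above (the extra stop words below are
# hypothetical examples; pick yours from your own word cloud):
extra.stopwords <- c("dass", "mehr", "freie", "demokraten")
txt_corpus <- tm_map(txt_corpus, removeWords, extra.stopwords)
dtm <- as.matrix(DocumentTermMatrix(txt_corpus))
number.occurences <- sort(colSums(dtm), decreasing = TRUE)
wordcloud(head(names(number.occurences), 30), head(number.occurences, 30), scale = c(2,1))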
# 04: Plotly -------------------------------------------------------
# Daten laden: "Daten/timeseries_example.rds"
a <- list(title = "Date",
showticklabels = TRUE)
g<- list(
title = "Temperature [deg C]",
showticklabels = TRUE)
p2<-plot_ly(sub.df.timeseries.data, name = "Mean Temperature", showlegend = FALSE) %>%
add_trace(x= ~sub.df.timeseries.data$date_mean, y= ~sub.df.timeseries.data$Tem_mean,type= 'scatter',
mode= 'markers', marker = list(symbol= "circle", size=4, color="grey"),
#error_y= ~list(type = "data", array=df.sub$Tem_sd,color= "grey"),
hoverinfo = 'text',
text = ~paste('</br> Mean Temp',
'</br> Date', sub.df.timeseries.data$date_mean,
'</br> Mean: ', round(sub.df.timeseries.data$Tem_mean, digits= 2) ,
'</br> Max: ', round(sub.df.timeseries.data$Tem_max, digits = 2),
'</br> Min: ', round(sub.df.timeseries.data$Tem_min, digits = 2))) %>%
layout(xaxis = a, yaxis = g, autosize = FALSE)
# Build an interactive plot for a dataset and a variable combination of your choice (a sketch follows below)
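# A minimal sketch for the exercise above, using the built-in mtcars data:
plot_ly(mtcars, x = ~wt, y = ~mpg, type = 'scatter', mode = 'markers',
        hoverinfo = 'text',
        text = ~paste('</br> hp: ', hp, '</br> cyl: ', cyl)) %>%
  layout(xaxis = list(title = "Weight"), yaxis = list(title = "Miles per gallon"))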
# 05: Shiny -----------------------------------------------------------
#https://shiny.rstudio.com/
# Open a new file in the File tab. Choose the option "Shiny Web App...".
# How is the app structured? Which parts can you identify, and what does the app do?
# Play around a bit, e.g. add another slider and create a second histogram with
# the data from the second slider (the "faithful" dataset that is used contains a second variable); a sketch follows below.
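# A minimal sketch for the exercise above (extends the default app template;
# the input/output ids "bins2" and "distPlot2" are made up for illustration):
ui <- fluidPage(
  sliderInput("bins2", "Number of bins:", min = 1, max = 50, value = 30),
  plotOutput("distPlot2")
)
server <- function(input, output) {
  output$distPlot2 <- renderPlot({
    x <- faithful[, 1]  # the second variable: eruption duration
    hist(x, breaks = seq(min(x), max(x), length.out = input$bins2 + 1),
         col = 'darkgray', border = 'white')
  })
}
# Run with: shinyApp(ui = ui, server = server)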
# 06: R Markdown -------------------------------------------------------
# Open a new file and choose the "R Markdown" option. Fill in the required fields and create the document.
---
title: "R Kurs"
author: "Lara S. Burchardt"
date: "20 Juni 2019"
output: word_document
---
# Text is simply typed in. A code chunk is started and ended with ```.
# After the opening ``` there is at least {r}; further options can be added here, the chunk can be named,
# and you can specify whether the R code should be visible in the document (default, i.e. include = TRUE) or not (include = FALSE)
```{r setup, include=FALSE}
knitr::opts_chunk$set(echo = TRUE)
```
## R Markdown
This is an R Markdown document. Markdown is a simple formatting syntax for authoring HTML, PDF, and MS Word documents. For more details on using R Markdown see <http://rmarkdown.rstudio.com>.
When you click the **Knit** button a document will be generated that includes both content as well as the output of any embedded R code chunks within the document. You can embed an R code chunk like this:
```{r cars}
summary(cars)
```
## Including Plots
You can also embed plots, for example:
# If the output should be shown but not the code, the options include = TRUE (default, so it can be omitted) and echo = FALSE must be set.
```{r pressure, echo=FALSE}
plot(pressure)
```
Note that the `echo = FALSE` parameter was added to the code chunk to prevent printing of the R code that generated the plot.
# Create an R Markdown document. Put together a document that presents the course contents in a clear overview.
# Explanations, code snippets and example plots or tables, e.g. on:
# 1) loading and saving data,
# 2) data formats, how they are created and what the output looks like
# 3) getting help in R
# 4) a scatterplot in ggplot
# 5) a boxplot in ggplot
# 6) a histogram
# 7) other plots you used or that are important to you
# 8) data management (filter, select, group_by, summarize_at, summarize_all, etc.)
# 9) frequency tables
# 10) linear regression
# 11) correlation
# 12) t test and ANOVA
# 13) for loops
# 14) if statements
# 15) PCA and factor analysis
# 16) cluster analysis
# 17) tips on fixing errors
# 18) further possibilities in R (text mining, plotly, shiny)
# Finish this document as the second homework assignment. The document is your reference for further work in R
# and should therefore be as detailed as you would like it to be and cover the topics that are most important to you.
# Submission: we will sort this out in class. |
c429eecdb1fc75700def1cd15ff6bc1959194c02 | 6ac9cab2e20f60a877a88a7e85998240a1a13b32 | /exercises/EDA/CHIS.R | a2416e24f42b101d6a32087746c6d552848d2a6b | [] | no_license | ankit-sharma90/SpringboardDSCourse | 365b632efc1d44c94f0cf040a573d96be1f35c78 | a5cad3b0639055becda04b2ddb464c315f43bf95 | refs/heads/master | 2023-03-01T19:46:47.188840 | 2021-02-09T19:30:37 | 2021-02-09T19:30:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 711 | r | CHIS.R | # Plot so far
p
# Position for labels on y axis (don't change)
index <- DF_all$xmax == max(DF_all$xmax)
DF_all$yposn <- DF_all$ymin[index] + (DF_all$ymax[index] - DF_all$ymin[index])/2
# Plot 1: geom_text for BMI (i.e. the fill axis)
p1 <- p %+% DF_all +
geom_text(aes(x = max(xmax),
y = yposn,
label = FILL),
size = 3, hjust = 1,
show.legend = FALSE)
p1
# Plot 2: Position for labels on x axis
DF_all$xposn <- DF_all$xmin + (DF_all$xmax - DF_all$xmin)/2
# geom_text for ages (i.e. the x axis)
p1 %+% DF_all +
geom_text(aes(x = xposn, label = X),
y = 1, angle = 90,
size = 3, hjust = 1,
show.legend = FALSE) |
143960dfc4a0c1a87d5546c3bbf514cc640d05d0 | 1b13ba6821b4d9780cf8faceccb465c9e1255b08 | /R/Scripts/get_saker.R | 835eb91e0662f952b4da86dee588d442a42e8e5a | [] | no_license | emanlapponi/storting | 7213ad63f3a5868dc2e0f686780e393545b34b16 | b043d54ca25b4956c120580267e9b227c88f0fd0 | refs/heads/master | 2021-01-19T03:00:20.578031 | 2017-06-19T11:50:26 | 2017-06-19T11:50:26 | 54,019,041 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 552 | r | get_saker.R | rm(list = ls());gc();cat("\014")
library(dplyr); library(pbmcapply)
# Loading data ######
dagsorden <- read.csv2("./Data/dagsorden.csv", stringsAsFactors = FALSE)
url_base <- "https://data.stortinget.no/eksport/sak?sakid="
path <- "/media/martin/Data/saker_raw/"
done <- sort(gsub("[^0-9]", "", list.files(path)))
sak_ids <- sort(as.character(unique(dagsorden$sak_id)))
sak_ids <- sak_ids[which((sak_ids %in% done)==FALSE)]
for(i in sak_ids){
system(paste0("wget -O ", path, "sak_", i, ".xml ", url_base, i, " "))
Sys.sleep(abs(2+rnorm(1)))
}
|
767fd649d538c14085381fe0f7acccf3947691c1 | 276bd8306aeb9fc90c5a63f57ef21407a59dddd1 | /plot6.R | 9fad0d44b182c385a35cfc3fde45a9eeae2f28d7 | [] | no_license | MichaelSzczepaniak/ParticulateMatterStudy1999to2008 | cbcd61fc4d62994903d2b217843e5e4c2dc4bacc | 2003f8f8d6acad8edcf05bcf835d5544aa93d117 | refs/heads/master | 2016-09-06T00:55:18.122313 | 2015-08-23T23:16:48 | 2015-08-23T23:16:48 | 40,325,000 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,018 | r | plot6.R | ##
## Generates a 1 x 2 panel plot of the PM25 motor vehicle emissions
## normalized by 1999 Emissions levels for Baltimore city and Los Angeles
## county from 1999 to 2008. For details see:
## https://github.com/MichaelSzczepaniak/ParticulateMatterStudy1999to2008
##
## The criteria used to determine which records are considered
## "motor vehicle sources" is any record with an EI.Sector field value that
## contains the string "mobile" followed by the string "road" ignoring case.
##
## Sources should be subsetted to only include those that are common in each
## of the measurement years (1999, 2002, 2005, and 2008). Subsetting was not
## done here in order to allow the ON-ROAD measurement to show up in the plot
## as advised in this forum thread:
## https://class.coursera.org/exdata-031/forum/thread?thread_id=132
##
library(dplyr)
library(ggplot2)
## Load function that normalizes the NEI dataframe by grabbing records with
## sources that are common across the years 1999, 2002, 2005, 2008.
## This function is not being called because when it is, all ON-ROAD sources
## get dropped due to these sources not existing in 2008. I leave the code in
## because I believe it is the more correct way to do the analysis, but from
## the discussion boards, no one else seems to be doing it this way so I don't
## want to cause any confusion when peer review time comes around.
normalizeNEI <- function(nei) {
nei1999 <- filter(nei, year == 1999)
scc1999 <- unique(nei1999$SCC)
nei2002 <- filter(nei, year == 2002)
scc2002 <- unique(nei2002$SCC)
nei2005 <- filter(nei, year == 2005)
scc2005 <- unique(nei2005$SCC)
nei2008 <- filter(nei, year == 2008)
scc2008 <- unique(nei2008$SCC)
sccCommon <- intersect(scc1999, scc2002)
sccCommon <- intersect(sccCommon, scc2005)
sccCommon <- intersect(sccCommon, scc2008)
# sccCommon contains only the sources common to all 4 time periods
normalizedNEI <- filter(nei, SCC %in% sccCommon)
return(normalizedNEI)
}
getNeiSummary <- function(file = "summarySCC_PM25.rds") {
# save time if function has been executed already and NEI is in workspace
if(!exists("NEI")) {
NEI <- readRDS("summarySCC_PM25.rds")
}
NEI <- readRDS("summarySCC_PM25.rds")
sourceClasses <- readRDS("Source_Classification_Code.rds")
# get indices of motor vehicle sources as described above
motorVehicleIndices <- grep("(mobile)(.*)(road)",
sourceClasses$EI.Sector, ignore.case=T)
sccValues <- sourceClasses$SCC[motorVehicleIndices]
# get the Baltimore and Los Angeles data and group them by year
allMotor <- filter(NEI, SCC %in% sccValues)
motorBalt <- filter(allMotor, fips == "24510")
motorLA <- filter(allMotor, fips == "06037")
motorBaltByYear <- group_by(motorBalt, year)
motorLAByYear <- group_by(motorLA, year)
# free some memory
# rm(NEI)
rm(allMotor)
rm(motorBalt)
rm(motorLA)
# total the Baltimore emissions and add nomalized emissions column
emissionsBalt <- summarise(motorBaltByYear,
Total_Emissions = sum(Emissions, na.rm = TRUE))
emissionsBalt <- mutate(emissionsBalt, City = "Baltimore")
emissionsBalt <- emissionsBalt[, c(1, 3, 2)] # make Total_Emissions last col
# add Baltimore normalized emissions column
emissions1999 <- emissionsBalt$Total_Emissions[1]
# emissionsBalt <- mutate(emissionsBalt,
# Normalized_Emissions = (Total_Emissions /emissions1999))
emissionsBalt <- mutate(emissionsBalt,
Normalized_Emissions = (Total_Emissions))
# total the Los Angeles emissions and add nomalized emissions column
emissionsLA <- summarize(motorLAByYear,
Total_Emissions = sum(Emissions, na.rm = TRUE))
emissionsLA <- mutate(emissionsLA, City = "Los Angeles")
emissionsLA <- emissionsLA[, c(1, 3, 2)] # make Total_Emissions last col
# add normalized LA emissions column
emissions1999 <- emissionsLA$Total_Emissions[1]
# emissionsLA <- mutate(emissionsLA,
# Normalized_Emissions = (Total_Emissions /emissions1999))
emissionsLA <- mutate(emissionsLA,
Normalized_Emissions = (Total_Emissions))
# combine the Baltimore and LA data so we can plot them together
emissionsCombined <- rbind(emissionsBalt, emissionsLA)
emissionsCombined <- rename(emissionsCombined, Year = year)
emissionsCombined <- arrange(emissionsCombined, desc(City), Year)
emissionsCombined <- mutate(emissionsCombined, City = factor(City))
return(emissionsCombined)
}
createPanelPlots6 <- function(file = "plot6.png", width = 720, height = 500,
units = "px") {
emissionsCombined <- getNeiSummary()
png(file = file, width = width, height = height, units = units)
g <- ggplot(emissionsCombined,
aes(x = Year, y = Normalized_Emissions, shape = City, group = City))
plot <- g + geom_point(size = 4)
#plot <- plot + geom_smooth(size=1, linetype=3, method="lm", se=FALSE) # most folks in forum don't like this, so comment out
# plot <- plot + facet_grid(. ~ City) + geom_line()
plot <- plot + facet_wrap(~ City, nrow = 1, ncol = 2, scales = "free")
plot <- plot + geom_line()
plot <- plot + ggtitle(" Motor Vehicle Emissions 1999 to 2008: Baltimore vs. LA")
plot <- plot + coord_cartesian(xlim=c(1998, 2009))
plot <- plot + scale_x_continuous(breaks=seq(1999, 2008, 3))
# plot <- plot + scale_y_continuous(breaks=c(seq(0.2, 1.2, 0.2)))
plot <- plot + labs(y = expression(PM[2.5] * " Emissions (in tons)"))
# make the fonts a bigger so everything is more readable
plot <- plot + theme(text = element_text(size=16),
axis.text.x = element_text(angle=90, vjust=1))
plot <- plot + theme(legend.position="none")
print(plot)
dev.off()
}
createPanelPlots6() |
91dab45f8d450fe4427138bc47a70ed87d32fef7 | 0e0fb77df671d38ab424ac14d9fc91b7fd0d1318 | /R/rrna.R | f716aefae920c571b183b1aa7e071eb1381e0fb4 | [] | no_license | giraola/taxxo | 7d30f52474d6113e288dfa42297365c0e5ecd5ef | ba9a33a357b6267e11857659b28156f7540d92b1 | refs/heads/master | 2021-01-13T16:56:52.251231 | 2017-09-13T20:58:41 | 2017-09-13T20:58:41 | 77,538,782 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 8,753 | r | rrna.R | #' rrna
#'
#' Outputs a individual rRNA gene sequences and a multiple sequence alignment.
#' @param path is the full path to where genomes in fasta format are placed.
#' @param pattern a pattern (like '.fasta') for recognizing the genome files.
#' @param outdir a name for the output directory that will be created for results.
#' @param subunit takes '16S' (default) or '23S'.
#' @param multiple takes a logical (default, FALSE) for managing multiple copies of rRNA genes. If more than one copy is found, FALSE will return one random copy while TRUE will return all of them.
#' @param distance takes a logical (default, TRUE) indicating if a distance matrix is calculated and outputed or not.
#' @param phylogeny takes a logical (default, TRUE) indicating if a Neighbor-Joining phylogeny is constructed or not.
#' @param kingdom takes 'bacteria' (default) or 'archaea'.
#' @param align takes a logical (default, TRUE) indicating if sequence alignment is performed.
#' @keywords rRNA 16S 23S
#' @export
#' @examples
#' rrna(align=T,kingdom='bacteria',multiple=F,outdir='./test_genomes/rrna_result',path='./test_genomes/',pattern='.fna',subunit='16S',phylogeny=TRUE)
rrna<-function(path,
pattern,
outdir='rrna_out',
subunit='16S',
kingdom='bacteria',
align=TRUE,
multiple=FALSE,
distance=TRUE,
phylogeny=TRUE,
...)
{
# Options #
options(getClass.msg=FALSE,warn=1)
gw<-getwd()
 barrnap<-paste(system.file('barrnap',package='taxxo'),'/common/bin/barrnap',sep='')
 clustalo<-'clustalo' # assumption: the Clustal Omega binary is available on the PATH
 proc<-1              # threads passed to Clustal Omega below
# Dependencies #
suppressMessages(require(seqinr,quietly=T))
#suppressMessages(require(msa,quietly=T))
suppressMessages(require(phangorn,quietly=T))
# Check input #
if (kingdom=='bacteria'){
king<-'bac'
} else if (kingdom=='archaea'){
king<-'arc'
} else {
stop("ERROR: Parameter kingdom must be 'archaea' or 'bacteria'")
}
if (!subunit%in%c('16S','23S','both')){
stop("ERROR: Parameter subunit must be '16S', '23S' or 'both'")
}
# Get genomes #
flist<-list.files(path=path,pattern=pattern,full.names=T)
fnams<-list.files(path=path,pattern=pattern)
outfiles<-paste(gsub(pattern,'',fnams),'.',subunit,'.fasta',sep='')
# Create output directory #
system(paste('mkdir',outdir))
system('touch rrna.err')
# Perform rRNA search with barrnap #
for (f in 1:length(flist)){
outgff<-paste(gsub('.fasta','',fnams[f]),'.tmp.gff',sep='')
cmd<-paste(barrnap,' --kingdom ',king,' ',flist[f],' > ',outgff,sep='')
system(cmd,ignore.stderr=T)
genome<-read.fasta(flist[f])
sequen<-lapply(getSequence(genome),toupper)
snames2<-gsub('>','',system(paste("grep '>' ",flist[f],sep=''),intern=T))
snames<-unlist(lapply(snames2,function(x){strsplit(x,' ')[[1]][1]}))
if (file.info(outgff)$size<=16){
mssg<-paste('No ',subunit,' gene found in genome ',fnams[f],sep='')
cat(mssg,file='rrna.err',append=T,sep='\n')
warning(mssg)
} else {
gff<-read.table(outgff,sep='\t',skip=1)
sun<-as.vector(gff[,9])
if (subunit=='16S'){
grp<-grep(subunit,sun)
if (length(grp)==0){
mssg<-paste('No ',subunit,' gene found in genome ',fnams[f],sep='')
cat(mssg,file='rrna.err',append=T,sep='\n')
warning(mssg)
} else if (length(grp)==1){
ini<-gff[grp,4]
fin<-gff[grp,5]
std<-gff[grp,7]
nam<-as.vector(gff[grp,1])
contig<-which(snames==nam)
if (std=='+'){
gene<-sequen[[contig]][ini:fin]
} else if (std=='-'){
gene<-toupper(rev(comp(sequen[[contig]][ini:fin])))
}
write.fasta(gene,names=gsub('.fasta','',outfiles[f]),file=outfiles[f])
system(paste('mv *.16S.fasta',outdir))
} else if (length(grp)>1){
if (multiple==F){
ini<-gff[grp[1],4]
fin<-gff[grp[1],5]
std<-gff[grp[1],7]
nam<-as.vector(gff[grp[1],1])
contig<-which(snames==nam)
if (std=='+'){
gene<-sequen[[contig]][ini:fin]
} else if (std=='-'){
gene<-toupper(rev(comp(sequen[[contig]][ini:fin])))
}
write.fasta(gene,names=gsub('.fasta','',outfiles[f]),file=outfiles[f])
system(paste('mv *.16S.fasta',outdir))
} else if (multiple==T){
gff2<-gff[grp,]
ini<-gff2[,4]
fin<-gff2[,5]
std<-as.vector(gff2[,7])
nam<-as.vector(gff2[,1])
for (g in 1:length(grp)){
contig<-which(snames==nam[g])
if (as.vector(as.vector(std[g]))=='+'){
gene<-sequen[[contig]][ini[g]:fin[g]]
} else {
gene<-toupper(rev(comp(sequen[[contig]][ini[g]:fin[g]])))
}
write.fasta(gene,names=gsub('fasta',g,outfiles[f]),file=outfiles[f],open='a')
}
system(paste('mv *.16S.fasta',outdir))
}
}
} else if (subunit=='23S'){
grp<-grep(subunit,sun)
if (length(grp)==0){
mssg<-paste('No ',subunit,' gene found in genome ',fnams[f],sep='')
cat(mssg,file='rrna.err',append=T,sep='\n')
warning(mssg)
} else if (length(grp)==1){
ini<-gff[grp,4]
fin<-gff[grp,5]
std<-gff[grp,7]
nam<-as.vector(gff[grp,1])
contig<-which(snames==nam)
if (std=='+'){
gene<-sequen[[contig]][ini:fin]
} else if (std=='-'){
gene<-toupper(rev(comp(sequen[[contig]][ini:fin])))
}
write.fasta(gene,names=gsub('.fasta','',outfiles[f]),file=outfiles[f])
system(paste('mv *.23S.fasta',outdir))
} else if (length(grp)>1){
if (multiple==F){
ini<-gff[grp[1],4]
fin<-gff[grp[1],5]
std<-gff[grp[1],7]
nam<-as.vector(gff[grp[1],1])
contig<-which(snames==nam)
if (std=='+'){
gene<-sequen[[contig]][ini:fin]
} else if (std=='-'){
gene<-toupper(rev(comp(sequen[[contig]][ini:fin])))
}
write.fasta(gene,names=gsub('.fasta','',outfiles[f]),file=outfiles[f])
system(paste('mv *.23S.fasta',outdir))
} else if (multiple==T){
gff2<-gff[grp,]
ini<-gff2[grp,4]
fin<-gff2[grp,5]
std<-as.vector(gff2[grp,7])
nam<-as.vector(gff2[grp,1])
for (g in 1:length(grp)){
contig<-which(snames==nam[g])
if (as.vector(std[g])=='+'){
gene<-sequen[[contig]][ini[g]:fin[g]]
} else {
gene<-toupper(rev(comp(sequen[[contig]][ini[g]:fin[g]])))
}
write.fasta(gene,names=gsub('fasta',g,outfiles[f]),file=outfiles[f],open='a')
}
system(paste('mv *.23S.fasta',outdir))
}
}
} else {
stop('Parameter subunit must be "16S" or "23S"')
}
}
}
system('rm -rf *.tmp.gff')
system(paste('mv rrna.err',outdir))
# Align sequences #
if (align==TRUE){
setwd(outdir)
if (subunit=='16S'){
system('cat *.16S.fasta > all.16S.fasta')
#namali<-gsub('>','',system("grep '>' all.16S.fasta",intern=T))
cmd<-paste(clustalo,
' -i all.16S.fasta -o alignment.16S.fasta ',
'--threads ',proc,
' --output-order=input-order',
sep='')
system(cmd)
#aux<-capture.output(
#alignment<-msa(inputSeqs='all.16S.fasta',method='ClustalOmega',type='dna',order='input'))
#aliconver<-msaConvert(alignment,type='seqinr::alignment')
#seqs<-lapply(aliconver$seq,s2c)
#write.fasta(seqs,names=namali,file='alignment.16S.fasta')
} else if (subunit=='23S'){
system('cat *.23S.fasta > all.23S.fasta')
cmd<-paste(clustalo,
' -i all.23S.fasta -o alignment.23S.fasta ',
'--threads ',proc,
' --output-order=input-order',
sep='')
system(cmd)
#namali<-gsub('>','',system("grep '>' all.23S.fasta",intern=T))
#aux<-capture.output(
#alignment<-msa(inputSeqs='all.23S.fasta',method='ClustalO',type='dna',order='input'))
#aliconver<-msaConvert(alignment,type='seqinr::alignment')
#seqs<-lapply(aliconver$seq,s2c)
#write.fasta(seqs,names=namali,file='alignment.23S.fasta')
}
}
 # Distance matrix (requires align=TRUE so the alignment file exists) #
 if (distance==TRUE){
  alifile<-paste('alignment.',subunit,'.fasta',sep='')
  aliconver<-read.alignment(alifile,format='fasta')
  dista1<-dist.alignment(aliconver)
  dista2<-round(dista1^2,digits=3)
  dnam<-paste('distance_matrix_',subunit,sep='')
  assign(dnam,dista2)
  save(list=dnam,file=paste(dnam,'Rdata',sep='.'))
 }
 # Phylogeny (requires align=TRUE; the alignment is read back with phangorn) #
 if (phylogeny==TRUE){
  alifile<-paste('alignment.',subunit,'.fasta',sep='')
  phydat<-read.phyDat(alifile,format='fasta',type='DNA')
  distma<-dist.ml(phydat,model='F81')
  tre<-NJ(distma)
  write.tree(tre,file=paste('NJ.',subunit,'.tree.nwk',sep=''))
 }
setwd(gw)
}
b40583aa54a76098aceb2afcf544acfe396d207a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/GPareto/examples/crit_EMI.Rd.R | 3684e95d533f0d9c2904d965e8c8eb7414b9a100 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,503 | r | crit_EMI.Rd.R | library(GPareto)
### Name: crit_EMI
### Title: Expected Maximin Improvement with m objectives
### Aliases: crit_EMI
### ** Examples
#---------------------------------------------------------------------------
# Expected Maximin Improvement surface associated with the "P1" problem at a 15 points design
#---------------------------------------------------------------------------
set.seed(25468)
library(DiceDesign)
n_var <- 2
f_name <- "P1"
n.grid <- 21
test.grid <- expand.grid(seq(0, 1, length.out = n.grid), seq(0, 1, length.out = n.grid))
n_appr <- 15
design.grid <- round(maximinESE_LHS(lhsDesign(n_appr, n_var, seed = 42)$design)$design, 1)
response.grid <- t(apply(design.grid, 1, f_name))
Front_Pareto <- t(nondominated_points(t(response.grid)))
mf1 <- km(~., design = design.grid, response = response.grid[,1])
mf2 <- km(~., design = design.grid, response = response.grid[,2])
EMI_grid <- apply(test.grid, 1, crit_EMI, model = list(mf1, mf2), paretoFront = Front_Pareto,
critcontrol = list(nb_samp = 20))
filled.contour(seq(0, 1, length.out = n.grid), seq(0, 1, length.out = n.grid), nlevels = 50,
matrix(EMI_grid, nrow = n.grid), main = "Expected Maximin Improvement",
xlab = expression(x[1]), ylab = expression(x[2]), color = terrain.colors,
plot.axes = {axis(1); axis(2);
points(design.grid[,1], design.grid[,2], pch = 21, bg = "white")
}
)
bd2d858fa76beb70204eadb68c2cd4963c9c713c | 5bd4b82811be11bcf9dd855e871ce8a77af7442f | /gap/man/LD22.Rd | a8a1d539c814cd8bbce44a375db108611577b273 | [] | no_license | jinghuazhao/R | a1de5df9edd46e53b9dc90090dec0bd06ee10c52 | 8269532031fd57097674a9539493d418a342907c | refs/heads/master | 2023-08-27T07:14:59.397913 | 2023-08-21T16:35:51 | 2023-08-21T16:35:51 | 61,349,892 | 10 | 8 | null | 2022-11-24T11:25:51 | 2016-06-17T06:11:36 | R | UTF-8 | R | false | true | 1,505 | rd | LD22.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/2ld.R
\name{LD22}
\alias{LD22}
\title{LD statistics for two diallelic markers}
\usage{
LD22(h, n)
}
\arguments{
\item{h}{a vector of haplotype frequencies.}
\item{n}{number of haplotypes.}
}
\value{
The returned value is a list containing:
\itemize{
\item h the original haplotype frequency vector.
\item n the number of haplotypes.
\item D the linkage disequilibrium parameter.
\item VarD the variance of D.
\item Dmax the maximum of D.
\item VarDmax the variance of Dmax.
\item Dprime the scaled disequilibrium parameter.
\item VarDprime the variance of Dprime.
\item x2 the Chi-squared statistic.
\item lor the log(OR) statistic.
\item vlor the var(log(OR)) statistic.
}
}
\description{
LD statistics for two diallelic markers
}
\details{
It is possible to perform a permutation test of \eqn{r^2} by re-ordering the genotype through
R's sample function, obtaining the haplotype frequencies by \code{\link{gc.em}}
or \code{\link{genecounting}}, supplying the estimated haplotype frequencies to
the current function and record x2, and comparing the observed x2 and that from the
replicates.
}
\note{
extracted from 2ld.c, worked 28/6/03, tables are symmetric do not fix, see kbyl below
}
\examples{
\dontrun{
h <- c(0.442356,0.291532,0.245794,0.020319)
n <- 481*2
t <- LD22(h,n)
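# Sketch of the permutation test described in Details (illustrative only):
# the real test permutes genotypes and re-estimates h with gc.em() or
# genecounting(); shuffling h directly is just a placeholder for that step.
# x2.obs <- t$x2
# x2.perm <- replicate(1000, LD22(sample(h), n)$x2)
# mean(x2.perm >= x2.obs) # permutation p-value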
}
}
\references{
\insertRef{zabetian03}{gap}
\insertRef{zapata97}{gap}
}
\seealso{
\code{\link{LDkl}}
}
\author{
Jing Hua Zhao
}
\keyword{models}
e95c0a9ee76cf6d7f958b0def87cd518906e8209 | 68fbefe55a62483086fb54bb516b069cef3229f9 | /global.R | fbb481469092bb713ec78142b823e3867f7f1336 | [] | no_license | Argaadya/shiny_covid | f9ee81527662d5fbb77a11761520882403fa01a9 | a2afc1acee1ec54beb343989e3b67975756549dc | refs/heads/master | 2022-12-19T02:31:40.571583 | 2020-09-18T08:05:06 | 2020-09-18T08:05:06 | 292,202,042 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,309 | r | global.R | library(shiny)
library(shinydashboard)
library(dashboardthemes)
# data wrangling
library(dplyr)
library(tidyr)
library(lubridate)
# visualization
library(ggplot2)
library(plotly)
library(scales)
library(glue)
library(leaflet)
# Import Data from Johns Hopkins University
case_confirmed <- read.csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv",
check.names = F)
case_recover <- read.csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_recovered_global.csv",
check.names = F)
case_death <- read.csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv",
check.names = F)
# Data Cleansing
## Active Cases
case_confirmed <- case_confirmed %>%
pivot_longer(-c(`Province/State`, `Country/Region`, Lat, Long),
names_to = "Date", values_to = "Case") %>%
mutate(
Date = mdy(Date)
)
## Recovery
case_recover <- case_recover %>%
pivot_longer(-c(`Province/State`, `Country/Region`, Lat, Long),
names_to = "Date", values_to = "Recover") %>%
mutate(
Date = mdy(Date)
)
## Death
case_death <- case_death %>%
pivot_longer(-c(`Province/State`, `Country/Region`, Lat, Long),
names_to = "Date", values_to = "Death") %>%
mutate(
Date = mdy(Date)
)
## Merge/Join data.frame
covid <- case_confirmed %>%
left_join(case_recover) %>%
left_join(case_death) %>%
mutate(
cfr = Death/Case
) %>%
rename(State = `Province/State`,
Country = `Country/Region`
) %>%
filter(!(Country %in% c("Diamond Princess", "MS Zaandam")))
# Data from the latest date only
covid_update <- covid %>%
filter(Date == max(Date))
# Theme for Visualization
theme_algo <- theme(
panel.background = element_rect(fill = "white"),
panel.grid.major = element_line(colour = "gray80"),
panel.grid.minor = element_blank(),
plot.title = element_text(family = "serif", size = 18)
)
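# Optional sanity check when sourcing interactively (not run by the deployed
# app); assumes the JHU downloads above succeeded.
if (interactive()) {
  print(
    covid %>%
      group_by(Date) %>%
      summarise(Case = sum(Case)) %>%
      ggplot(aes(x = Date, y = Case)) +
      geom_line() +
      labs(title = "Worldwide Confirmed Cases") +
      theme_algo
  )
}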
9fb815b392a4149ec01eae8948a33671f9bd846e | e6366e79d87ff74e54e73776690265b49c0c74fa | /cachematrix.R | 3968c54afeaad9fcaa28ffb804be1057179d9303 | [] | no_license | Nicbars/ProgrammingAssignment2 | c4d90aa0b4ed40b027155263fbae05574e4fad08 | 0db75fc440504381290a6302aea09620a97d964b | refs/heads/master | 2023-08-24T07:52:59.726355 | 2021-10-27T13:02:43 | 2021-10-27T13:02:43 | 414,578,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 820 | r | cachematrix.R | ## The goal in this repository is to write functions
## using "makeCacheMatrix" to get the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
  setmatrixinverse <- function(inverse) inv <<- inverse
  getmatrixinverse <- function() inv
list(set = set, get = get,
setmatrixinverse = setmatrixinverse,
       getmatrixinverse = getmatrixinverse)
}
## Use cacheSolve to compute the inverse of the matrix from the makeCacheMatrix
## When the inverse is computed, cachesolve will get the inverse from the cache
cacheSolve <- function(x, ...) {
  inv <- x$getmatrixinverse()
if(!is.null(inv)) {
message("getting cached result")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
  x$setmatrixinverse(inv)
inv
}
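## Example usage (illustrative):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)  # computes the inverse and caches it
## cacheSolve(m)  # second call returns the cached inverse with a message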
528a853c46888690c6531c63c3242b605d5f10ba | 40418557df079d403dcbbd0840711e6a924cafda | /deepsolar.R | 35400e19b1e04d8caf99c6e65290cf5bc1c1a889 | [] | no_license | Angel-GM/deepsolar | 4041ef9ab99559fffcd43fe0d659dcd93d15aa5d | e5802c8b697c71dd0a797af376a062b7b5c11364 | refs/heads/master | 2022-08-03T08:27:19.942354 | 2020-05-19T10:04:26 | 2020-05-19T10:04:26 | 265,210,632 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,258 | r | deepsolar.R |
library(dplyr)
library(glmnet)
library(randomForest)
library(ggplot2)
library(reshape2)
library(gridExtra)
library(cowplot)
solar = read.csv('deepsolar_ny.csv' )
set.seed(0)
# Exclude the target variable and other non-feature columns.
X = solar %>% select(-solar_system_count,-county, -state) %>% data.matrix()
y = solar$solar_system_count %>% as.vector()
# Impute NAs with column means
for(i in 1:ncol(X)){
X[is.na(X[,i]), i] <- mean(X[,i], na.rm = TRUE)
}
mu = as.vector(apply(X, 2, 'mean'))
sd = as.vector(apply(X, 2, 'sd'))
X.orig = X
n = dim(X)[1] # n must be defined before the row-standardization loop below
for (i in c(1:n)){
X[i,] = (X[i,] - mu)/sd
}
X = X[,-1]
p = dim(X)[2]
n = dim(X)[1]
# test train split, 80%/20%
n.train = floor(.8*n)
n.test = n-n.train
M = 100
Rsq.test.rf = rep(0,M) # rf= randomForest
Rsq.train.rf = rep(0,M)
Rsq.test.en = rep(0,M) #en = elastic net
Rsq.train.en = rep(0,M)
Rsq.test.ls = rep(0,M) #ls = lasso
Rsq.train.ls = rep(0,M)
Rsq.test.rg = rep(0,M) #rg = ridge
Rsq.train.rg = rep(0,M)
for (m in c(1:M)) {
shuffled_indexes = sample(n)
train = shuffled_indexes[1:n.train]
test = shuffled_indexes[(1+n.train):n]
X.train = X[train, ]
y.train = y[train]
X.test = X[test, ]
y.test = y[test]
# fit elastic-net and calculate and record the train and test R squares
cv.fit.en = cv.glmnet(X.train, y.train, alpha = .5, nfolds = 10)
fit = glmnet(X.train, y.train, alpha = .5, lambda = cv.fit.en$lambda.min)
y.train.hat = predict(fit, newx = X.train, type = "response")
y.test.hat = predict(fit, newx = X.test, type = "response")
Rsq.test.en[m] = 1-mean((y.test - y.test.hat)^2)/mean((y - mean(y))^2)
Rsq.train.en[m] = 1-mean((y.train - y.train.hat)^2)/mean((y - mean(y))^2)
# fit lasso and calculate and record the train and test R squares
cv.fit.ls = cv.glmnet(X.train, y.train, alpha = 1, nfolds = 10)
fit = glmnet(X.train, y.train, alpha = 1, lambda = cv.fit.ls$lambda.min)
y.train.hat = predict(fit, newx = X.train, type = "response")
y.test.hat = predict(fit, newx = X.test, type = "response")
Rsq.test.ls[m] = 1-mean((y.test - y.test.hat)^2)/mean((y - mean(y))^2)
Rsq.train.ls[m] = 1-mean((y.train - y.train.hat)^2)/mean((y - mean(y))^2)
# fit ridge and calculate and record the train and test R squares
cv.fit.rg = cv.glmnet(X.train, y.train, alpha = 0, nfolds = 10)
fit = glmnet(X.train, y.train, alpha = 0, lambda = cv.fit.rg$lambda.min)
y.train.hat = predict(fit, newx = X.train, type = "response")
y.test.hat = predict(fit, newx = X.test, type = "response")
Rsq.test.rg[m] = 1-mean((y.test - y.test.hat)^2)/mean((y - mean(y))^2)
Rsq.train.rg[m] = 1-mean((y.train - y.train.hat)^2)/mean((y - mean(y))^2)
# fit RF and calculate and record the train and test R squares
rf = randomForest(X.train, y.train, mtry = sqrt(p), importance = TRUE)
y.test.hat = predict(rf, X.test)
y.train.hat = predict(rf, X.train)
Rsq.test.rf[m] = 1-mean((y.test - y.test.hat)^2)/mean((y - mean(y))^2)
Rsq.train.rf[m] = 1-mean((y.train - y.train.hat)^2)/mean((y - mean(y))^2)
# cat(sprintf("m=%3.f| Rsq.test.rf=%.2f, Rsq.test.en=%.2f| Rsq.train.rf=%.2f, Rsq.train.en=%.2f| \n", m, Rsq.test.rf[m], Rsq.test.en[m], Rsq.train.rf[m], Rsq.train.en[m]))
}
# Part b, box plots of R^2 (collect the per-method vectors into matrices so melt() works)
Rsq.test = cbind(rf = Rsq.test.rf, en = Rsq.test.en, ls = Rsq.test.ls, rg = Rsq.test.rg)
Rsq.train = cbind(rf = Rsq.train.rf, en = Rsq.train.en, ls = Rsq.train.ls, rg = Rsq.train.rg)
testplot = ggplot(melt(Rsq.test), aes(x=Var2, y=value)) + geom_boxplot() + scale_y_continuous(limits = c(.4,1)) +
labs(title='Test R^2', x='Method', y="R^2")
trainplot = ggplot(melt(Rsq.train), aes(x=Var2, y=value)) + geom_boxplot() + scale_y_continuous(limits = c(.4,1)) +
labs(title='Train R^2', x='Method', y="R^2")
grid.arrange(testplot, trainplot , nrow=1)
# Part c, 10fold CV curves
plot(cv.fit.rg, sub = 'Ridge')
plot(cv.fit.en, sub = 'Elastic Net')
plot(cv.fit.ls, sub = 'Lasso')
# Bootstrapping
bootstrapSamples = 100
beta.rf.bs = matrix(0, nrow = p, ncol = bootstrapSamples)
beta.en.bs = matrix(0, nrow = p, ncol = bootstrapSamples)
beta.ls.bs = matrix(0, nrow = p, ncol = bootstrapSamples)
beta.rg.bs = matrix(0, nrow = p, ncol = bootstrapSamples)
for (m in 1:bootstrapSamples){
bs_indexes = sample(n, replace=T)
X.bs = X[bs_indexes, ]
y.bs = y[bs_indexes]
# fit bs rf
rf = randomForest(X.bs, y.bs, mtry = sqrt(p), importance = TRUE)
beta.rf.bs[,m] = as.vector(rf$importance[,1])
# fit bs en
a = 0.5 # elastic-net
cv.fit = cv.glmnet(X.bs, y.bs, alpha = a, nfolds = 10)
fit = glmnet(X.bs, y.bs, alpha = a, lambda = cv.fit$lambda.min)
beta.en.bs[,m] = as.vector(fit$beta)
# fit bs ls
b = 1 # lasso
cv.fit = cv.glmnet(X.bs, y.bs, alpha = b, nfolds = 10)
fit = glmnet(X.bs, y.bs, alpha = b, lambda = cv.fit$lambda.min)
beta.ls.bs[,m] = as.vector(fit$beta)
# fit bs rg
c = 0 # rg
cv.fit = cv.glmnet(X.bs, y.bs, alpha = c, nfolds = 10)
fit = glmnet(X.bs, y.bs, alpha = c, lambda = cv.fit$lambda.min)
beta.rg.bs[,m] = as.vector(fit$beta)
cat(sprintf("Bootstrap Sample %3.f \n", m))
}
# calculate bootstrapped standard errors / alternatively you could use qunatiles to find upper and lower bounds
rf.bs.sd = apply(beta.rf.bs, 1, "sd")
en.bs.sd = apply(beta.en.bs, 1, "sd")
ls.bs.sd = apply(beta.ls.bs, 1, "sd")
rg.bs.sd = apply(beta.rg.bs, 1, "sd")
# fit rf to the whole data
rf = randomForest(X, y, mtry = sqrt(p), importance = TRUE)
# fit en to the whole data
a=0.5 # elastic-net
cv.fit.en = cv.glmnet(X, y, alpha = a, nfolds = 10)
fit.en = glmnet(X, y, alpha = a, lambda = cv.fit.en$lambda.min)
# fit ls to the whole data
b=1 # lasso
cv.fit.ls = cv.glmnet(X, y, alpha = b, nfolds = 10)
fit.ls = glmnet(X, y, alpha = b, lambda = cv.fit.ls$lambda.min)
# fit rg to the whole data
c=0 # ridge
cv.fit.rg = cv.glmnet(X, y, alpha = c, nfolds = 10)
fit.rg = glmnet(X, y, alpha = c, lambda = cv.fit.rg$lambda.min)
betaS.rf = data.frame(names(X[1,]), as.vector(rf$importance[,1]), 2*rf.bs.sd)
colnames(betaS.rf) = c( "feature", "value", "err")
betaS.en = data.frame(names(X[1,]), as.vector(fit.en$beta), 2*en.bs.sd)
colnames(betaS.en) = c( "feature", "value", "err")
betaS.ls = data.frame(names(X[1,]), as.vector(fit.ls$beta), 2*ls.bs.sd)
colnames(betaS.ls) = c( "feature", "value", "err")
betaS.rg = data.frame(names(X[1,]), as.vector(fit.rg$beta), 2*rg.bs.sd)
colnames(betaS.rg) = c( "feature", "value", "err")
rfPlot = ggplot(betaS.rf, aes(x=feature, y=value)) +
geom_bar(stat = "identity", fill="white", colour="black") +
geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
enPlot = ggplot(betaS.en, aes(x=feature, y=value)) +
geom_bar(stat = "identity", fill="white", colour="black") +
geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
lsPlot = ggplot(betaS.ls, aes(x=feature, y=value)) +
geom_bar(stat = "identity", fill="white", colour="black") +
geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
rgPlot = ggplot(betaS.rg, aes(x=feature, y=value)) +
geom_bar(stat = "identity", fill="white", colour="black") +
geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2)
grid.arrange(rfPlot, enPlot, lsPlot, rgPlot, nrow = 4)
# we need to change the order of factor levels by specifying the order explicitly.
betaS.rf$feature = factor(betaS.rf$feature, levels = betaS.rf$feature[order(betaS.rf$value, decreasing = F)])
betaS.en$feature = factor(betaS.en$feature, levels = betaS.rf$feature[order(betaS.rf$value, decreasing = F)])
betaS.ls$feature = factor(betaS.ls$feature, levels = betaS.rf$feature[order(betaS.rf$value, decreasing = F)])
betaS.rg$feature = factor(betaS.rg$feature, levels = betaS.rf$feature[order(betaS.rf$value, decreasing = F)])
topN = levels(betaS.rf$feature) # shared feature ordering, used by the panel limits below
rfPlot = ggplot(betaS.rf, aes(x=feature, y=value)) +
geom_bar(stat = "identity", fill="white", colour="black") +
geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2) +
scale_x_discrete(name="Feature Number") +
coord_flip() +labs(title = 'Random Forest', y = "Importance") + theme( axis.text.y = element_text(size = 6))
enPlot = ggplot(betaS.en, aes(x=feature, y=value)) +
geom_bar(stat = "identity", fill="white", colour="black") +
geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2) +
coord_flip() +labs(title = 'Elastic Net', y ='Importance') + scale_x_discrete(limits=rev(topN)) + theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank())
lsPlot = ggplot(betaS.ls, aes(x=feature, y=value)) +
geom_bar(stat = "identity", fill="white", colour="black") +
geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2) +
coord_flip() +labs(title = 'Lasso', y ='Importance') + scale_x_discrete(limits=rev(topN)) + theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank())
rgPlot = ggplot(betaS.rg, aes(x=feature, y=value)) +
geom_bar(stat = "identity", fill="white", colour="black") +
geom_errorbar(aes(ymin=value-err, ymax=value+err), width=.2) +
coord_flip() +labs(title = 'Ridge', y ='Importance') + scale_x_discrete(limits=rev(topN)) + theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
axis.ticks.y=element_blank())
plot_grid(rfPlot, rgPlot, enPlot, lsPlot, align = "h", nrow = 1, rel_widths = c(.28, .24, .24, .24))
## Residual plots (the rf, fit.en, fit.ls and fit.rg objects fit to the whole data above are reused here)
rf.res = (y - rf$predicted)
rg.res = (y - (X %*% fit.rg$beta))
en.res = (y - (X %*% fit.en$beta))
ls.res = (y - (X %*% fit.ls$beta))
residuals_all = cbind(rf.res, rg.res, en.res, ls.res) %>% as.matrix()
colnames(residuals_all) = c("Random Forest", "Ridge" , "Elastic Net", "Lasso")
# Violin plot of residuals
ggplot(melt(residuals_all), aes(x=Var2, y=value)) + geom_violin() + scale_y_continuous() +
  labs(title='Residuals of All Methods', x='Method', y="Residuals") +
  stat_summary(fun.data=mean_sdl, fun.args=list(mult=1), geom="crossbar", color="black")
ff34d55868bf21519055c0c31d39a9b9ed95820e | 356a111876127015ba131f1ff4891901016f1ad9 | /STATISTICAL_INFERENCE/ScrptR_examples_sampling distribution.R | b12991f103e379c9dcb27ff2456e0f193ce97ff1 | [] | no_license | htnani/IBSTATB | 0deb7c07b6f64861732e717c1ab0455dcc13efe0 | bd92145ed906f88db33240fe75de6da92767ca7b | refs/heads/master | 2020-04-01T20:08:21.206493 | 2017-09-12T13:52:33 | 2017-09-12T13:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,343 | r | ScrptR_examples_sampling distribution.R | library(lattice)
# Example: Sampling distribution of the variance
# install.packages("lattice") # run once if lattice is not installed
library(lattice)
n<-5
vec.var<-c(rep(0,500))
for(i in 1:500)
{x<-rnorm(n,mean=5,sd=2)
vec.var[i]<-var(x)*(n-1)/(2^2)}
qqmath(vec.var,distribution=function(p)qchisq(p,n-1),
       main=paste('Q-Q Plot Chi-Square, n =',n),
       xlab="Theoretical quantiles",ylab="Empirical quantiles")
#############################################
n<-15
vec.var<-c(rep(0,500))
for(i in 1:500)
{x<-rnorm(n,mean=5,sd=2)
vec.var[i]<-var(x)*(n-1)/(2^2)}
qqmath(vec.var,distribution=function(p)qchisq(p,n-1),
       main=paste('Q-Q Plot Chi-Square, n =',n),
       xlab="Theoretical quantiles",ylab="Empirical quantiles")
#############################################
n<-30
vec.var<-c(rep(0,500))
for(i in 1:500)
{x<-rnorm(n,mean=5,sd=2)
vec.var[i]<-var(x)*(n-1)/(2^2)}
qqmath(vec.var,distribution=function(p)qchisq(p,n-1),
       main=paste('Q-Q Plot Chi-Square, n =',n),
       xlab="Theoretical quantiles",ylab="Empirical quantiles")
#############################################
n<-60
vec.var<-c(rep(0,500))
for(i in 1:500)
{x<-rnorm(n,mean=5,sd=2)
vec.var[i]<-var(x)*(n-1)/(2^2)}
qqmath(vec.var,distribution=function(p)qchisq(p,n-1),
       main=paste('Q-Q Plot Chi-Square, n =',n),
       xlab="Theoretical quantiles",ylab="Empirical quantiles")
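#############################################
# Compact equivalent of the four blocks above (illustrative: it draws fresh
# random samples, so the plots will not match the ones above exactly)
for (n in c(5, 15, 30, 60)) {
  vec.var <- replicate(500, var(rnorm(n, mean = 5, sd = 2)) * (n - 1) / 4)
  print(qqmath(vec.var, distribution = function(p) qchisq(p, n - 1),
               main = paste('Q-Q Plot Chi-Square, n =', n),
               xlab = "Theoretical quantiles", ylab = "Empirical quantiles"))
}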
b251fc8ed150128be83e1a198b4bd2f67ed6b2e1 | 126053dbd6fae0697106d6c6ef9d33430e27848e | /Basics of Language/whileloop.R | b9bf394d67c9acbce08c00b90ee19f7d289175ed | [] | no_license | 777shipra/R-Beginners | 7cff5c02b7fe61bfcba9667f69cd894d992ee1f0 | cee55837a0fec006544a4d5aa01e3c7fc31efe8b | refs/heads/master | 2020-03-31T02:14:03.206678 | 2018-10-20T13:52:58 | 2018-10-20T13:52:58 | 151,814,057 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 268 | r | whileloop.R | #<-------------------Structure----------->
while(condition){
statements
}
#<-----------------Examples------------------->
while(TRUE){
print("hello")
}
# press Esc to stop the infinite loop above
counter<-1
while (counter<12){
print(counter)
counter<-counter+1
} |
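#<-----------------Equivalent repeat loop (illustrative)--------------->
# R has no do-while; repeat runs the body at least once and exits on break
counter<-1
repeat{
  print(counter)
  counter<-counter+1
  if(counter>=12) break
}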
38855280e8ca8cd8fd43b5b49cc6162b5ca7ea6b | 266fd6d604513251119fc891f4b389fca9b73ed0 | /R/theme.R | 70deb8bb49707f84c5e545c6cb4c9acc59f1a1c5 | [] | no_license | christopherkenny/ei | 58b511984556d4cf71d56e56333c3a83eae0d7b2 | c28b8eb59e6a2ce5fb1cbedacc770b478b1a6a06 | refs/heads/master | 2023-09-01T11:35:47.665557 | 2021-09-09T16:28:13 | 2021-09-09T16:28:13 | 400,650,628 | 0 | 0 | null | 2021-09-09T16:28:14 | 2021-08-27T22:39:11 | R | UTF-8 | R | false | false | 118 | r | theme.R | theme_ei <- function() {
ggplot2::theme_bw() +
ggplot2::theme(text = ggplot2::element_text(family = "Times"))
}
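# Illustrative usage (kept commented: this file is sourced as package code,
# and it assumes a "Times" font is available to the graphics device):
# ggplot2::ggplot(mtcars, ggplot2::aes(wt, mpg)) +
#   ggplot2::geom_point() +
#   theme_ei()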
771767a40483dbbbfb451b4d9362b02645f06fb2 | e21e28dcbe64bff4766b3b8607ceb48e7d8727ac | /run_analysis.R | e57d39780250557c0ef1d1a54f2a810c307fb2d6 | [] | no_license | darrenredmond/getting_cleaning_data_project | 29187877a748b19bf0a6e5f0494750f9c544a9b6 | 9b1694fe8cca6d16f66e7a5bf00c38982df4b17b | refs/heads/master | 2021-01-11T09:15:59.258612 | 2016-12-22T23:43:58 | 2016-12-22T23:43:58 | 77,176,413 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,706 | r | run_analysis.R | # set the working directory.
setwd("~/dev/coursera/johns_hopkins/getting_cleaning_data_project")
library(dplyr) # needed below for mutate(), group_by() and summarise_each()
# read in the feature
feature_data <- read.table('UCI HAR Dataset/features.txt')
# read in the activity data
activity_data <- read.table('UCI HAR Dataset/activity_labels.txt')
names(activity_data) <- c('y', 'activity')
# read in the test data
x_test_data <- read.table('UCI HAR Dataset/test/X_test.txt')
y_test_data <- read.table('UCI HAR Dataset/test/y_test.txt')
names(y_test_data)[1] <- 'y'
subject_test_data <- read.table('UCI HAR Dataset/test/subject_test.txt')
names(subject_test_data)[1] <- 'subject'
# read in the train data
x_train_data <- read.table('UCI HAR Dataset/train/X_train.txt')
# 4. Appropriately labels the data set with descriptive variable names.
# set the names of x to be the features
names(x_test_data) <- feature_data$V2
names(x_train_data) <- feature_data$V2
y_train_data <- read.table('UCI HAR Dataset/train/y_train.txt')
names(y_train_data)[1] <- 'y'
subject_train_data <- read.table('UCI HAR Dataset/train/subject_train.txt')
names(subject_train_data)[1] <- 'subject'
# combine the columns for test and train data
test_data <- cbind(x_test_data, y_test_data, subject_test_data)
train_data <- cbind(x_train_data, y_train_data, subject_train_data)
# 1. Merges the training and the test sets to create one data set.
# combine the row data from test and train data sets
full_data <- rbind(test_data, train_data)
# play with extracting mean and std column names.
has_mean <- function(v) {
grepl('mean', v)
}
has_std <- function(v) {
grepl('std', v)
}
valid_column <- function(v) {
has_mean(v) || has_std(v)
}
matching_columns <- Filter(valid_column, names(full_data))
names(full_data)
# create the mean and standard deviation data variables for all columns with mean/std in it.
mean_std_data <- full_data[,grepl("mean", colnames(full_data)) | grepl("std", colnames(full_data))]
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
mean_std_matrix <- rbind(apply(full_data, 2, mean), apply(full_data, 2, sd))
rownames(mean_std_matrix) <- c('mean', 'sd')
mean_std_matrix
# 3. Uses descriptive activity names to name the activities in the data set
names(full_data)
full_data <- mutate(full_data, activity_id = y)
full_data_activity <- merge(full_data, activity_data, by.x="y", by.y="y")
head(full_data_activity)
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable
# for each activity and each subject.
grouped_average_data <- full_data_activity %>%
group_by(subject, activity) %>%
summarise_each(funs(mean(., na.rm=TRUE)))
View(grouped_average_data)
f3792ac5613dac656a9cd82cb0946c1a1abf87c9 | 7db7ca4aea3b8d0557248beb1b4f559e6eb10809 | /R/onLoad.R | 4ee90abfe5cda01ae36d205cacd8e42cc67dcf61 | [] | no_license | tera-insights/gtBase | aabbbb7ec58bf1beb75879decf35832e628874d9 | bfbc7459923456895ab99143a5020bdfdf536720 | refs/heads/master | 2020-05-22T00:03:44.268580 | 2017-06-06T14:42:37 | 2017-06-06T14:42:37 | 25,822,663 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,869 | r | onLoad.R | .onAttach <- function(libname, pkgname) {
grokit <<- new.env()
## A list of alias names that have been generated. It is kept in the form of
## type -> vector pairs. Tame should be the type of name being generated, such
## as waypoint, input, output, etc. Each vector should be named and numeric,
## where a given name is mapped to the number of times it has been used. Each
## time it is used, its mapped value is increment so that the generated names
## are name_1, name_2, name_3, and so forth. Names should avoid ending in an
## underscore followed by a number to avoid clashing.
grokit$names <- list()
grokit$alias <- list()
## A list of name -> expressions of inputs.
grokit$expressions <- list()
## The set of Grokit PHP/C++ libraries to load for queries being ran.
grokit$libraries <- c("base", "statistics")
grokit$tasks <- list()
## A character vector of alias names in the order of which they were created.
grokit$waypoints <- character()
## A character vector of unique output names.
grokit$outputs <- character()
grokit$cluster <- list()
## Reading the various schema
grokit$schemas <- get.schema()
## These are used for the testing interface
grokit$tests <- character()
## Plug-ins are functions that automatically alter the query plan.
## See `plugins` for more information.
grokit$plugins <- list()
for (class in c("Filter", "Generated", "GF", "GI", "GIST", "GLA", "Load", "Join"))
for (stage in c("before", "after"))
grokit$plugins[[class]][[stage]] <- list()
}
.onDetach <- function(libpath) {
rm(grokit, envir = .GlobalEnv)
}
.reset <- function() {
grokit$alias <- list()
grokit$expressions <- list()
grokit$libraries <- c("base", "statistics")
grokit$tasks <- list()
grokit$waypoints <- character()
grokit$outputs <- character()
grokit$cluster <- list()
}
|
f720ed14a161634e7899b47a588cf6fcedaf83f8 | 81780d7000220293b9cecb54d4def069faa7d649 | /R/define_observation_periods.R | 6d1dd7badf034a1b625b376b466f4406159a730b | [
"Apache-2.0"
] | permissive | pwatrick/DrugRepurposingToolKit | 8ef405a602e6e100306365e3c9acf9d4cd56bc2a | 8c0f8c26013b8efec5c89afb68f182e98794bc3c | refs/heads/main | 2023-04-18T10:57:15.211297 | 2022-08-09T11:14:26 | 2022-08-09T11:14:26 | 352,338,175 | 2 | 5 | null | null | null | null | UTF-8 | R | false | false | 1,710 | r | define_observation_periods.R | #' Define baseline and treatment periods
#'
#'
#' @description
#' \code{define_observation_periods} creates baseline and treatment period columns
#'
#' @details
#' This function defines the baseline and treatment period columns from
#' clinical data from electronic health record (EHR) databases that have
#' adopted the Observational Medical Outcomes Partnership (OMOP)
#' Common Data Model (CDM) format.
#'
#' Baseline period = "start_date" to "first_{drug}_exposure".
#' Treatment period = ("first_{drug}_exposure" + 30 days) to "last_{drug}_exposure".
#' If treatment period > 12 months, then treatment period =
#' ("first_{drug}_exposure" + 30 days) to "end_date".
#'
#' Updated: 2021-03-27
#'
#'
#' @param .data A tibble
#' @export
define_observation_periods <- function(.data, ...) {
#Calculate number of days between "first_drug_exposure" and "last_drug_exposure"
.data$drug_length <- lubridate::interval(.data$first_drug_exposure,
.data$last_drug_exposure)
.data$drug_length <- lubridate::time_length(.data$drug_length, "day")
#Remove patients with "drug_length" less than 30 days
.data <- .data %>%
dplyr::filter(drug_length >= 30)
#If "last_drug_exposure" occurs before "end_date", set "final_end_date" to "last_drug_exposure", else set to "end_date"
.data <- .data %>%
dplyr::mutate(
final_end_date = dplyr::if_else(last_drug_exposure <= end_date,
last_drug_exposure,
end_date)
)
#Get final columns of interest
.data <- .data %>%
dplyr::select(person_id, start_date, first_drug_exposure, final_end_date) %>%
dplyr::distinct()
}
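# Illustrative sketch (hypothetical one-row tibble with the OMOP-derived
# columns the function expects; kept commented, not part of the package):
# toy <- dplyr::tibble(
#   person_id = 1L,
#   start_date = as.Date("2010-01-01"),
#   first_drug_exposure = as.Date("2012-06-01"),
#   last_drug_exposure = as.Date("2013-09-01"),
#   end_date = as.Date("2014-01-01")
# )
# define_observation_periods(toy)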
dbe2763c3124167f889d994021ba7bc8cee893ff | c5e0e4f054c443978b44e5d78bdf670b131164a9 | /Recreate_geneLenDataBase.R | 1bf2020e4a5076510f9ebe37dbd5963a1bcc88ec | [] | no_license | Quarkins/goseq_dev | 82defe34641dc3c34c0fab0594589bbcf862a6c7 | af0906a6350b9ab6101a71cb26dc7cc5fdbec060 | refs/heads/master | 2021-01-10T16:08:26.360434 | 2016-03-09T21:10:24 | 2016-03-09T21:10:24 | 53,532,080 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 651 | r | Recreate_geneLenDataBase.R | ###################################
# Description: A little script to reproduce the dataset used for geneLenDataBase
# Author: Anthony Hawkins
# Date last modified: 07/03/16
####################################
source("downloadLengthfromUCSC.R")
library(limma)
#Read in file with common tables
common = read.table("myfiles.txt",stringsAsFactors = FALSE)
tmp = strsplit2(common[,1],"[.]")
common_df = data.frame(Genome=tmp[,1], track =tmp[,2])
#Make the function which produces an RData with gene length info
make_data<-function(x){
downloadLengthfromUCSC(x[[1]],x[[2]])
}
#Now create the files
apply(common_df,1,make_data)
#common_df
|
b74cf0d00966e05b5b3323453f74fb47120a16c9 | 7be2f6044afbbf654bebca5caa7a7e34b0e75d42 | /R/random_paths.R | bbd34a134e14311380ab49ad60c63fd56a1eae7e | [
"MIT"
] | permissive | KWB-R/kwb.pathdict | b6228fd423dbc6de33094e997644e0a6ed86dd33 | b107485e18408a1bfb0346c785ce28d90fa6b087 | refs/heads/master | 2022-06-16T01:50:12.815755 | 2020-01-10T13:53:33 | 2020-01-10T13:53:33 | 175,963,892 | 0 | 0 | MIT | 2019-03-17T17:17:06 | 2019-03-16T11:33:35 | R | UTF-8 | R | false | false | 2,907 | r | random_paths.R | # random_paths -----------------------------------------------------------------
#' Create Random File Paths Using English Words
#'
#' @param max_depth maximum path depth
#' @param min_chars least number of characters per folder or file name
#' @param max_elements maximum number of elements (files or subfolders) in a
#' folder
#' @param depth_to_leaf_weight function that calculates a weight from the given
#' path depth. The weight is used to increase the probability of a folder
#' element to be a file and not a subdirectory. By default the weight is
#' calculated as 1.2^depth, i.e. for a folder at depth 10 it is about six
#' times (1.2^10 = 6.19) more probable that its elements are files rather than
#' subfolders
#' @export
#' @examples
#' # Make this example reproducible
#' set.seed(12059)
#'
#' # Create random paths
#' paths <- kwb.pathdict::random_paths(max_depth = 5)
#'
#' # Show the random paths
#' paths
#'
#' # Frequency of path depths
#' table(lengths(kwb.file::split_paths(paths)))
#'
random_paths <- function(
max_depth = 5, min_chars = 5, max_elements = 10,
depth_to_leaf_weight = function(depth) 1.2^depth
)
{
random_paths_(max_depth, min_chars, max_elements, depth_to_leaf_weight)
}
# random_paths_ ----------------------------------------------------------------
#' @keywords internal
random_paths_ <- function(
max_depth = 5, min_chars = 5, max_elements = 10,
depth_to_leaf_weight = function(depth) 1.2^depth, depth = 0, leaf = FALSE,
debug_depth = 0
)
{
if (leaf || depth == max_depth) {
return(random_filenames(min_chars, size = 1))
}
parent <- sample(english_words(min_chars), 1)
(n_elements <- sample(max_elements, 1))
prob <- c(depth_to_leaf_weight(depth), 1)
is_leaf <- sample(c(TRUE, FALSE), n_elements, replace = TRUE, prob = prob)
paste0(parent, "/", unlist(lapply(seq_along(is_leaf), function(i) {
kwb.utils::catIf(
depth < debug_depth,
paste(rep(" ", depth), collapse = ""),
"Creating node ", i, "/", length(is_leaf), "\n"
)
random_paths_(
max_depth = max_depth,
min_chars = min_chars,
max_elements = max_elements,
depth_to_leaf_weight = depth_to_leaf_weight,
depth = depth + 1,
leaf = is_leaf[i]
)
})))
}
# random_filenames -------------------------------------------------------------
random_filenames <- function(min_chars = 4, max_elements = 10, size = NULL)
{
words <- english_words(min_chars)
extensions <- c("pdf", "doc", "xls", "R", "png", "jpg")
size <- kwb.utils::defaultIfNULL(size, sample(max_elements, 1))
paste0(sample(words, size), ".", sample(extensions, size, replace = TRUE))
}
# english_words ----------------------------------------------------------------
english_words <- function(min_chars = 0)
{
words <- qdapDictionaries::Fry_1000
words <- gsub("'", "_", words)
words[nchar(words) >= min_chars]
}
|
6b00c425a0da7c7468916bbce4c5b133a324167a | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610052252-test.R | 9752b5c3abfa932d61b1cbbf83fa0e46a8cbd536 | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 635 | r | 1610052252-test.R | testlist <- list(rates = numeric(0), thresholds = c(-2.30331110816477e-156, -2.30331110816477e-156, NaN, 2.81199605989981e-312, NaN, -8.22918610319053e+303, 4.46108959687689e-140, 3.60297094497336e-306, -1.06556334613796e-314, 9.7020880290895e-310, -1.72138794739967e-296, 2.81199605863994e-312, NaN, -8.22918610319053e+303, 0, -1.40444775900538e+306, NaN, 7.2911220195564e-304, 1.39067116124321e-309, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = numeric(0))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
ac06f3325611e3160189dba0cd28d652910e9940 | 41ba1d5e55d42ae9acc5ef3a25b964ef6762d066 | /man/wind.dl_2.Rd | bee65aed0d8237bcd6b8904539c964a00f503780 | [] | no_license | jabiologo/rWind | 81c4b8b92435c7eded44f08e8e12f5cda91b8f3b | 5df183cd7776ec9370c857e57afe3de2b48d1d48 | refs/heads/master | 2022-02-08T04:15:08.014617 | 2022-01-24T14:56:45 | 2022-01-24T14:56:45 | 99,371,080 | 26 | 12 | null | 2021-12-21T16:09:14 | 2017-08-04T18:46:25 | R | UTF-8 | R | false | true | 3,388 | rd | wind.dl_2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wind_functions2.R
\name{wind.dl_2}
\alias{wind.dl_2}
\alias{[[.rWind_series}
\title{Wind-data download}
\usage{
wind.dl_2(time, lon1, lon2, lat1, lat2, type = "read-data", trace = 1)
\method{[[}{rWind_series}(x, i, exact = TRUE)
}
\arguments{
\item{time}{a scalar or vector of POSIXt or Date objects, or a character
vector that can be transformed into those; see the example below.
There are currently these options at the GFS database for the hours:
00:00 - 03:00 - 06:00 - 09:00 - 12:00 - 15:00 - 18:00 - 21:00 (UTC) (TO).}
\item{lon1}{Western longitude}
\item{lon2}{Eastern longitude}
\item{lat1}{Southern latitude}
\item{lat2}{Northern latitude}
\item{type}{Output type. "read-data" is selected by default, creating an R
object. If you choose "csv", wind.dl_2 creates a CSV file in your working
directory named "wind_yyyy_mm_dd_tt.csv".}
\item{trace}{if trace = 1 (by default) track downloaded files}
\item{x}{object from which to extract element(s).}
\item{i}{indices specifying elements to extract.}
\item{exact}{Controls possible partial matching (not used yet).}
}
\value{
an object of class \code{rWind_series} or .csv file/s with
U and V vector components and wind direction and speed for each coordinate
in the study area defined by lon1/lon2 and lat1/lat2.
}
\description{
wind.dl_2 downloads time-series wind data from the Global Forecast System
(GFS) of the USA's National Weather Service (NWS)
(https://www.ncei.noaa.gov/products/weather-climate-models/global-forecast).
Wind data are taken from NOAA/NCEP Global Forecast System (GFS) Atmospheric
Model collection. Geospatial resolution is 0.5 degrees (approximately 50 km),
and wind is calculated for Earth surface, at 10 m. More metadata
information:
http://oos.soest.hawaii.edu/erddap/info/NCEP_Global_Best/index.html
}
\details{
To get the same format as wind.dl, you should run \code{tidy} function from
wind.dl_2 output.
The output type is determined by type="csv" or type="read-data". If
type="csv" is selected, the function creates a "wind_yyyy_mm_dd_tt.csv" file
that is downloaded at the work directory. If type="read-data" is selected,
an \code{rWind_series} object is created.
}
\note{
wind.dl_2 requires two dates that represent the boundaries of the time
lapse to download wind series data.
U and V vector components allow you to create wind averages or tendencies
for each coordinate at the study area. Longitude coordinates are
provided by GFS dataset in 0/360 notation and transformed internally into
-180/180. "dir" denotes where the
wind/sea current is going (toward), not where it is coming from.
}
\examples{
# Download wind for the Iberian Peninsula region at 2018, March 15, 9:00
\dontrun{
wind.dl_2("2018/3/15 9:00:00", -10, 5, 35, 45)
library(lubridate)
dt <- seq(ymd_hms(paste(2018, 1, 1, 00, 00, 00, sep = "-")),
ymd_hms(paste(2018, 1, 2, 21, 00, 00, sep = "-")),
by = "3 hours"
)
ww <- wind.dl_2(dt, -10, 5, 35, 45)
tidy(ww)
}
}
\references{
http://www.digital-geography.com/cloud-gis-getting-weather-data/#.WDOWmbV1DCL
http://oos.soest.hawaii.edu/erddap/griddap/NCEP_Global_Best.graph
}
\seealso{
\code{\link{wind.mean}}, \code{\link{wind2raster}},
\code{\link{wind.dl}}, \code{\link{as_datetime}}, \code{\link{as.POSIXct}}
}
\author{
Javier Fernández-López (jflopez.bio@gmail.com)
}
\keyword{~gfs}
\keyword{~wind}
a9233cf6fd9330737ec5963a2f78dde609773464 | 1b7e5fae2c969553d2d0a8c4e01e09f295595704 | /plot2.R | 1da8392baa244b131796e97936863ae9b3db8ed5 | [] | no_license | kmaratus/ExData_Plotting1 | d839f573a5b0ef199cf61f5226e0d20fbd91294e | fa27d0d81fb44deb97463a018abc18b32bb02e54 | refs/heads/master | 2021-01-12T06:19:44.609703 | 2016-12-25T22:49:52 | 2016-12-25T22:49:52 | 77,341,737 | 0 | 0 | null | 2016-12-25T20:27:59 | 2016-12-25T20:27:58 | null | UTF-8 | R | false | false | 1,044 | r | plot2.R | ##read data header into R
filename<-"household_power_consumption.txt"
header<-read.table(filename,header=TRUE,sep=";",na.strings = "?",nrow=1)
columnnames<-names(header)
ncolumn<-length(columnnames)
##read first column to find the row numbers of the start and end dates
data<-read.table(filename,header=TRUE,sep=";",na.strings = "?",colClasses = c(NA,rep("NULL",ncolumn-1)))
dataDate<-as.Date(data$Date,"%d/%m/%Y")
nstart<-which.max(dataDate=="2007-02-01")
nend<-which.max(dataDate=="2007-02-03")-1
##read data into R for the given dates
data<-read.table(filename,header=TRUE,sep=";",na.strings = "?",col.names = columnnames,skip = nstart-1,nrows = nend-nstart+1)
##make a plot and save it into PNG file
png(filename = "plot2.png",width = 480,height = 480,bg="transparent")
##combine date and time into one variable
DateTime<-as.POSIXct(paste(data$Date,as.character(data$Time)),format="%d/%m/%Y %H:%M:%S")
plot(DateTime,data$Global_active_power,type="n",xlab="",ylab="Global Active Power (kilowatts)")
lines(DateTime,data$Global_active_power)
dev.off() |
52a7b99868a9c6d91266e61f940ed4127397310d | 61c07605275b7d4d407a40c3bd754b2e686c4be3 | /fechas y tiempo.R | 46c8cc3e91cd12f500ddf2f77d37f91fdd200694 | [] | no_license | sauc117/Programacion_Actuarial_lll | 419b06660d301b0eadcc6b996425fa6c1cb78e9a | d176b02d635505f11a312baab9fa946ea37c185e | refs/heads/master | 2021-09-14T23:12:45.966516 | 2018-05-22T01:37:36 | 2018-05-22T01:37:36 | 119,411,262 | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 431 | r | fechas y tiempo.R |
x <- as.Date("1970-01-01")
x
unclass(x)
unclass(as.Date("1970-01-02"))
# POSIXct is a very large integer, useful for storing dates in a data frame
# POSIXlt is like a list and stores a set of useful components such as the day
# of the week, the day of the year, the day of the month, etc.
weekdays(as.Date("1998-07-23"))
weekdays(as.Date("1998-09-24"))
weekdays(as.Date("1998-09-24"))
a <- as.POSIXct("1998-09-24")
b <- as.POSIXlt("1998-09-24") |
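# POSIXlt exposes calendar components directly, while POSIXct is just the
# number of seconds since the epoch (illustrative; values depend on time zone)
unclass(a)  # one large number: seconds since 1970-01-01
b$wday      # day of the week (0 = Sunday)
b$yday      # day of the year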
1564a682be664f1a6d8bd3a97b954e7f87a79adc | 86920ac8061f5b29b86733ec332469bf83761202 | /rethinking/rethinking_var_sd_test.R | ddff2ea6d93c04847abac34e4ff9e796e95ce4a1 | [] | no_license | jebyrnes/temp_kelp_change | 64b5c54dd58213d51b356017e2512867ef156e5e | b8891d25038ffab02410e5afd4b81d0f0c4d4cea | refs/heads/master | 2021-10-11T14:11:31.283204 | 2021-10-04T19:53:09 | 2021-10-04T19:53:09 | 51,555,573 | 0 | 0 | null | 2021-10-04T19:53:10 | 2016-02-11T23:36:10 | R | UTF-8 | R | false | false | 815 | r | rethinking_var_sd_test.R | library(rethinking)
make_data <- function(slope =1, int = 1, sd_e = 5, group=1, x=1:20){
ret <- data.frame(x=x)
ret <- within(ret, {
y <- rnorm(length(x), int + slope*x, sd_e)
group <- group
})
ret
}
set.seed(31415)
my_df <- rbind(make_data(),
make_data(slope=5, sd_e=15, group=2),
make_data(slope=3, sd_e=8, group=3))
ggplot2::qplot(x, y, color=group, data=my_df)
mod <- alist(
#likelihood
y ~ dnorm(y_loc, sd_loc), # likelihood
#model
y_loc <- a[group] + b[group]*x,
sd_loc <- sd_e[group],
#priors
a[group] ~ dnorm(0,10),
b[group] ~ dnorm(0,10),
sd_e[group] ~ dcauchy(0,1) #prior for study level errors
)
fit <- map2stan(mod, data=my_df,
constraints=list(sd_e="lower=0") ,
start=list(sd_e=rep(1,3)))
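# Posterior summary; depth = 2 expands the per-group vectors a, b and sd_e
# (a quick check added for illustration):
precis(fit, depth = 2)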
e89666f3d7a451f325d2e8c7dad5765e09e93270 | 4f88e0337d3374fcf643717e41c8ac3d76f9709a | /R/search.R | 6ca8951bb5db551a90c2bd155c6525e2328d1a9d | [] | no_license | att/rcloud.solr | bc1283b2f50a6e68832b5132518803228bffa3e3 | af3d5d9f3d7d6b7bc1c26a7e03afee13a8ad457e | refs/heads/develop | 2022-03-26T23:26:05.358352 | 2019-12-20T16:04:15 | 2019-12-20T16:04:15 | 80,539,919 | 4 | 7 | null | 2018-02-15T14:18:53 | 2017-01-31T16:43:43 | R | UTF-8 | R | false | false | 2,712 | r | search.R |
#' Search RCloud Notebooks
#'
#' Main search function exposed as an OCAP to the client.
#'
#' @param query Search string
#' @param all_sources Logical: Search single or multiple solr instances?
#' @param sortby Passed to solr for sorting
#' @param orderby Passed to solr for sorting
#' @param start Passed to solr
#' @param pagesize Passed to solr
#' @param max_pages Upper bound on the number of result pages fetched
#' @param group.limit Passed to solr. Controls how many cells to highlight for each notebook hit.
#' @param hl.fragsize How many characters to return with the highlighting
#'
#' @return Search response after parsing
#' @export
#'
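#' @examples
#' \dontrun{
#' # Hypothetical call; assumes a running RCloud instance with solr configured:
#' rcloud.search("linear regression", pagesize = 5)
#' }
#'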
rcloud.search <-
function(query,
all_sources = FALSE,
sortby = "score",
orderby = "desc",
start = 0,
pagesize = 10,
max_pages = 20,
group.limit = 4,
hl.fragsize = 60) {
.SC$search(
query = query,
all_sources = all_sources,
sortby = sortby,
orderby = orderby,
start = start,
pagesize = pagesize,
max_pages = max_pages,
group.limit = group.limit,
hl.fragsize = hl.fragsize
)
}
#' Passthrough Notebook Search
#'
#' On description and optionally user. This does minimal processing server side to increase speed.
#'
#' @param description search string to match against description. Fuzzy matching and wildcarding is used.
#' @param user optional to specify a user
#' @inheritParams rcloud.search
#'
#' @return Search result direct from solr with no parsing
#' @export
#'
#' @examples
#' \dontrun{
#' rcloud.search.description("test", user = "juninho")
#' }
rcloud.search.description <- function(description, user = NULL, start = 0,
pagesize = 100, sortby = "description",
orderby = "desc") {
url <- rcloud.support:::getConf("solr.url")
if (is.null(url))
stop("solr is not enabled")
user <- if (!is.null(user) && user!="") paste0(" AND user: ", user, "*~") else " "
descSplit <- unlist(strsplit(description, split = c(" |\\+|\\\\|/")))
description <- paste0(descSplit, "*~", collapse = " AND ")
query <- paste0("description: (", description, ")",
user,
" AND doc_type:notebook")
solr.query <- list(
q = query,
start = start,
rows = pagesize,
indent = "true",
fl = "description,id,user,updated_at,starcount",
sort = paste(sortby, orderby)
)
# pass it straight back no post-processing
.solr.get(
solr.url = url,
query = solr.query,
solr.auth.user = rcloud.support:::getConf("solr.auth.user"),
solr.auth.pwd = rcloud.support:::getConf("solr.auth.pwd")
)
}
007cb0d2e6492f12e4ba9a52b0005bea87489e34 | 4c6647e21d6015b2729bc4c2f6e4c9e0ee8a13ab | /man/dictHomerMotifs.Rd | dc31ebeac5c205b7b710695b433fb1921bc7f053 | [] | no_license | mireia-bioinfo/maRge | 67cfd91807e605fce540d4c4f96e55b80e179446 | 99f5823cfb61fe0b0d5700a473f067817fa87595 | refs/heads/master | 2021-06-24T18:00:24.226520 | 2017-08-31T10:23:23 | 2017-08-31T10:23:23 | 94,434,545 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 766 | rd | dictHomerMotifs.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dictHomerMotifs.R
\docType{data}
\name{dictHomerMotifs}
\alias{dictHomerMotifs}
\title{Dictionary of Homer Motifs}
\format{A data frame with the following variables:
\describe{
\item{Motif.Name}{Name of the motif, as output by HOMER.}
\item{Motif.Symbol}{Section of the Motif.Name including the symbol.}
\item{external_gene_name}{Standardized gene symbol.}
\item{ensembl_gene_id}{ENSEMBL gene id.}
}}
\source{
HOMER (\url{http://homer.ucsd.edu/homer/})
}
\usage{
dictHomerMotifs
}
\description{
A dataset containing the names of the motifs used by HOMER and their
corresponding symbols and ensembl_gene_id. Useful when trying to
intersect HOMER motif data with transcription data.
}
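\examples{
\dontrun{
# Hypothetical join with a HOMER known-motif results table (column name assumed):
merge(homer_results, dictHomerMotifs, by = "Motif.Name")
}
}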
\keyword{datasets}
96c5bfd31bb7d219d2e3eb1c5406432bcafd1bac | 826cc17cd51ccbceeb0b33ee23cab81ccee3932f | /tests/testthat/test-plot_indel_contexts.R | 0bde8c251427d56f02412eb0e7bcd3b6afd32c84 | [
"MIT"
] | permissive | UMCUGenetics/MutationalPatterns | 9b9d38a7ab69d7e29d8900f11fa9fb7ef328cfb9 | ca9caf0d0ba3cd1e13cb909009dc5b3b27b84631 | refs/heads/master | 2023-04-14T23:28:50.852559 | 2022-11-22T11:37:17 | 2022-11-22T11:37:17 | 53,409,261 | 86 | 37 | MIT | 2022-11-22T11:37:18 | 2016-03-08T12:10:11 | R | UTF-8 | R | false | false | 678 | r | test-plot_indel_contexts.R | context("test-plot_indel_contexts")
## Get indel counts
indel_counts <- readRDS(system.file("states/blood_indel_counts.rds",
package = "MutationalPatterns"
))
## Plot contexts
output <- plot_indel_contexts(indel_counts)
output_same_y <- plot_indel_contexts(indel_counts, same_y = TRUE)
output_extra_labels <- plot_indel_contexts(indel_counts, extra_labels = TRUE)
output_condensed <- plot_indel_contexts(indel_counts, condensed = TRUE)
test_that("Output has correct class", {
expect_true(inherits(output, c("gg")))
expect_true(inherits(output_same_y, c("gg")))
expect_true(inherits(output_extra_labels, c("gg")))
expect_true(inherits(output_condensed, c("gg")))
})
5875d40220aab9f14c8448aa399ae30b0dc8607f | 44cf65e7ab4c487535d8ba91086b66b0b9523af6 | /data/Newspapers/2000.04.29.editorial.23713.0325.r | 63f2024791d2aaa1cf18455d55f5efe4a1365eaa | [] | no_license | narcis96/decrypting-alpha | f14a746ca47088ec3182d610bfb68d0d4d3b504e | 5c665107017922d0f74106c13d097bfca0516e66 | refs/heads/master | 2021-08-22T07:27:31.764027 | 2017-11-29T12:00:20 | 2017-11-29T12:00:20 | 111,142,761 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,252 | r | 2000.04.29.editorial.23713.0325.r | exista o traditie ca de sarbatori oamenii sa isi trimita felicitari .
cu timpul , s - au adaugat si mici atentii . De cind au aparut privatizatii , acestia trimit urarile de bine insotite de o ciocolata , de o cutie cu sucuri , de o sticla cu vin , de o sticla cu sampanie , de un pachet cu cafea .
uneori si obiecte promotionale .
are sau nu haz acest tip de urare , numai Dumnezeu stie !
cu siguranta ca nu este insa condamnabil .
dar de aici si pina la spaga de sarbatori , pina la peschesul primit de functionarul public cu ocazia Craciunului sau a Pastilor e totusi cale lunga .
din pacate , la redactie nu avem atit de multe teleobiective de mare putere pentru a supraveghea toate casele demnitarilor sau ale inaltilor functionari din institutiile statului .
asa ca , in loc de pinda , echipa de redactori ai " Evenimentului zilei " a recurs la o provocare .
cite doi , insotiti de un fotograf , reporterii nostri au pretins ca sint membri de partid , consilieri , petenti , simpatizanti , oameni care au o belea si vor sa foloseasca prilejul sarbatorilor ori pentru a - si arata recunostinta , ori pentru a - si intari sansele de rezolvare a problemei .
si au batut pe la porti cu plocoane .
socul a fost enorm !
cu citeva mici exceptii , alesii poporului au primit peschesurile cu o naturalete paralizanta . Se purtau de parca ar fi clipit sau ar fi respirat .
un miel de la un simpatizant al partidului e mai banal decit un " buna ziua " .
Cartonul cu sticle de vin , damigenele , mieii si alte produse ale agriculturii noastre in tranzitie devenisera elemente banale de peisaj . Miercuri , joi si vineri , la institutiile statului erau convoaie de masini venite din provincie , toate incarcate cu plocoane .
unora le - am facut o surpriza enorma .
chiar le - am pus in plicuri memorii cu diverse plingeri adresate redactiei noastre .
sa speram ca se vor indura sa miste macar un deget pentru rezolvarea lor .
operatiunea MOV ( miel , oua , vin ) ne - a mai aratat ceva .
parlamentarii si functionarii publici primesc pomana cu o anume inconstienta .
nu se intreaba de unde vin cadourile , nu se gindesc ca ar putea fi imbolnaviti sau chiar otraviti .
le iau de - a valma si nu - si fac vreo problema .
unul dintre redactorii nostri chiar exclama , ii puteam otravi pe toti !
dar cea mai complicata problema ramine cea legata de morala .
absolut nici unul nu s - a intrebat de ce sa primeasca un plocon !
e de la sine inteles ca ne aflam in fata unui reflex bine consolidat .
in Romania , asemenea " cadouri " se dau chiar din anii de dinaintea comunismului .
sint in firea lucrurilor de multa vreme .
intre rude , ploconul e chiar o traditie .
dar ca reflex la functionarul de stat e cu totul altceva .
vrem , nu vrem , ne aflam in fata unei forme discrete de coruptie .
nimeni n - o mai incrimineaza .
de la miel si vin se trece usor la televizor color , la masina si apartament .
sa nu uitam ca , intr - o tara civilizata , un om si - a ratat cariera politica fiindca nu a platit taxele pentru femeia de serviciu care i - a facut curat in casa .
la noi , toti sintem revoltati de coruptia care ne inconjoara , dar dam si primim un miel ca si cum am da sau am primi un pix .
ba , am ajuns in situatia ca un biet functionar sau un cetatean care ajunge la o " marime " sa se simta prost ca se duce cu mina goala .
adeseori ni se intimpla ca la redactie sa vina cititori care ne cer sprijinul si care considera ca ar trebui sa ne gratuleze cu un cadou .
si asta face parte tot dintr - o mentalitate balcanica , dintr - un mod de a fi cu reguli care se topesc in nereguli .
si de acolo incep aranjamentele , combinatiile , eludarea legilor si favoritismele .
daca in aceste relatii marunte se poate aranja orice , daca functionarul public poate primi cu portbagajul fara ca macar sa clipeasca inseamna ca toate celelalte lucruri mari si importante nu sint cladite decit pe nisip miscator .
in loc de mari intrebari despre lume , despre viata , moarte si inviere , in aceste zile ne - am putea intreba despre duplicitate .
pe de o parte , toti sintem de acord ca vrem un climat social mai curat , dar cind e sa traim intr - un asemenea spirit , ne coplesesc apucaturi de oameni mici si repede trecatori .
|
f9d0a586cb18c340a9fc92297003f271b59e4d30 | 29585dff702209dd446c0ab52ceea046c58e384e | /lifecontingencies/inst/doc/introToLifecontingencies.R | 203d7f281c35a8d9a3e1b035d4911f097e037f6e | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,198 | r | introToLifecontingencies.R | ## ----setup, include=FALSE------------------------------------------------
library(knitr)
rm(list=ls())
## ----load, echo=TRUE, warning=FALSE, message=FALSE-----------------------
library(lifecontingencies) #load the package
## ----finmat1, echo=TRUE, warning=FALSE, message=FALSE--------------------
capitals <- c(-1000,200,500,700)
times <- c(0,1,2,5)
#calculate a present value
presentValue(cashFlows=capitals, timeIds=times,
interestRates=0.03)
## ----finmat2, echo=TRUE, warning=FALSE, message=FALSE--------------------
ann1 <- annuity(i=0.03, n=5, k=1, type="immediate")
ann2 <- annuity(i=0.03, n=5, k=12, type="due")
c(ann1,ann2)
## ----finmat3, echo=TRUE, warning=FALSE, message=FALSE--------------------
bondPrice<-5*annuity(i=0.03,n=10)+100*1.03^-10
bondPrice
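# Reading of the formula above: the price is the present value of a 10-year
# bond paying a coupon of 5 each year (the annuity term) plus a redemption
# value of 100 discounted for 10 years at 3%.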
## ----demo1---------------------------------------------------------------
#create an demo lifetable
xDemo<-seq(from=0,to=5,by=1)
lxDemo<-c(100,95,90,60,30,5)
lifetableDemo<-new("lifetable",x=xDemo,
lx=lxDemo,name="Demo")
## ----demo2---------------------------------------------------------------
data(demoIta) #using the internal Italian LT data set
lxIPS55M <- with(demoIta, IPS55M)
#performing some fixings
pos2Remove <- which(lxIPS55M %in% c(0,NA))
lxIPS55M <-lxIPS55M[-pos2Remove]
xIPS55M <-seq(0,length(lxIPS55M)-1,1)
#creating the table
ips55M <- new("lifetable",x=xIPS55M,
lx=lxIPS55M,name="IPS 55 Males")
## ----demo3, tidy=TRUE----------------------------------------------------
#decrements between age 65 and 70
dxt(ips55M, x=65, t = 5)
#probabilities of death between age 80 and 85
qxt(ips55M, x=80, t=2)
#expected curtate lifetime
exn(ips55M, x=65)
## ----createacttable, tidy=FALSE------------------------------------------
#creates a new actuarial table
ips55Act<-new("actuarialtable",
x=ips55M@x,lx=ips55M@lx,
interest=0.02,name="IPS55M")
## ----pureend, tidy=FALSE-------------------------------------------------
#compute APV
APV=50e3*Exn(actuarialtable =
ips55Act,x=30,n=35)
#compute Premium
P=APV/axn(actuarialtable =
ips55Act,x=30,n=35)
c(APV,P)
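# The level premium P spreads the benefit APV over the premium-paying period:
# it is the APV of the pure endowment divided by the APV of the annuity on
# (30) over 35 years, so benefits and premiums have equal expected present value.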
## ----endowmentcalcs, tidy=FALSE------------------------------------------
#defining the ranges
interest.range<-seq(from=0.015, to=0.035,by=0.001)
term.range<-seq(from=20, to=40,by=1)
#computing APV sensitivities
apv.interest.sensitivity<-sapply(interest.range,
FUN = "AExn",actuarialtable=ips55Act,x=30,n=30)
apv.term.sensitivity<-sapply(term.range,FUN = "AExn",
actuarialtable=ips55Act,x=30)
## ----endowmentplot, tidy=FALSE, echo=FALSE,fig.width=5, fig.height=5,fig.align='center'----
par(mfrow=c(1,2))
plot(x=interest.range, y=apv.interest.sensitivity,type="l",xlab="interest rate",ylab="APV",main="APV by Interest Rate")
plot(x=term.range, y=apv.term.sensitivity,type="l",xlab="term",ylab="APV",main="APV by term")
## ----reserves, tidy=FALSE------------------------------------------------
#compute the APV and premium
APV=100e3*Axn(actuarialtable = ips55Act,x=25,n=40)
P=APV/axn(actuarialtable = ips55Act,x=25,n=40)
#define a reserve function
reserveFunction<-function(t)
100e3*Axn(actuarialtable = ips55Act,x=25+t,n=40-t) -
P *axn(actuarialtable = ips55Act,x=25+t,n=40-t)
reserve<-sapply(0:40,reserveFunction)
## ----reserves2, tidy=FALSE, echo=FALSE, fig.align='center'---------------
plot(x=0:40,y=reserve,main="Reserve",
xlab="Policy Age",ylab="Reserve outstanding",type="l")
## ----AEXn1 , tidy=FALSE, size="small"------------------------------------
#analyzing an Endowment of 100K on x=40, n=25
#compute APV
APV=AExn(actuarialtable = ips55Act,x=40,n=25)
#sampling
AEXnDistr<-rLifeContingencies(n=10e3,
lifecontingency = "AExn",x = 40,
t=25,object = ips55Act)
## ----AExn1, tidy=FALSE, size="small"-------------------------------------
#assess whether the expected value matches the theoretical one
t.test(x=AEXnDistr,mu = APV)
## ----AEXn2, tidy=FALSE, echo=FALSE---------------------------------------
hist(AEXnDistr, main="Endowment Actuarial Value Distribution",
probability = TRUE, col="steelblue")
## ----leecarter01, tidy=FALSE, include=FALSE, results='hide'--------------
#library(demography)
#italy.demo<-hmd.mx("ITA", username="spedicato_giorgio@yahoo.it", password="mortality")
## ----leecarter0, tidy=FALSE, warning=FALSE, message=FALSE----------------
#load the package and the italian tables
library(demography)
#italyDemo<-hmd.mx("ITA", username="yourUN",
#password="yourPW")
load(file="mortalityDatasets.RData") #load the dataset
## ----leecarter1, tidy=FALSE, warning=FALSE-------------------------------
#calibrate lee carter
italy.leecarter<-lca(data=italyDemo,series="total",
max.age=103,adjust = "none")
#perform modeling of kt series
kt.model<-auto.arima(italy.leecarter$kt)
#projecting the kt
kt.forecast<-forecast(kt.model,h=100)
## ----leecarter2, tidy=FALSE, size='tiny'---------------------------------
#indexing the kt
kt.full<-ts(union(italy.leecarter$kt, kt.forecast$mean),
start=1872)
#getting and defining the life tables matrix
mortalityTable<-exp(italy.leecarter$ax
+italy.leecarter$bx%*%t(kt.full))
rownames(mortalityTable)<-seq(from=0, to=103)
colnames(mortalityTable)<-seq(from=1872,
to=1872+dim(mortalityTable)[2]-1)
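# The matrix above rebuilds central mortality rates from the Lee-Carter
# identity log m(x,t) = a_x + b_x * k_t, using the fitted ax/bx and the
# observed-plus-forecast kt series.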
## ----leecarter2plot, tidy=FALSE, echo=FALSE------------------------------
plot.ts(kt.full, main="historical and projected KT",xlab="year",
ylab="kt",col="steelblue")
abline(v=2009,col="darkred",lwd=2.5)
## ----leecarter3, tidy=FALSE----------------------------------------------
getCohortQx<-function(yearOfBirth)
{
colIndex<-which(colnames(mortalityTable)
==yearOfBirth) #identify
#the column corresponding to the cohort
  #define the probabilities from which
#the projection is to be taken
maxLength<-min(nrow(mortalityTable)-1,
ncol(mortalityTable)-colIndex)
qxOut<-numeric(maxLength+1)
for(i in 0:maxLength)
qxOut[i+1]<-mortalityTable[i+1,colIndex+i]
#fix: we add a fictional omega age where
#death probability = 1
qxOut<-c(qxOut,1)
return(qxOut)
}
## ----leecarter4, tidy=FALSE, size='scriptsize'---------------------------
#generate the life tables
qx1920<-getCohortQx(yearOfBirth = 1920)
lt1920<-probs2lifetable(probs=qx1920,type="qx",
name="Table 1920")
at1920<-new("actuarialtable",x=lt1920@x,
lx=lt1920@lx,interest=0.015)
qx1950<-getCohortQx(yearOfBirth = 1950)
lt1950<-probs2lifetable(probs=qx1950,
type="qx",name="Table 1950")
at1950<-new("actuarialtable",x=lt1950@x,
lx=lt1950@lx,interest=0.015)
qx1980<-getCohortQx(yearOfBirth = 1980)
lt1980<-probs2lifetable(probs=qx1980,
type="qx",name="Table 1980")
at1980<-new("actuarialtable",x=lt1980@x,
lx=lt1980@lx,interest=0.015)
## ----leecarter5, tidy=FALSE, echo=TRUE-----------------------------------
cat("Results for 1920 cohort","\n")
c(exn(at1920,x=65),axn(at1920,x=65))
cat("Results for 1950 cohort","\n")
c(exn(at1950,x=65),axn(at1950,x=65))
cat("Results for 1980 cohort","\n")
c(exn(at1980,x=65),axn(at1980,x=65))
|
ae45bd86a774a50fb8363bf1bb699c9e40171b6d | fb3ab9e1dd466ec076b5dcba19fe4419a1116656 | /Day 1/1.3.R | 4478001182c60a13ecf9fce267c9e6fbf8d1317f | [] | no_license | eElec/DALab | f4e67a034467286e5d7ee12f6dcbf7ac18b8c77a | 33c1ca7a5e91beded8c6b76264f8342295b0fbe4 | refs/heads/master | 2023-02-16T14:38:45.665532 | 2021-01-14T08:15:23 | 2021-01-14T08:15:23 | 329,549,595 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 134 | r | 1.3.R | roll=1806188
name="Adrish Aditya"
branch="IT"
print("Roll: ")
print(roll)
print("Name: ")
print(name)
print("Branch: ")
print(branch) |
5ee64d20fd48dae2855e76043c79a473a2564313 | de6c16a929ddc7b6d5e212203d62d5e8151d3037 | /Tutorial4_31.R | 2459efb9b9ae7e68fb46846a8db9e377f3796f00 | [] | no_license | arthurnovello/r_course | cea600f4c08b726f73d332703bc30be6bdb2d99d | 8acfebdbf6ac660a71c0553fd17c5827a8066174 | refs/heads/master | 2021-05-05T05:32:50.772137 | 2018-02-03T14:22:42 | 2018-02-03T14:22:42 | 118,685,522 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 262 | r | Tutorial4_31.R | # ------------ Operations with matrices ------------
Games
rownames(Games)
colnames(Games)
Games["LeBronJames", "2012"]
FieldGoals
FieldGoals/Games
round(FieldGoals/Games, 1)
round(MinutesPlayed/Games)
MinutesPlayed
round(FieldGoalAttempts/FieldGoals, 3)
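# Note: Games, FieldGoals, MinutesPlayed and FieldGoalAttempts are matrices
# assumed to be pre-loaded from the course workspace; this script does not
# create them.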
|
969bfce72a95d1b58340d15d8b5afa3c394d8b90 | e434c2f582adcc781e9fd0b7183899aba48e5dde | /scripts/prepdata/ausplots_sites.R | d8315ea4b916df36efa828f58ce4a50a6dcab0ce | [
"MIT"
] | permissive | rubysaltbush/flowering-period-climate | fa87156deebc524ea89fd26dc04776b6d603ecfd | b384e5f95df81dbf40f4d2f4f86dd268ea925a4b | refs/heads/main | 2023-04-17T20:07:30.215796 | 2022-05-12T06:32:51 | 2022-05-12T06:32:51 | 389,864,707 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 260 | r | ausplots_sites.R | # using ausplotsR package access site data from AusPlots and cache csv
# requires internet connection
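# cache_csv() appears to be a project helper defined elsewhere in this repo:
# it returns the cached CSV if present, otherwise runs the supplied function
# and caches its result.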
ausplots_sites <- cache_csv("data_cache/ausplots_sites.csv", function() {
get_ausplots(site_info = TRUE, veg.vouchers = FALSE, veg.PI = FALSE)$site.info
}) |
bb2df86fe73174367710b11a0abb70666016b59f | b50cbb8ef475fbed1f4f808b619ed63297aa8de4 | /PacketMetrics/Tests.R | 940bf17d01846e9f9d122f4d39254dcdef2f00e1 | [] | no_license | jac50/FinalYearProject | 49a7826814cd10996ed16e50aed479e92bf1bfb5 | fbc314d39083fb1442f03c39c7092925f11e34b3 | refs/heads/master | 2021-01-21T12:39:37.414929 | 2014-05-05T18:18:58 | 2014-05-05T18:18:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,112 | r | Tests.R | # Tests for RunHead
- Sample Size < 0
- Sample Size Too Big
- Sample Size not a number
- Directory not valid
- Original File not found
# Tests for createArguments
- argparse not compatible or not found?
# Tests for initLogger
- Input Tests (see the sketch after this section):
  + Quiet - TRUE, Verbose - FALSE. Expected Result: set to Quiet Mode (lvl 30)
  + Quiet - TRUE, Verbose - TRUE. Expected Result: throw error, set to Normal Mode
  + Quiet - FALSE, Verbose - FALSE. Expected Result: set to Normal Mode
  + Quiet - FALSE, Verbose - TRUE. Expected Result: set to Verbose Mode (lvl 10)
  + Quiet - NOT LOGICAL, Verbose - TRUE. Expected Result: set to Verbose Mode (lvl 10)
  + Quiet - TRUE, Verbose - NOT LOGICAL. Expected Result: set to Quiet Mode (lvl 30)
  + Quiet - NOT LOGICAL, Verbose - NOT LOGICAL. Expected Result: throw error, set to Normal Mode
- Other Tests:
+ Directory does not exist
+ Directory cannot be created
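A minimal sketch of the input tests above using testthat; the signature
initLogger(quiet, verbose) and its returning the configured level are
assumptions made for illustration, not taken from the actual code:

    test_that("initLogger resolves the quiet/verbose flags", {
      expect_equal(initLogger(quiet = TRUE,  verbose = FALSE), 30)
      expect_equal(initLogger(quiet = FALSE, verbose = TRUE),  10)
      expect_error(initLogger(quiet = TRUE,  verbose = TRUE))
    })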
# Tests for ParseFileName
- Input Tests:
- Other Tests:
# Tests for readFile
- Input Tests:
- Other Tests:
# Tests for purgeData
- Input Tests:
- Other Tests:
|
088e47dda78471f97b996f2b437d1ff91895afc5 | 8ad7f68413ee9a4ec8bd868bcb3566755833b56a | /generar_tipo_cambio_trimestral_anual.r | b4d7e1ee3eb38b850e64e9eeaec4bac7a275e028 | [] | no_license | droper/scriptsthesis | 124ce67c7083f022e21063a4135af797503a12aa | e16f8721b2ff5e60dff08445bd9eb3b3677df121 | refs/heads/master | 2021-01-25T08:48:12.036983 | 2015-06-13T01:51:43 | 2015-06-13T01:51:43 | 37,352,260 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,233 | r | generar_tipo_cambio_trimestral_anual.r | # Source directories for the data
DATA_TIPO_CAMBIO = "/home/pedro/univs/doctorado/tesis/tesis/material tesis/data_economica/"
TIPO_CAMBIO_CSV = "tipo_cambio.csv"
TIPO_CAMBIO_TRIM_CSV = "tipo_cambio_trimestral.csv"
TIPO_CAMBIO_ANUAL_CSV = "tipo_cambio_anual.csv"
# Read a financial information file
tipo_cambio = read.csv(file=paste(DATA_TIPO_CAMBIO, TIPO_CAMBIO_CSV, sep=""),
header=TRUE, sep=",", colClasses = c("character", "numeric"))
tc_trim = numeric(0)
per_trim = character(0)
anno = 1992
i = 3
per = 1
# Compute the quarterly averages
while (i <= nrow(tipo_cambio)){
promedio = (tipo_cambio[["TC"]][i] + tipo_cambio[["TC"]][i-1] + tipo_cambio[["TC"]][i-2])/3
tc_trim = c(tc_trim, promedio)
per_trim = c(per_trim, paste(anno,'-',per, sep="" ))
i = i + 3
  # If the period equals four, reset it to one
  # and increase the year by one
if (per == 4){
per = 1
anno = anno + 1
}
else {
per = per + 1
}
}
tc_anual = numeric(0)
per_anual = character(0)
anno = 1992
i = 12
# Compute the annual averages
while (i <= nrow(tipo_cambio)){
promedio = (tipo_cambio[["TC"]][i] + tipo_cambio[["TC"]][i-1] + tipo_cambio[["TC"]][i-2] + tipo_cambio[["TC"]][i-3]
+ tipo_cambio[["TC"]][i-4] + tipo_cambio[["TC"]][i-5] + tipo_cambio[["TC"]][i-6] + tipo_cambio[["TC"]][i-7]
+ tipo_cambio[["TC"]][i-8] + tipo_cambio[["TC"]][i-9] + tipo_cambio[["TC"]][i-10] + tipo_cambio[["TC"]][i-11])/12
tc_anual = c(tc_anual, promedio)
per_anual = c(per_anual, anno)
i = i + 12
anno = anno + 1
}
# Build the quarterly exchange-rate averages by combining the vectors
tipo_cambio_trim = data.frame(per_trim, tc_trim)
# Build the annual exchange-rate averages by combining the vectors
tipo_cambio_anual = data.frame(per_anual, tc_anual)
# Write the quarterly and annual exchange-rate data frames to CSV files
# (write.csv ignores a user-supplied 'sep', so that argument was dropped)
write.csv(tipo_cambio_trim, file=paste(DATA_TIPO_CAMBIO,
                                TIPO_CAMBIO_TRIM_CSV, sep=""), row.names=FALSE)
write.csv(tipo_cambio_anual, file=paste(DATA_TIPO_CAMBIO,
                                TIPO_CAMBIO_ANUAL_CSV, sep=""), row.names=FALSE)
|
437e7cba4e4420da82efebc66c8eab847977e801 | 68562f46424bf312d5fe070990243ae03ed1454e | /R/binarizeSNPs.R | 6ac42179eea1795a977fc4b2242080d8f589b9f1 | [
"Apache-2.0"
] | permissive | ANTsX/ANTsR | edb12114bc3d143c59ebd3947301de705ec51b63 | 8deb4d897fdb295a0213ca59e3bf1846f62ce99a | refs/heads/master | 2023-06-24T14:48:05.362501 | 2023-06-24T11:15:10 | 2023-06-24T11:15:10 | 5,782,626 | 86 | 32 | Apache-2.0 | 2023-06-17T12:15:50 | 2012-09-12T16:28:03 | R | UTF-8 | R | false | false | 836 | r | binarizeSNPs.R | .binarizeSNPs <- function(snps) {
if (nargs() == 0) {
print("Usage: x_b<-.binarizeSNPs( x ) ")
return(1)
}
nrep <- 2
binsnps <- (matrix(rep(NA, nrow(snps) * ncol(snps) * nrep), nrow = nrow(snps),
ncol = ncol(snps) * nrep))
binsnpsdf <- data.frame(matrix(rep(NA, nrow(snps) * ncol(snps) * nrep), nrow = nrow(snps),
ncol = ncol(snps) * nrep))
inds1 <- seq(1, (ncol(binsnps)), by = 2)
inds2 <- inds1 + 1
binsnps[, inds1] <- snps
binsnps[, inds2] <- snps
ww <- (binsnps[, inds1] == 2)
binsnps[, inds1][ww] <- 0
binsnps[, inds1][!ww] <- 1
ww <- (binsnps[, inds2] == 1)
binsnps[, inds2][ww] <- 0
binsnps[, inds2][!ww] <- 1
osnps <- data.frame(binsnps)
names(osnps)[inds1] <- paste(names(snps), ".1", sep = "")
names(osnps)[inds2] <- paste(names(snps), ".2", sep = "")
return(osnps)
}
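# Example with hypothetical input: each SNP column coded 0/1/2 is expanded
# into a pair of 0/1 indicator columns named <snp>.1 and <snp>.2, e.g.
#   .binarizeSNPs(data.frame(rs1 = c(0, 1, 2)))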
|
fa22f2d7609387ab14bbc30fddad121aed0dd640 | 55af8a760323f6a8f5e3542e404f23e87ebbbdca | /render_rmd.R | d813da084e8aa63f1e5195cc52fea229c4f0bbbf | [] | no_license | petermeissner/ecprwsmt16adcr | 79b8227f762d4960abe2a3417e76fc48032af28d | 03dc54c2cfa34eb5804bfec2ea29b9e24ee40ff7 | refs/heads/master | 2021-01-10T05:33:22.315542 | 2016-03-06T13:08:27 | 2016-03-06T13:08:27 | 53,254,841 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,010 | r | render_rmd.R | #!/usr/bin/env Rscript
# necessary package
library(stringr)
# extracting argument
file <- commandArgs(trailingOnly = TRUE) # everything after the script name
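# Usage sketch: `./render_rmd.R course/intro.Rmd` renders and purls one file;
# with no arguments, the block at the bottom re-renders every .Rmd under
# course/ whose PDF is missing or older than the source.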
# worker-function
render_rmd <- function(file, try=FALSE){
if( length(file) == 0 ) return("nothing to do")
if( length(file) > 1 ){
lapply(file, render_rmd, try=try )
}else{
file_r <- str_replace(file, ".Rmd", ".R")
if( try==TRUE ){
try(rmarkdown::render(file, "all", encoding="UTF-8") )
try( knitr::purl(input=file, output=file_r) )
}else{
rmarkdown::render(file, "all", encoding="UTF-8")
knitr::purl(input=file, output=file_r)
}
}
}
# doing-duty-to-do
if( length(file) == 0 ){
rmd <- list.files("course", pattern = ".Rmd$", recursive = TRUE, full.names = TRUE)
pdf <- str_replace(rmd, "Rmd$", "pdf")
rmd_mtime <- file.info(rmd)$mtime
pdf_mtime <- file.info(pdf)$mtime
iffer <- is.na(rmd_mtime > pdf_mtime) | rmd_mtime > pdf_mtime
render_rmd(rmd[iffer], try=FALSE)
}else{
render_rmd(file)
}
|
22c40c554ed648a5fd46fa5f3e087f124c24d2f9 | f33eb8d7112fcbc3ca78e0fa1eeb613364bc430c | /R/Orify.R | ae105c00f3eb08c63d06d65e53e357e6b1840abc | [] | no_license | paulhendricks/functools | d49d084b903735d95ec3624186d026cc1afc07fd | 63eb95d538864a953f13cb612359dfa9a8e365d5 | refs/heads/master | 2021-01-10T05:25:45.035861 | 2017-07-01T20:22:57 | 2017-07-01T20:22:57 | 36,077,427 | 13 | 1 | null | null | null | null | UTF-8 | R | false | false | 827 | r | Orify.R | #' Predicate function operator that creates new predicate functions linked by the || operator.
#'
#' @param ... n functions to apply in order from left to right
#' @return A predicate function linked by the || operator.
#' @family predicate function operators
#' @seealso \code{\link{Andify}} to create new predicate functions linked by the && operator.
#' @examples
#' # Examples
#' is_character_or_factor <- Orify(is.character, is.factor)
#' is_character_or_factor(letters) # TRUE
#' is_character_or_factor(factor(state.abb)) # TRUE
#' is_character_or_factor(1:100) # FALSE
#' @export
Orify <- function(...) {
fs <- lapply(list(...), match.fun)
first <- fs[[1]]
rest <- fs[-1]
function(...) {
out <- first(...)
for (f in rest) {
if (out) return(TRUE)
out <- `||`(out, f(...))
}
out
}
}
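# Short-circuit behaviour: once `out` becomes TRUE the remaining predicates
# are never evaluated, mirroring the semantics of `||`.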
|
c4c210ac3b755d8f9d55802c231c65142bbe466a | b8be9f57f05c5e279ff38ff1d0bb7efeb26b4308 | /man/consumption_vis.Rd | ab7b91724b4ed28185329ba57d2c6a0670c63da3 | [] | no_license | Ozeidi/water | aa96f8e451b008f0224da000e8e389a6df492273 | 9aa62d17b217d00346fac920e2d02c66798c2786 | refs/heads/master | 2020-06-17T08:02:26.012796 | 2019-07-08T16:36:39 | 2019-07-08T16:36:39 | 195,854,634 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,167 | rd | consumption_vis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/01.Visualisation.R
\name{consumption_vis}
\alias{consumption_vis}
\title{consumption_vis}
\usage{
consumption_vis(df, contracts_list = NA, scale = "discrete",
fixed_scale = TRUE, file_name = "p", path = getwd(),
output_format = c("png", "pdf"))
}
\arguments{
\item{df}{xts object of the meter consumption}
\item{contracts_list}{list of contracts to be investigated}
\item{scale}{type of scale used for the heatmap, continuous or discrete}
\item{fixed_scale}{Boolean indicating whether the scale should be fixed for all the plots or change dynamically per plot. We recommend keeping the scale fixed to enable comparison across different contracts. Default is TRUE.}
\item{file_name}{name of the file to be used for outputting the plotting results}
\item{path}{name of the path where plots will be written}
\item{output_format}{png or pdf format}
}
\value{
pdf or png output with the results
}
\description{
Generate diagnostic plots to analyse the consumption patterns
}
\examples{
\dontrun{consumption_vis(contract_lst, contracts_list = NA, scale = 'discrete', fixed_scale = TRUE)}
}
|
4631c101222cfa8da3090c1ffd88888769477fff | b2318f37b16834b822fd946b0586737cfeb45090 | /Clean data EC & PS - no join.R | 55913d067dc87567eae6f2cc7d30e3707b65e72d | [] | no_license | philipjxg/Scrape-Goats | fa0d89abbf54c656f480ab518f48d3d9a062d31c | 0a5dc61aa70431773f9c4d84a378fcfaea962f9c | refs/heads/master | 2020-04-06T06:59:42.681078 | 2016-08-23T13:41:39 | 2016-08-23T13:41:39 | 65,821,193 | 0 | 2 | null | 2016-08-19T13:55:25 | 2016-08-16T13:08:25 | null | UTF-8 | R | false | false | 7,025 | r | Clean data EC & PS - no join.R | library("readr")
library("purrr")
library("dplyr")
library("tidyr")
library("ggplot2")
library("stringr")
library("plyr")
##########################################################################################
##### Political Science ##################################################################
##########################################################################################
raw.data.PS=read.csv("C:\\Users\\PhilipJunXian\\OneDrive\\Polit\\Master\\Social Data Science\\Opgave\\staku.csv")
#########################################################
### Clean the data for NA and hyperlink
data.PS = raw.data.PS %>%
filter( !is.na(o_kar)==TRUE) %>%
select(-X, -id, -o_gns, -r_gns, -hyperlink)
#########################################################
### remove the "Ej mødt" (absent), "Ikke bestået" (failed) and "Syg" (ill) marks
### create aggregated grade variable
data.PS$s_kar = ifelse(is.na(data.PS$r_kar)==TRUE,data.PS$o_kar, data.PS$o_kar + data.PS$r_kar )
data.PS$kar.m = as.numeric(levels(data.PS$kar))[data.PS$kar]
data.PS=data.PS %>%
filter(!is.na(kar.m)==TRUE)
data.PS$kar.m = NULL
#########################################################
### create semester and year column
data.PS = data.PS %>%
separate(sem, c("semester", "year"), 7)
data.PS = as.data.frame(lapply(data.PS,function(x)
if(is.character(x)|is.factor(x)) gsub("Winter-","Fall",x) else x))
data.PS = as.data.frame(lapply(data.PS,function(x)
if(is.character(x)|is.factor(x)) gsub("Summer-","Spring",x) else x))
data.PS$semester = ifelse(is.na(data.PS$blok)==TRUE, as.character(data.PS$semester), "Summer" )
data.PS$blok = NULL
#########################################################
### correct grades and numeric variables
data.PS$o_kar = as.numeric(data.PS$o_kar) # assign the conversions (the bare calls discarded their results)
data.PS$r_kar = as.numeric(data.PS$r_kar)
data.PS$s_kar = as.numeric(data.PS$s_kar)
revalue(data.PS$kar, c("00" = "0", "02" = "2", "-3" = "-3")) -> data.PS$kar
data.PS$kar = data.PS$kar %>%
as.character() %>%
as.numeric()
#########################################################
#########################################################
#### preparing data frame for plotting graph
# select variables
data.PS.graf = data.PS %>%
select(year, semester, name, kar, s_kar)
data.PS.graf$product = data.PS.graf$s_kar * data.PS.graf$kar
data.PS.graf1 = aggregate(data.PS.graf$product, by=list(data.PS.graf$year, data.PS.graf$semester), FUN=sum, na.rm=TRUE)
colnames(data.PS.graf1)[3] = "sum.product"
data.PS.graf2 = aggregate(data.PS.graf$s_kar, by=list(data.PS.graf$year,data.PS.graf$semester), FUN=sum, na.rm=TRUE)
colnames(data.PS.graf2)[3] = "count"
data.PS.graf.plot = left_join(data.PS.graf1, data.PS.graf2)
colnames(data.PS.graf.plot)[1] = "year"
colnames(data.PS.graf.plot)[2] = "semester"
data.PS.graf.plot$average = data.PS.graf.plot$sum.product / data.PS.graf.plot$count
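# `average` is a weighted mean: the grade (kar) weighted by s_kar, which
# appears to count how many times each grade was awarded.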
### NOTE - drop Summer: Summer 2014 and 2015 - not representative: 1 and 5 courses respectively
# data.PS.graf.plot1 = data.PS.graf.plot[!(data.PS.graf.plot$semester=="Summer" ),]
##### Political Science END ##############################################################
##########################################################################################
##### Economics ##########################################################################
##########################################################################################
raw.data.EC=read.csv("C:\\Users\\PhilipJunXian\\OneDrive\\Polit\\Master\\Social Data Science\\Opgave\\polit.csv")
#########################################################
### Clean the data for NA and hyperlink
data.EC = raw.data.EC %>%
filter( !is.na(o_kar)==TRUE) %>%
select(-X, -id, -o_gns, -r_gns, -hyperlink)
#########################################################
### remove the "Ej mødt" (absent), "Ikke bestået" (failed) and "Syg" (ill) marks
### create aggregated grade variable
data.EC$s_kar = ifelse(is.na(data.EC$r_kar)==TRUE,data.EC$o_kar, data.EC$o_kar + data.EC$r_kar )
data.EC$kar.m = as.numeric(levels(data.EC$kar))[data.EC$kar]
data.EC=data.EC %>%
filter(!is.na(kar.m)==TRUE)
data.EC$kar.m = NULL
#########################################################
### create semester and year column
data.EC = data.EC %>%
separate(sem, c("semester", "year"), 7)
data.EC = as.data.frame(lapply(data.EC,function(x)
if(is.character(x)|is.factor(x)) gsub("Winter-","Fall",x) else x))
data.EC = as.data.frame(lapply(data.EC,function(x)
if(is.character(x)|is.factor(x)) gsub("Summer-","Spring",x) else x))
data.EC$semester = ifelse(is.na(data.EC$blok)==TRUE, as.character(data.EC$semester), "Summer" )
data.EC$blok = NULL
#########################################################
### correct grades and numeric variables
data.EC$o_kar = as.numeric(data.EC$o_kar) # assign the conversions (the bare calls discarded their results)
data.EC$r_kar = as.numeric(data.EC$r_kar)
data.EC$s_kar = as.numeric(data.EC$s_kar)
revalue(data.EC$kar, c("00" = "0", "02" = "2", "-3" = "-3")) -> data.EC$kar
data.EC$kar = data.EC$kar %>%
as.character() %>%
as.numeric()
#########################################################
#########################################################
#### preparing data frame for plotting graph
# select variables
data.EC.graf = data.EC %>%
select(year, semester, name, kar, s_kar)
data.EC.graf$product = data.EC.graf$s_kar * data.EC.graf$kar
data.EC.graf1 = aggregate(data.EC.graf$product, by=list(data.EC.graf$year, data.EC.graf$semester), FUN=sum, na.rm=TRUE)
colnames(data.EC.graf1)[3] = "sum.product"
data.EC.graf2 = aggregate(data.EC.graf$s_kar, by=list(data.EC.graf$year,data.EC.graf$semester), FUN=sum, na.rm=TRUE)
colnames(data.EC.graf2)[3] = "count"
data.EC.graf.plot = left_join(data.EC.graf1, data.EC.graf2)
colnames(data.EC.graf.plot)[1] = "year"
colnames(data.EC.graf.plot)[2] = "semester"
data.EC.graf.plot$average = data.EC.graf.plot$sum.product / data.EC.graf.plot$count
### NOTE - drop 2011, Spring - not representative: 1 course
### NOTE - drop 2016, Summer - grades not yet realised: 2 courses
# data.EC.graf.plot1 = data.EC.graf.plot[!(data.EC.graf.plot$year=="2011" & data.EC.graf.plot$semester=="Spring"),]
# data.EC.graf.plot1 = data.EC.graf.plot1[!(data.EC.graf.plot1$year=="2016" & data.EC.graf.plot1$semester=="Summer"),]
##### Economics END ######################################################################
##########################################################################################
##########################################################################################
##########################################################################################
### Combining the EC and PS data frames
data.EC$study = "Economics"
data.PS$study = "Political Science"
data.Comb = rbind(data.EC, data.PS)
# data.EC.graf.plot$study = "Economics"
# data.PS.graf.plot$study = "Political Science"
#
# data.Comb.graf.plot = rbind(data.EC.graf.plot, data.PS.graf.plot)
###
|
1899476b1578f3bb9bf2a6da96d3393121618918 | f0b63c76d1221a7a859b2692327bbc809b9015f9 | /AES_R/AES+SHA1_sign.R | 779e3a1331b58a6d4045ece0d385805c7f5f9279 | [] | no_license | K-subin/R_Cipher | df5aed824da80cb513cb5f0fdd0d248f2cbeb729 | 7e479eb348a08217d02200fbe8f454551dfbb7a3 | refs/heads/main | 2023-02-19T21:38:44.854572 | 2021-01-18T09:06:54 | 2021-01-18T09:06:54 | 312,599,342 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 893 | r | AES+SHA1_sign.R | #=========================================
# AES (using digest built-in function)
#=========================================
library("digest")
msg_chr <- "Hello! Our Team name is 4. This is LEA Encrypt !"
msg_raw <- charToRaw(msg_chr)
msg_raw
#AES Algorithm Setup
aes_key <- as.raw(0:15)
aes <- AES(aes_key, mode = "ECB")
#Call AES Encrypt
cipher_raw <- aes$encrypt(msg_raw)
#Generate HMAC Tag
hmac_key <- as.raw(16:31)
hmac_tag <- hmac(hmac_key, msg_chr, "sha1")
hmac_tag
### ciphertext packet = ( cipher_raw, hmac_tag ) ---> [Bob]
aes_key_bob <- as.raw(0:15)
aes_bob <- AES(aes_key_bob, mode = "ECB")
msg_bob_raw <- aes_bob$decrypt(cipher_raw, raw=TRUE)
msg_bob_chr <- rawToChar(msg_bob_raw)
msg_bob_chr
hmac_bob_key <- as.raw(16:31)
hmac_bob_tag <- hmac(hmac_bob_key, msg_bob_chr, "sha1")
hmac_bob_tag
stopifnot(identical(hmac_tag, hmac_bob_tag))
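# Note: the HMAC tag is computed over the plaintext, not the ciphertext
# (MAC-and-encrypt), so Bob has to decrypt first and recompute the tag,
# as done above, before he can check integrity.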
|
486f335a5b546b8b985a5665625f62a0a72ecb3d | dcc2229cce6a8737f3a8d8e367547a365fcf066f | /man/remove_edges.Rd | 68a99d1546cdb7940a7fd951531b7147f5fbea11 | [
"Apache-2.0"
] | permissive | synalogik-mike/arango-driver | 335090183c67f0f6ca9d7f84fc540072a1f58a97 | 488e8e128ea94131f276a20b1ee605d232ad171d | refs/heads/master | 2022-12-25T04:18:20.366591 | 2019-07-08T06:58:42 | 2019-07-08T06:58:42 | 295,026,977 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 527 | rd | remove_edges.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/arango_graph.R
\name{remove_edges}
\alias{remove_edges}
\title{Edges removal}
\usage{
remove_edges(.graph, listOfEdges)
}
\arguments{
\item{.graph}{the graph affected by the deletion}
\item{listOfEdges}{a list of lists containing edge information}
}
\value{
the ArangoGraph object of the structure affected by the change
}
\description{
Removes a collection of elements from the given graph.
}
\author{
Gabriele Galatolo, g.galatolo(at)kode.srl
}
|
95a0165aa344c94125bc5b35abef4e50d8819dcf | 9bf1f830525f8aa8b2f56a95d9f2bf762d773dd3 | /run_analysis.R | f2e9a321f99038d839f66acc3398a34673099b5d | [] | no_license | tynesjo/getcleandata | 0443d08d91beb99f2c18e98d929bb81af358a036 | 77da5ff8e36845b29d7c259b817b993fec15ae6e | refs/heads/master | 2021-01-19T09:57:30.654519 | 2015-06-21T22:54:56 | 2015-06-21T22:54:56 | 37,828,433 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,316 | r | run_analysis.R | # Script to Acquire and Organize Wearable Tech Machine Learning Data
# ==============================================================================
# Settings.
# ------------------------------------------------------------------------------
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
dat_dir <- "./dat" # Folder in which to save the data.
zip_file <- "dat.zip" # Name of the zip file once downloaded.
colnames_dictionary <- c( # Translations for column names
"Mag"= "Magnitude", "^t"="time", "^f"="frequency", "Acc"="Accelerometer",
"Gyro"="Gyroscope", "BodyBody"="Body"
)
# Packages.
# ------------------------------------------------------------------------------
require(magrittr) # To use forward-pipes ("%>%" etc.).
require(plyr) # To create the tidy data set.
# Step 1: Acquire and Combine the Data.
# ------------------------------------------------------------------------------
DataGetUnzip <- . %>% { # Downloads and unzips the data file.
# Create data directory if not existing.
if(!file.exists(dat_dir)) {dir.create(dat_dir)}
  # Download data file if not existing (use "curl" method to handle https)
dest <- file.path(dat_dir, zip_file)
if(!file.exists(dest)) {download.file(url=url, destfile=dest, method="curl")}
# Unzip data file.
unzip(zipfile=dest, exdir=dat_dir) # Unzip.
}
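# DataGetUnzip is a magrittr functional sequence, so the definition alone does
# nothing; invoke it (its argument is ignored) so the files exist before the
# ReadT() calls below:
DataGetUnzip(NA)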
ReadT <- function(sub_dir, file, path=file.path(dat_dir, "UCI HAR Dataset")) {
# Reads a .txt file given a subdirectory path and file name using read.table
file.path(path, sub_dir, paste0(file, ".txt")) %>%
read.table(header=FALSE, stringsAsFactors=FALSE)
}
feat_names <- ReadT(".", "features")$V2 # Feature names from file.
d <- cbind( # Combine the data into one data frame.
# Subject data (combined training and testing).
rbind(ReadT("train", "subject_train"), ReadT("test", "subject_test")),
# Activity data (combined training and testing)
rbind(ReadT("test", "y_test"), ReadT("train", "y_train")),
# Features data.
rbind(ReadT("test", "X_test"), ReadT("train", "X_train"))
) %>% # Finally assign column names; %T>% with an assignment block would only rename a local copy.
  set_colnames(c("subject", "activity", feat_names))
# Step 2: Extract only standard deviation and mean related features.
# ------------------------------------------------------------------------------
d %<>% {.[, c(1:2, grep("mean\\(|std", colnames(.)))]}
# Step 3: Appropriately label the activities using factors.
# ------------------------------------------------------------------------------
activity_factors <- ReadT(".", "activity_labels")
d$activity %<>% factor(levels=activity_factors[,1], labels=activity_factors[,2])
# Step 4: Make Column Names
# ------------------------------------------------------------------------------
v <- colnames_dictionary
for(i in 1:length(v)) {
colnames(d) %<>% gsub(names(v)[[i]], v[[i]] %>% unname, .)
}
# Clean up the column names.
{colnames(d) %<>% gsub("-", "_", .) %>% gsub("\\(\\)", "", .)}
rm(v)
# Step 5: Create a Tidy Data Set.
# ------------------------------------------------------------------------------
TidyCreate <- . %>% { # Creates a tidy data set from "d" and saves as CSV file.
aggregate(. ~ activity + subject, d, mean) %>%
{.[order(.$subject, .$activity),]} %>%
write.table("tidy_data.txt", row.names=F)
}
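# TidyCreate likewise only defines the pipeline; this call (its argument is
# ignored) writes the tidy data set to "tidy_data.txt":
TidyCreate(NA)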
|
d28d5c75b49e8a9254685d119a92336855c169d2 | ca2802548f8a961ca6e0fe57d7906f912eb3f221 | /man/order_matchdata.Rd | b68ac5a0c884fa93a39ef30f82c4d2ef76a4589c | [] | no_license | O1sims/FootballStats | 4dca2ebb135922f5ca3beafa7c3206022faea490 | 266d476f5f15d57960f8715d33a766f8fa091daa | refs/heads/master | 2023-06-08T08:48:39.646688 | 2021-06-29T09:07:44 | 2021-06-29T09:07:44 | 381,303,716 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 670 | rd | order_matchdata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classify_utils.R
\name{order_matchdata}
\alias{order_matchdata}
\title{Order Match Dataset}
\usage{
order_matchdata(matchData, formatter = "\%d.\%m.\%Y")
}
\arguments{
\item{matchData}{A data frame that contains rows of single matches
that have been played between two teams.}
\item{formatter}{A string that defines the expected format of the incoming
dates, allowing any standard date format.}
}
\value{
A data frame that has been ordered by date.
}
\description{
A function that takes an arbitrary data frame
consisting of match data and orders it by date in ascending
order.
}
|
089e416e07582b550d272428c648c8e12515782a | d6a831acab0f09e1292de90f33cbad86fe6ab0e9 | /code/6_R2R_R1_grouped.R | ee9b00d823bea3ff6dbf86f8dbe36e3769b009c0 | [
"CC0-1.0"
] | permissive | yangclaraliu/covid_vac_africa | 1a42f79205ab543d172fbafd13f56ab896a24428 | 5400d14f43cfdd73251621c4de0107ec73509a8c | refs/heads/main | 2023-04-10T20:06:44.570204 | 2023-02-07T21:36:14 | 2023-02-07T21:36:14 | 426,681,664 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,552 | r | 6_R2R_R1_grouped.R | merge_novac_grouped <- function(tmp){
require(magrittr)
tmp$pfizer$non_fatal %<>%
left_join(res_novac_grouped$pfizer[[1]] %>%
rename(novac = value),
by = c("name", "population", "group","year"))
tmp$pfizer$fatal %<>%
left_join(res_novac_grouped$pfizer[[2]],
by = c("name", "population", "group", "year"))
tmp$az$non_fatal %<>%
left_join(res_novac_grouped$az[[1]] %>%
rename(novac = value),
by = c("name", "population", "group","year"))
tmp$az$fatal %<>%
left_join(res_novac_grouped$az[[2]],
by = c("name", "population", "group", "year"))
return(tmp)
}
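# Packages this script leans on at the top level; they are assumed to be
# attached elsewhere in the project, so loading them here makes the file more
# self-contained. pop, fitted_table, ms_scenarios, econ_scens, ICER, UNLT,
# UNPOP, GDPPC, GNIPC and the cov_* helpers are expected from the surrounding
# project, and oddsratio() near the end presumably comes from epitools.
library(data.table); library(dplyr); library(tidyr); library(tibble)
library(lubridate); library(ggplot2); library(magrittr)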
# load data
params_grid_az_70_grouped <- readRDS("~/GitHub/covid_vac_africa/data/intermediate/params_grid_az_70_grouped.rds")
params_grid_pfizer_70_grouped <- readRDS("~/GitHub/covid_vac_africa/data/intermediate/params_grid_pfizer_70_grouped.rds")
res_novac_grouped <- readRDS("~/GitHub/covid_vac_africa/data/intermediate/res_novac_grouped.rds")
# organise results
res_grouped <- organise_outcomes(tmp_pfizer = params_grid_pfizer_70_grouped,
tmp_az = params_grid_az_70_grouped)
res_grouped_merged <- merge_novac_grouped(res_grouped)
# write tables
input_fatal_grouped <- list()
res_grouped_merged$pfizer$fatal %>%
mutate(Type = "pfizer") %>%
bind_rows(res_grouped_merged$az$fatal %>%
mutate(Type = "az")) %>%
left_join(fitted_table[,c("loc", "iso3c")] %>%
rename(population = loc),
by = c("population", "iso3c")
) %>%
rename(epi_id = scenario_id,
country = iso3c,
age = group,
deaths = value) %>%
mutate(age = factor(age,
levels = unique(res_grouped_merged$pfizer$fatal$group),
labels = 1:16),
age = as.numeric(age)) %>%
ungroup -> input_fatal_grouped[["all"]]
c(input_fatal_grouped,
input_fatal_grouped$all %>%
dplyr::select(epi_id, country, year, age, deaths, Type) %>%
group_by(Type) %>% group_split() %>%
setNames(c("az","pfizer")) %>%
map(dplyr::select, -Type) %>%
map(data.table)) -> input_fatal_grouped
input_fatal_grouped$all %>%
dplyr::select(epi_id, country, year, age, novac) %>%
rename(deaths = novac) %>%
distinct() %>%
data.table() -> input_fatal_grouped[["novac"]]
input_non_fatal_grouped <- list()
res_grouped_merged$pfizer$non_fatal %>%
mutate(Type = "pfizer") %>%
bind_rows(res_grouped_merged$az$non_fatal %>%
mutate(Type = "az")) %>%
filter(!grepl("_p_", name)) %>%
dplyr::select(-novac) %>%
pivot_wider(names_from = name, values_from = value) %>%
rename(epi_id = scenario_id,
country = iso3c,
icu = critical_i_all,
non_icu = severe_i_all) %>%
ungroup() -> input_non_fatal_grouped[["all"]]
c(input_non_fatal_grouped,
input_non_fatal_grouped$all %>%
dplyr::select(epi_id, country, group, year, cases, non_icu, icu, Type) %>%
group_by(Type) %>% group_split() %>%
setNames(c("az","pfizer")) %>%
map(dplyr::select, -Type)%>%
map(data.table)) -> input_non_fatal_grouped
res_grouped_merged$pfizer$non_fatal %>%
mutate(Type = "pfizer") %>%
bind_rows(res_grouped_merged$az$non_fatal %>%
mutate(Type = "az")) %>%
filter(!grepl("_p_", name)) %>%
dplyr::select(-value, -Type) %>%
distinct() %>%
pivot_wider(names_from = name, values_from = novac) %>%
rename(epi_id = scenario_id,
country = iso3c,
icu = critical_i_all,
non_icu = severe_i_all) %>%
ungroup() %>%
data.table() -> input_non_fatal_grouped[["novac"]]
input_non_fatal_grouped$novac |>
dplyr::select(colnames(input_non_fatal_grouped$az)) -> input_non_fatal_grouped$novac
#
#testing
# epi_deaths = input_fatal_grouped$az
# epi_cases = input_non_fatal_grouped$az
# econ_scens = econ_scens[1,]
# LT = UNLT
# POP = UNPOP
# GDPPC = GDPPC
# GNIPC = GNIPC
# pre calculate life expectancy as now we only care about one type of economic
# scenario
dLE <- vector("list", length = nrow(econ_scens))
for (s in 1:nrow(econ_scens)){
# calculate discount life expectancy for given discount rate, smr and country
dLE[[s]] <- cov_dLE(
r = econ_scens[[s,"discount_rate"]],
smr = econ_scens[[s,"smr"]],
    selectCountries = unique(input_fatal_grouped$az[, country]), # epi_deaths only exists inside the commented test block above; both vaccine tables cover the same countries
LT = UNLT,
POP = UNPOP
)
dLE[[s]][, econ_id := s]
}
dLE <- rbindlist(dLE)
# update_functions
cov_econ_outcomes_grouped <- function(
epi_deaths, # data table of age-specific deaths
epi_cases, # data table of cases, non_icu and icu admission (not age-specific)
econ_scens, # econ scenarios specifying discount rate, smr
LT, # UNWPP life tables
POP, # UNWPP population estimates
GDPPC, # World Bank country GDP per capita 2020
GNIPC # World Bank country GNI per capita 2020
){
require(data.table)
# merge deaths and vsl data
vsl <- cov_VSL(GNIPC = GNIPC)
ylls <- vsl[ # merge deaths and vsl data
epi_deaths,
on = .(country == country)
]
# merge gdp per capita data
ylls <- GDPPC[
ylls,
on = .(country == country)
]
# merge dLE data and calculated ylls, vsl and hc
first_year <- 2021 # min(epi_deaths$year) # reference year for discounting
ylls <- dLE[
ylls,
on = .(country = country, AgeBand = age),
allow.cartesian = TRUE
][
,
.(
ylls = sum(deaths * d_LEx *
1 / (1 + econ_scens[[s,"discount_rate"]])^(year - first_year)
),
vsl = sum(deaths * vsl),
human_capital = sum(d_LEx * GDPPC_2020_USD),
GDPPC_2020_USD = mean(GDPPC_2020_USD)
),
by = .(epi_id, econ_id, country, AgeBand) # collapse age bands
]
# ylds based on number of cases / hospitalizations
unit_ylds <- cov_unit_ylds()
first_year <- 2021
ylds <- epi_cases[
,
as.list(econ_scens[,c("econ_id","discount_rate")]), # combine with different econ scenarios
by=epi_cases
][
,
.(
ylds = sum(
(
(cases * unit_ylds$per_case) +
(non_icu * unit_ylds$per_non_icu_case) +
(icu * unit_ylds$per_icu_case)
) * 1 / (1 + discount_rate)^(year - first_year) # discounting
)
),
by = .(epi_id, econ_id, country, group)
]
ylls |>
mutate(group = factor(AgeBand,
levels = 1:16,
labels = unique(ylds$group))) |>
dplyr::select(-AgeBand) -> ylls
# merge ylls with ylds and calculate dalys
out <- ylds[
ylls,
on = .(epi_id, econ_id, country, group)
][, dalys := ylls + ylds]
return(out)
}
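# Caution: the ylls discounting term inside the function reads
# econ_scens[[s, "discount_rate"]], where `s` leaks in from the loop above;
# that is only safe while econ_scens has a single row, as in the calls below.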
cov_econ_outcomes_grouped(
epi_deaths = input_fatal_grouped$az,
epi_cases = input_non_fatal_grouped$az,
econ_scens = econ_scens[1,],
LT = UNLT,
POP = UNPOP,
GDPPC = GDPPC,
GNIPC = GNIPC) -> out_az_grouped
cov_econ_outcomes_grouped(
epi_deaths = input_fatal_grouped$pfizer,
epi_cases = input_non_fatal_grouped$pfizer,
econ_scens = econ_scens[1,],
LT = UNLT,
POP = UNPOP,
GDPPC = GDPPC,
GNIPC = GNIPC) -> out_pfizer_grouped
cov_econ_outcomes_grouped(
epi_deaths = input_fatal_grouped$novac,
epi_cases = input_non_fatal_grouped$novac,
econ_scens = econ_scens[1,],
LT = UNLT,
POP = UNPOP,
GDPPC = GDPPC,
GNIPC = GNIPC) -> out_novac_grouped
# when medium is better than fast
pop_bycountry <- pop |>
group_by(iso3c) |>
summarise(tot = sum(m,f)*1000) |>
filter(iso3c %in% fitted_table$iso3c)
ICER$az_03 |>
filter(econ_id == 1) |>
mutate(Type = "az") |>
bind_rows(ICER$pf_03 |>
filter(econ_id == 1) |>
mutate(Type = "pf")) |>
dplyr::select(date_start, ICER_scaled, iso3c, scenario, Type) |>
left_join(pop_bycountry, by = "iso3c") |>
mutate(ICER_cat = case_when(ICER_scaled < 0.1 ~ 1,
ICER_scaled >= 0.1 & ICER_scaled < 0.3 ~ 2,
ICER_scaled >= 0.3 & ICER_scaled < 0.5 ~ 3,
ICER_scaled >= 0.5 & ICER_scaled < 1 ~ 4,
ICER_scaled >= 1 ~ 5)) |>
# select(-ICER_scaled) |>
dplyr::select(-ICER_cat) |>
mutate(ICER_scaled = round(ICER_scaled, 2)) |>
pivot_wider(names_from = scenario, values_from = ICER_scaled) |>
mutate(check = case_when(medium < fast ~ "1",
medium == fast ~ "2",
medium > fast ~ "3")) -> bar_met
# plot results
out_az_grouped |>
left_join(ms_scenarios |>
rownames_to_column(var = "epi_id"),
by = "epi_id") |>
mutate(Type = "az") |>
bind_rows(out_pfizer_grouped |>
left_join(ms_scenarios |>
rownames_to_column(var = "epi_id"),
by = "epi_id") |>
mutate(Type = "pf")) |>
mutate(date_start = ymd(date_start),
older = if_else(group %in% c("60-64",
"65-69",
"70-74",
"75+"),
T, F)) |>
group_by(epi_id, country, scenario, date_start, older, Type) |>
summarise(dalys = sum(dalys)) |>
group_by(epi_id, country, scenario, date_start, Type) |>
mutate(dalys_tot = sum(dalys),
dalys_prop = dalys/dalys_tot,
scenario = factor(scenario,
levels = c("slow", "medium", "fast"))) |>
rename(iso3c = country) |>
right_join(bar_met[,c("date_start", "iso3c", "check", "Type")],
by = c("date_start", "iso3c", "Type")) |>
filter(older == T, scenario != "slow") |>
ungroup() |>
dplyr::select(-dalys, -dalys_tot, -epi_id) |>
pivot_wider(names_from = scenario, values_from = dalys_prop) |>
data.table() -> data_test
t1 <- t.test(data_test[check == "1" & Type == "az", ]$fast,
data_test[check == "1" & Type == "az", ]$medium,
paired = T,
alternative = "two.sided")
t2 <- t.test(data_test[check == "3" & Type == "az", ]$fast,
data_test[check == "3" & Type == "az", ]$medium,
paired = T,
alternative = "two.sided")
t3 <- t.test(data_test[check == "1" & Type == "pf", ]$fast,
data_test[check == "1" & Type == "pf", ]$medium,
paired = T,
alternative = "two.sided")
t4 <- t.test(data_test[check == "3" & Type == "pf", ]$fast,
data_test[check == "3" & Type == "pf", ]$medium,
paired = T,
alternative = "two.sided")
data.frame(mean = c(t1$estimate,
t2$estimate,
t3$estimate,
t4$estimate),
LL = c(t1$conf.int[1],
t2$conf.int[1],
t3$conf.int[1],
t4$conf.int[1]),
UL = c(t1$conf.int[2],
t2$conf.int[2],
t3$conf.int[2],
t4$conf.int[2]),
lab = c("ICER_medium < ICER_fast",
"ICER_medium > ICER_fast"),
Type = c("Viral vector vaccine",
"Viral vector vaccine",
"mRNA vaccine",
"mRNA vaccine")) |>
ggplot() +
geom_point(aes(x = lab, y = mean), size = 3) +
geom_segment(aes(x = lab, xend = lab,
y = LL, yend = UL), size = 1.5) +
theme_bw() +
custom_theme +
labs(x = "",
y = expression("paDALY"["fast, 60+"] - "paDALY"["medium, 60+"])) +
scale_x_discrete(labels = parse(text = c("ICER[medium] < ICER[fast]",
"ICER[medium] > ICER[fast]")))+
facet_wrap(~Type, ncol = 1, scales = "free")
ggsave("figs/R2R_R1/ICER_grouped.png", width = 8, height = 12)
out_az_grouped |>
left_join(ms_scenarios |>
rownames_to_column(var = "epi_id"),
by = "epi_id") |>
mutate(Type = "az") |>
bind_rows(out_pfizer_grouped |>
left_join(ms_scenarios |>
rownames_to_column(var = "epi_id"),
by = "epi_id") |>
mutate(Type = "pf")) |>
mutate(date_start = ymd(date_start),
older = if_else(group %in% c("60-64",
"65-69",
"70-74",
"75+"),
T, F)) |>
group_by(epi_id, country, scenario, date_start, older, Type) |>
summarise(dalys = sum(dalys)) |>
group_by(epi_id, country, scenario, date_start, Type) |>
mutate(dalys_tot = sum(dalys),
dalys_prop = dalys/dalys_tot,
scenario = factor(scenario,
levels = c("slow", "medium", "fast"))) |>
rename(iso3c = country) |>
ungroup() |>
filter(older == T) |>
dplyr::select(Type, iso3c, scenario, dalys_prop, date_start) |>
pivot_wider(names_from = scenario,
values_from = dalys_prop) |>
mutate(fast = round(fast, 2),
medium = round(medium, 2),
diff_paDALYs = case_when(fast > medium ~ "paDALYs: fast > medium",
fast < medium ~ "paDALYs: fast < medium",
fast == medium ~ "paDALYs: fast == medium")) |>
right_join(bar_met[,c("date_start", "iso3c", "check", "Type")],
by = c("date_start", "iso3c", "Type")) |>
rename(diff_ICERs = check) |>
mutate(diff_ICERs = case_when(diff_ICERs == 1 ~ "ICER: fast > medium",
diff_ICERs == 2 ~ "ICER: fast == medium",
diff_ICERs == 3 ~ "ICER: fast < medium")) |>
group_by(diff_paDALYs, diff_ICERs, Type) |>
tally() -> byTable
byTable |>
filter(Type == "pf") |>
filter(diff_ICERs != "ICER: fast == medium",
diff_paDALYs != "paDALYs: fast == medium") |>
pivot_wider(names_from = diff_ICERs,
values_from = n) %>%
# replace(is.na(.), 0) |>
ungroup() %>%
as.data.frame() %>%
set_rownames(.[,1]) |>
dplyr::select(3:4) |> as.matrix() |> oddsratio()
byTable |>
filter(Type == "az") |>
filter(diff_ICERs != "ICER: fast == medium",
diff_paDALYs != "paDALYs: fast == medium") |>
pivot_wider(names_from = diff_ICERs,
values_from = n) |>
ungroup() %>%
as.data.frame() %>%
set_rownames(.[,1]) |>
dplyr::select(3:4) |> as.matrix() |> oddsratio()
|
2de291c4a2a267cebdc9a493946511a679c539f5 | 1ec5ef3b45a275980230031720f98d7878dc5e97 | /plot1.R | 34dd47890a78e95f41720823c2d4af8c7b7876b8 | [] | no_license | courseraRR/ExData_Plotting1 | 67bd55e62590d9b3c3e1459c056ecffb9bd8353b | 99eff103c9cfc1910f515b8371aea7b14ba4a487 | refs/heads/master | 2021-01-14T14:27:52.038785 | 2014-05-10T22:57:11 | 2014-05-10T22:57:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,029 | r | plot1.R |
#' Script to plot the Global Active Power frequency distribution
#if necessary download and unzip the data file to current directory
if(!file.exists("household_power_consumption.txt")){
require(utils)
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",
"household_power_consumption.zip", method="curl") # download
unzip("household_power_consumption.zip" )
}
#set the path to the data file
data.file.name<-"./household_power_consumption.txt"
#read the file
df<-read.csv(data.file.name, sep=";", stringsAsFactors=FALSE, na.strings="?")
#subset to the desired dates
df<-subset(df, df$Date=="1/2/2007"| df$Date=="2/2/2007")
#for convenience, assign global active power to a variable called gap
gap<-df$Global_active_power
#turn on the graphics device
png("./plot1.png")
#do the plot
hist(gap, breaks=seq(floor(min(gap)), ceiling(max(gap)), by=.5), col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
#turn off the graphics device
dev.off()
|
26357137a3044d1b596438ccc3a05af195fff500 | d3a83275ed27bf445f2d6ddf967f3c3013dac89e | /NCAA/project/src/features/FeatureManipulation.R | 16187949a3ec01f36fbc8239fd63ade604511cad | [
"MIT"
] | permissive | paridhi1603/NCAA-Prediction | e67cd657f3bada155f3e0afa6b2cb4e0664247f4 | 8236c74831641df7e311d6c680b7f15c28970478 | refs/heads/main | 2023-06-19T04:11:39.745347 | 2021-07-15T20:43:38 | 2021-07-15T20:43:38 | 379,400,771 | 0 | 0 | null | 2021-06-25T18:42:13 | 2021-06-22T21:06:30 | R | UTF-8 | R | false | false | 6,553 | r | FeatureManipulation.R | rm(list = ls())
library(data.table)
library(caret)
library(reshape2)
library(tidyr)
test <- fread('./project/volume/data/raw/MSampleSubmissionStage2.csv')
season <- fread('./project/volume/data/raw/MRegularSeasonDetailedResults.csv')
tourney <- fread('./project/volume/data/raw/MNCAATourneyDetailedResults.csv')
ranks <- fread('./project/volume/data/raw/MMasseyOrdinals.csv')
All_Games_Table <- rbind(season,tourney)
W_stats <- All_Games_Table[,.(Season, DayNum, WTeamID, WScore, WFGM, WFGA, WFGM3, WFGA3, WFTM, WFTA ,WOR ,WDR , WAst, WTO, WStl, WBlk ,WPF)]
L_stats <- All_Games_Table[,.(Season, DayNum, LTeamID, LScore, LFGM, LFGA, LFGM3, LFGA3, LFTM, LFTA ,LOR ,LDR , LAst, LTO, LStl, LBlk ,LPF)]
colnames(W_stats) <- c("Season", "DayNum", "TeamID", "Score", "FGM", "FGA", "FGM3", "FGA3", "FTM", "FTA" ,"OR" ,"DR" , "Ast", "TO", "Stl", "Blk" ,"PF")
colnames(L_stats) <- c("Season", "DayNum", "TeamID", "Score", "FGM", "FGA", "FGM3", "FGA3", "FTM", "FTA" ,"OR" ,"DR" , "Ast", "TO", "Stl", "Blk" ,"PF")
master_stats <- rbind(W_stats,L_stats)
stats_by_day <- NULL
for (i in 1:max(master_stats$DayNum))
{
sub_master_stats <- master_stats[DayNum < i]
team_stats_by_day <- data.table::dcast(setDT(sub_master_stats), TeamID+Season~., mean , value.var = c("FGM", "Score", "FGA", "FTM", "FTA", "FGA3", "FTM", "FTA" ,"OR" ,"DR" , "Ast", "TO", "Stl", "Blk" ,"PF"))
team_stats_by_day$DayNum <- i
stats_by_day <- rbind(stats_by_day, team_stats_by_day)
}
stats_by_day$TeamID <- as.character(stats_by_day$TeamID)
#- Clean test
#test <- data.table(matrix(unlist(strsplit(test$id,"_")),ncol=2,byrow=T))
#setnames(test,c("V1","V2"),c("team_1","team_2"))
test<- data.table(matrix(unlist(strsplit(test$ID,"_")),ncol=3,byrow=T))
setnames(test,c("V1","V2","V3"),c("Season","team_1","team_2"))
#test$DayNum<-max(d2$Season==2021,d2$DayNum)+1
test$DayNum<-max(tourney$Season == 2021,tourney$DayNum)
#test$DayNum <- max(season[Season == 2021,DayNum]) + 1
test$Result <- 0.5
#- initializing train
train <- rbind(season,tourney)
train <- train[,.(WTeamID,LTeamID,Season,DayNum)]
setnames(train,c("WTeamID","LTeamID"),c("team_1","team_2"))
train$Result <- 1
#- make master data file
master <- rbind(train,test)
#- ensure my team ids are characters
master$team_1 <- as.character(master$team_1)
master$team_2 <- as.character(master$team_2)
master$Season <- as.integer(master$Season)
temp <- merge(master, stats_by_day, by.x = c("team_1", "Season", "DayNum"), by.y = c("TeamID", "Season", "DayNum"), all.x = T)
master <- merge(temp, stats_by_day, by.x = c("team_2", "Season", "DayNum"), by.y = c("TeamID", "Season", "DayNum"), all.x = T)
master$FGMdif<- master$FGM.x-master$FGM.y
master$Scoredif<- master$Score.x-master$Score.y
master$FGAdif<- master$FGA.x-master$FGA.y
master$FTMdif<- master$FTM.x-master$FTM.y
master$FTAdif<- master$FTA.x-master$FTA.y
master$ORdif<- master$OR.x-master$OR.y
master$FGA3dif<- master$FGA3.x-master$FGA3.y
master$DRdif<- master$DR.x-master$DR.y
master$Astdif<- master$Ast.x-master$Ast.y
master$TOdif<- master$TO.x-master$TO.y
master$Stldif<- master$Stl.x-master$Stl.y
master$Blkdif<- master$Blk.x-master$Blk.y
master$PFdif<- master$PF.x-master$PF.y
master<-na.omit(master)
#- teams' ranks often change on the day of a game, so we don't want to use the
# 'future' values; we offset them by one day.
ranks$Season<- as.integer(ranks$Season)
ranks$DayNum <- ranks$RankingDayNum+1
#- the ranking systems to incorporate; the original assignment text asked for
#- a loop over the systems, which is what the for loop below now does
#- (it adds one *_dif column to master per system)
which_system <- c("POM","SAG","MOR","DOK")
master$Season<-as.integer(master$Season)
#- start here
#- subset the ranks table
for (i in (which_system))
{
#subset_ranks<- ranks[SystemName == which_system][,.(Season,DayNum,TeamID,OrdinalRank)]
one_rank <- ranks[SystemName == i][,.(Season,DayNum,TeamID,OrdinalRank)]
#- prep and join into the first team
setnames(one_rank,"TeamID","team_1")
one_rank$team_1 <- as.character(one_rank$team_1)
setkey(master,Season,team_1,DayNum)
setkey(one_rank,Season,team_1,DayNum)
#- join here
master <- one_rank[master,roll=T]
setnames(master,"OrdinalRank","team_1_rank")
#- prep and merge into the second team
setnames(one_rank,"team_1","team_2")
setkey(master,Season,team_2,DayNum)
setkey(one_rank,Season,team_2,DayNum)
master <- one_rank[master,roll=T]
setnames(master,"OrdinalRank","team_2_rank")
#subtract the rankings for a new variable
master$rank_dif <- master$team_2_rank-master$team_1_rank
master$team_1_rank <- NULL
master$team_2_rank <- NULL
setnames(master,"rank_dif",paste0(i,"_dif"))
# end here
}
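#- The roll=T joins above are rolling joins: each game row picks up the most
#- recent ranking published on or before its (offset) DayNum.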
####INITIAL TABLES CODE STARTS HERE
#- clean up the data
master <- master[order(Season,DayNum)]
#- get rid of id variables and nas ( you should keep the ids, Season and Day)
master <- master[,.(team_1,team_2,POM_dif, SAG_dif, MOR_dif, DOK_dif, Scoredif, FTMdif, FTAdif, PFdif, Blkdif, Stldif, TOdif, Astdif, DRdif, FGA3dif, ORdif, Result)]
master <- master[!is.na(master$POM_dif)]
#master <- master[!is.na(master$PIG_dif)]
master <- master[!is.na(master$SAG_dif)]
master <- master[!is.na(master$MOR_dif)]
master <- master[!is.na(master$DOK_dif)]
# #add all
#split back into train and test
test <- master[Result == 0.5]
train <- master[Result == 1]
#- divide so I have losses
rand_inx <- sample(1:nrow(train),nrow(train)*0.5)
train_a <- train[rand_inx,]
train_b <- train[!rand_inx,]
#- train_b will encode the losses
train_b$Result <- 0
train_b$POM_dif <- train_b$POM_dif*-1
#train_b$PIG_dif <- train_b$PIG_dif*-1
train_b$SAG_dif <- train_b$SAG_dif*-1
train_b$MOR_dif <- train_b$MOR_dif*-1
train_b$DOK_dif <- train_b$DOK_dif*-1
#train_b$FGMdif<- NULL
train_b$Scoredif<- train_b$Scoredif*-1
#train_b$FGAdif<- NULL
train_b$FTMdif<- train_b$FTMdif*-1
train_b$FTAdif<- train_b$FTAdif*-1
train_b$ORdif<- train_b$ORdif*-1
train_b$FGA3dif<- train_b$FGA3dif*-1
train_b$DRdif<- train_b$DRdif*-1
train_b$Astdif<- train_b$Astdif*-1
train_b$TOdif<- train_b$TOdif*-1
train_b$Stldif<- train_b$Stldif*-1
train_b$Blkdif<- train_b$Blkdif*-1
train_b$PFdif<- train_b$PFdif*-1
setnames(train_b,c("team_1","team_2"),c("team_2","team_1"))
train <- rbind(train_a,train_b)
fwrite(test,'./project/volume/data/interim/test.csv')
fwrite(train,'./project/volume/data/interim/train.csv')
|
1976180af646ccad917ce81201e753be6cbaecfe | a5d4010a57f0703d915652d6ff08191b654e1d3a | /cachematrix.R.bak | 1d6faef628e4e53e31903d6fe27089b4c5d6ebfa | [] | no_license | githubds/ProgrammingAssignment2 | be92bd8267b6f2b8221aefd40421e11020faef74 | a7ea2551aad88c2ab9334b4ec99c045a0f0ab515 | refs/heads/master | 2021-04-09T13:50:15.071525 | 2015-03-19T07:56:25 | 2015-03-19T07:56:25 | 32,507,008 | 0 | 0 | null | 2015-03-19T07:28:21 | 2015-03-19T07:28:20 | null | UTF-8 | R | false | false | 1,208 | bak | cachematrix.R.bak | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
##makeCacheMatrix
## input param--1. a square matrix; returns a list of functions
## this function creates an environment and initializes the x_inv variable
## enclosing functions
## set --assigns to variables in parent env
## get --returns the input parameter of the parent function
## setInv --sets value to parent env variable x_inv
## getInv --gets the parent env variable x_inv
makeCacheMatrix <- function(x = matrix()) {
x_inv<-NULL
set<- function(y){
x<<-y
x_inv<<-NULL
}
get<-function() x
setInv<- function(inv) x_inv<<-inv
getInv<- function() x_inv
list( set=set
,get=get
,setInv=setInv
,getInv=getInv
)
}
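## Example usage (illustrative; cacheSolve is defined below):
##   cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
##   cacheSolve(cm)   # computes the inverse and caches it
##   cacheSolve(cm)   # second call returns the cached inverse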
## cacheSolve
## inputparam --function; returns the inverse of square matrix
## first it checks if the inverse is available in cache; if not then
## it calculates inverse and stores that in the cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getInv();
if (!is.null(m)){
print("getting cached data...")
return (m)
}
z<-x$get()
m<-solve(z)
x$setInv(m)
m
} |
3702b57e3e0a1c43ab3993059ae842dc45329206 | 0969a8a76b2361bd1301b61503392debe2e454e3 | /Rproject/archive/R/GR_clustering_1.R | 7fe19dbb3ad33ce8a3e0c8940ff7fc0eacc75a5c | [] | no_license | daliagachc/GR_chc | 9a85b133919863769e9d655c2e7c63fa3676177a | c40d7a28e18f1cea4c940af44151e48c8926bf55 | refs/heads/master | 2020-06-01T04:00:06.653784 | 2019-06-13T05:33:04 | 2019-06-13T05:33:04 | 190,622,843 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,015 | r | GR_clustering_1.R | #GR_clustering
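# Packages assumed by this script; event_GR_wide, npfevent_size_frame and
# wssplot() are expected to come from the surrounding project.
library(dplyr); library(tidyr); library(NbClust)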
# packages used below; input objects (event_GR_wide, npfevent_size_frame,
# df_wide) are assumed to be created by earlier scripts in this project
library(dplyr)
library(tidyr)    # gather()
library(NbClust)
minR=0.8
wd=event_GR_wide %>%
mutate(R_00_03=NULL,GR_00_03=NULL,GR_total=NULL,R_total=NULL)
wd=wd %>%
mutate(GR_03_07=ifelse(R_03_07>=minR,GR_03_07,NA),
GR_07_20=ifelse(R_07_20>=minR,GR_07_20,NA),
GR_20_80=ifelse(R_20_80>=minR,GR_20_80,NA),
special=NULL)
wd=na.omit(wd)
mask=npfevent_size_frame %>%
select(ion,eventID) %>%
filter(ion=="negative")
mask=unique(mask)
wd=merge(wd,mask,by="eventID")
wd=subset(wd,ion=="negative") %>%
select(eventID,GR_03_07, GR_07_20,GR_20_80)
#creating the normalization factor for every day
wd_normfac=wd %>%
dplyr::group_by(eventID) %>%
dplyr::summarise(normfac=sqrt(GR_03_07^2+GR_07_20^2+GR_20_80^2))
#combining the two and creating a normalized column
wd_merged=merge(wd,wd_normfac, by="eventID")
wd_merged$GR_03_07=wd_merged$GR_03_07/wd_merged$normfac
wd_merged$GR_07_20=wd_merged$GR_07_20/wd_merged$normfac
wd_merged$GR_20_80=wd_merged$GR_20_80/wd_merged$normfac
toCluster=wd_merged[,1:4]
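# wssplot() is not defined in this file; the definition below (the standard
# one from Kabacoff's "R in Action", assumed here) plots within-group sums of
# squares against the number of clusters:
wssplot <- function(data, nc = 15, seed = 1234) {
  wss <- (nrow(data) - 1) * sum(apply(data, 2, var))
  for (i in 2:nc) {
    set.seed(seed)
    wss[i] <- sum(kmeans(data, centers = i)$withinss)
  }
  plot(1:nc, wss, type = "b", xlab = "Number of Clusters",
       ylab = "Within groups sum of squares")
}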
#plotting the "cluster quantity selection" plot
wssplot(toCluster[-1])
set.seed(1234)
nc <- NbClust(toCluster[-1], min.nc=2, max.nc=7, method="kmeans")
table(nc$Best.n[1,])
#doing the cluster fit
set.seed(1234)
fit.km <- kmeans(toCluster[-1], 2, nstart=25) #3
fit.km$size
fit.km$centers
#which days are assigned to which cluster
df_clustered=data.frame(eventID=wd$eventID, clus=fit.km$cluster)
#plotting cluster centers
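# df_wide is assumed to be created elsewhere in this project (a wide table of
# events by hour, with columns "0".."23"); it is not defined in this script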
test=aggregate(na.omit(df_wide[-1]), by=list(cluster=fit.km$cluster), mean)
test_long=gather(data=test, key=hour,value=value, as.character(0:23))
test_long$hour=as.numeric(test_long$hour)
test_long$cluster=as.factor(test_long$cluster)
#
#creating the normalization factor for every day (first two size ranges only)
wd_normfac=wd %>%
  dplyr::group_by(eventID) %>%
  dplyr::summarise(normfac=sqrt(GR_03_07^2+GR_07_20^2))
#combining the two and creating a normalized column (GR_03_07 is normalized
#here as an assumed stand-in; the TGM column this step originally referenced
#does not exist in this script)
wd_merged=merge(wd,wd_normfac, by="eventID")
wd_merged$GR_norm=wd_merged$GR_03_07/wd_merged$normfac
|
f9512951782f6e50339a93406e8ee6cde4f46059 | d810f1775139ce2fc0392d860fdbb85637aec357 | /data-raw/loadExampleData.R | b0da87b1301c67eb5ea2b70508f84fd5007725ab | [] | no_license | SusannaPagni/knitauthors | bc509a0ac406d8909d3d42c59967cf79d60574a0 | 15f87cf98f561cc097d47f463905b2ca5b4ab35b | refs/heads/master | 2023-05-31T14:36:02.528889 | 2016-06-16T15:18:32 | 2016-06-16T15:18:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 108 | r | loadExampleData.R | exampleInput <- read.delim("data-raw/authors.tab", sep="\t")
devtools::use_data(exampleInput, overwrite=T)
|
0c1851a84c145e68e3d2f09665c4314eb46d8f39 | 40cf390f349297ad4b12cdcef20577b0ff2d0340 | /R/system.R | f83843b724a307b231c79e7095034591ac58d7e2 | [] | no_license | omegahat/RTimingTools | 2fa1742fb5ddf3f6f4d1555a2ebe50dec4be16b0 | cfcdf8dae2b2303a93cc0cbdb3618145ee682fad | refs/heads/master | 2016-09-06T00:45:11.588514 | 2012-02-22T01:54:38 | 2012-02-22T01:54:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 473 | r | system.R | # Alternatives:
# C code http://gwenole.beauchesne.info/projects/cpuinfo/
cpuInfo.Linux =
function()
{
txt = system("cat /proc/cpuinfo", intern = TRUE)
m = read.dcf(textConnection(txt))
colnames(m) = gsub("\\s*$", "", colnames(m))
structure(
list(mhz = m[,"cpu MHz"],
cache = m[, 'cache size'],
cores = m[, 'cpu cores'],
model = m[, 'model name'],
vendor = m[, 'vendor_id']),
class = c("LinuxCPUInfo", "CPUInfo"))
}
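# Example usage (a sketch; Linux only, requires a readable /proc/cpuinfo):
# info <- cpuInfo.Linux()
# info$model[1]    # model name of the first logical CPU
# length(info$mhz) # number of logical CPUs listed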
|
8e13aa69fc62ceb0c0ea10db2b77c5644f4294e6 | 88e00382da404a7929f346ad82818d5986d3c667 | /run_analysis.R | e7d23feba37e10dbac4574f88b39ab8e0bdb92ba | [] | no_license | jjc0965/getting_and_cleaning_data | 0d4b49203213fdba3f87b46f4a3551b1f5ba87ec | 48c5abe5b930cca5b04d181462f700020919bc01 | refs/heads/master | 2021-01-10T22:13:45.460664 | 2015-03-20T17:18:53 | 2015-03-20T17:18:53 | 32,487,780 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,083 | r | run_analysis.R | ## 1) READS DATA INTO R
activity_labels<-read.table("UCI HAR Dataset/activity_labels.txt") ## Imports the different types of activities into R
features<-read.table("UCI HAR Dataset/features.txt") ## Reads the features data (i.e., "tBodyAcc-mean()-X") into R
train_set<-read.table("UCI HAR Dataset/train/X_train.txt") ## Reads X-train data (training set) into R
train_labels<-read.table("UCI HAR Dataset/train/y_train.txt") ## Reads training labels (1-6) into R
train_subject<-read.table("UCI HAR Dataset/train/subject_train.txt") ## Reads subject who performed the training activity (1-30) into R
test_set<-read.table("UCI HAR Dataset/test/X_test.txt") ## Reads X-test data (test set) into R
test_labels<-read.table("UCI HAR Dataset/test/y_test.txt") ## Reads test labels (1-6) into R
test_subject<-read.table("UCI HAR Dataset/test/subject_test.txt") ## Reads subject who performed the test activity (1-30) into R
## 2) COMBINES TRAIN AND TEST DATA
combined_set<-rbind(train_set, test_set) ## Combines train and test sets
combined_labels<-rbind(train_labels, test_labels) ## Combines train and test labels
combined_subject<-rbind(train_subject, test_subject) ## Combines train and test subjects
## 3) EXTRACTS ONLY FEATURES WHICH ANALYZE MEANS OR STANDARD DEVIATIONS
features_mean_only<-grep("-mean()", as.character(features$V2), fixed=TRUE) ## A vector of integers - selects only the features data containing "mean".
## Setting fixed=TRUE does not allow () to be interpreted as a metacharacter. I am eliminating all fields with "meanFreq", by doing this, as these are not truly means
features_std_only<-grep("-std()", as.character(features$V2), fixed=TRUE) ## A vector of integers - selects only the features data containing "std" - same rationale as above
features_mean_or_std<-sort(c(features_mean_only, features_std_only)) ## A vector of integers - selects only the features data containing either "mean" or "std"
## 4) CREATES FINAL WORKING DATA SET - NAMED "set"
set<-cbind(combined_subject, combined_labels, combined_set[features_mean_or_std]) ## the working set, with columns combined and only means and stds included - short name for this reason
## 5) CREATION OF TIDY DATA SET
tidydata<-aggregate(set[3:68], by=set[1:2], mean) ## Gets aggregate of data by test subject ID and activity
colnames(tidydata)<-c("Subject.ID", "Activity", as.character(features[features_mean_or_std,2])) ## Labels all columns
tidydata<-tidydata[with(tidydata, order(Subject.ID, Activity)), ] ## Puts the data in order by "Subject.ID and "Activity"
rownames(tidydata)<-NULL
tidydata[,"Activity"]<-activity_labels[tidydata[,"Activity"],2] ## Puts text of activity in column 2 (labeled "Activity") of the output
## 6) WRITES TABLE TO "tidydata.txt"
write.table(tidydata, file="tidydata.txt", sep="\t") ## Writes the data to a file named "tidydata.txt", not incorporating row.names
## 7) READS DATA FROM "tidydata.txt" BACK TO "data_final"
data_final<-read.table("tidydata.txt") ##Reads the "tidydata.txt" file back into R
View(data_final) |
79c8be389c05b3de40e7532082060e836f541bda | 8572f31d31d0ea012b1b5625e6efb9b944df113b | /2020/week13/R/analysis.R | 4fc7cac3cb9714e06f4c2f9fb845c51306e883f7 | [] | no_license | larsbrenta/tidyt_jkaupp | 0bf5572c230ad4d2d8bc40fd83d0803c8e541681 | 7ce8ae0df2f58b62794a45c90405b5aa561fcc93 | refs/heads/master | 2023-06-12T17:24:52.896620 | 2021-07-06T16:23:03 | 2021-07-06T16:23:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,209 | r | analysis.R | library(tidyverse)
library(here)
library(jkmisc)
library(janitor)
library(waffle)
library(glue)
library(scales)
tbi_age <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-03-24/tbi_age.csv')
injury_mechanisms <- set_names(unique(tbi_age$injury_mechanism), c("Motor Vehicle", "Struck By/Against", "Struck By/Against", "Other/Unknown", "Self-Harm", "Assault", "Other/Unknown"))
age_groups <- c("0-4", "5-14", "15-24", "25-34", "35-44", "45-54", "55-64",
"65-74", "75+")
injury_labels <- c("Motor Vehicle", "Self-Harm", "Assault", "Other/Unknown", "Struck By/Against")
tbi_age_pruned <- tbi_age %>%
filter(age_group != "0-17", age_group != "Total") %>%
mutate(injury_mechanism = fct_recode(injury_mechanism, !!!injury_mechanisms),
age_group = factor(age_group, age_groups)) %>%
count(age_group, injury_mechanism, wt = number_est) %>%
mutate(per_capita = n/1000)
pal <- c(RColorBrewer::brewer.pal(6, "Greys")[c(-1, -6)],"#048ba8") %>%
set_names(injury_labels)
titles <- imap(pal, ~highlight_text(.y, .x, 'b'))
plot <- ggplot(tbi_age_pruned) +
geom_waffle(aes(fill = fct_rev(injury_mechanism), values = per_capita), n_rows = 10, flip = TRUE, size = 0.1, color = "white", show.legend = FALSE) +
facet_wrap(~age_group, nrow = 1, strip.position = "bottom") +
scale_x_discrete() +
scale_y_continuous(labels = function(x) glue("{x * 10}K"),
expand = c(0,0),
limits = c(0, 50)) +
coord_equal() +
scale_fill_manual(values = pal) +
labs(title = "Traumatic Brain Injuries Peak in Late Teens/Early Adults and again in Seniors.",
subtitle = glue("{paste0(flatten_chr(titles[-4:-5]), collapse = ', ')} and {titles[4]} all pose risks for brain injury, but accidents classified as<br>{titles$`Struck By/Against`} are the greatest risk across all ages."),
caption = "**Data**: CDC | **Graphic**: @jakekaupp") +
theme_jk(grid = FALSE,
ticks = TRUE,
markdown = TRUE) +
theme(strip.text.x = element_text(hjust = 0.5, size = 9))
ggsave(here("2020", "week13", "tw13_plot.png"), plot, width = 10, height = 6, dev = ragg::agg_png())
|
cb0ae5875366a8dfc90dde5631b50510262135c9 | 79eb7c6958b247770731ee20a5d9be525d8f5ed0 | /exercises/concept/lasagna/lasagna.R | cafa7c4b4a395a03ff0320574eda074fcbc09a21 | [
"CC-BY-SA-4.0",
"CC-BY-3.0",
"CC-BY-4.0",
"MIT"
] | permissive | exercism/r | 345781f314b8a66be047abd889238cba2630a20c | 566676cca76019e3e6a602f8f4d8086c54a51e1e | refs/heads/main | 2023-08-03T09:30:59.027153 | 2023-07-28T00:18:31 | 2023-07-28T00:18:31 | 24,401,761 | 22 | 37 | MIT | 2023-09-05T11:19:45 | 2014-09-24T05:22:10 | R | UTF-8 | R | false | false | 227 | r | lasagna.R | # TODO: define the 'expected_minutes_in_oven()' function
# TODO: define the 'remaining_time_in_minutes()' function
# TODO: define the 'prep_time_in_minutes()' function
# TODO: define the 'elapsed_time_in_minutes()' function
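# A possible set of definitions (a sketch; the canonical exercise values --
# a 40-minute total oven time and 2 minutes of preparation per layer -- are
# assumed):
expected_minutes_in_oven <- function() {
  40
}

remaining_time_in_minutes <- function(actual_minutes_in_oven) {
  expected_minutes_in_oven() - actual_minutes_in_oven
}

prep_time_in_minutes <- function(number_of_layers) {
  number_of_layers * 2
}

elapsed_time_in_minutes <- function(number_of_layers, actual_minutes_in_oven) {
  prep_time_in_minutes(number_of_layers) + actual_minutes_in_oven
}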
|
8382313196b1b54d0f5076f9b1f9a03e0e24119f | f3244f77367f735f30833a5b6e2b69321a28dfb5 | /man/chronique.figure.preferendums.Rd | 873d67e00d71c48d8d38b934859fbdffdf555950 | [] | no_license | jbfagotfede39/aquatools | 3f36367668c6848ddd53950708222fd79f0e3b7a | 9c12f80919790ec3d0c1ee7f495e9c15cc2c9652 | refs/heads/master | 2023-08-03T09:10:17.314655 | 2023-07-25T08:32:45 | 2023-07-25T08:32:45 | 46,334,983 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 2,055 | rd | chronique.figure.preferendums.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chronique.figure.preferendums.R
\name{chronique.figure.preferendums}
\alias{chronique.figure.preferendums}
\title{Views of the thermal preferenda of fish species}
\usage{
chronique.figure.preferendums(
staderecherche = c("Adulte", "Reproduction", "Embryon", "Larve", "Juvénile",
"Tous stades"),
tmm30j = 0,
liste_especes = c("Toutes espèces", "TRF", "CHA", "LPP", "VAI", "LOF", "OBR", "EPI",
"BLE", "BLN", "CHE", "APR", "GOU", "HOT", "TOX", "BAF", "LOT", "SPI", "VAN", "EPT",
"BOU", "BRO", "PER", "GAR", "ABL", "CAS", "GRE", "CCO", "SAN", "TAN", "BRB", "BRE",
"PES", "ROT", "BBG", "PCH", "SIL"),
titre = "",
save = F,
projet = NA_character_,
format = ".png"
)
}
\arguments{
\item{staderecherche}{Life stage of interest: \code{Adulte} (default), \code{Reproduction}, \code{Embryon}, \code{Larve}, \code{Juvénile} or \code{Tous stades}}
\item{tmm30j}{Tmm30j value to display: 0 by default (no display)}
\item{liste_especes}{List of species to display: all by default. Species are displayed in the order given.}
\item{titre}{Plot title (empty by default)}
\item{save}{If \code{FALSE} (default), the figures are not saved. If \code{TRUE}, they are saved.}
\item{projet}{Project name}
\item{format}{Output file format (.png by default)}
}
\description{
This function plots the thermal preferenda of fish species, optionally together with the observed Tmm30j
}
\examples{
chronique.figure.preferendums()
chronique.figure.preferendums("Reproduction")
chronique.figure.preferendums("Tous stades", tmm30j = 17.4)
chronique.figure.preferendums("Toutes espèces", tmm30j = 17.4)
chronique.figure.preferendums("Adulte", tmm30j = 17.4, "TRF,CHA,LOG,VAL")
chronique.figure.preferendums("Adulte", tmm30j = 17.4, liste_especes)
chronique.figure.preferendums("Adulte", tmm30j = 17.4, .$chsta_sprep)
}
\keyword{chronique}
\keyword{poissons}
|
549457eb0d4ad9e300e7b89ad061bb1521a05f90 | 5f684a2c4d0360faf50fe055c1147af80527c6cb | /2020/2020-week38/kids.R | a114660755e392caa7419a3cc5d76e477001fb65 | [
"MIT"
] | permissive | gkaramanis/tidytuesday | 5e553f895e0a038e4ab4d484ee4ea0505eebd6d5 | dbdada3c6cf022243f2c3058363e0ef3394bd618 | refs/heads/master | 2023-08-03T12:16:30.875503 | 2023-08-02T18:18:21 | 2023-08-02T18:18:21 | 174,157,655 | 630 | 117 | MIT | 2020-12-27T21:41:00 | 2019-03-06T14:11:15 | R | UTF-8 | R | false | false | 2,676 | r | kids.R | library(tidyverse)
library(geofacet)
library(colorspace)
kids <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-09-15/kids.csv')
states_abbr <- read_csv(here::here("2020-week38", "data", "us-states-abbr-AP.csv")) %>%
select(state = title_case, ap_style)
libraries <- kids %>%
filter(variable == "lib") %>%
filter(year == 1997 | year == 2016) %>%
mutate(inf_adj_perchild = inf_adj_perchild * 1000) %>%
pivot_wider(id_cols = state, names_from = year, names_prefix = "year_", values_from = inf_adj_perchild) %>%
mutate(diff = year_2016 - year_1997) %>%
left_join(states_abbr)
f1 <- "Proxima Nova"
f1b <- "Proxima Nova Bold"
f1m <- "Proxima Nova Medium"
f2c <- "IBM Plex Sans Condensed"
ggplot(libraries) +
# Point and arrow
geom_point(aes(x = 0, y = year_1997, color = ifelse(diff > 0, "grey97", "grey20")), size = 1.5) +
geom_segment(aes(x = 0, xend = 1, y = year_1997, yend = year_2016, color = ifelse(diff > 0, "grey97", "grey20")), arrow = arrow(length = unit(0.2, "cm")), size = 0.75) +
# State label
geom_text(aes(x = -0.3, y = 350, label = ap_style), stat = "unique", hjust = 0, color = "grey95", family = f1b, size = 3.5) +
# 1997 value label
geom_text(aes(x = 0, y = year_1997 - 90, label = round(year_1997)), hjust = 0.5, family = f2c, size = 3, color = darken("#7E95A9", 0.3)) +
# 2016 value label
geom_text(aes(x = 1.1, y = year_2016 - 90, label = round(year_2016)), hjust = 0.5, family = f2c, size = 3, color = darken("#7E95A9", 0.4)) +
# Scales, facet, labs
# scale_color_gradient2(low = "red", mid = "grey75", high = "grey97") +
scale_color_identity() +
facet_geo(vars(state)) +
labs(
title = "Change in public spending on libraries from 1997 to 2016",
subtitle = "Dollars spent per child, adjusted for inflation",
caption = "Source: Urban Institute | Graphic: Georgios Karamanis"
) +
coord_cartesian(clip = "off", expand = FALSE) +
# Theme
theme_void() +
theme(
legend.position = "none",
plot.background = element_rect(fill = "#7E95A9", color = NA),
plot.margin = margin(20, 30, 15, 30),
panel.spacing.x = unit(1.5, "lines"),
panel.spacing.y = unit(1, "lines"),
strip.text = element_blank(),
plot.title = element_text(size = 16, family = f1b, hjust = 0.5, margin = margin(10, 0, 0, 0)),
plot.subtitle = element_text(size = 14, family = f1, hjust = 0.5, margin = margin(5, 0, 25, 0)),
plot.caption = element_text(family = f1, hjust = 1, margin = margin(20, 0, 0, 0))
)
ggsave(here::here("temp", paste0("kids-", format(Sys.time(), "%Y%m%d_%H%M%S"), ".png")), dpi = 320, width = 9, height = 8)
|
c1f6c7a51e6ded825bb4aba0c952a80bd936e58b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/paleotree/examples/unitLengthTree.Rd.R | 6f3e08da1ff6edfa38e9db59fea375d971d913e8 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 226 | r | unitLengthTree.Rd.R | library(paleotree)
### Name: unitLengthTree
### Title: Scale Tree to Unit-Length
### Aliases: unitLengthTree
### ** Examples
set.seed(444)
tree <- rtree(10)
layout(1:2)
plot(tree)
plot(unitLengthTree(tree))
layout(1)
|
077dfe6aa8ffb31a98739987875d0a271ec147d0 | 5be5469a64c3c6a26f7bbda0d6b2d996755396f3 | /plot4.R | 2ef901a5ac350959b839530371919be93a6fb37d | [] | no_license | Slak60/ExData_Plotting1 | c29efd984b0f2c6c0809dbe72e26d5666ff8a986 | 5bde35c4d3a6152fac2cfbdc5233b0d6be2dbe16 | refs/heads/master | 2022-10-26T15:39:45.398905 | 2020-06-16T17:46:18 | 2020-06-16T17:46:18 | 272,774,053 | 0 | 0 | null | 2020-06-16T17:43:19 | 2020-06-16T17:43:18 | null | UTF-8 | R | false | false | 1,442 | r | plot4.R | temp <- tempfile() ## create a temp file to download the .zip file
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", temp)##download zip file
power_consumption <- unzip(temp)## unzip file
## read the .txt file
mydata<- read.table("./household_power_consumption.txt", header=TRUE, sep=";",na.strings="?",stringsAsFactors = FALSE)
library(lubridate)
mydf <- transform(subset(mydata, Date=="1/2/2007"| Date =="2/2/2007"),
Date=format(as.Date(Date, format="%d/%m/%Y"),"%Y-%m-%d"),
Time=format(strptime(Time, format="%H:%M:%S"),"%H:%M:%S"))
mydf$DateTime <- as.POSIXct(paste(mydf$Date,mydf$Time))
png(file="Plot4.png",width=480,height=480)
my.par <- par(mfrow=c(2,2))
plot(mydf$DateTime,mydf$Global_active_power,type="l",xlab="",ylab="Global Active Power(kilowatts)")
plot(mydf$DateTime,mydf$Voltage,type="l",xlab="datetime",ylab="Voltage")
plot(mydf$DateTime,mydf$Sub_metering_1,type="l",col="black",xlab="",ylab="Energy sub metering")
lines(mydf$DateTime,mydf$Sub_metering_2, col="red")
lines(mydf$DateTime,mydf$Sub_metering_3, col="blue")
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col=c("black","red","blue"),
pch=c("-","-","-"), lty=c(1,1,1),lwd=c(2,2,2), ncol=1)
plot(mydf$DateTime,mydf$Global_reactive_power,type="l",xlab="datetime",ylab="Global_reactive_power")
par(my.par)
dev.off() |
da70d1e7b8d0ec2a97e6021621b0b0341da77bc3 | 7224813a0f5d032aed634a637328e87030dd070d | /R/prepare_models.R | 6ab767f8d08309f6a8de7971b7d64cd6d54d3705 | [
"MIT"
] | permissive | ethanwhite/portalcasting | 3d164472a54e5ae746a668cc848e813e4b795eaf | 03bd42b5a01536a9178e9aeb47046847acda9984 | refs/heads/master | 2023-04-30T20:59:54.178447 | 2022-06-01T00:13:25 | 2022-06-01T00:13:25 | 157,772,725 | 0 | 0 | MIT | 2018-11-15T21:06:31 | 2018-11-15T21:06:31 | null | UTF-8 | R | false | false | 7,968 | r | prepare_models.R | #' @title Read and Write Model Control Lists
#'
#' @description Input/output functions for model control lists.
#'
#' @param quiet \code{logical} indicator controlling if messages are printed.
#'
#' @param main \code{character} value of the name of the main component of the directory tree.
#'
#' @param models \code{character} vector of name(s) of model(s) to include.
#'
#' @param settings \code{list} of controls for the directory, with defaults set in \code{\link{directory_settings}}.
#'
#' @param new_model_controls \code{list} of controls for any new models (not in the prefab models) listed in \code{models} that are to be added to the control list and file.
#'
#' @return \code{list} of \code{models}' control \code{list}s, \code{\link[base]{invisible}}-ly for \code{write_model_controls}.
#'
#' @name read-and-write-model-controls
#'
#' @export
#'
read_model_controls <- function (main = ".",
settings = directory_settings()) {
read_yaml(file.path(main, settings$subs$models, settings$files$model_controls))
}
#' @rdname read-and-write-model-controls
#'
#' @export
#'
model_controls <- function (main = ".",
models = prefab_models(),
settings = directory_settings()) {
nmodels <- length(models)
if (nmodels == 1) {
read_model_controls(main = main,
settings = settings)[[models]]
} else if (nmodels > 1) {
read_model_controls(main = main,
settings = settings)[models]
}
}
#' @rdname read-and-write-model-controls
#'
#' @export
#'
write_model_controls <- function (main = ".",
new_model_controls = NULL,
models = prefab_models(),
settings = directory_settings(),
quiet = FALSE) {
model_controls <- prefab_model_controls()
nmodels <- length(model_controls)
nnew_models <- length(new_model_controls)
if (nnew_models > 0) {
for (i in 1:nnew_models) {
model_controls <- update_list(model_controls,
x = new_model_controls[[i]])
names(model_controls)[nmodels + i] <- names(new_model_controls)[i]
}
}
write_yaml(x = model_controls,
file = file.path(main, settings$subs$models, settings$files$model_controls))
invisible(model_controls)
}
#' @title Write Model Function Script into Directory
#'
#' @description Writes a model's function as a script into the defined directory for use in forecasting. \cr \cr \code{model} can be input as a \code{character} string, symbol (backquoted name), or \code{function}, as \code{\link{match.fun}}
#'
#' @param main \code{character} value of the name of the main component of the directory tree.
#'
#' @param model \code{character} name of a model function, the \code{function} itself, or its symbol (backquoted name).
#'
#' @param settings \code{list} of controls for the directory, with defaults set in \code{\link{directory_settings}} that should generally not need to be altered.
#'
#' @param quiet \code{logical} indicator if progress messages should be quieted.
#'
#' @param verbose \code{logical} indicator of whether or not to print out all of the information (and thus just the tidy messages).
#'
#' @param datasets \code{character} vector of dataset names for the model.
#'
#' @return \code{write_model} \code{\link{write}}s the model script out and returns \code{NULL}, \code{\link[base]{invisible}}-ly. \cr \cr
#' \code{model_template}: \code{character}-valued text for a model script to be housed in the model directory. \cr \cr
#' \code{control_list_arg}: \code{character}-valued text for part of a model script. \cr \cr
#'
#' @examples
#' \donttest{
#' create_dir()
#' write_model("AutoArima")
#' model_template()
#' control_list_arg(runjags_control(nchains = 3), "runjags_control")
#' }
#'
#' @export
#'
write_model <- function (main = ".",
model = NULL,
settings = directory_settings(),
quiet = FALSE,
verbose = FALSE) {
return_if_null(model)
control_model <- tryCatch(prefab_model_controls()[[model]],
error = function(x){NULL})
datasets <- control_model$datasets
if (is.null(datasets)) {
messageq(" ~datasets = NULL for ", model, quiet = quiet)
datasets <- prefab_datasets()
}
model_file <- paste0(model, ".R")
mod_path <- file.path(main, settings$subs$models, model_file)
  mod_template <- model_template(main     = main,
                                 model    = model,
                                 datasets = datasets,
                                 settings = settings,
                                 quiet    = quiet,
                                 verbose  = verbose)
if (file.exists(mod_path) & settings$overwrite) {
write(mod_template, mod_path)
messageq(" -", ifelse(verbose, "Updating ", ""), model, quiet = quiet)
} else if (!file.exists(mod_path)) {
write(mod_template, mod_path)
messageq(" -", ifelse(verbose, "Adding ", ""), model, quiet = quiet)
}
invisible()
}
#' @rdname write_model
#'
#' @export
#'
model_template <- function (main = ".",
model = NULL,
datasets = NULL,
settings = directory_settings(),
quiet = FALSE,
verbose = FALSE) {
return_if_null(model)
control_model <- tryCatch(prefab_model_controls()[[model]],
error = function(x){NULL})
  # prefer the datasets declared in the model's controls; fall back to the
  # datasets argument if the controls do not declare any
  if (!is.null(control_model$datasets)) {
    datasets <- control_model$datasets
  }
  return_if_null(datasets)
  nds <- length(datasets)
main_arg <- paste0(', main = "', main, '"')
quiet_arg <- paste0(', quiet = ', quiet)
verbose_arg <- paste0(', verbose = ', verbose)
ds_args <- paste0('dataset = "', datasets, '"')
settings_arg <- paste0(', settings = directory_settings()')
additional_args <- NULL
nadditional_args <- length(control_model$args)
if (nadditional_args > 0) {
for (i in 1:nadditional_args) {
additional_args <- paste0(additional_args, ", ", names(control_model$args)[i], " = ", control_model$args[i])
}
}
out <- NULL
for(i in 1:nds){
resp <- paste0('cast_', datasets[i])
model_args <- paste0(ds_args[i], main_arg, settings_arg, quiet_arg, verbose_arg, additional_args)
model_fun <- paste0(model, '(', model_args, ');')
model_line <- paste0(resp, ' <- ', model_fun)
save_args <- paste0(resp, main_arg, settings_arg, quiet_arg)
save_fun <- paste0('save_cast_output(', save_args, ');')
save_line <- save_fun
newout <- c(model_line, save_line)
out <- c(out, newout)
}
out
}
#' @title Create a covariate model list
#'
#' @description Convenience function for creating covariate model \code{list}s.
#'
#' @param model \code{character} name for covariate models. Currently only \code{"pevGARCH"} is supported.
#'
#' @return \code{list} of covariate model structures.
#'
#' @examples
#' covariate_models()
#'
#' @export
#'
covariate_models <- function (model = "pevGARCH") {
out <- NULL
if (model == "pevGARCH") {
out <- list(c("maxtemp", "meantemp", "precipitation", "ndvi"),
c("maxtemp", "mintemp", "precipitation", "ndvi"),
c("mintemp", "maxtemp", "meantemp", "precipitation"),
c("precipitation", "ndvi"),
c("mintemp", "ndvi"),
c("mintemp"),
c("maxtemp"),
c("meantemp"),
c("precipitation"),
c("ndvi"),
c(NULL))
}
out
}
|
97f82e3e771548ea9f0b5473c01595758aa6a308 | 6d70e865c21e483f74dd6085fa0cb01f91b04729 | /man/cytoband.col.Rd | 26db63cc4c09130c04a06b9ca905c5122d1db63a | [
"MIT"
] | permissive | dgabbe/circlize | 8eb53ec62425033b46cb9ca652e7c5cc2dec4f61 | a01d0015e1ccd60f6ff2f7a8d568c32f873063f4 | refs/heads/master | 2022-07-17T08:37:49.032202 | 2020-11-23T18:07:17 | 2020-11-23T18:07:17 | 232,622,670 | 0 | 0 | NOASSERTION | 2020-01-08T17:42:35 | 2020-01-08T17:42:34 | null | UTF-8 | R | false | false | 473 | rd | cytoband.col.Rd | \name{cytoband.col}
\alias{cytoband.col}
\title{
Assign colors to cytogenetic band (hg19) according to the Giemsa stain results
}
\description{
Assign colors to cytogenetic band (hg19) according to the Giemsa stain results
}
\usage{
cytoband.col(x)
}
\arguments{
\item{x}{A vector containing the Giemsa stain results
}
}
\details{
The color theme is from \url{http://circos.ca/tutorials/course/slides/session-2.pdf}, page 42.
}
\examples{
# There is no example
NULL
}
|
4c9f7b32004dd9d96bccc94607c51a0aff00b5fd | 29ce175254b27a361714074b99fe5644ca2c3158 | /man/data_wings.Rd | 6da340dbafae16e4ca6602eae629919495ea9238 | [] | no_license | raz1/Momocs | 148903d3f05428e72d7cef8ede46c0bba7f80f04 | 09a817bb0720d87969d48dd9e0f16516e042e13e | refs/heads/master | 2021-01-17T18:09:55.724182 | 2014-08-15T14:49:17 | 2014-08-15T14:49:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 425 | rd | data_wings.Rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\docType{data}
\name{wings}
\alias{wings}
\title{Data: Landmarks coordinates of mosquito wings}
\format{A \link{Ldk} object containing 18 (x; y) landmarks from 127 mosquito wings}
\source{
Rohlf and Slice 1990 and \url{http://life.bio.sunysb.edu/morph/data/RohlfSlice1990Mosq.nts}
}
\description{
Data: Landmarks coordinates of mosquito wings
}
\keyword{datasets}
|
6151107fd63e57ca653739bbdb8ad939a4c0c4df | c3e7dcf4ac8688519084710e5eb6ec7c557dbfdf | /examples/FH.frac.cal_example.R | ada132007c1fb71cea44b0b96cb35774ee2b16c1 | [] | no_license | lilywang1988/IAfrac | e24df17c15d314cc98257740435236138a154277 | 2c9e5329174937c0a418b93d2cc6f467f1dd20f1 | refs/heads/master | 2021-06-24T19:29:02.958609 | 2021-03-07T07:59:48 | 2021-03-07T07:59:48 | 202,036,400 | 1 | 1 | null | 2021-03-07T07:59:48 | 2019-08-13T01:16:18 | R | UTF-8 | R | false | false | 2,036 | r | FH.frac.cal_example.R | # Examples for FH.frac.cal
# install.packages("devtools")
# library(devtools)
# install_github("keaven/nphsim")
library(nphsim)
eps<-2 # delayed effect
p<-0.5 #treatment assignment
b<-30 # an intrinsic parameter to decide the number of intervals per time unit
tau<- 18 # end of the study
R<-14 # accrual period [0,R]
omega<- tau-R
lambda<-log(2)/6 # control group risk hazard
theta<-0.7 # hazard ratio
lambda.trt<- lambda*theta #hazard after the change point for the treatment arm
rho<- 0 # parameter for the weights
gamma<-1 #parameter for the weights
alpha<-0.025 #type 1 error
beta<-0.1 #type 2 error
# First we decide the sample size:
size_FH <- sample.size_FH(eps,p,b,tau,omega,lambda,lambda.trt,rho, gamma,alpha,beta)
n_FH <-size_FH$n
n_event_FH<-size_FH$n_event
accrual.rt<-n_FH/R # the needed accrual rate
#Generate data accordingly, use eta=1e-5 to inhibit censoring
data_temp <- nphsim(nsim=1,lambdaC=lambda, lambdaE = c(lambda,lambda.trt),
ssC=ceiling(n_FH*(1-p)),intervals = c(eps),ssE=ceiling(n_FH*p),
gamma=accrual.rt, R=R, eta=1e-5, fixEnrollTime = TRUE)$simd
# The information-fraction denominator, needed by both examples below
I_denom<-I.1(rho, gamma,lambda,theta,eps,R,p,t.star=tau)*n_FH
# Example 1 for FH.frac.cal: Set trimmed=F and work on the crude dataset from nphsim()
inf_frac_vec1<-FH.frac.cal(data_temp,c(10,15,18),I_denom,rho,gamma,trimmed=F)
inf_frac_vec1
# Example 2 for FH.frac.cal: First trim the data before inputting into FH.frac.cal() setting trimmed=T, and obtain the whole spectrum.
tau.star=21 #in case the ratio=1 when t>tau
#Trim the data
data_temp2 <-data.trim(tau.star,data_temp)
t_seq <- seq(0.1,tau.star,0.1) # the time series to check the information fraction
inf_frac_vec2<-FH.frac.cal(data_temp2,t_seq,I_denom,rho,gamma,trimmed=T)
# WLRT at the interim
interim_index<- which.min(abs(inf_frac_vec2-0.6))
interim_time<-t_seq[interim_index]
interim_frac<-inf_frac_vec2[interim_index]
# WLRT at the final
final_index<- which.min(abs(inf_frac_vec2-1))
final_time<-t_seq[final_index]
final_frac<-inf_frac_vec2[final_index]
|
0ec3c71d4f411425302d2ff37c4fdbd74a554c09 | 9f63b0971eda98e020bb8ba3edeba5d3332cda16 | /scripts/create_inflow_files_sunapee/MergeFlowNutrients_Sunapee_Make1Inflow.R | d36b7810fb347d2f77c2e7d65102bdd2f794acb1 | [] | no_license | tadhg-moore/Sunapee-GLM | c2664404660712267587ee1a95d3a6ae615345af | 083ff8ef04cdba3d05aa5999ea51dad3ba6ccc4a | refs/heads/master | 2023-05-08T14:03:19.041890 | 2021-06-01T20:53:47 | 2021-06-01T20:53:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,143 | r | MergeFlowNutrients_Sunapee_Make1Inflow.R | rm(list=ls())
#options(scipen = 999)
nuts<-read.csv("./data/individual_inflows/TN_TP_Inflow_conc_fracNP_boot_2021-03-16.csv")
files<-list.files("./data/individual_inflows/", pattern = "*totalinflow_temp_2021-03-17.csv")
alldata <- do.call(rbind,lapply(paste0("./data/individual_inflows/", files), read.csv))
alldata$date<-as.character(alldata$date)
alldata$date<-as.POSIXct(alldata$date, format="%Y-%m-%d")
alldata$year<-format(alldata$date, "%Y")
library(dplyr)
i505<-alldata[which(alldata$stream_id==505),]
i505<- subset(i505, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(i505)[names(i505)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOW505","TEMP505")
nut505<-subset(nuts,select=c("V1","i505_TP","i505_TN"))
names(nut505)[names(nut505)==c("V1", "i505_TP","i505_TN")] <- c("time","TP505","TN505")
nut505$time<-as.character(nut505$time)
nut505$time<-as.POSIXct(nut505$time, format="%Y-%m-%d")
nut505$time<-as.POSIXct(nut505$time, format="%Y")
i505 <- i505 %>% left_join(nut505)
i790<-alldata[which(alldata$stream_id==790),]
i790<- subset(i790, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(i790)[names(i790)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOW790","TEMP790")
nut790<-subset(nuts,select=c("V1","i790_TP","i790_TN"))
names(nut790)[names(nut790)==c("V1", "i790_TP","i790_TN")] <- c("time","TP790","TN790")
nut790$time<-as.character(nut790$time)
nut790$time<-as.POSIXct(nut790$time, format="%Y-%m-%d")
nut790$time<-as.POSIXct(nut790$time, format="%Y")
i790 <- i790 %>% left_join(nut790)
i830<-alldata[which(alldata$stream_id==830),]
i830<- subset(i830, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(i830)[names(i830)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOW830","TEMP830")
nut830<-subset(nuts,select=c("V1","i830_TP","i830_TN"))
names(nut830)[names(nut830)==c("V1", "i830_TP","i830_TN")] <- c("time","TP830","TN830")
nut830$time<-as.character(nut830$time)
nut830$time<-as.POSIXct(nut830$time, format="%Y-%m-%d")
nut830$time<-as.POSIXct(nut830$time, format="%Y")
i830 <- i830 %>% left_join(nut830)
i788<-alldata[which(alldata$stream_id==788),]
i788<- subset(i788, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(i788)[names(i788)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOW788","TEMP788")
nut788<-subset(nuts,select=c("V1","i788_TP","i788_TN"))
names(nut788)[names(nut788)==c("V1", "i788_TP","i788_TN")] <- c("time","TP788","TN788")
nut788$time<-as.character(nut788$time)
nut788$time<-as.POSIXct(nut788$time, format="%Y-%m-%d")
nut788$time<-as.POSIXct(nut788$time, format="%Y")
i788 <- i788 %>% left_join(nut788)
i510<-alldata[which(alldata$stream_id==510),]
i510<- subset(i510, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(i510)[names(i510)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOW510","TEMP510")
nut510<-subset(nuts,select=c("V1","i510_TP","i510_TN"))
names(nut510)[names(nut510)==c("V1", "i510_TP","i510_TN")] <- c("time","TP510","TN510")
nut510$time<-as.character(nut510$time)
nut510$time<-as.POSIXct(nut510$time, format="%Y-%m-%d")
nut510$time<-as.POSIXct(nut510$time, format="%Y")
i510 <- i510 %>% left_join(nut510)
i540<-alldata[which(alldata$stream_id==540),]
i540<- subset(i540, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(i540)[names(i540)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOW540","TEMP540")
nut540<-subset(nuts,select=c("V1","i540_TP","i540_TN"))
names(nut540)[names(nut540)==c("V1", "i540_TP","i540_TN")] <- c("time","TP540","TN540")
nut540$time<-as.character(nut540$time)
nut540$time<-as.POSIXct(nut540$time, format="%Y-%m-%d")
nut540$time<-as.POSIXct(nut540$time, format="%Y")
i540 <- i540 %>% left_join(nut540)
#
i800<-alldata[which(alldata$stream_id==800),]
i800<- subset(i800, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(i800)[names(i800)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOW800","TEMP800")
nut800<-subset(nuts,select=c("V1","i800_TP","i800_TN"))
names(nut800)[names(nut800)==c("V1", "i800_TP","i800_TN")] <- c("time","TP800","TN800")
nut800$time<-as.character(nut800$time)
nut800$time<-as.POSIXct(nut800$time, format="%Y-%m-%d")
nut800$time<-as.POSIXct(nut800$time, format="%Y")
i800 <- i800 %>% left_join(nut800)
i835<-alldata[which(alldata$stream_id==835),]
i835<- subset(i835, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(i835)[names(i835)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOW835","TEMP835")
nut835<-subset(nuts,select=c("V1","i835_TP","i835_TN"))
names(nut835)[names(nut835)==c("V1", "i835_TP","i835_TN")] <- c("time","TP835","TN835")
nut835$time<-as.character(nut835$time)
nut835$time<-as.POSIXct(nut835$time, format="%Y-%m-%d")
nut835$time<-as.POSIXct(nut835$time, format="%Y")
i835 <- i835 %>% left_join(nut835)
i805<-alldata[which(alldata$stream_id==805),]
i805<- subset(i805, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(i805)[names(i805)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOW805","TEMP805")
nut805<-subset(nuts,select=c("V1","i805_TP","i805_TN"))
names(nut805)[names(nut805)==c("V1", "i805_TP","i805_TN")] <- c("time","TP805","TN805")
nut805$time<-as.character(nut805$time)
nut805$time<-as.POSIXct(nut805$time, format="%Y-%m-%d")
nut805$time<-as.POSIXct(nut805$time, format="%Y")
i805 <- i805 %>% left_join(nut805)
i665<-alldata[which(alldata$stream_id==665),]
i665<- subset(i665, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(i665)[names(i665)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOW665","TEMP665")
nut665<-subset(nuts,select=c("V1","i670_TP","i670_TN"))
names(nut665)[names(nut665)==c("V1", "i670_TP","i670_TN")] <- c("time","TP665","TN665")
nut665$time<-as.character(nut665$time)
nut665$time<-as.POSIXct(nut665$time, format="%Y-%m-%d")
nut665$time<-as.POSIXct(nut665$time, format="%Y")
i665 <- i665 %>% left_join(nut665)
i760<-alldata[which(alldata$stream_id==760),]
i760<- subset(i760, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(i760)[names(i760)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOW760","TEMP760")
nut760<-subset(nuts,select=c("V1","i760_TP","i760_TN"))
names(nut760)[names(nut760)==c("V1", "i760_TP","i760_TN")] <- c("time","TP760","TN760")
nut760$time<-as.character(nut760$time)
nut760$time<-as.POSIXct(nut760$time, format="%Y-%m-%d")
nut760$time<-as.POSIXct(nut760$time, format="%Y")
i760 <- i760 %>% left_join(nut760)
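# The eleven per-stream blocks above repeat one pattern; a helper like this
# sketch could replace them (hypothetical -- not used below; it assumes the
# "i<ID>_TP" / "i<ID>_TN" nutrient column convention, with nut_id covering
# cases, like stream 665, whose nutrient columns use a different site ID):
prep_inflow <- function(id, nut_id = id) {
  flow <- alldata[which(alldata$stream_id == id), ]
  flow <- subset(flow, select = c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
  names(flow) <- c("time", paste0("FLOW", id), paste0("TEMP", id))
  nut <- subset(nuts, select = c("V1", paste0("i", nut_id, "_TP"), paste0("i", nut_id, "_TN")))
  names(nut) <- c("time", paste0("TP", id), paste0("TN", id))
  nut$time <- as.POSIXct(as.character(nut$time), format = "%Y-%m-%d")
  flow %>% left_join(nut)
}
# e.g. prep_inflow(505), or prep_inflow(665, nut_id = 670)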
nuts<-nuts%>%mutate(aveTP=(i505_TP+i790_TP+i830_TP+i788_TP+i510_TP+i540_TP+i800_TP+
i835_TP+i805_TP+i670_TP+i760_TP)/11)
nuts<-nuts%>%mutate(aveTN=(i505_TN+i790_TN+i830_TN+i788_TN+i510_TN+i540_TN+i800_TN+
i835_TN+i805_TN+i670_TN+i760_TN)/11)
test<-subset(nuts,select=c("V1","aveTP","aveTN"))
iUNG<-alldata[which(alldata$stream_id=="ung"),]
iUNG<- subset(iUNG, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(iUNG)[names(iUNG)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOWung","TEMPung")
#nutUNG<-test #!!!14June19 testing how to reduce nutrient concentration for overall inflow concentration being ~3x epi concentration
nutUNG<-subset(nuts,select=c("V1","i505_TP","i505_TN"))
#names(nutUNG)[names(nutUNG)==c("V1","aveTP","aveTN")] <- c("time","TPung","TNung")
names(nutUNG)[names(nutUNG)==c("V1","i505_TP","i505_TN")] <- c("time","TPung","TNung")
nutUNG$time<-as.character(nutUNG$time)
nutUNG$time<-as.POSIXct(nutUNG$time, format="%Y-%m-%d")
nutUNG$time<-as.POSIXct(nutUNG$time, format="%Y")
iUNG <- iUNG %>% left_join(nutUNG)
iBAL<-alldata[which(alldata$stream_id=="bal"),]
iBAL<- subset(iBAL, select=c("date", "modelinflow_m3ps", "ModStreamTemp_degC"))
names(iBAL)[names(iBAL)==c("date", "modelinflow_m3ps", "ModStreamTemp_degC")] <- c("time","FLOWBAL","TEMPBAL")
nutBAL<-test
names(nutBAL)[names(nutBAL)==c("V1","aveTP","aveTN")] <- c("time","TPBAL","TNBAL")
nutBAL$time<-as.character(nutBAL$time)
nutBAL$TPBAL<-0.0001
nutBAL$TNBAL<-0.0001
nutBAL$time<-as.POSIXct(nutBAL$time, format="%Y-%m-%d")
nutBAL$time<-as.POSIXct(nutBAL$time, format="%Y")
iBAL <- iBAL %>% left_join(nutBAL)
iALL <- iUNG %>% left_join(iBAL)%>%left_join(i505)%>% left_join(i790) %>%
left_join(i830) %>% left_join(i788) %>% left_join(i510) %>% left_join(i540) %>% left_join(i800) %>%
left_join(i835) %>% left_join(i805)%>% left_join(i665)%>% left_join(i760)
iALL<-iALL%>%mutate(totFLOW=FLOWung+FLOWBAL+FLOW505+FLOW790+FLOW830+FLOW788+FLOW510+FLOW540+FLOW800+
FLOW835+FLOW805+FLOW665+FLOW760)
iALL<-iALL%>%mutate(vwsTP=(TPung*FLOWung/totFLOW)+(TPBAL*FLOWBAL/totFLOW)+(TP505*FLOW505/totFLOW)+(TP790*FLOW790/totFLOW)+
(TP830*FLOW830/totFLOW)+(TP788*FLOW788/totFLOW)+(TP510*FLOW510/totFLOW)+
(TP540*FLOW540/totFLOW)+(TP800*FLOW800/totFLOW)+
(TP835*FLOW835/totFLOW)+
(TP805*FLOW805/totFLOW)+(TP665*FLOW665/totFLOW)+(TP760*FLOW760/totFLOW))
iALL<-iALL%>%mutate(vwsTN=(TNung*FLOWung/totFLOW)+(TNBAL*FLOWBAL/totFLOW)+(TN505*FLOW505/totFLOW)+(TN790*FLOW790/totFLOW)+
(TN830*FLOW830/totFLOW)+(TN788*FLOW788/totFLOW)+(TN510*FLOW510/totFLOW)+
(TN540*FLOW540/totFLOW)+(TN800*FLOW800/totFLOW)+
(TN835*FLOW835/totFLOW)+
(TN805*FLOW805/totFLOW)+(TN665*FLOW665/totFLOW)+(TN760*FLOW760/totFLOW))
iALL<-iALL%>%mutate(vwsTEMP=(TEMPung*FLOWung/totFLOW)+(TEMPBAL*FLOWBAL/totFLOW)+(TEMP505*FLOW505/totFLOW)+(TEMP790*FLOW790/totFLOW)+
(TEMP830*FLOW830/totFLOW)+(TEMP788*FLOW788/totFLOW)+(TEMP510*FLOW510/totFLOW)+
(TEMP540*FLOW540/totFLOW)+(TEMP800*FLOW800/totFLOW)+
(TEMP835*FLOW835/totFLOW)+
(TEMP805*FLOW805/totFLOW)+(TEMP665*FLOW665/totFLOW)+(TEMP760*FLOW760/totFLOW))
iALL<-iALL%>%mutate(SALT=0)
oneINFLOW<-subset(iALL,select=c("time","totFLOW","SALT","vwsTEMP","vwsTP","vwsTN"))
names(oneINFLOW)[names(oneINFLOW)==c("time","totFLOW","SALT","vwsTEMP","vwsTP","vwsTN")] <-
c("time","FLOW","SALT","TEMP","TP","TN")
test<-oneINFLOW[which(oneINFLOW$time>"2004-12-26"),]
test<-test[which(test$time<"2011-01-01"),]
mean(test$TP,na.rm=TRUE)*31/1000
mean(test$TN,na.rm=TRUE)*14/1000
oneINFLOW<-oneINFLOW%>%mutate(OGM_doc=125)%>%mutate(OGM_poc=12.5)%>%mutate(OGM_don=0.4*TN)%>%
mutate(NIT_nit=0.1*TN)%>%mutate(NIT_amm=0.1*TN)%>%mutate(OGM_pon=0.4*TN)%>%
mutate(PHS_frp=0.0295*TP)%>%mutate(OGM_dop=0.1435*TP)%>%mutate(OGM_pop=0.327*TP)%>%
mutate(PHS_frp_ads=0.5*TP)
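# the N fractions (0.4 + 0.1 + 0.1 + 0.4) and the P fractions
# (0.0295 + 0.1435 + 0.327 + 0.5) each sum to 1, so TN and TP are fully
# partitioned among the constituent pools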
oneINFLOW<-subset(oneINFLOW,select=c("time","FLOW","SALT","TEMP","OGM_doc","OGM_poc",
"OGM_don","NIT_nit","NIT_amm","OGM_pon","PHS_frp","OGM_dop",
"OGM_pop","PHS_frp_ads"))
write.csv(oneINFLOW,paste0("./data/oneInflow", Sys.Date(), ".csv"), row.names = FALSE, quote = FALSE)
|
48702b58e038be4cadb781cc8cc20d8b98872234 | 6df5c96f99b722b3cd5cd0b1f44f122072fad24b | /code/Unchecked/function_ah2sp.R | ca876b3f37224bcd443585e7c88e61317679e41d | [
"MIT"
] | permissive | Otoliths/Floristic-Kingdoms | 95d2cc6caaa25200eee3427819bf5c37f2394fc1 | 7cb66545dffac44150f292dea482a6ed99213dda | refs/heads/main | 2023-04-09T01:12:42.893686 | 2021-04-22T19:17:01 | 2021-04-22T19:17:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,520 | r | function_ah2sp.R | ah2sp <- function(x, increment=360, rnd=10, proj4string=CRS(as.character(NA)),tol=1e-4) {
if (!inherits(x, "ahull")) {
stop("x needs to be an ahull class object")
}
# Extract the edges from the ahull object as a dataframe
xdf <- as.data.frame(x$arcs)
#correct for possible arc order strangeness (Pascal Title addition 29 Nov 2013)
k <- 1
xdf <- cbind(xdf, flip = rep(FALSE, nrow(xdf)))
repeat{
if (is.na(xdf[k+1, 'end1'])) {
break
}
#cat(k, '\n')
if (xdf[k,'end2'] == xdf[k+1,'end1']) {
#cat('case 1\n')
k <- k + 1
} else if (xdf[k,'end2'] != xdf[k+1,'end1'] & !xdf[k,'end2'] %in% xdf$end1[k+1:nrow(xdf)] & !xdf[k,'end2'] %in% xdf$end2[k+1:nrow(xdf)]) {
#cat('case 2\n')
k <- k + 1
} else if (xdf[k,'end2'] != xdf[k+1,'end1'] & xdf[k,'end2'] %in% xdf$end1[k+1:nrow(xdf)] & !xdf[k,'end2'] %in% xdf$end2[k+1:nrow(xdf)]) {
#cat('case 3\n')
m <- which(xdf$end1[k+1:nrow(xdf)] == xdf[k,'end2']) + k
xdf <- rbind(xdf[1:k,],xdf[m,],xdf[setdiff((k+1):nrow(xdf),m),])
} else if (xdf[k,'end2'] != xdf[k+1,'end1'] & !xdf[k,'end2'] %in% xdf$end1[k+1:nrow(xdf)] & xdf[k,'end2'] %in% xdf$end2[k+1:nrow(xdf)]) {
#cat('case 4\n')
m <- which(xdf$end2[k+1:nrow(xdf)] == xdf[k,'end2']) + k
tmp1 <- xdf[m,'end1']
tmp2 <- xdf[m,'end2']
xdf[m,'end1'] <- tmp2
xdf[m,'end2'] <- tmp1
xdf[m,'flip'] <- TRUE
xdf <- rbind(xdf[1:k,], xdf[m,], xdf[setdiff((k+1):nrow(xdf), m),])
} else {
k <- k + 1
}
}
# Remove all cases where the coordinates are all the same
xdf <- subset(xdf, xdf$r > 0)
res <- NULL
if (nrow(xdf) > 0) {
# Convert each arc to a line segment
linesj <- list()
prevx <- NULL
prevy <- NULL
j <- 1
for(i in 1:nrow(xdf)) {
rowi <- xdf[i,]
v <- c(rowi$v.x, rowi$v.y)
theta <- rowi$theta
r <- rowi$r
cc <- c(rowi$c1, rowi$c2)
# Arcs need to be redefined as strings of points. Work out the number of points to allocate in this arc segment.
ipoints <- 2 + round(increment * (rowi$theta / 2), 0)
# Calculate coordinates from arc() description for ipoints along the arc.
angles <- alphahull::anglesArc(v, theta)
if (rowi['flip'] == TRUE){ angles <- rev(angles) }
seqang <- seq(angles[1], angles[2], length = ipoints)
x <- round(cc[1] + r * cos(seqang),rnd)
y <- round(cc[2] + r * sin(seqang),rnd)
# Check for line segments that should be joined up and combine their coordinates
if (is.null(prevx)) {
prevx <- x
prevy <- y
# added numerical precision fix (Pascal Title Dec 9 2013)
} else if ((x[1] == round(prevx[length(prevx)],rnd) | abs(x[1] - prevx[length(prevx)]) < tol) && (y[1] == round(prevy[length(prevy)],rnd) | abs(y[1] - prevy[length(prevy)]) < tol)) {
if (i == nrow(xdf)){
#We have got to the end of the dataset
prevx <- append(prevx ,x[2:ipoints])
prevy <- append(prevy, y[2:ipoints])
prevx[length(prevx)] <- prevx[1]
prevy[length(prevy)] <- prevy[1]
coordsj <- cbind(prevx,prevy)
colnames(coordsj) <- NULL
# Build as Line and then Lines class
linej <- Line(coordsj)
linesj[[j]] <- Lines(linej, ID = as.character(j))
} else {
prevx <- append(prevx, x[2:ipoints])
prevy <- append(prevy, y[2:ipoints])
}
} else {
# We have got to the end of a set of lines, and there are several such sets, so convert the whole of this one to a line segment and reset.
prevx[length(prevx)] <- prevx[1]
prevy[length(prevy)] <- prevy[1]
coordsj <- cbind(prevx,prevy)
colnames(coordsj)<-NULL
# Build as Line and then Lines class
linej <- Line(coordsj)
linesj[[j]] <- Lines(linej, ID = as.character(j))
j <- j + 1
prevx <- NULL
prevy <- NULL
}
}
#Drop lines that will not produce adequate polygons (Pascal Title addition 9 Dec 2013)
badLines <- vector()
for (i in 1:length(linesj)){
if (nrow(linesj[[i]]@Lines[[1]]@coords) < 4){
badLines <- c(badLines,i)
}
}
if (length(badLines) > 0){linesj <- linesj[-badLines]}
# Promote to SpatialLines
lspl <- SpatialLines(linesj)
# Convert lines to polygons
# Pull out Lines slot and check which lines have start and end points that are the same
lns <- slot(lspl, "lines")
polys <- sapply(lns, function(x) {
crds <- slot(slot(x, "Lines")[[1]], "coords")
identical(crds[1, ], crds[nrow(crds), ])
})
# Select those that do and convert to SpatialPolygons
polyssl <- lspl[polys]
list_of_Lines <- slot(polyssl, "lines")
sppolys <- SpatialPolygons(list(Polygons(lapply(list_of_Lines, function(x) { Polygon(slot(slot(x, "Lines")[[1]], "coords")) }), ID = "1")), proj4string=proj4string)
# Create a set of ids in a dataframe, then promote to SpatialPolygonsDataFrame
hid <- sapply(slot(sppolys, "polygons"), function(x) slot(x, "ID"))
areas <- sapply(slot(sppolys, "polygons"), function(x) slot(x, "area"))
df <- data.frame(hid,areas)
names(df) <- c("HID","Area")
rownames(df) <- df$HID
res <- SpatialPolygonsDataFrame(sppolys, data=df)
res <- res[which(res@data$Area > 0),]
}
return(res)
} |
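# Example usage (a sketch; requires the 'sp' and 'alphahull' packages, with
# 'pts' a hypothetical two-column matrix of point coordinates):
# library(sp); library(alphahull)
# pts <- matrix(runif(200), ncol = 2)
# hull <- ahull(pts, alpha = 0.3)
# poly <- ah2sp(hull)
# if (!is.null(poly)) plot(poly)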
f4e97f28e8d553b76578df0fc86efb42552c5d8d | 028d02e227415930b1e42ac363a98e7ecba8a493 | /Text_Word_Vec_Analysis.R | ead4922bdf1f99b7a5eef1da041b97daa31e2f9a | [] | no_license | SaraJKerr/Letters_1916_Internship | 8dc3f7dba97013a4f916ab5eac4693e0ec12a52c | 90a6f1f70f13791154a76be7b6d4129a7999b672 | refs/heads/master | 2021-01-24T11:18:29.122999 | 2017-07-05T15:40:49 | 2017-07-05T15:40:49 | 70,236,628 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,041 | r | Text_Word_Vec_Analysis.R | ################################################################################
# File-Name: Text_Word_Vec_Analysis.R #
# Date: 1 September 2016 #
# Author: Sara J Kerr #
# ORCID:orcid.org/0000-0002-2322-1178 #
# Purpose: Word2Vec creation, analysis and visualisation #
# Based on: https://github.com/bmschmidt/wordVectors/tree/master/R #
# http://www.codeproject.com/Tips/788739/Visualization-of-High- #
# Dimensional-Data-using-t-SNE #
# Matthew L. Jockers (2014) Text Analysis with R #
# https://eight2late.wordpress.com/2015/12/02/a-gentle-introduction- #
# to-network-graphs-using-r-and-gephi/ #
# http://kateto.net/networks-r-igraph #
# #
# Data Used: Plain text file of combined texts #
# Packages Used: wordVectors, tsne, Rtsne, magrittr, ggplot2, ggrepel,       #
#                stringi, igraph, visNetwork                                 #
# Input: folder of plain text files #
# Output: VSMs, csv files, wordlists, t-SNE plots, html network graphs #
# Last Updated: 8th May 2017 #
################################################################################
# w2v_train uses train_word2vec and allows several variables to be used
# to create 4 VSM, one based on the default settings, one based on Baroni, Dinu
# and Kruszewski's (2014) suggestions for predictive models, and one with a larger
# window and negative samples also per Baroni et al.
# train_word2vec takes several parameters - an input prepared .txt file, an
# output file, vectors are the number of dimensions the default is 100, and
# window is the number of words either side of the context word, by default
# the function uses skip-gram this can be changed by including cbow = 1
# text <- "Processed_Files/Letters_corpus.txt"
w2v_train <- function(text) {
train_word2vec(text, output= paste0(config_results_folderpath, "/W2V/Let_default.bin"),
threads = 2, vectors = 100, window = 12,
negative_samples = 5)
train_word2vec(text, output = paste0(config_results_folderpath, "/W2V/Let_win5.bin"),
threads = 2, vectors = 400, window = 5,
negative_samples = 10)
train_word2vec(text, output = paste0(config_results_folderpath, "/W2V/Let_win2.bin"),
threads = 2, vectors = 300, window = 2,
negative_samples = 10)
train_word2vec(text, output = paste0(config_results_folderpath, "/W2V/Let_win15.bin"),
threads = 2, vectors = 300, window = 15,
negative_samples = 10)
}
################################################################################
# text_kwic takes in a .txt file checks whether a target word is present and,
# if so, creates a dataframe with text name and keyword in context and saves it.
text_kwic <- function(files, input, word, context) {
corpus <- make_word_list(files, input)
context <- as.numeric(context)
keyword <- tolower(word)
result <- NULL
# create the KWIC readout
for (i in 1:length(corpus)) {
hits <- which(corpus[[i]] == keyword)
let <- files[i]
if(length(hits) > 0){
for(j in 1:length(hits)) {
start <- hits[j] - context
if(start < 1) {
start <- 1
}
end <- hits[j] + context
myrow <- cbind(let, hits[j],
paste(corpus[[i]][start: (hits[j] -1)],
collapse = " "),
paste(corpus[[i]][hits[j]],
collapse = " "),
paste(corpus[[i]][(hits[j] +1): end],
collapse = " "))
result <- rbind(result, myrow)
}
} else {
z <- paste0(let, " YOUR KEYWORD WAS NOT FOUND\n")
cat(z)
}
}
colnames(result) <- c("file", "position", "left",
"keyword", "right")
write.csv(result, paste0(config_results_folderpath, "/KWIC/", word, "_",
context, ".csv"))
cat("Your results have been saved")
}
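# Example call (a sketch; the directory and keyword are hypothetical):
# files <- list.files("Processed_Files", pattern = "\\.txt$")
# text_kwic(files, "Processed_Files", word = "rising", context = 5)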
# Function used within text_kwic
make_word_list <- function(files, input.dir) {
# create an empty list for the results
word_list <- list()
# read in the files and process them
for(i in 1:length(files)) {
text <- scan(paste(input.dir, files[i], sep = "/"),
what = "character", sep = "\n")
text <- paste(text, collapse = " ")
text_lower <- tolower(text)
text_words <- strsplit(text_lower, "\\W")
text_words <- unlist(text_words)
text_words <- text_words[which(text_words != "")]
word_list[[files[i]]] <- text_words
}
return(word_list)
}
################################################################################
# w2v_analysis2 analyses a chosen term in a vector space model
# The function takes 6 arguments:
# vsm - a vector space model
# words - a character vector of focus words
# seed - an integer
# path - the path to the folder you want files saved to
# ref_name - the reference name for the exported files
# num - the number of nearest words you wish to examine
# The function will create a vector which is the average of the words input and
# will output a wordlist of the n nearest words, a csv of the words and their
# positions, and a plot of the 2D reduction of the vector space
# model using the Barnes-Hut implementation of t-SNE. The points for each word
# are marked in red so the labels can be moved by ggrepel for ease of reading.
# An HTML network graph for the chosen word will also be created
# set.seed is used to ensure replicability
w2v_analysis2 <- function(vsm, words, seed, path, ref_name, num) {
# Set the seed
if (!missing(seed))
set.seed(seed)
# Identify the nearest 10 words to the average vector of search terms
ten <- nearest_to(vsm, vsm[[words]])
# Identify the nearest n words to the average vector of search terms and
# save as a .txt file
main <- nearest_to(vsm, vsm[[words]], num)
wordlist <- names(main)
filepath <- paste0(path, ref_name)
write(wordlist, paste0(filepath, ".txt"))
# Create a subset vector space model
new_model <- vsm[[wordlist, average = F]]
# Run Rtsne to reduce new VSM to 2D (Barnes-Hut)
reduction <- Rtsne(as.matrix(new_model), dims = 2, initial_dims = 50,
perplexity = 30, theta = 0.5, check_duplicates = F,
pca = F, max_iter = 1000, verbose = F,
is_distance = F, Y_init = NULL)
# Extract Y (positions for plot) as a dataframe and add row names
df <- as.data.frame(reduction$Y)
rows <- rownames(new_model)
rownames(df) <- rows
# Save dataframe as .csv file
write.csv(df, paste0(filepath, ".csv"))
# Create t-SNE plot and save as jpeg
ggplot(df) +
geom_point(aes(x = V1, y = V2), color = "red") +
geom_text_repel(aes(x = V1, y = V2, label = rownames(df),
size = 8)) +
xlab("Dimension 1") +
ylab("Dimension 2 ") +
theme_bw(base_size = 16) +
theme(legend.position = "none") +
ggtitle(paste0("2D reduction of VSM ", ref_name, " using t_SNE"))
ggsave(paste0(ref_name, ".jpeg"), path = path, width = 24,
height = 18, dpi = 100)
# Create a network plot of the words
sim <- cosineSimilarity(new_model, new_model) %>% round(2)
# convert those below threshold to 0
sim[sim < max(sim)/2] <- 0
g <- graph_from_incidence_matrix(sim)
# Create a graph object
edges <- get.edgelist(g)
# Name columns
colnames(edges) <- c("from", "to")
g2 <- graph(edges = edges)
g2 <- simplify(g2) # removes loops
# Community detection based on greedy optimization of modularity
cfg <- cluster_fast_greedy(as.undirected(g2))
V(g2)$community <- cfg$membership
pal2 <- rainbow(33, alpha = 0.7)
V(g2)$color <- pal2[V(g2)$community]
data <- toVisNetworkData(g2)
visNetwork(nodes = data$nodes, edges = data$edges, main = "Network of Letters
Clustered by Fast Greedy") %>%
visOptions(highlightNearest = T,
selectedBy = "community") %>%
visSave(paste0(ref_name, ".html"))
new_list <- list("Ten nearest" = ten, "Status" = "Analysis Complete")
return(new_list)
}
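# Example call (a sketch; the model file, seed and focus word are hypothetical):
# model <- read.vectors(paste0(config_results_folderpath, "/W2V/Let_win5.bin"))
# w2v_analysis2(model, words = c("rising"), seed = 42,
#               path = paste0(config_results_folderpath, "/W2V/"),
#               ref_name = "rising", num = 100)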
|
4d99c4dc7320850f835fdc5d2048c33f3cf769a6 | ebbe08d58a57ae2e9d308a12df500e1e0ef8d098 | /wgk/AllSample.R | 8e854561f81e0c315d71ed54a7d1dde4c55b5b64 | [] | no_license | Drizzle-Zhang/bioinformatics | a20b8b01e3c6807a9b6b605394b400daf1a848a3 | 9a24fc1107d42ac4e2bc37b1c866324b766c4a86 | refs/heads/master | 2022-02-19T15:57:43.723344 | 2022-02-14T02:32:47 | 2022-02-14T02:32:47 | 171,384,799 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 16,291 | r | AllSample.R | setwd('/home/zy/my_git/bioinformatics/wgk')
.libPaths('/home/yzj/R/x86_64-pc-linux-gnu-library/4.0')
library(Seurat)
library(ggplot2)
require("RColorBrewer")
file.all <- '/home/yzj/JingMA_NEW/res/Harmony/ALL/RDS/PBMC_harmony.RDS'
seurat.all <- readRDS(file.all)
path.data <- '/home/disk/drizzle/wgk/data/AllSample_2_merge/'
DimPlot(seurat.all, group.by = "batch")
status <- rep('0', length(seurat.all$batch))
status[seurat.all$batch %in% c('C1', 'C2', 'C3', 'C4', 'C5')] <- 'Normal'
status[seurat.all$batch %in% c('M1', 'M2', 'M3')] <- 'Microtia'
seurat.all$status <- status
DimPlot(seurat.all, group.by = "status")
seurat.all <- FindNeighbors(seurat.all, reduction = "pca", dims = 1:100)
seurat.all <- FindClusters(seurat.all, resolution = 3)
DimPlot(seurat.all, group.by = "RNA_snn_res.3", label = T)
FeaturePlot(seurat.all, features = c('ID3', 'HES1', 'COL1A1', 'CYTL1'))
FeaturePlot(seurat.all, features = c('DCN', 'STC1', 'COL9A3', 'FRZB'))
# unknown 23 28
FeaturePlot(seurat.all, features = c('IFIT1', 'OAS3', 'XAF1', 'OAS1'))
# neuron 27
FeaturePlot(seurat.all, features = c('PLP1', 'SLITRK6', 'ANK3', 'CIT'))
# commu stromal 10
FeaturePlot(seurat.all, features = c('CFD', 'APOE', 'VCAN', 'CSF3'))
# matrix stromal 3
FeaturePlot(seurat.all, features = c('MMP10', 'COCH', 'OGN', 'COMP'))
# stromal stem cell 22
FeaturePlot(seurat.all, features = c('HES1', 'ID3', 'COL1A1', 'LUM'))
# Chondral stem cell 9 21
FeaturePlot(seurat.all, features = c('HES1', 'ID3', 'CYTL1', 'COL2A1'))
# Transitional chondrocyte 13 17
FeaturePlot(seurat.all, features = c('HES1', 'ID3', 'CYTL1', 'COL2A1'))
# Chondrocyte1 1 2 5 8 12 14 15 26
FeaturePlot(seurat.all, features = c('COL1A1', 'COL1A2', 'COL2A1'), ncol=3)
# Chondrocyte2 0 4 6 7 11 16
FeaturePlot(seurat.all, features = c('STC1'))
# filter
seurat.all.filter <- subset(seurat.all,
subset = RNA_snn_res.2 %in%
setdiff(unique(seurat.all$RNA_snn_res.2), c(23, 27, 28)))
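# (note: this filter uses the pre-computed RNA_snn_res.2 labels, while the
# cluster annotations above were inspected at resolution 3)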
seurat.all.filter <- FindNeighbors(seurat.all.filter, reduction = "pca", dims = 1:100)
seurat.all.filter <- FindClusters(seurat.all.filter, resolution = 1.5)
DimPlot(seurat.all.filter, group.by = "RNA_snn_res.2", label = T)
cluster_3 <- seurat.all$RNA_snn_res.3
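# some cluster IDs appear in more than one line below; later assignments
# overwrite earlier ones, so line order matters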
celltypes <- rep('_', length(cluster_3))
celltypes[cluster_3 %in% c(22, 25)] <- 'Stromal cell1'
celltypes[cluster_3 %in% c(20, 21, 29)] <- 'Stromal cell2'
celltypes[cluster_3 %in% c(27)] <- 'Stromal stem cell'
celltypes[cluster_3 %in% c(18, 23, 33, 35)] <- 'Chondral stem cell'
celltypes[cluster_3 %in% c(12, 16, 25, 9, 18)] <- 'Transitional chondrocyte'
celltypes[cluster_3 %in% c(10, 16, 1, 5, 15, 12, 17, 4, 6, 13, 8)] <- 'Chondrocyte1'
celltypes[cluster_3 %in% c(0, 3, 7, 11, 14, 9, 19, 26, 30, 38)] <- 'Chondrocyte2'
celltypes[cluster_3 %in% c(28, 36, 24)] <- 'Perivascular cell'
celltypes[cluster_3 %in% c(32, 37, 41)] <- 'Endothelial cell'
celltypes[cluster_3 %in% c(39)] <- 'Immune cell'
seurat.all$celltype_3 <- celltypes
DimPlot(seurat.all, group.by = "celltype_3", label = T)
# cell type
cluster_2 <- seurat.all.filter$RNA_snn_res.2
celltypes <- rep('_', length(cluster_2))
celltypes[cluster_2 %in% c(10)] <- 'Stromal cell1'
celltypes[cluster_2 %in% c(3)] <- 'Stromal cell2'
celltypes[cluster_2 %in% c(22)] <- 'Stromal stem cell'
celltypes[cluster_2 %in% c(9, 21)] <- 'Chondral stem cell'
celltypes[cluster_2 %in% c(13, 17, 14, 5)] <- 'Transitional chondrocyte'
celltypes[cluster_2 %in% c(1, 2, 5, 8, 12, 14, 15, 26)] <- 'Chondrocyte1'
celltypes[cluster_2 %in% c(0, 4, 6, 7, 11, 16)] <- 'Chondrocyte2'
celltypes[cluster_2 %in% c(19, 20, 24)] <- 'Perivascular cell'
celltypes[cluster_2 %in% c(18)] <- 'Endothelial cell'
celltypes[cluster_2 %in% c(25)] <- 'Immune cell'
table(seurat.all.filter$batch, celltypes)/as.vector(table(seurat.all.filter$batch))
seurat.all.filter$celltype <- celltypes
DimPlot(seurat.all.filter, group.by = "celltype", label = T)
file.merge_2 <- paste0(path.data, 'seurat_celltype.Rdata')
saveRDS(seurat.all.filter, file.merge_2)
seurat.all.filter <- readRDS(file.merge_2)
# cell marker
clusters <- unique(seurat.all.filter$celltype)
list.marker.all <- list()
for (cluster in clusters) {
sub.markers <- FindMarkers(seurat.all.filter, ident.1 = cluster, group.by = 'celltype',
logfc.threshold = 0.25, min.diff.pct = 0.05, only.pos = T)
list.marker.all[[cluster]] <- sub.markers
}
# surface marker
file.surface <- '/home/disk/drizzle/wgk/public_data/surface_marker_wlab.txt'
df.surface <- read.delim2(file.surface)
surface.genes <- unique(toupper(df.surface$ENTREZ.gene.symbol))
# chon and stroma lineage marker genes
seurat.CS <- subset(seurat.all.filter, subset =
celltype %in% c('Stromal cell1', 'Stromal cell2', 'Stromal stem cell',
'Chondral stem cell', 'Transitional chondrocyte',
'Chondrocyte1', 'Chondrocyte2'))
clusters <- unique(seurat.CS$celltype)
list.marker <- list()
for (cluster in clusters) {
sub.markers <- FindMarkers(seurat.CS, ident.1 = cluster, group.by = 'celltype',
logfc.threshold = 0.3, min.diff.pct = 0, only.pos = T)
list.marker[[cluster]] <- sub.markers
}
View(list.marker$`Chondral stem cell`)
View(list.marker$`Stromal stem cell`)
intersect(rownames(list.marker$`Chondral stem cell`), surface.genes)
intersect(rownames(list.marker$`Stromal stem cell`), surface.genes)
genes.chon <- c("CD83", "TSPAN6", 'ENPP1', 'ITM2B', 'CD99', 'TSPAN4',
'SCARA3', 'BOC', 'PTPRZ1', 'MXRA8', 'FGFR3', 'AQP1',
'CNTFR', 'CADM1', 'SLC29A1', 'LRP1', 'CD46', 'A2M', 'ITGA10')
genes.stroma <- c("GPNMB", "LRRC32", 'ABCA8', 'SCARA5', 'THY1', 'SLC2A3',
'MXRA8', 'PDGFRB', 'LEPR', 'SLC38A2', 'ANK2', 'PCDH9',
'CLEC2B', 'GPRC5A', 'PRNP', 'SLC20A1', 'CERCAM', 'BOC', 'FAP',
'PLP2', 'CD276', 'AQP1', 'ABCA1', 'ITM2B', 'BMPR2', 'TSPAN4',
'SGCE', 'CD83', 'EMP1', 'SLC19A2', 'F3', 'TMEM2', 'FCGRT',
'LRP1', 'IL1R1')
genes.CS <- unique(c(genes.chon, genes.stroma))
celltypes <- unique(seurat.all.filter$celltype)
df.bubble <- data.frame(stringsAsFactors = F)
for (cell in celltypes) {
sub.seurat <- subset(seurat.all.filter, subset = celltype == cell)
sub.mat <- sub.seurat@assays$RNA@data
for (gene in genes.CS) {
vec.exp <- sub.mat[gene,]
mean.exp <- mean(vec.exp)
prop.exp <- sum(vec.exp != 0)/length(vec.exp)
df.bubble <- rbind(df.bubble,
data.frame(Cell = cell, Gene = gene,
MeanExp = mean.exp, Prop = prop.exp))
}
}
mat.exp <- reshape2::dcast(df.bubble, Cell ~ Gene, value.var = 'MeanExp')
row.names(mat.exp) <- mat.exp$Cell
mat.exp$Cell <- NULL
mat.exp.scale <- scale(mat.exp)
df.exp.scale <- reshape2::melt(mat.exp.scale)
names(df.exp.scale) <- c('Cell', 'Gene', 'ScaleExp')
df.bubble <- merge(df.bubble, df.exp.scale, by = c('Cell', 'Gene'))
df.bubble$Cell <- factor(df.bubble$Cell,
levels = c('Stromal cell1', 'Stromal cell2', 'Stromal stem cell',
'Chondral stem cell', 'Transitional chondrocyte',
'Chondrocyte1', 'Chondrocyte2','Perivascular cell',
'Endothelial cell', 'Immune cell'))
plot.bubble <-
ggplot(data = df.bubble,
aes(x = Gene, y = Cell, size = Prop, color = ScaleExp)) +
geom_point(fill = 'cornsilk') +
scale_colour_gradientn(
colours = colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(100)) +
# facet_grid( ~ Status) +
labs(x = 'Gene', y = 'Cell Type', color = 'Scaled expression',
size = 'Proportion') +
theme(panel.background = element_rect(color = 'gray',
fill = 'transparent'),
axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
ggsave(plot = plot.bubble, path = path.data,
filename = 'marker_surface.png',
height = 15, width = 35, units = 'cm')
# no filter
# cell type
cluster_2 <- seurat.all$RNA_snn_res.2
celltypes <- as.character(cluster_2)
celltypes[cluster_2 %in% c(10)] <- 'Stromal cell1'
celltypes[cluster_2 %in% c(3)] <- 'Stromal cell2'
celltypes[cluster_2 %in% c(22)] <- 'Stromal stem cell'
celltypes[cluster_2 %in% c(9, 21)] <- 'Chondral stem cell'
celltypes[cluster_2 %in% c(13, 17)] <- 'Transitional chondrocyte'
celltypes[cluster_2 %in% c(1, 2, 5, 8, 12, 14, 15, 26)] <- 'Chondrocyte1'
celltypes[cluster_2 %in% c(0, 4, 6, 7, 11, 16)] <- 'Chondrocyte2'
celltypes[cluster_2 %in% c(19, 20, 24)] <- 'Perivascular cell'
celltypes[cluster_2 %in% c(18)] <- 'Endothelial cell'
celltypes[cluster_2 %in% c(25)] <- 'Immune cell'
# table(seurat.all$batch, celltypes)/as.vector(table(seurat.all$batch))
seurat.all$celltype <- celltypes
DimPlot(seurat.all, group.by = "celltype", label = T)
celltypes <- unique(seurat.all$celltype)
df.bubble <- data.frame(stringsAsFactors = F)
for (cell in celltypes) {
sub.seurat <- subset(seurat.all, subset = celltype == cell)
sub.mat <- sub.seurat@assays$RNA@data
for (gene in genes.CS) {
vec.exp <- sub.mat[gene,]
mean.exp <- mean(vec.exp)
prop.exp <- sum(vec.exp != 0)/length(vec.exp)
df.bubble <- rbind(df.bubble,
data.frame(Cell = cell, Gene = gene,
MeanExp = mean.exp, Prop = prop.exp))
}
}
mat.exp <- reshape2::dcast(df.bubble, Cell ~ Gene, value.var = 'MeanExp')
row.names(mat.exp) <- mat.exp$Cell
mat.exp$Cell <- NULL
mat.exp.scale <- scale(mat.exp)
df.exp.scale <- reshape2::melt(mat.exp.scale)
names(df.exp.scale) <- c('Cell', 'Gene', 'ScaleExp')
df.bubble <- merge(df.bubble, df.exp.scale, by = c('Cell', 'Gene'))
df.bubble$Cell <- factor(df.bubble$Cell,
levels = c('Stromal cell1', 'Stromal cell2', 'Stromal stem cell',
'Chondral stem cell', 'Transitional chondrocyte',
'Chondrocyte1', 'Chondrocyte2','Perivascular cell',
'Endothelial cell', 'Immune cell',
'23', '27', '28'))
plot.bubble <-
ggplot(data = df.bubble,
aes(x = Gene, y = Cell, size = Prop, color = ScaleExp)) +
geom_point(fill = 'cornsilk') +
scale_colour_gradientn(
colours = colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(100)) +
# facet_grid( ~ Status) +
labs(x = 'Gene', y = 'Cell Type', color = 'Scaled expression',
size = 'Proportion') +
theme(panel.background = element_rect(color = 'gray',
fill = 'transparent'),
axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
ggsave(plot = plot.bubble, path = path.data,
filename = 'marker_surface_no_filter.png',
height = 18, width = 35, units = 'cm')
# sort
df.chon <- df.bubble[df.bubble$Cell == 'Chondral stem cell', ]
df.bubble$Gene <- factor(df.bubble$Gene,
levels = df.chon[order(df.chon$ScaleExp, decreasing = T), 'Gene'])
plot.bubble <-
ggplot(data = df.bubble,
aes(x = Gene, y = Cell, size = Prop, color = ScaleExp)) +
geom_point(fill = 'cornsilk') +
scale_colour_gradientn(
colours = colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(100)) +
# facet_grid( ~ Status) +
labs(x = 'Gene', y = 'Cell Type', color = 'Scaled expression',
size = 'Proportion') +
theme(panel.background = element_rect(color = 'gray',
fill = 'transparent'),
axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
ggsave(plot = plot.bubble, path = path.data,
filename = 'marker_surface_no_filter_chon.png',
height = 18, width = 35, units = 'cm')
df.stroma <- df.bubble[df.bubble$Cell == 'Stromal stem cell', ]
df.bubble$Gene <- factor(df.bubble$Gene,
levels = df.stroma[order(df.stroma$ScaleExp, decreasing = T), 'Gene'])
plot.bubble <-
ggplot(data = df.bubble,
aes(x = Gene, y = Cell, size = Prop, color = ScaleExp)) +
geom_point(fill = 'cornsilk') +
scale_colour_gradientn(
colours = colorRampPalette(rev(brewer.pal(n = 7, name = "RdYlBu")))(100)) +
# facet_grid( ~ Status) +
labs(x = 'Gene', y = 'Cell Type', color = 'Scaled expression',
size = 'Proportion') +
theme(panel.background = element_rect(color = 'gray',
fill = 'transparent'),
axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
ggsave(plot = plot.bubble, path = path.data,
filename = 'marker_surface_no_filter_stroma.png',
height = 18, width = 35, units = 'cm')
# violin plot
library(ggplot2)
marker.genes <- c('APOE', 'VCAN', 'OGN', 'COCH',
'COL1A1', 'LUM', 'HES1', 'ID3', 'SOX9',
'CYTL1', 'COL2A1', 'COL9A2', 'ACAN',
'ACTA2', 'PDGFRB', 'CDH5', 'IL1B')
# marker.genes <- c('CFD', 'APOE', 'VCAN', 'MMP10', 'COCH',
# 'ASPN', 'OGN')
df.gene <- data.frame(stringsAsFactors = F)
for (gene in marker.genes) {
df.sub <- data.frame(expvalue = seurat.all.filter@assays$RNA@data[gene,],
gene = rep(gene, ncol(seurat.all.filter@assays$RNA@data)),
celltype = seurat.all.filter$celltype)
df.gene <- rbind(df.gene, df.sub)
}
df.plot <- df.gene
df.plot$gene <- factor(df.gene$gene, levels = marker.genes)
df.plot$celltype <- factor(df.gene$celltype,
levels = c('Stromal cell1', 'Stromal cell2', 'Stromal stem cell',
'Chondral stem cell', 'Transitional chondrocyte',
'Chondrocyte1', 'Chondrocyte2','Perivascular cell',
'Endothelial cell', 'Immune cell'))
plot.vln <-
ggplot(data = df.plot, aes(x = gene, y = expvalue, color = gene, fill = gene)) +
geom_violin(trim = T, scale = 'width') +
facet_grid( ~ celltype) +
theme_classic() + coord_flip() +
stat_summary(fun= mean, geom = "point",
shape = 23, size = 2, color = "black") +
labs(x = 'Gene', y = 'Expression Level') + theme(legend.position = 'none')
ggsave(plot = plot.vln, path = path.data,
filename = 'Vln.png',
height = 15, width = 35, units = 'cm')
# prop
seurat.4 <- subset(seurat.all, subset = RNA_snn_res.0.6 == 4)
table(seurat.4$batch)/table(seurat.all$batch)
df.plot <- data.frame(sample = names(table(seurat.4$batch)),
prop = as.numeric(table(seurat.4$batch)/table(seurat.all$batch)))
ggplot(df.plot, aes(x = sample, y = prop)) + geom_bar(stat = 'identity')
seurat.5 <- subset(seurat.all, subset = RNA_snn_res.0.6 == 5)
table(seurat.5$batch)
seurat.9 <- subset(seurat.all, subset = RNA_snn_res.0.6 == 9)
table(seurat.9$batch)
seurat.11 <- subset(seurat.all, subset = RNA_snn_res.2 == 11)
DimPlot(seurat.11, group.by = 'RNA_snn_res.2')
seurat.0 <- subset(seurat.all, subset = RNA_snn_res.2 == 0)
DimPlot(seurat.0, group.by = 'RNA_snn_res.2')
seurat.M3 <- subset(seurat.all, subset = batch == 'M3')
FeaturePlot(seurat.M3, features = c('HES1', 'ID3', 'COL1A1', 'CYTL1'))
seurat.M1 <- subset(seurat.all, subset = batch == 'M1')
FeaturePlot(seurat.M1, features = c('HES1', 'ID3', 'COL1A1', 'CYTL1'))
FeaturePlot(seurat.M1, features = c('TNF'))
seurat.C4 <- subset(seurat.all, subset = batch == 'C4')
FeaturePlot(seurat.C4, features = c('HES1', 'ID3', 'COL1A1', 'CYTL1'))
seurat.C6 <- subset(seurat.all, subset = batch == 'C6')
FeaturePlot(seurat.C6, features = c('HES1', 'ID3', 'COL1A1', 'CYTL1'))
FeaturePlot(seurat.C6, features = c('TNF'))
seurat.C1 <- subset(seurat.all, subset = batch == 'C1')
FeaturePlot(seurat.C1, features = c('TNF', 'TNFSF10'))
(table(seurat.4$batch) + table(seurat.5$batch))/table(seurat.all$batch)
FeaturePlot(seurat.first, features = c('FRZB', 'CTGF',
'SERPINA1', "SCRG1", 'COL9A3', 'FGFBP2'), ncol = 3)
|
ccbdb91734d68709d8a30504765c9851f988d1a4 | 4059f892c51304bb8af43d90105da02f49fa263a | /storetrackeR/global.R | 85b779590152d0448dc79927d0434204ce5bcd83 | [] | no_license | martinSchneiderEoda/storetracker | 49fbc350feed0f2c2744a94d0dd6f142b6d4fbe9 | 47982c735c450d25224ab04369ae6ad502aac75b | refs/heads/master | 2021-04-09T07:31:04.518342 | 2020-03-22T17:24:56 | 2020-03-22T17:24:56 | 248,850,995 | 0 | 1 | null | 2020-03-21T13:02:13 | 2020-03-20T20:59:21 | R | UTF-8 | R | false | false | 915 | r | global.R |
# libraries ---------------------------------------------------------------
library(shiny)
library(shinyMobile)
library(leaflet)
library(DT)
library(DBI)
library(lubridate)
library(geosphere)
library(dplyr)
library(ggplot2)
library(tidyr)
library(stringr)
library(geoloc)
library(revgeo)
library(osmdata)
library(sf)
library(dbplyr)
# source -------------------------------------------------------------------------
source("storeFunctions.R")
source("uiFunctions.R")
con <- dbConnect(RSQLite::SQLite(), "storeTrackeDB.sqlite")
# -------------------------------------------------------------------------
# replace with db
product_choices <- tbl(con, "Products") %>% pull(ID)
names(product_choices) <- tbl(con, "Products") %>% pull(Name)
stores <- tbl(con, "Supermarket") %>% pull(ID)
names(stores) <- tbl(con, "Supermarket") %>% pull(Name)
store_choices <- tbl(con, "Supermarket") %>% pull(Name)
|
693c1721b79ca3ccac10a24e60e6fc89c5277309 | 11bd69f4cc83ac6ea62eb3118072fc22c667cf76 | /correlation.R | 45c2fedb473f655efeee1b9a6d6990b834c3af5d | [] | no_license | VarSriv/Firefight | c482c11638e15e733fdf39f394050781a7184149 | 02936d2bf66e321f2ac542ba5a255f72d2a70519 | refs/heads/master | 2021-08-16T01:20:25.976253 | 2017-11-18T14:48:13 | 2017-11-18T14:48:13 | 111,216,984 | 0 | 0 | null | 2017-11-18T15:37:14 | 2017-11-18T15:37:13 | null | UTF-8 | R | false | false | 922 | r | correlation.R | library('RSQLite')
conn <- dbConnect(SQLite(), 'C:/Users/User/Downloads/FPA_FOD_20170508.sqlite')
# pull the fires table into RAM
fires <- dbReadTable(conn,"Fires")
# disconnect from db
dbDisconnect(conn)
ctrl <- fires[is.na(fires$CONT_DATE) == FALSE,"CONT_DATE"]-fires[is.na(fires$CONT_DATE) == FALSE,"DISCOVERY_DATE"]
cor(ctrl, fires[is.na(fires$CONT_DATE) == FALSE,"FIRE_SIZE"])
plot(fires[is.na(fires$CONT_DATE) == FALSE,"FIRE_SIZE"],ctrl,xlim = c(0,40000),ylim = c(0,10000))
df <- as.data.frame(cbind(fires[is.na(fires$CONT_DATE) == FALSE,"FIRE_SIZE"],ctrl))
set.seed(123)
train_index <- sample(c(TRUE, FALSE), nrow(df), replace = TRUE, prob = c(0.8, 0.2))
test_index <- !train_index
# Create x/y, train/test data
x_train <- df[train_index,]
x_test <- df[test_index,]
model <- lm(ctrl~V1,x_train)
summary(model)
p <- predict(model,x_test)
error <- mean((abs(p-x_test$ctrl))/p)
|
3756f79772e703349f7f5fb5c8546be644de60e5 | 4d66d82231c7f9218538ca62fd15f366b2b2d8a2 | /src/analysis/calculate_k_from_hall_eq.R | 9353d79b83d8d73c7336d0688a90ce197521537f | [] | no_license | amcarter/nhc_50yl_sandbox | 78f12f2ee3175f165f0e12fe2c567526d476c585 | 3cd8a25fa8279b88c80be04c2a42a730e274dcdb | refs/heads/master | 2023-06-24T13:02:50.673995 | 2021-07-23T04:01:11 | 2021-07-23T04:01:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,978 | r | calculate_k_from_hall_eq.R | # Calculate k values for NHC based on the equation in Hall 1970
# Appendix
# Equation 4
# k2 = 5.026 * V ^ 0.969 * R(ft) ^ -1.673 at 20 C (day-1, per volume)
# Hall approximates R, hydraulic radius as Depth
# k2(T) = k2(T=20) * 1.0241^(T-20)
# k (g/m3/d) = 2.3 * k2 * DO.sat / 24hr
# K (g/m2/d) = k * d
library(streamMetabolizer)
# get Hall K values ####
##Convert KO2 to K600
calc_k210_from_d_v <- function(d_f, v_fs){
k2 = 5.026 * v_fs ^ 0.969 * d_f ^ -1.673
return(k2)
}
calc_k_from_k210 <- function(k2, temp, DOsat = NULL, airP = 101.3){
if(is.null(DOsat)){ DOsat <- calc_DO_sat(temp, airP * 10)}
k = k2 * 1.024^(temp - 20) * DOsat * 2.3 / 24
return(k)
}
calc_K2_from_K00 <- function(K600, temp){
sa = 1568
sb = -86.04
sc = 2.142
sd = -0.0216
se = -0.5
K2 = K600 * ((sa + sb * temp + sc * temp^2 + sd * temp^3)/600) ^ se
return(K2)
}
calc_K600_from_k210 <- function(k210, temp){
sa = 1568
sb = -86.04
sc = 2.142
sd = -0.0216
se = -0.5
K2 = k210 * 2.3
K600 = K2 / ((sa + sb * temp + sc * temp^2 + sd * temp^3)/600) ^ se
return(K600)
}
# load hall data
hallk <- read_csv("hall_50yl/code/data/hall/hall_tableA2_k_morphology.csv")
hallk <- hallk %>%
mutate(k2_day_vol = K2_perday_perarea / depth_m,
K600 = calc_K600_from_k210(k2_day_vol, 20),
v_ms = ((k2_day_vol / (5.026 * (depth_m * 3.28) ^ -1.673)) ^
(1/0.969))/3.28)
write_csv(hallk,"hall_50yl/code/data/hall/hall_tableA2_k_morphology_extended.csv")
# get measured K ####
# ar_k <- read_csv("data/estimated_k_values.csv")
# d = depth (m), v = velocity (m/s), DO.sat = DO at saturation (mg/L), t = temp C
# setwd("C:/Users/Alice Carter/Dropbox (Duke Bio_Ea)/projects/NHC_2019_metabolism/data")
# nhc <- read_csv("metabolism/processed/NHC.csv") %>%
# mutate(DateTime_EST = force_tz(DateTime_EST, tz = "EST"),
# date = as.Date(DateTime_EST)) %>%
# filter(date == as.Date("2017-06-27")) %>%
# select(DO.obs, temp.water, level_m, depth, discharge)
# unhc <- read_csv("metabolism/processed/NHC.csv") %>%
# mutate(DateTime_EST = force_tz(DateTime_EST, tz = "EST"),
# date = as.Date(DateTime_EST)) %>%
# filter(date == as.Date("2017-07-11")) %>%
# select(DO.obs, temp.water, level_m, depth, discharge)
#
# summary(nhc)
# nhc_k <- ar_k %>%
# filter(site =="NHC") %>%
# mutate(depth = .23,
# discharge = .14,
# watertemp = 22,
# K600 = K600fromO2(watertemp, k_md/depth))
#
# summary(unhc)
# unhc_k <- ar_k %>%
# filter(site =="UNHC") %>%
# mutate(depth = .14,
# discharge = .02,
# watertemp = 25,
# K600 = K600fromO2(watertemp, k_md/depth))
#
# ar_k <- bind_rows(nhc_k, unhc_k)
# write_csv(ar_k, "gas_data/measured_k_Ar_releases.csv")
# calc k from Hall 1970 equation ####
# load processed site data files
# Calculate K600 for sites ####
#
# # this width needs to be replaced with actual data from longitudinal surveys
# widths <- read_csv("C:/Users/Alice Carter/Dropbox (Duke Bio_Ea)/projects/NHC_2019_metabolism/data/siteData/NHCsite_metadata.csv") %>%
# select(site = sitecode, width_m = width_mar_m) %>%
# slice(1:7)
# ZQ <- read_csv("rating_curves/depth_discharge_relationship_LM1953.csv") %>%
# rename(site = sitename) %>%
# left_join(widths) %>%
# mutate(cv_ms = 1/c_m/width_m)
# ZQ$cv_ms[ZQ$site %in% c("NHC", "UNHC")] <- .194
# This loop needs to be stepped through for each individual site.
kk <- data.frame()
par(mfrow = c(2,1))
for(site in sites$sitecode){
dat <- read_csv(paste0("NHC_2019_metabolism/data/metabolism/processed/",
site, ".csv"), guess_max = 10000)
dat <- dat %>%
group_by(date = as.Date(with_tz(DateTime_UTC, tz = "EST"))) %>%
select(date, discharge, depth, avg_velocity, DO.obs,
DO.sat, temp_C = temp.water) %>%
summarize_all(mean, na.rm = T) %>%
mutate(k210_vol = calc_k210_from_d_v(depth * 3.28, avg_velocity * 3.28),
k_gm3hr = calc_k_from_k210(k210_vol, temp_C, DO.sat),
K600 = calc_K600_from_k210(k210_vol, temp_C))
# plot(dat$depth, dat$avg_velocity, pch = 20, main = site,
# col = "grey50", ylim = c(0,1))
# points(hallk$depth_m, hallk$v_ms, col = 2, pch = 20)
#
# plot(dat$depth, dat$K600, pch = 20, main = site, col = "grey50",
# ylim = c(0,20))
# points(hallk$depth_m, hallk$K600, col = 2, pch = 20)
dat$site = site
kk <- bind_rows(kk, dat)
}
# as.tibble(kk) %>%
# ggplot(aes(depth, K600, color= site)) +
# geom_point()
write_csv(kk, "NHC_2019_metabolism/data/siteData/KQ_hall_prior_from_equation_daily.csv")
# generate K/Q nodes for SM for each site year:
# kq <- data.frame()
# par(mfrow = c(1,1))
# for(site in unique(kk$site)){
# dat <- kk %>%
# filter(site == !!site)
# plot(dat$discharge, dat$K600, log = "xy", main = site)
# m <- lm(log(K600) ~ log(discharge), dat)
# mm <- summary(m)$coefficients[,1]
# Qrng <- range(log(dat$discharge), na.rm = T)
# delta = 2
# n = 6
# while(delta > 1){
# n = n + 1
# delta <- (Qrng[2]-Qrng[1])/n
# }
# Qnodes <- seq(Qrng[1] + delta/2, by = delta, length.out = n)
# lnK600 <- mm[1] + mm[2] * Qnodes
# points(exp(Qnodes), exp(lnK600), col = 2, pch = 19)
# nodes <- data.frame(site = site,
# lnQ = Qnodes,
# lnK600 = lnK600)
# kq <- bind_rows(nodes, kq)
# }
#
# write_csv(kq, "siteData/KQ_hall_prior_from_equation.csv")
#
# # Previous attempt ####
# # Hall used the numbers from stream morphology for his calculations, so I will too
# hallQ <- log(range(hall_k$discharge_m3s))
# # #get the range of all Q's
# # q <- read_csv("metabolism/processed/NHC.csv") %>%
# # select(discharge)
# # Q <- log(range(q, na.rm = T))
#
# Q <- seq(-9, 5)
#
# comb <- hall_k %>%
# mutate(logQ = log(discharge_m3s)) %>%
# select(K600, logQ) %>%
# full_join(data.frame(nodes = Q,
# logQ = Q)) %>%
# arrange(logQ)
#
# comb$K600[which(comb$logQ <= hallQ[1])] <- hall_k$K600[1]
# comb$K600[which(comb$logQ >= hallQ[2])] <-
# hall_k$K600[nrow(hall_k)]
#
# comb_k <- transform(comb, K600 = na.approx(K600, logQ, na.rm = F))
#
# plot(comb_k$logQ, comb_k$K600)
# points(comb_k$nodes, comb_k$K600, col = "red", pch = 19)
# write_csv(comb_k, "siteData/KQ_hall_prior.csv")
#
# # 11/19/2020
# # didn't finish here the plan was to use the hall data that already was
# # calculated for stream morphology, but that is problematic for a few reasons:
# # 1. The range is smaller than that of our data
# # 2. the hall rating curve doesn't necesarily apply to this depth
# #
# # Another thing to try is calculating the k from my own data, this is
# # a rabbit hole though because the depth is all fucky from using calc_depth
# # my plan is to use the level data to modify these depths, but I will need
# # to clean it up a lot. I could also use this to get something like velocity
# # as well. |
a2c225d68d548fb0782b3c9a6c794e6720528937 | 96e82189064bacbc7d032ff4937332902cda649e | /skeleton.R | 576a78fbc399372fd509d7f38f7589f6f0fa8542 | [] | no_license | tonyshenyy/Stat-133 | 87b07274c18200f7d8e83ce30b97ebb9ef2cb4c6 | d98c3b4e99041dd9e9261967507551948918172d | refs/heads/master | 2020-05-24T12:37:19.021817 | 2015-08-15T20:17:32 | 2015-08-15T20:17:32 | 40,556,295 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,412 | r | skeleton.R | # =============================================================================
# Skeleton.R
# Description: This file contains all the commands to create folders and files
# as well as commands to download resource files.
# Name: Yiyang Shen and Caylie Marie Connelly
# =============================================================================
# commands create folders code, rawdata, data, resources, report and images
dir.create('./code')
dir.create('./rawdata')
dir.create('./data')
dir.create('./resources')
dir.create('./report')
dir.create('./images')
# command to create file README.md
file.create('./README.md')
# command to download the raw data files
# For storms.csv and tracks.csv
download.file(
'ftp://eclipse.ncdc.noaa.gov/pub/ibtracs/v03r06/wmo/hurdat_format/basin/Basin.NA.ibtracs_hurdat.v03r06.hdat',
'./rawdata/Basin.NA.ibtracs_hurdat.v03r06.hdat')
download.file(
'ftp://eclipse.ncdc.noaa.gov/pub/ibtracs/v03r06/wmo/hurdat_format/basin/Basin.EP.ibtracs_hurdat.v03r06.hdat',
'./rawdata/Basin.EP.ibtracs_hurdat.v03r06.hdat')
# For Visualization
download.file('ftp://eclipse.ncdc.noaa.gov/pub/ibtracs/v03r06/wmo/csv/basin/Basin.EP.ibtracs_wmo.v03r06.csv',
'./rawdata/Basin.EP.ibtracs_wmo.v03r06.csv')
download.file('ftp://eclipse.ncdc.noaa.gov/pub/ibtracs/v03r06/wmo/csv/basin/Basin.NA.ibtracs_wmo.v03r06.csv',
'./rawdata/Basin.NA.ibtracs_wmo.v03r06.csv')
|
e9be02c5ffef95bcc17ca60332f95a43b16eeee6 | 6b49bd4cd7728144ba7d2acf2f0c40d347fdac26 | /Final.Project.Script.R | ea14ddb7349dd3783d5a5f380f623a70d98c7ae4 | [] | no_license | sbordena/BD_final_assignment | ac25c044224a440b8e6e2d40293078cca35afd73 | 3ec0a8cca37910e67cfa4dc41c7c3ffbbd17efa0 | refs/heads/master | 2021-01-10T04:08:55.038725 | 2016-03-02T19:04:29 | 2016-03-02T19:04:29 | 52,987,815 | 0 | 1 | null | 2016-03-05T20:22:48 | 2016-03-02T18:53:35 | R | UTF-8 | R | false | false | 7,078 | r | Final.Project.Script.R | ## Final Project Script
## Load libraries
library(tree)
library(rpart)
library(randomForest)
library(gbm)
library(data.table)
library(caret)
library(doParallel)
library(glmnet)
library(e1071)
library(pROC)
library(curl)
library(dismo)
library(distrom)
library(gamlr)
library(dplyr)
library(Matrix)
## Set Working Directory
setwd("C:/Users/Chingono/Documents/Big Data Course")
## Get Functions
source("https://raw.githubusercontent.com/ChicagoBoothML/HelpR/master/lift.R")
helpr_repo_raw_url <- 'https://raw.githubusercontent.com/ChicagoBoothML/HelpR/master'
source(file.path(helpr_repo_raw_url, 'EvaluationMetrics.R'))
source("naref.R")
source("fdr.R")
source("roc.R")
source("deviance.R")
##Parallel Computing
# set randomizer's seed
set.seed(28)
# Parallel Computation Setup
cl <- makeCluster(detectCores()-2) # create a compute cluster using all CPU cores except 2
clusterEvalQ(cl, library(foreach))
registerDoParallel(cl) # register this cluster
###load data
data=read.csv("equities_data.csv")
data <- data[complete.cases(data),]
### To read the quarterly data, install the "foreign" and "readstata13" packages
### and un-comment the following:
# library(foreign)
# library(readstata13)
# quarterly_data <- read.dta("Quarterly_Fundamentals_2.17.16.dta")
# monthly_returns <- read.dta("Monthly Returns 2.17.16.dta")
## Create factors among explanatory variables
data$size_index <- as.factor(data$size_index)
data$pyreturn_index <- as.factor(data$pyreturn_index)
data$value_index <- as.factor(data$value_index)
data$profit_index <- as.factor(data$profit_index)
data$pb_index <- as.factor(data$pb_index)
data$pyreturn_below_median <- as.factor(data$pyreturn_below_median)
data$asset_turnover <- as.factor(data$asset_turnover)
data$debtpy_debt <- as.factor(data$debtpy_debt)
data$sharespy_shares <- as.factor(data$sharespy_shares)
#split data into train, validate, and test
set.seed(28)
n=nrow(data)
n1=floor(0.6*n)
n2=floor(0.2*n)
n3=n-n1-n2
ii=sample(1:n,n)
trainDf=data[ii[1:n1],]
valDf=data[ii[n1+1:n2],]
testDf=data[ii[n1+n2+1:n3],]
###check that samples are balanced in terms of y (Future Debt Paydown)
table(trainDf$y)
table(valDf$y)
table(testDf$y)
## ~42% of companies in each sample pay down debt over the next year
### Logistic Regression ###
###fit simple logit model
lgfit = glm(y~.,data=trainDf[,-27:-28],family=binomial) # exclude next 1 year return and portfolio year (column 27 & 28)
summary(lgfit)
pv <- coef(summary(lgfit))[,4] #grab p-values
length(pv[pv<=0.05]) # 36 coefficients appear to be significant at the 5% level
## Prediction with simple logit model
p_logit <- predict(lgfit, newdata=valDf, type="response")
D <- deviance(y=valDf$y, pred=p_logit, family="binomial")
ybar <- mean(valDf$y==1) # marginal prob(y==1)
D0 <- deviance(y=valDf$y, pred=ybar, family="binomial")
## OOS R-squared is 5.23%
1 - D/D0
## Plot a couple of Variables that appear to be related to y (Future Debt Paydown)
## We see that:
## 1a. Most companies have low leverage (Long Term Debt/Enterprsie Value)
## 1b. There is a monotonic decrease in frequency of observations as LT Debt/EV increases
## 2. On average, companies that did NOT pay down debt last year have higher LT Debt/Assets
## 3. On average, companies that DID pay down debt last year have higher Gross Profitability
hist(trainDf$lt_debt_ev,
main="Long Term Debt / Enterprise Value",
xlab="LT Debt / EV")
boxplot(debt_assets~debtpy_debt, data=trainDf, ylim=c(0,1),
main="LT Debt / Assets by Prior Debt Paydown",
xlab="Prior Debt Paydown", ylab="LT Debt / Assets")
boxplot(gprofit_assets~debtpy_debt, data=trainDf, ylim=c(-0.5,1),
main="Gross Profitability by Prior Debt Paydown",
xlab="Prior Debt Paydown", ylab="Gross Profit / Assets")
## FDR COntrol
## Plot the p-values
hist(pv,col=8, main="Histogram of P-Values")
# We see a big spike at zero, indicating more p-values near zero than one
# would expect if they all came from the null. There is signal in the data.
# FDR Cut at 0.01:
alpha <- fdr_cut(pv,q=.01,plotit=TRUE)
alpha ## alpha cutoff is 0.001070771
signif <- which(pv <= alpha) ## which are significant
length(signif) ## 26 coefficients (out of the original 36) are significant after FDR control
###fit logit model with interactions
x_train <- trainDf[,-26:-28]
x_train_sp <- sparse.model.matrix(~ .^2, data=x_train)[,-1] #This takes a while!
x_val <- valDf[,-26:-28]
x_val_sp <- sparse.model.matrix(~ .^2, data=x_val)[,-1]
x_test <- testDf[,-26:-28]
x_test_sp <- sparse.model.matrix(~ .^2, data=x_test)[,-1]
## Lasso & CV Regression (both models take a few minutes to run)
y_train <- trainDf$y
lasso_reg <- gamlr(x_train_sp,y_train, family="binomial", lmr=1e-4)
plot(lasso_reg)
lasso.cv <- cv.gamlr(x_train_sp,y_train, family="binomial", lmr=1e-4, verb=TRUE)
plot(lasso.cv)
# CV OOS R-Squared is higher than the simple GLM model
(1- lasso.cv$cvm[lasso.cv$seg.1se]/lasso.cv$cvm[1]) # 1se rule = 6.70%
(1- lasso.cv$cvm[lasso.cv$seg.min]/lasso.cv$cvm[1]) # min rule = 6.81%
### Set up Deviance Loss Function to get OOS R2 of Lasso Regression###
###deviance loss function
lossf = function(y,phat,wht=0.0000001) {
#y should be 0/1
#wht shrinks probs in phat towards .5, don't log 0!
if(is.factor(y)) y = as.numeric(y)-1
phat = (1-wht)*phat + wht*.5
py = ifelse(y==1,phat,1-phat)
return(-2*sum(log(py)))
}
# Lasso reg OOS R-squared
p_lasso_reg <- predict(lasso_reg, newdata=x_val_sp, type="response")
phat_lasso = matrix(p_lasso_reg,ncol=1)
D <- lossf(y=valDf$y, phat=phat_lasso)
ybar <- mean(valDf$y==1) # marginal prob(y==1)
D0 <- deviance(y=valDf$y, pred=ybar, family="binomial")
## Lasso Reg's OOS R-squared is 6.69% (also higher than simple logit)
1 - D/D0
### Principal Component Analysis ###
## Turn Factors to numeric to do PCA
x_train$size_index <- as.numeric(x_train$size_index)
x_train$pyreturn_index <- as.numeric(x_train$pyreturn_index)
x_train$value_index <- as.numeric(x_train$value_index)
x_train$profit_index <- as.numeric(x_train$profit_index)
x_train$pb_index <- as.numeric(x_train$pb_index)
x_train$pyreturn_below_median <- as.numeric(x_train$pyreturn_below_median)
x_train$asset_turnover <- as.numeric(x_train$asset_turnover)
x_train$debtpy_debt <- as.numeric(x_train$debtpy_debt)
x_train$sharespy_shares <- as.numeric(x_train$sharespy_shares)
pca_train <- prcomp(x_train, scale=TRUE)
z_train <- predict(pca_train)
## plot and interpret
plot(pca_train, main="")
mtext(side=1, "Equity PCs", line=1, font=2)
round(pca_train$rotation[,1:4],1)
## PC1 captures the size factor (see loadings on big stocks)
## PC2 captures low leverage, large growth stocks with positive momentum
## PC3 capturs low leverage, small growth stocks with negative momentum
## PC4 captures value stocks with high profitability
save.image()
```{r}
stopCluster(cl) # shut down the parallel computing cluster
```
|
2bb69888fe535e2c5a9389cd43abafd51329f25b | 1623d0f14bec8e9813b75a29e4dd549462000ab2 | /man/getXMLFromWebService.Rd | c7e482d72c87c298092b17f9e2450820c2faf82d | [
"MIT"
] | permissive | jbracher/epi_forecasting_DE | e410827f10b64fb7ea59a6bad093d4f75f79af1e | 4f88974beb2b2a3c84f54982553f780f6d91b193 | refs/heads/master | 2023-09-04T12:13:14.788907 | 2021-10-27T08:11:18 | 2021-10-27T08:11:18 | 239,568,850 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 432 | rd | getXMLFromWebService.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/survstat_webservice.R
\name{getXMLFromWebService}
\alias{getXMLFromWebService}
\title{method to recieve the xml from the WebService}
\usage{
getXMLFromWebService(body_, service_)
}
\arguments{
\item{body_}{the xml request}
\item{service_}{the WebService method}
}
\value{
something XMLish
}
\description{
method to recieve the xml from the WebService
}
|
71c4facaf6af9377d7d507a1e3bbbb750d26fa6a | b77b91dd5ee0f13a73c6225fabc7e588b953842b | /11_run_all_analyze_plot_scripts.R | 63648746f94bcf27152b638604fdd54a39e32e7b | [
"MIT"
] | permissive | ksamuk/gene_flow_linkage | a1264979e28b61f09808f864d5fa6c75568147b0 | 6182c3d591a362407e624b3ba87403a307315f2d | refs/heads/master | 2021-01-18T09:18:02.904770 | 2017-04-02T16:51:40 | 2017-04-02T16:51:40 | 47,041,898 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 834 | r | 11_run_all_analyze_plot_scripts.R | ################################################################################
# This script performs the following functions:
# 1. Plots all the raw figures for the manuscript
# 3. Performs permutations on fitted model ouput
# 2. Creates all the supplementary data files
################################################################################
library("dplyr")
# this generates warnings; they are safe to ignore (mostly ggplot reporting NAs being removed)
list.files(pattern = "plot", full.names = TRUE) %>%
grep("RUN", ., invert = TRUE, value = TRUE) %>%
sapply(source)
list.files(pattern = "analyze", full.names = TRUE) %>%
grep("RUN", ., invert = TRUE, value = TRUE) %>%
sapply(source)
list.files(pattern = "prepare", full.names = TRUE) %>%
grep("RUN", ., invert = TRUE, value = TRUE) %>%
sapply(source)
|
ce1e31ecfc4da45d10d656cb9d6aa0dfc1d65852 | d9b3b9a18b1151bd5613d5302afa32ef92a5b219 | /share/R/CI.R | aedcbfd8ec64933c61c38f266ddae7a453e7f571 | [
"MIT"
] | permissive | Rbbt-Workflows/combination_index | f56f3019b69ecc6355f1950768e4b0b7febed6af | abb72987f105ae1073c6e68e06ca6c669d168636 | refs/heads/master | 2022-06-27T21:57:57.586452 | 2022-06-20T07:50:40 | 2022-06-20T07:50:40 | 51,080,222 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 17,531 | r | CI.R | rbbt.require('drc')
rbbt.require('ggplot2')
CI.eff_ratio = function(x){
return(x / (1-x));
}
CI.misc.seq <- function(){
res = seq(0, 0.1, length.out=50)
res = c(res, seq(0.1, 0.9, length.out=50))
res = c(res, seq(0.9, 1, length.out=50))
return(res)
}
CI.misc.log_seq <- function(center.point){
res = c(center.point)
for (i in seq(1,5,by=0.5)) {
n = center.point * i
res = c(res, n)
n = center.point * 2^i
res = c(res, n)
n = center.point * 4^i
res = c(res, n)
n = center.point * 8^i
res = c(res, n)
n = center.point * 16^i
res = c(res, n)
n = center.point * 32^i
res = c(res, n)
n = center.point * 64^i
res = c(res, n)
n = center.point / i
res = c(res, n)
n = center.point / 2^i
res = c(res, n)
n = center.point / 4^i
res = c(res, n)
n = center.point / 8^i
res = c(res, n)
n = center.point / 16^i
res = c(res, n)
n = center.point / 32^i
res = c(res, n)
n = center.point / 64^i
res = c(res, n)
}
for (i in seq(0,100, by=1)) {
res = c(res, i)
}
return(sort(res))
}
CI.add_curve = function(m_1, m_2, dm_1, dm_2, d_1, d_2){
additive.levels = CI.misc.seq()
additive.doses = sapply(additive.levels, function(level){
ratio = CI.eff_ratio(level);
t1 = d_1/(dm_1*(ratio^(1/m_1)))
t2 = d_2/(dm_2*(ratio^(1/m_2)))
(d_1+d_2)/(t1 + t2)
})
data.add = data.frame(Dose=additive.doses, Response=additive.levels)
return(data.add)
}
CI.add_curve.bliss = function(m_1, m_2, dm_1, dm_2, d_1, d_2){
additive.levels = CI.misc.seq()
additive.doses = sapply(additive.levels, function(level){
ratio = CI.eff_ratio(level);
t1 = d_1/(dm_1*(ratio^(1/m_1)))
t2 = d_2/(dm_2*(ratio^(1/m_2)))
(d_1+d_2)/(t1 + t2)
})
data.add = data.frame(Dose=additive.doses, Response=additive.levels)
return(data.add)
}
CI.me_curve = function(m, dm, center_dose=NA){
if (is.na(center_dose)) center_dose = dm
doses.me = c(CI.misc.log_seq(center_dose), CI.misc.seq())
if (is.null(m) || is.na(m)){
data.me = data.frame(Dose=doses.me, Response=rep(NA, length(doses.me)))
}else{
response_ratios.me = sapply(doses.me, function(d){ (d / dm)^m });
responses.me = sapply(response_ratios.me, function(ratio){ ratio / (1+ratio) });
data.me = data.frame(Dose=doses.me, Response=responses.me)
}
return(data.me)
}
CI.least_squares.fix_log <- function(value){
res = exp(value)/(1+exp(value))
res[res>1] = 1
return(res)
}
CI.predict_line <- function(modelfile, doses, least_squares=FALSE, invert=FALSE,level=0.90){
data.drc = data.frame(Dose=doses);
if (!is.null(modelfile)){
model = rbbt.model.load(modelfile)
if (least_squares){
data.drc$Response = predict(model, data.drc)
data.drc$Response = CI.least_squares.fix_log(data.drc$Response)
tryCatch({
data.drc$Response.upr = predict(model, data.frame(Dose=data.drc$Dose), interval="confidence", level=level)[,'upr'];
data.drc$Response.lwr = predict(model, data.frame(Dose=data.drc$Dose), interval="confidence", level=level)[,'lwr'];
data.drc$Response.upr = CI.least_squares.fix_log(data.drc$Response.upr)
data.drc$Response.lwr = CI.least_squares.fix_log(data.drc$Response.lwr)
})
}else{
data.drc$Response = predict(model, data.drc)
tryCatch({
data.drc$Response.upr = predict(model, data.frame(Dose=data.drc$Dose), interval="confidence", level=level)[,'Upper'];
data.drc$Response.lwr = predict(model, data.frame(Dose=data.drc$Dose), interval="confidence", level=level)[,'Lower'];
})
}
if (invert){
data.drc$Response = 1 - data.drc$Response
tryCatch({
data.drc$Response.upr = 1 - data.drc$Response.upr
data.drc$Response.lwr = 1 - data.drc$Response.lwr
})
}
}else{
return(NULL)
}
return(data.drc)
}
CI.subset_data <- function(data, min, max){
data = subset(data, data$Response <= 1)
data = subset(data, data$Dose <= max)
data = subset(data, data$Dose >= min)
return(data)
}
#{{{ PLOTS }}}#
CI.plot_fit <- function(m, dm, data, data.me_points=NULL, modelfile=NULL, least_squares=FALSE, invert=FALSE, random.samples=NULL){
data.me = CI.me_curve(m, dm)
max = max(data$Dose)
min = min(data$Dose)
data.me = subset(data.me, data.me$Response <= 1)
data.me = subset(data.me, data.me$Dose <= max)
data.me = subset(data.me, data.me$Dose >= min)
data.drc = CI.predict_line(modelfile, data.me$Dose, least_squares, invert)
if (is.null(data.drc)){
data.drc = data.me
}
min.response=min(c(0,data.me$Response, data.drc$Response, data.me_points$Response))
max.response=max(c(1,data.me$Response, data.drc$Response, data.me_points$Response))
min.dose = min(data.me_points$Dose)
max.dose = max(data.me_points$Dose)
if (least_squares){
plot = ggplot(aes(x=Dose, y=log(Response/(1-Response))), data=data) #+ xlim(log(c(min.dose, max.dose))) + ylim(c(1,-1))
}else{
plot = ggplot(aes(x=Dose, y=Response), data=data) + ylim(c(min.response,max.response)) #+ xlim(log(c(min.dose, max.dose)))
}
if(sum(!is.nan(data.drc$Response.upr)) > 0 && ! least_squares){
plot = plot + geom_ribbon(data=data.drc, aes(ymin=Response.lwr, ymax=Response.upr),col='blue',fill='blue',alpha=0.2, cex=0.1)
}
if (!is.null(random.samples) && length(random.samples)>0){
for (i in seq(0,length(random.samples)/2)){
m.s = random.samples[2*i+1]
dm.s = random.samples[2*i+2]
data.me.s = CI.me_curve(m.s, dm.s)
data.me.s = CI.subset_data(data.me.s, min, max)
plot = plot + geom_line(data=data.me.s, col='cyan', cex=2, linetype='solid', alpha=0.2)
}
}
plot = plot +
scale_x_log10() + annotation_logticks(side='b') +
geom_line(data=data.me, col='blue', cex=2) +
geom_line(data=data.drc, col='blue', linetype='dotted',cex=2) +
geom_point(cex=5) +
geom_point(data=data.me_points, col='blue',cex=7, shape=18) +
geom_point(data=data.me_points, col='white',cex=4, shape=18)
return(plot)
}
CI.plot_combination <- function(blue_m, blue_dm, blue_dose, red_m, red_dm, red_dose, response, blue_data, red_data, data.blue_me_points, data.red_me_points, blue.modelfile = NULL, red.modelfile=NULL, least_squares=FALSE, blue.invert=FALSE, red.invert=FALSE, fix_ratio=FALSE, more_doses = NULL, more_responses = NULL, blue.random.samples=NULL, red.random.samples=NULL, blue.fit_dose = NULL, red.fit_dose = NULL){
data.blue_me = CI.me_curve(blue_m, blue_dm)
data.red_me = CI.me_curve(red_m, red_dm)
max = max(c(blue_data$Dose, red_data$Dose))
min = min(c(blue_data$Dose, red_data$Dose))
data.blue_me = subset(data.blue_me, data.blue_me$Response <= 1)
data.blue_me = subset(data.blue_me, data.blue_me$Dose <= max)
data.blue_me = subset(data.blue_me, data.blue_me$Dose >= min)
data.red_me = subset(data.red_me, data.red_me$Response <= 1)
data.red_me = subset(data.red_me, data.red_me$Dose <= max)
data.red_me = subset(data.red_me, data.red_me$Dose >= min)
if (fix_ratio){
blue_ratio = (blue_dose + red_dose)/blue_dose
red_ratio = (blue_dose + red_dose)/red_dose
}else{
blue_ratio = red_ratio = 1
}
data.add = CI.add_curve(blue_m, red_m, blue_dm, red_dm, blue_dose, red_dose)
data.blue_drc = CI.predict_line(blue.modelfile, data.add$Dose/blue_ratio, least_squares, blue.invert)
data.red_drc = CI.predict_line(red.modelfile, data.add$Dose/red_ratio, least_squares, red.invert)
if (is.null(data.blue_drc)){
data.blue_drc = data.blue_me
}
if (is.null(data.red_drc)){
data.red_drc = data.red_me
}
blue_data$Dose = blue_data$Dose * blue_ratio
data.blue_me$Dose = data.blue_me$Dose * blue_ratio
data.blue_drc$Dose = data.blue_drc$Dose * blue_ratio
data.blue_me_points$Dose = data.blue_me_points$Dose * blue_ratio
red_data$Dose = red_data$Dose * red_ratio
data.red_me$Dose = data.red_me$Dose * red_ratio
data.red_drc$Dose = data.red_drc$Dose * red_ratio
data.red_me_points$Dose = data.red_me_points$Dose * red_ratio
min.response=min(c(0,response, data.blue_me_points$Response, data.blue_drc$Response))
max.response=max(c(1,response, data.blue_me_points$Response, data.blue_drc$Response))
min.response=min(c(min.response, data.red_me_points$Response, data.red_drc$Response))
max.response=max(c(max.response, data.red_me_points$Response, data.red_drc$Response))
if (!is.null(more_responses)){
min.response=min(c(min.response, more_responses))
max.response=max(c(max.response, more_responses))
}
max.response = min(c(1, max.response))
min.response = max(c(0, min.response))
all.doses = c(data.blue_me$Dose, data.red_me$Dose, more_doses)
min.dose = min(all.doses)
max.dose = max(all.doses)
data.blue_drc = CI.subset_data(data.blue_drc, min.dose, max.dose)
data.red_drc = CI.subset_data(data.red_drc, min.dose, max.dose)
data.blue_me = CI.subset_data(data.blue_me, min.dose, max.dose)
data.red_me = CI.subset_data(data.red_me, min.dose, max.dose)
data.add = CI.subset_data(data.add, min.dose, max.dose)
plot = ggplot(aes(x=as.numeric(Dose), y=as.numeric(Response)), data=blue_data)
if (!is.null(more_responses)){
len = min(c(length(more_doses), length(more_responses)))
md=more_doses[1:len]
me=more_responses[1:len]
plot = plot + geom_smooth(aes(x=Dose, y=Response), data=data.frame(Dose=md, Response=me), linetype='dashed', col='black', level=0.95, se=FALSE)
}
if (!is.null(blue.random.samples) && !is.null(red.random.samples)){
max = min(length(blue.random.samples), length(red.random.samples))
for (i in seq(0,max/2)){
m.blue.s = blue.random.samples[2*i+1]
dm.blue.s = blue.random.samples[2*i+2]
m.red.s = red.random.samples[2*i+1]
dm.red.s = red.random.samples[2*i+2]
data.add.s = CI.add_curve(m.blue.s, m.red.s, dm.blue.s, dm.red.s, blue_dose, red_dose)
data.add.s = CI.subset_data(data.add.s, min.dose, max.dose)
plot = plot + geom_line(data=data.add.s, col='cyan', cex=2, linetype='solid', alpha=0.2)
}
}
plot = plot +
xlim(min.dose, max.dose) +
ylim(min.response, max.response) +
scale_x_log10() +
annotation_logticks(side='b') +
xlab("Dose") +
ylab("Response") +
geom_point(data=blue_data, col='blue',cex=3,alpha=0.8) +
geom_point(data=red_data, col='red',cex=3,alpha=0.8) +
geom_line(data=data.blue_me, col='blue', cex=2,alpha=0.8) +
geom_line(data=data.red_me, col='red', cex=2,alpha=0.8) +
geom_line(data=data.blue_drc, linetype='dashed', col='blue', cex=1,alpha=0.8) +
geom_line(data=data.red_drc, linetype='dashed', col='red', cex=1,alpha=0.8) +
geom_line(data=data.add, col='black', cex=2,alpha=0.8) +
geom_point(x=log10(blue_dose + red_dose), y=response, col='black',cex=5,alpha=0.8) +
geom_point(data=data.blue_me_points, col='blue', shape = 18, cex=7) +
geom_point(data=data.blue_me_points, col='white', shape = 18, cex=4) +
geom_point(data=data.red_me_points, col='red', shape = 18, cex=7) +
geom_point(data=data.red_me_points, col='white', shape = 18, cex=4)
if (!is.null(more_responses)){
for (i in seq(1, len)){
plot = plot + geom_point(x=log10(more_doses[i]), y=more_responses[i], col='black', cex=2, alpha=0.4)
}
}
if (!is.null(blue.fit_dose)){
plot = plot + geom_vline(x=blue.fit_dose*blue_ratio, col='blue', cex=1, linetype='dotted')
plot = plot + geom_vline(x=red.fit_dose*red_ratio, col='red', cex=1, linetype='dotted')
}
return(plot)
}
CI.plot_combination.bliss <- function(blue_dose, red_dose, response, blue_data, red_data, bliss_data, fix_ratio=FALSE, more_doses = NULL, more_responses = NULL, all_bliss_data = NULL){
max = max(c(blue_data$Dose, red_data$Dose))
min = min(c(blue_data$Dose, red_data$Dose))
if (fix_ratio){
blue_ratio = (blue_dose + red_dose)/blue_dose
red_ratio = (blue_dose + red_dose)/red_dose
}else{
blue_ratio = red_ratio = 1
}
blue_data$Dose = blue_data$Dose * blue_ratio
red_data$Dose = red_data$Dose * red_ratio
min.response=min(as.numeric(c(0,response, blue_data$Response, red_data$Response, bliss_data$Response)))
max.response=max(as.numeric(c(1,response, blue_data$Response, red_data$Response, bliss_data$Response)))
if (!is.null(more_responses)){
min.response=min(c(min.response, more_responses))
max.response=max(c(max.response, more_responses))
}
max.response = min(c(1.2, max.response))
min.response = max(c(-0.2, min.response))
all.doses = c(more_doses)
min.dose = min(all.doses)
max.dose = max(all.doses)
plot = ggplot(aes(x=as.numeric(Dose), y=as.numeric(Response)), data=blue_data) +
xlim(min.dose, max.dose) +
ylim(min.response, max.response)
str(max.response)
str(min.response)
if (!is.null(more_responses)){
len = min(c(length(more_doses), length(more_responses)))
md=more_doses[1:len]
me=more_responses[1:len]
plot = plot + geom_smooth(aes(x=Dose, y=Response), data=data.frame(Dose=md, Response=me), linetype='dashed', col='black', method="loess", level=0.95, se=FALSE)
}
alpha = 0.8
plot = plot +
scale_x_log10() +
annotation_logticks(side='b') +
xlab("Dose") +
ylab("Value") +
geom_point(data=blue_data, col='blue',cex=3,alpha=alpha) +
geom_point(data=red_data, col='red',cex=3,alpha=alpha) +
geom_point(data=bliss_data, col='purple',cex=3,alpha=alpha) +
geom_smooth(data=bliss_data, linetype='dashed', col='purple',method="loess", level=0.95, se=FALSE) +
geom_point(x=log10(blue_dose + red_dose), y=response, col='black',cex=5,alpha=alpha)
if (!is.null(all_bliss_data)){
plot = plot + geom_point(data=all_bliss_data, col='purple', cex=5, alpha=alpha/2)
}
if (!is.null(more_responses)){
for (i in seq(1, len)){
plot = plot + geom_point(x=log10(more_doses[i]), y=more_responses[i], col='black', cex=2, alpha=0.4)
}
}
return(plot)
}
CI.plot_combination.hsa <- function(blue_dose, red_dose, response, blue_data, red_data, hsa_data, fix_ratio=FALSE, more_doses = NULL, more_responses = NULL){
max = max(c(blue_data$Dose, red_data$Dose))
min = min(c(blue_data$Dose, red_data$Dose))
if (fix_ratio){
blue_ratio = (blue_dose + red_dose)/blue_dose
red_ratio = (blue_dose + red_dose)/red_dose
}else{
blue_ratio = red_ratio = 1
}
blue_data$Dose = blue_data$Dose * blue_ratio
red_data$Dose = red_data$Dose * red_ratio
min.response=min(as.numeric(c(0,response, blue_data$Response, red_data$Response, hsa_data$Response)))
max.response=max(as.numeric(c(1,response, blue_data$Response, red_data$Response, hsa_data$Response)))
if (!is.null(more_responses)){
min.response=min(c(min.response, more_responses))
max.response=max(c(max.response, more_responses))
}
max.response = min(c(1.2, max.response))
min.response = max(c(-0.2, min.response))
all.doses = c(more_doses)
min.dose = min(all.doses)
max.dose = max(all.doses)
plot = ggplot(aes(x=as.numeric(Dose), y=as.numeric(Response)), data=blue_data) +
xlim(min.dose, max.dose) +
ylim(min.response, max.response)
str(max.response)
str(min.response)
if (!is.null(more_responses)){
len = min(c(length(more_doses), length(more_responses)))
md=more_doses[1:len]
me=more_responses[1:len]
plot = plot + geom_smooth(aes(x=Dose, y=Response), data=data.frame(Dose=md, Response=me), linetype='dashed', col='black', method="loess", level=0.95, se=FALSE)
}
plot = plot +
scale_x_log10() +
annotation_logticks(side='b') +
xlab("Dose") +
ylab("Value") +
geom_point(data=blue_data, col='blue',cex=3,alpha=0.8) +
geom_point(data=red_data, col='red',cex=3,alpha=0.8) +
geom_point(data=hsa_data, col='purple',cex=3,alpha=0.8) +
geom_smooth(data=hsa_data, linetype='dashed', col='purple',method="loess", level=0.95, se=FALSE) +
geom_point(x=log10(blue_dose + red_dose), y=response, col='black',cex=5,alpha=0.8)
if (!is.null(more_responses)){
for (i in seq(1, len)){
plot = plot + geom_point(x=log10(more_doses[i]), y=more_responses[i], col='black', cex=2, alpha=0.4)
}
}
return(plot)
}
|
abe1647784377f4664eb8aab678a19e55106126c | 5065f337e3ffb1793b855e6b2264dfde4467fb97 | /[archive from before 11-2018]/R code/05 code split plot variety nitrogen [Splitplot 2Way ANOVA].R | fe59169043a3f6e6cf146c2b99475940165ad82a | [] | no_license | Reicharf/R-SAS.Introductory.Courses | faed787ec5c0eaa76d55dc476e6142f27c24ce6f | 69ec001f8961d5f1c4d1387f80249daa4cbc159b | refs/heads/master | 2022-06-16T23:21:33.677994 | 2020-04-26T13:50:08 | 2020-04-26T13:50:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,498 | r | 05 code split plot variety nitrogen [Splitplot 2Way ANOVA].R | rm(list=ls())
setwd("D:/Hohenheim/R-SAS.Introductory.Courses/Datasets")
library(data.table)
dt <- fread("05 split plot variety nitrogen.txt") # directly import as data.table format via fread()
dt$Block <- as.factor(dt$Block)
dt$N <- as.factor(dt$N)
dt$Var <- as.factor(dt$Var)
dt$Yield <- as.numeric(dt$Yield)
# Split Plot Design
# Two-Way ANOVA
### Split Plot Design
# When some factors (independent variables) are difficult
# or impossible to change in your experiment, a completely
# randomized design isn't possible. The result is a
# split-plot design, which has a mixture of hard to
# randomize (or hard-to-change) and easy-to-randomize
# (or easy-to-change) factors. The hard-to-change factors
# are implemented first, followed by the easier-to-change factors.
# boxplots for first impression
boxplot(data=dt, Yield ~ N + Var, las=2)
boxplot(data=dt, Yield ~ N , las=2)
boxplot(data=dt, Yield ~ Var , las=2)
# In a split-plot design, the (incomplete) mainplots should
# be taken as a "random effect". As a general principle,
# each randomization units needs to be represented by a random effect,
# so each randomization unit has its own error term.
# Since we then have random and fixed effects
# in one model, we are fitting a "mixed model".
# In R the most common packages for that are "lme4", "nlme", "asreml-R" and "sommer".
# If you use lme4, always load the lmerTest package, too
#install.packages("lme4")
#install.packages("lmerTest")
library(lme4)
library(lmerTest)
# Fit general linear mixed model
#################################
# Treatment effects: Variety, Fertilizer and their interaction
# Design effects: Block and mainplot(=random effect)
# Step 1: Check F-Test of ANOVA and perform backwards elimination
# Step 2: Compare adjusted means per level
mod <- lmer(data = dt,
formula = Yield ~ N + Var + N:Var +
Block + (1|Block:N))
# Note: In this example, Block*N identifies the incomplete blocks (=main plots) within each complete block.
# To read more about this example, see p. 59 of Prof. Piepho's lecture notes for "Mixed models for metric data"
anova(mod) # Interaction effect significant - final model
# plot(mod) # residual plot 1
# qqnorm(resid(mod)); qqline(resid(mod)) # residual plot 2
mod # Basic results
summary(mod) # More detailed results
# get adj. means for Variety effect and compare
library(emmeans)
# get means and comparisons
means <- emmeans(mod, pairwise ~ N | Var, adjust = "tukey") # to get t-test: adjust="none"
# Note that N | Var gets pairwise N comparisons for each
# Variety separately. You can use N:Var instead to get all
# pairwise comparisons.
means # look at means and comparisons
means$emmeans # look at means
means$contrasts # look at comparions
output <- CLD(means$emmeans, details=T, Letters = letters)
output # this data format is not good for ggplot
output <- as.data.table(output$emmeans) # reformatting into one table
output # this is better
# plot adjusted means
#install.packages("ggplot2")
library(ggplot2)
p <- ggplot(data=output, aes(x=N))
p <- p + geom_bar(aes(y=emmean), stat="identity", width=0.8)
p <- p + geom_errorbar(aes(ymin=emmean-SE, ymax=emmean+SE), width=0.4)
p <- p + geom_text(aes(y=emmean+1500, label=.group))
p <- p + facet_wrap(~Var) # one per variety
p # show plot
# save ggplot as file into your working directory
ggsave("test.jpeg", width = 20, height = 10, units = "cm")
|
2ed038142a7a5b785cc838a42d708fe49b2521e5 | 1ed93f1491f02dc355dbbe0cbe5e6931134f7a7d | /R/pcorstarsl.R | a12d6367ccf5147697ce7e8f7cbafd105cd9ff02 | [] | no_license | GiulioCostantini/markerIndex | b346ed40f13fa385d4e232687eda01116a33e02e | febb6fba8f538ee48698085f3f3a810d4ccc0e23 | refs/heads/master | 2021-07-13T02:28:41.310778 | 2021-02-23T11:31:20 | 2021-02-23T11:31:20 | 237,241,394 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,487 | r | pcorstarsl.R | # adapted from here to include partial correlation matrix
# https://github.com/kyuni22/ksmv/blob/master/functions/corstarsl.R
pcorstarsl <- function(pcm, n, np, digits = 2, full = FALSE){
# pcm = partial correlation matrix
# n = sample size
# np = number of variables partialled out
if(is.null(rownames(pcm)))
rownames(pcm) <- 1:nrow(pcm)
if(is.null(colnames(pcm)))
colnames(pcm) <- 1:ncol(pcm)
R <- pcm
p <- corr.p(R, n-np, adjust = "none")$p
## define notions for significance levels; spacing is important.
mystars <- ifelse(p < .001, "***",
ifelse(p < .01, "** ",
ifelse(p < .05, "* ", ifelse(p < .1, "+ ", " "))))
## trunctuate the matrix that holds the correlations to two decimal
R <- format(round(R, digits))
R <- apply(R, 2, str_replace, "0.", ".")
## build a new matrix that includes the correlations with their apropriate stars
Rnew <- matrix(paste(R, mystars, sep=""), ncol=ncol(pcm))
diag(Rnew) <- paste(diag(R), " ", sep="")
rownames(Rnew) <- colnames(pcm)
colnames(Rnew) <- paste(colnames(pcm), "", sep="")
## remove upper triangle
Rnew <- as.matrix(Rnew)
Rnew[upper.tri(Rnew, diag = TRUE)] <- ""
Rnew <- as.data.frame(Rnew)
## remove last column and return the matrix (which is now a data frame)
Rnew <- cbind(Rnew[1:length(Rnew)-1])
if(full)
out <- list("Rformatted" = Rnew, "R" = pcm, "p" = p)
else
out <- Rnew
out
}
|
9556fd0f837fd6be7dff163b3a22579d5a4017e1 | 1d0465c228a1e8fd4e1c271fbde2262bf24ccb59 | /Data Analysis in R. Part 2/1.4.1.r | ce225f2591e02ade103fea98a362afc2ea8dd46e | [] | no_license | framirov/R-course | 536ab92111a2a54c0160f82fab960ed8047877a9 | a01439fa0b1e530385c303bedf7f44af239a6683 | refs/heads/main | 2023-01-27T20:38:50.428334 | 2020-12-04T08:26:30 | 2020-12-04T08:26:30 | 312,372,536 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 81 | r | 1.4.1.r | positive_sum <- function(x){
lapply(x, function(x) sum(x[x>0], na.rm = T))
} |
bc9a57b404d34bd11caf8f915c4978d54b13afff | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/kzs/examples/kzs.2d.Rd.R | 8708e337f47e21858b995ed665b27f59471de060 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,501 | r | kzs.2d.Rd.R | library(kzs)
### Name: kzs.2d
### Title: Spatial Kolmogorov-Zurbenko Spline
### Aliases: kzs.2d
### Keywords: smooth ts nonparametric
### ** Examples
# EXAMPLE - Estimating the Sinc function in the interval (-3pi, 3pi)
# Load the LATTICE package
# Gridded data for X = (x1, x2) input variables
x1 <- seq(-3*pi, 3*pi, length = 60)
x2 <- x1
df <- expand.grid(x1 = x1, x2 = x2)
# Apply the Sinc function to the (x1, x2) coordinates
df$z <- sin(sqrt(df$x1^2 + df$x2^2)) / sqrt(df$x1^2 + df$x2^2)
df$z[is.na(df$z)] <- 1
# Any point outside the circle of radius 3pi is set to 0. This provides
# a better picture of the outcome solely for the purposes of this example.
dst <- sqrt((df$x1 - 0)^2 + (df$x2 - 0)^2)
df$dist <- dst
df$z[df$dist > 3*pi] <- 0
# Add noise to distort the signal
ez <- rnorm(length(df$z), mean = 0, sd = 1) * 1/4
df$zn <- ez + df$z
### (1) 3D plot of the signal to be estimated by kzs.2d()
wireframe(z ~ x1 * x2, df, main = "Signal to be estimated", drape = TRUE,
colorkey = TRUE, scales = list(arrows = FALSE))
### (2) 3D plot of the signal buried in noise
wireframe(zn ~ x1 * x2, df, main = "Signal buried in noise", drape = TRUE,
colorkey = TRUE, scales = list(arrows = FALSE))
### (3) Execute kzs.2d()
# kzs.2d() may take time to run; k = 1 iteration is used here, but k = 2
# will provide a smoother outcome.
sw <- c(1, 1)
sc <- c(0.2, 0.2)
kzs.2d(y = df[,5], x = df[,1:2], smooth = sw, scale = sc, k = 1, edges = TRUE,
plot = TRUE)
|
058aefb10cca6d9d609a98adc8fe1247b543112f | 761d9b095dd97a9fe0aa25db0966087cea34963d | /Section IV - Distance, Knn, Cross Validation, and Generative Models, Part 3 Generative Models.R | e1e536b19406826ab0cedf773a79fb6b69ee84ec | [] | no_license | mike-tex/machine-learning | 776e478ab2b01f837be762751985212e3e67a796 | c76c177a7d5550002dde5faa23a03471145966c9 | refs/heads/master | 2022-11-17T04:26:32.101227 | 2020-07-09T21:39:07 | 2020-07-09T21:39:07 | 271,375,389 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,918 | r | Section IV - Distance, Knn, Cross Validation, and Generative Models, Part 3 Generative Models.R | ## Section 4: Distance, Knn, Cross-validation,
## and Generative Models
## 4.3: Generative Models
## Generative Models
## Naive Bayes
# Generating train and test set
library(dslabs)
library(tidyverse)
library("caret")
data("heights")
y <- heights$height
set.seed(2, sample.kind = "Rounding")
test_index <-
createDataPartition(y, times = 1, p = 0.5, list = FALSE)
train_set <- heights %>% slice(-test_index)
test_set <- heights %>% slice(test_index)
# Estimating averages and standard deviations
params <- train_set %>%
group_by(sex) %>%
summarize(avg = mean(height), sd = sd(height))
params
# Estimating the prevalence
pi <- train_set %>%
summarize(pi=mean(sex=="Female")) %>% pull(pi)
pi
# Getting an actual rule
x <- test_set$height
f0 <- dnorm(x, params$avg[2], params$sd[2])
f1 <- dnorm(x, params$avg[1], params$sd[1])
p_hat_bayes <- f1*pi / (f1*pi + f0*(1 - pi))
## Controlling Prevalence
# Computing sensitivity
y_hat_bayes <- ifelse(p_hat_bayes > 0.5, "Female", "Male")
sensitivity(data = factor(y_hat_bayes),
reference = factor(test_set$sex))
# Computing specificity
specificity(data = factor(y_hat_bayes),
reference = factor(test_set$sex))
# Changing the cutoff of the decision rule
p_hat_bayes_unbiased <- f1 * 0.5 / (f1 * 0.5 + f0 * (1 - 0.5))
y_hat_bayes_unbiased <-
ifelse(p_hat_bayes_unbiased > 0.5, "Female", "Male")
sensitivity(data = factor(y_hat_bayes_unbiased),
reference = factor(test_set$sex))
specificity(data = factor(y_hat_bayes_unbiased),
reference = factor(test_set$sex))
# Draw plot
qplot(x, p_hat_bayes_unbiased, geom = "line") +
geom_hline(yintercept = 0.5, lty = 2) +
geom_vline(xintercept = 67, lty = 2)
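# A quick numeric check (a sketch using x and p_hat_bayes_unbiased from
# above): the observed height closest to where the unbiased rule crosses
# 0.5, i.e. roughly the 67 inches marked by the dashed vertical line
x[which.min(abs(p_hat_bayes_unbiased - 0.5))]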
## qda and lda
# QDA
# Quadratic discriminant analysis (QDA)
# Load data
data("mnist_27")
# Estimate parameters from the data
params <- mnist_27$train %>%
group_by(y) %>%
summarize(avg_1 = mean(x_1), avg_2 = mean(x_2),
sd_1 = sd(x_1), sd_2 = sd(x_2),
r = cor(x_1, x_2))
# Contour plots
mnist_27$train %>% mutate(y = factor(y)) %>%
ggplot(aes(x_1, x_2, fill = y, color = y)) +
geom_point(show.legend = FALSE) +
stat_ellipse(type="norm", lwd = 1.5)
# Fit model
library(caret)
train_qda <- train(y ~., method = "qda",
data = mnist_27$train)
# Obtain predictors and accuracy
y_hat <- predict(train_qda, mnist_27$test)
confusionMatrix(data = y_hat,
reference = mnist_27$test$y)$overall["Accuracy"]
# Draw separate plots for 2s and 7s
mnist_27$train %>% mutate(y = factor(y)) %>%
ggplot(aes(x_1, x_2, fill = y, color = y)) +
geom_point(show.legend = FALSE) +
stat_ellipse(type="norm") +
facet_wrap(~y)
# LDA - linear discriminant analysis
params <- mnist_27$train %>%
group_by(y) %>%
summarize(avg_1 = mean(x_1), avg_2 = mean(x_2),
sd_1 = sd(x_1), sd_2 = sd(x_2),
r = cor(x_1, x_2))
params <- params %>%
mutate(sd_1 = mean(sd_1), sd_2 = mean(sd_2), r = mean(r))
train_lda <-
train(y ~., method = "lda", data = mnist_27$train)
y_hat <- predict(train_lda, mnist_27$test)
confusionMatrix(data = y_hat,
reference = mnist_27$test$y)$overall["Accuracy"]
## Case Study: More than Three Classes
if (!exists("mnist")) mnist <- read_mnist()
# set.seed(3456)
set.seed(3456, sample.kind="Rounding") # in R 3.6 or later
index_127 <-
sample(which(mnist$train$labels %in% c(1,2,7)), 2000)
y <- mnist$train$labels[index_127]
x <- mnist$train$images[index_127,]
index_train <- createDataPartition(y, p=0.8, list = FALSE)
# get the quadrants
# temporary object to help figure out the quadrants
row_column <- expand.grid(row=1:28, col=1:28)
upper_left_ind <- which(row_column$col <= 14 &
row_column$row <= 14)
lower_right_ind <- which(row_column$col > 14 &
row_column$row > 14)
# binarize the values. Above 200 is ink, below is no ink
x <- x > 200
# cbind proportion of pixels in upper right quadrant
# and proportion of pixels in lower right quadrant
x <- cbind(rowSums(x[ ,upper_left_ind])/rowSums(x),
rowSums(x[ ,lower_right_ind])/rowSums(x))
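# sanity check of the quadrant indexing (row_column is still available):
row_column[upper_left_ind[1], ]   # the first index maps to row 1, col 1
length(upper_left_ind)            # 196 = 14 * 14 pixels per quadrant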
train_set <- data.frame(y = factor(y[index_train]),
x_1 = x[index_train,1],
x_2 = x[index_train,2])
test_set <- data.frame(y = factor(y[-index_train]),
x_1 = x[-index_train,1],
x_2 = x[-index_train,2])
train_set %>% ggplot(aes(x_1, x_2, color=y)) + geom_point()
train_qda <- train(y ~ ., method = "qda", data = train_set)
predict(train_qda, test_set, type = "prob") %>% head()
predict(train_qda, test_set) %>% head()
confusionMatrix(predict(train_qda, test_set),
test_set$y)$table
confusionMatrix(predict(train_qda, test_set),
test_set$y)$overall["Accuracy"]
train_lda <- train(y ~ ., method = "lda", data = train_set)
confusionMatrix(predict(train_lda, test_set),
test_set$y)$overall["Accuracy"]
train_knn <- train(y ~ ., method = "knn",
tuneGrid = data.frame(k = seq(15, 51, 2)),
data = train_set)
confusionMatrix(predict(train_knn, test_set),
test_set$y)$overall["Accuracy"]
train_set %>% mutate(y = factor(y)) %>%
ggplot(aes(x_1, x_2, fill = y, color=y)) +
geom_point(show.legend = FALSE) +
stat_ellipse(type="norm")
## Comprehension Check: Generative Models
# Q1
# Create a dataset of samples from just cerebellum
# and hippocampus, two parts of the brain,
# and a predictor matrix with 10 randomly selected columns
# using the following code:
library(dslabs)
library(caret)
library(tidyverse)
data("tissue_gene_expression")
# set.seed(1993)
set.seed(1993, sample.kind="Rounding") # if using R 3.6 or later
ind <- which(tissue_gene_expression$y
%in% c("cerebellum", "hippocampus"))
y <- droplevels(tissue_gene_expression$y[ind])
x <- tissue_gene_expression$x[ind, ]
x <- x[, sample(ncol(x), 10)]
# Use the train() function to estimate the accuracy of LDA.
# For this question, use the version of x and y created
# with the code above: do not split them or tissue_gene_expression
# into training and test sets (understand this can lead
# to overfitting). Report the accuracy from the train()
# results (do not make predictions).
train(y ~ x, method = "lda", data = tibble(x = x, y = y))
# What is the accuracy?
# Accuracy
# 0.8707879 ## YES!!!
## Explanation from the web site
# The following code can be used to estimate
# the accuracy of the LDA:
fit_lda <- train(x, y, method = "lda")
fit_lda$results["Accuracy"]
# Q2
# In this case, LDA fits two 10-dimensional
# normal distributions. Look at the fitted model by looking
# at the finalModel component of the result of train().
# Notice there is a component called means that includes
# the estimated means of both distributions. Plot
# the mean vectors against each other and determine
# which predictors (genes) appear to be driving the algorithm.
#
# Which TWO genes appear to be driving the algorithm
# (i.e. the two genes with the highest means)?
fit_lda$finalModel
# RAB1B
# OAZ2
# Explanation from the web site
# The following code can be used to make the plot:
t(fit_lda$finalModel$means) %>% data.frame() %>%
mutate(predictor_name = rownames(.)) %>%
ggplot(aes(cerebellum, hippocampus, label = predictor_name)) +
geom_point() +
geom_text() +
geom_abline()
# Q3
# Repeat the exercise in Q1 with QDA.
# Create a dataset of samples from just cerebellum
# and hippocampus, two parts of the brain,
# and a predictor matrix with 10 randomly selected columns
# using the following code:
cat("\014")
rm(list = ls())
library(dslabs)
library(caret)
library(dplyr)
data("tissue_gene_expression")
# set.seed(1993) #
set.seed(1993, sample.kind="Rounding") # if using R 3.6 or later
ind <- which(tissue_gene_expression$y
%in% c("cerebellum", "hippocampus"))
y <- droplevels(tissue_gene_expression$y[ind])
x <- tissue_gene_expression$x[ind, ]
x <- x[, sample(ncol(x), 10)]
# Use the train() function to estimate the accuracy of QDA.
# For this question, use the entire tissue_gene_expression
# dataset: do not split it into training and test sets
# (understand this can lead to overfitting).
fit_qda <- train(x, y, method = "qda")
fit_qda$results["Accuracy"]
# What is the accuracy?
# Accuracy
# 0.8147954
# Explanation
# The following code can be used to estimate
# the accuracy of QDA:
fit_qda <- train(x, y, method = "qda")
fit_qda$results["Accuracy"]
## Q4
# Which TWO genes drive the algorithm when using QDA
# instead of LDA (i.e. the two genes with the highest means)?
fit_qda$finalModel
t(fit_qda$finalModel$means) %>% data.frame() %>%
mutate(predictor_name = rownames(.)) %>%
ggplot(aes(cerebellum, hippocampus, label = predictor_name)) +
geom_point() +
geom_text() +
geom_abline()
# RAB1B
# OAZ2
# Explanation from the web site
t(fit_qda$finalModel$means) %>% data.frame() %>%
mutate(predictor_name = rownames(.)) %>%
ggplot(aes(cerebellum, hippocampus, label = predictor_name)) +
geom_point() +
geom_text() +
geom_abline()
## Q5
# One thing we saw in the previous plots is that
# the values of the predictors correlate in both groups:
# some predictors are low in both groups and others high
# in both groups. The mean value of each predictor found
# in colMeans(x) is not informative or useful for prediction
# and often for purposes of interpretation, it is useful
# to center or scale each column. This can be achieved
# with the preProcess argument in train(). Re-run LDA
# with preProcess = "center". Note that accuracy does not change,
# but it is now easier to identify the predictors
# that differ more between groups than based on
# the plot made in Q2.
cat("\014") ## clear the console
rm(list = ls())
library(dslabs)
library(caret)
library(dplyr)
data("tissue_gene_expression")
# set.seed(1993) #
set.seed(1993, sample.kind="Rounding") # if using R 3.6 or later
ind <- which(tissue_gene_expression$y
%in% c("cerebellum", "hippocampus"))
y <- droplevels(tissue_gene_expression$y[ind])
x <- tissue_gene_expression$x[ind, ]
x <- x[, sample(ncol(x), 10)]
fit_lda_pp <- train(x, y, method = "lda", preProcess = "center")
fit_lda_pp$results["Accuracy"]
fit_lda_pp$finalModel
library(dplyr)
# Q2 plot
t(fit_lda_pp$finalModel$means) %>% data.frame() %>%
mutate(predictor_name = rownames(.)) %>%
ggplot(aes(cerebellum, hippocampus, label = predictor_name)) +
geom_point() +
geom_text() +
geom_abline()
t(fit_lda_pp$finalModel$means) %>% data.frame() %>%
mutate(predictor_name = rownames(.)) %>%
ggplot(aes(cerebellum, hippocampus, color = predictor_name)) +
geom_point() +
geom_abline()
# NickBova: I plotted predictor_name on the x axis
# and (scaled) means on the y
t(fit_lda_pp$finalModel$means) %>% data.frame() %>%
  mutate(predictor_name = rownames(.)) %>%
  pivot_longer(c(cerebellum, hippocampus),
               names_to = "tissue", values_to = "mean") %>%
  ggplot(aes(x = predictor_name, y = mean, label = tissue)) +
  geom_point() +
  geom_text()
varImp(fit_lda_pp)
temp1 <- fit_lda_pp$finalModel$means %>% data.frame() %>% t()
temp1
temp2 <- as.data.frame(temp1)
temp2
temp3 <- as_tibble(temp2)
rowtemp <- rownames(temp2)
temp3 <- as_tibble(temp2, rownames = "rowtemp")
temp3
temp4 <-temp3 %>% mutate(avg = (hippocampus + cerebellum) / 2,
delta = abs(hippocampus - cerebellum),
hippo_error = abs(hippocampus - mean(hippocampus)),
cere_error = abs(cerebellum - mean(cerebellum)))
temp4
arrange(temp4, abs(avg))
temp4 %>% arrange(abs(hippocampus),
abs(cerebellum),
abs(avg))
temp4 %>% arrange(cere_error, hippo_error)
#
# Which TWO genes drive the algorithm after performing
# the scaling?
# PLCB1 ## wrong
# RAB1B ## correct
# above partially correct
# RAB1B ## correct
# OAZ2 ## right! but it showed wrong when I selected it!
# Explanation
# The following code can be used to make the plot to evaluate
# which genes are driving the algorithm after scaling:
fit_lda <- train(x, y, method = "lda", preProcess = "center")
fit_lda$results["Accuracy"]
t(fit_lda$finalModel$means) %>% data.frame() %>%
mutate(predictor_name = rownames(.)) %>%
ggplot(aes(predictor_name, hippocampus)) +
geom_point() +
coord_flip()
# You can see that it is different genes driving
# the algorithm now. This is because the predictor
# means change.
#
# In the previous exercises we saw that both LDA
# and QDA approaches worked well. For further exploration
# of the data, you can plot the predictor values
# for the two genes with the largest differences between
# the two groups in a scatter plot to see how they appear
# to follow a bivariate distribution as assumed
# by the LDA and QDA approaches, coloring the points
# by the outcome, using the following code:
d <- apply(fit_lda$finalModel$means, 2, diff)
ind <- order(abs(d), decreasing = TRUE)[1:2]
plot(x[, ind], col = y)
## Q6
# Now we are going to increase the complexity
# of the challenge slightly. Repeat the LDA analysis
# from Q5 but using all tissue types. Use the following
# code to create your dataset:
library(dslabs)
library(caret)
data("tissue_gene_expression")
# set.seed(1993) #
set.seed(1993, sample.kind="Rounding") # if using R 3.6 or later
y <- tissue_gene_expression$y
x <- tissue_gene_expression$x
x <- x[, sample(ncol(x), 10)]
fit_lda <- train(x, y, method = "lda", preProcess = "center")
fit_lda$results["Accuracy"]
# What is the accuracy using LDA?
# Accuracy
# 1 0.8194837 ## YES, correct
## Explanation from the website:
# The following code can be used to obtain
# the accuracy of the LDA:
fit_lda <- train(x, y, method = "lda", preProcess = c("center"))
fit_lda$results["Accuracy"]
|
8d76ab192d61c71612974b4f7e97ef4d7e8f01d2 | 30ad23d2b71cb8a59866fda53676f1dfe6b99cc0 | /Prediction.R | a825abe2bc2957714eeb7b629ff6d1a06bb187b3 | [
"MIT"
] | permissive | kaswani29/RestaurantRevenuePrediction | 35e1b0fb9a305b177272b5a6741c594e136005df | 25d7c9dbeb308f2bf8fd4f5c9eb95139693d06f2 | refs/heads/master | 2021-06-09T13:22:36.364427 | 2016-11-10T01:29:16 | 2016-11-10T01:29:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,783 | r | Prediction.R | library(caret)
library(doParallel)
set.seed(54321)
# 5/8/2015
#Revenue Prediction competition
#To download data visit:
# https://www.kaggle.com/c/restaurant-revenue-prediction/data
# Data Processing ---------------------------------------------------------
#download the files from
train <- read.csv("train.csv")
test <- read.csv("test.csv")
n.train <- nrow(train)
test$revenue <- 1
##Converting into single dataframe
myData <- rbind(train, test)
myData <- myData[,-1]
rm(train, test)
#normalize
normalize <- function(x){
return((x-min(x))/(max(x)-min(x)))}
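# quick check of the min-max scaling: returns 0.0 0.5 1.0
normalize(c(2, 6, 10))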
#Tranform Time
myData$Open.Date <- as.POSIXlt("04/30/2015", format="%m/%d/%Y") - as.POSIXlt(myData$Open.Date, format="%m/%d/%Y")
myData$Open.Date<- normalize(as.numeric(myData$Open.Date))
# summary(myData$Open.Date)
##Variable wise transformation
city <- data.frame(table(myData$City[1:n.train]))   # n.train = 137 training rows
# View(city[!city$Freq==0,])
#Consolidating Cities
myData$City <- as.character(myData$City)
myData$City[myData$City.Group == "Other"] <- "Other"
myData$City[myData$City == unique(myData$City)[4]] <- unique(myData$City)[2]
myData$City <- as.factor(myData$City)
#Consolidate Types
myData$Type <- as.character(myData$Type)
myData$Type[myData$Type=="DT"] <- "IL"
myData$Type[myData$Type=="MB"] <- "FC"
myData$Type <- as.factor(myData$Type)
value<- read.csv("values.csv")
new1<-cbind(myData,clustering=value)
myData$clustering<- value[,1]
#########################################################################
#Checking which columns are factor one at a time
checkfactor<- function(y){
x<- myData
x[,y]<- as.factor(x[,y])
# str(x)
#
fitControl <- trainControl(method = "repeatedcv",
number = 10,
                             ## repeated five times
repeats = 5) #2304291
set.seed(54321)
model1 <- train(revenue~.,
data=x[1:n.train,],trControl = fitControl,method = "rf",
importance=TRUE)
return (min(model1$results$RMSE))
}
a <- c(5,9,10,11,12,13:16,18:29,34:41)
b <- c(6:8,17,30:33)
fac_rmse <- sapply(a, checkfactor)
fac_rmse1 <- sapply(b, checkfactor)
summary(myData$P12)
vec <- c(5,9,10,11,12,13:16,18:29,34:41,6:8,17,30:33)
fac <- c(fac_rmse, fac_rmse1)
outp <- data.frame(cbind(fac, vec))
outp$flag <- outp$fac < 2304291   # flag columns that beat the reference RMSE
outp$score <- 2304291
factors <- outp$vec[outp$flag] #8 9 12 16 17 18 21 23 27 29 33 40
factors <- sort(factors)
numeral <- c(1, outp$vec[!outp$flag])
numeral <- sort(c(numeral))
numeral <- c(1,5,6,7,10,11,13,14,15,19,20,22,24,25,26,28,30,31,32,34,35,36,37,38,39,41)
str(myData[,numeral])
######factors#################
factors<- c(8,9, 12 ,16, 17, 18, 21, 23, 27, 29, 33, 40,43)
myData[,c(2,3,4,factors)]<- lapply(myData[,c(2,3,4,factors)],factor)
str(myData)
#############################################
#Preprocess
numeral<-c(1,5,6,7,10,11,13,14,15,19,20,22,24,25,26,28,30,31,32,34,35,36,37,38,39,41)
#Normalizing data by transformation
preProcValues <- preProcess(myData[1:n.train,numeral], method = "BoxCox")
myData <- predict(preProcValues, myData)
# myData$revenue <- log(myData$revenue)
summary(preProcValues)
# View(myData[1:n.train,])
######Cluster Analysis###########
d <- dist(myData[,c(1,5:7,10:11,13:15,19,20,22,24,25,28,30:32,34:39,41)], method = "euclidean")
hc <- hclust(d)
plot(hc, labels = myData[, 4])   # labels must cover all rows used to build hc
rect.hclust(hc, k = 3)
myData$deg <- as.factor(cutree(hc, k = 3))
table(myData$deg)
str(myData)
# Model prepration --------------------------------------------------------
#
# #run model in parallel
####################Random Forest##############################3
cl <- makeCluster(detectCores())
registerDoParallel(cl)
set.seed(54321)
# Control Parameters
fitControl <- trainControl(method = "repeatedcv",
number = 10,
                           ## repeated seven times
repeats = 7)
set.seed(54321)
model_rf <- train(revenue~.,
data=myData[1:n.train,],trControl = fitControl,method = "rf",
importance=TRUE)
model_rf
x <- model_rf$finalModel
x
importance <- varImp(model_rf, scale=FALSE)
importance
#############################other model#######################
# SVM with grid search
set.seed(54321)
# Control Parameters
fitControl <- trainControl(method = "repeatedcv",
number = 10,
                           ## repeated seven times
repeats = 7)
#c("P5","P10","P13","P17","P20","P21","P28","P29","P36","P2","P23","revenue")
# #8 9 12 16 17 18 21 23 27 29 33 40
#8, ##16, 17,18,9,13
set.seed(54321)
model_radial<- train(revenue~.,data=myData[1:n.train,c(numeral,42,43,16,18)],trControl = fitControl,method = "svmRadial",
                     tuneGrid = expand.grid(.sigma = c(.05), .C = c(0.88)))
model_radial$results$RMSE
model_radial
rm(model_radial)
#Polynomial kernel svm
# set.seed(54321)
# model_poly<- train(revenue~.,data=myData[1:n.train,c(numeral,42)],trControl = fitControl,method = "svmPoly",
# tuneGrid = expand.grid(.degree=c(2),.scale= (seq(0.01,.1,.01),.C=c(seq(.1,1,.05))))
# )
# model_poly
# degree = 2, scale = 0.01 and C = 0.25.
rm(model)
###########Feature Selection###############
##Genetic Algorithm for kernel
ptm <- proc.time()
ga_ctrl <- gafsControl(functions = rfGA,
method = "repeatedcv",
number = 6,
repeats = 5,
allowParallel = T,
genParallel = T)
rf_ga <- gafs(x = myData[1:n.train,1:41], y = myData[1:n.train,42],
iters = 150,
popSize = 100,
gafsControl = ga_ctrl)
rf_ga
proc.time() - ptm
plot(rf_ga) + theme_bw()
summary(rf_ga)
#####################rfe#########################################
##Recursive feature selection
control <- rfeControl(functions = rfFuncs, method = "repeatedcv", verbose = FALSE,
returnResamp = "final", number = 10, repeats = 10, allowParallel = TRUE)
subsets <- c(5:25)
ref1<- rfe(x = myData[1:n.train,c(1,3:41)], y = myData[1:n.train,42],
rfeControl = control, sizes = subsets)
ref1
#############################AFter GA and RFE ################################
# Results of GA and RFE
cols<- c("P5","P10","P13","P17","P20","P21","P28","P29","P36","P2","P23","revenue")
gaData<-myData[,cols]
# keep as factors the GA-selected columns that were factor-coded above
# (P-columns start at myData index 5, so index i corresponds to P(i-4))
fac_cols <- intersect(cols, paste0("P", factors - 4))
gaData[, fac_cols] <- lapply(gaData[, fac_cols], factor)
str(gaData)
# Prediction --------------------------------------------------------------
df_yhat_test <- predict(model_rf,myData[138:nrow(myData),])
df_yhat_test1 <- predict(model_radial,myData[138:nrow(myData),])
df_yhat_ensemble<- (.7*df_yhat_test + .3*df_yhat_test1)
output<- cbind("Id"= c(0:99999),"Prediction"= (df_yhat_ensemble))
write.csv(output,"df_yhat_ensemble.csv",row.names=FALSE,quote=FALSE)
|
c6f29c9dfa16b32b441206a999c80a345a4d2c2f | eb836d0d03b1f50f8cd7524cbeaf0b839a58c774 | /hw1/hw1_prob3_try2.R | 6e485ef789ece88e074a513b1c18149455ac18a5 | [] | no_license | nquist/R-Class | af65be1dd2a5bd8132c7dd4a91308448e15e32bc | 45d57366cabc89a0e284cbd494231b545852fafa | refs/heads/master | 2020-04-22T07:32:20.946866 | 2019-09-05T19:32:19 | 2019-09-05T19:32:19 | 170,219,810 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 180 | r | hw1_prob3_try2.R | # Regrade: 5/5
# Excellent!
print("This is an edit to the code for a second try.")
rm(list = ls())
print("Hello world!")
samp <- rnorm(100, mean = 10, sd = 3)
print(mean(samp))
|
edfd3dbccfad754c74bcee6733eb19eec4479b48 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/codemetar/examples/write_codemeta.Rd.R | 345c57861cef2724fe95ca38138544f1e6210638 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 220 | r | write_codemeta.Rd.R | library(codemetar)
### Name: write_codemeta
### Title: write_codemeta
### Aliases: write_codemeta
### ** Examples
## No test:
write_codemeta("codemetar", path = "example_codemetar_codemeta.json")
## End(No test)
|
0e0063a56f1718c4891e4ac985487b105c3a9459 | cbdeccd83cc91da4b9da37e858617dc6ab4c5a41 | /man/simplexample-package.Rd | 05f5acc08043fc1232a47ee95fcf6a7e43b8213e | [] | no_license | Sleepingwell/SimpleCInterfaceRPackage | 223fc644034da791af227a8bf918b4bdb91f6943 | 2045651a6167467337465a9da220ee1a9047266a | refs/heads/master | 2021-01-01T19:10:32.677467 | 2012-08-16T15:41:51 | 2012-08-16T15:41:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 746 | rd | simplexample-package.Rd | \name{simplexample-package}
\alias{simplexample-package}
\alias{simplexample}
\docType{package}
\title{Demonstrate calls to the .C, .Call and .External interfaces.}
\description{
A package that shows how one might use the .C, .Call and .External interfaces for calculating a dot product.
}
\details{
\tabular{ll}{
Package: \tab simplexample\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2012-08-16\cr
License: \tab What license is it under?\cr
}
The only function in this package is \code{dotproduct}, which calculates the dot product of its arguments using each of the calling conventions.
}
\author{
Simon Knapp
Maintainer: Simon Knapp <simon.knapp@verusol.com>
}
\examples{
x <- rnorm(10)
y <- rnorm(10)
dp <- dotproduct(x, y)
}
|
5baaa41318979bd0466e47615b2fc6ab9f6aaa6d | ec843d0167f14b8f70faf6d9223b94fd6e1f3491 | /Programi/cor_analisys_number_of_days_to_peak_deaths_and_proportion_of_infected.R | 8a4732650255998dbc7255facb391eed8f9b3fe6 | [] | no_license | KalcMatej99/Seminarska-VS-Covid-19 | c6757885d16d50c6a0f6ad2999cdfe45df1de05d | e592a3142afa96660d6938e69385fb74e169ba92 | refs/heads/master | 2022-12-19T11:08:59.724253 | 2020-09-10T10:58:43 | 2020-09-10T10:58:43 | 281,406,020 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 807 | r | cor_analisys_number_of_days_to_peak_deaths_and_proportion_of_infected.R | library(pracma)
fileDB <- './../Podatki/db.csv'
db<- read.csv(fileDB, header=TRUE, sep=",")
dataFrameDates <- data.frame(date=db$Date_of_peak_of_deaths,tx_start=db$Date_of_first_death)
dataFrameDates$date_diff <- as.Date(as.character(dataFrameDates$date), format="%Y-%m-%d")-
as.Date(as.character(dataFrameDates$tx_start), format="%Y-%m-%d")
N <- as.numeric(dataFrameDates$date_diff)
P <- 100 * db$Infected_to_peak/db$Population
plot(N,P,
main = "Vpliv števila dni do vrha prvega vala mrtvih na delež okuženih",
xlab = "Število dni do vrha prvega vala mrtvih",
ylab = "Delež okuženih")
r <- cor(N, P, method = "pearson")
s <- cor(N, P, method = "spearman")
rtest <- cor.test(N,P, method = "pearson")
stest <- cor.test(N,P, method = "spearman") |
3ce1291b4abcdf0cb3c5cda8adf1968478436f35 | a6ecdfb7028b10fadff7fdc9fc7110c51814761c | /day4/lineplots.R | c398df70ed97fff033b14d702c63bb00a46596b2 | [] | no_license | C-MOOR/jhu-intersession-2017 | 7ac21ecc3c1d0e20b2ae86e3396515b20651b80a | 7610aff9b622ba697fe43f49b221769a3c14493d | refs/heads/master | 2020-04-01T18:25:00.840344 | 2018-10-17T18:52:12 | 2018-10-17T18:52:12 | 153,491,047 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,326 | r | lineplots.R | selected_genes <- c(2:5)
library( "DESeq" )
# Use DESeq to normalize the read counts for each sample so they can be averaged
gut_metadata <- read.table( "/home/intro-rna/2017/midgut.tsv", header=TRUE )
cds <- newCountDataSetFromHTSeqCount( gut_metadata, directory="/home/intro-rna/2017" )
cds <- estimateSizeFactors( cds )
# make a data.frame of read counts in the correct orientation for the aggregate function
gene_counts <- data.frame(t(counts(cds, normalized=TRUE)[selected_genes,]))
# make a new data.frame of the mean count for each region
mean_counts <- aggregate(gene_counts,by=list(gut_metadata$condition), mean)
# scale counts so they can be plotted on one graph
scaled_counts <- apply(mean_counts[,-1], 2, function(x) x/max(x))
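# quick check: after scaling, every gene's column should peak at exactly 1
apply(scaled_counts, 2, max)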
plot(mean_counts[,2], type='l', xaxt='n', col="red", ylim=c(0,2000))
lines(mean_counts[,3], col="green")
lines(mean_counts[,4], col="blue")
lines(mean_counts[,5], col="purple")
axis( 1, at=1:10, labels=mean_counts$Group.1)
plot(scaled_counts[,1], type='l', xaxt='n', col="red", ylim=c(0.0, 1.0))
lines(scaled_counts[,2], col="green")
lines(scaled_counts[,3], col="blue")
lines(scaled_counts[,4], col="purple")
axis( 1, at=1:10, labels=mean_counts$Group.1)
|
3a035d0055c1c7c0339a86c75d4a80054a8eba7a | 88c0b70954bd568717719b01b20d090c475e29c5 | /plot1.R | 84167f97e51d908b536f75dd18b1ebf4d75cadaf | [] | no_license | fish515/ExData_Plotting1 | fc249ec6051c7455cb4ec465b9591a3ab4e0c99b | bcd349fc6daf0941a65acb8c07ce6442b5189081 | refs/heads/master | 2020-12-26T21:37:52.770221 | 2016-03-07T07:02:01 | 2016-03-07T07:02:01 | 53,297,145 | 0 | 0 | null | 2016-03-07T05:02:47 | 2016-03-07T05:02:47 | null | UTF-8 | R | false | false | 334 | r | plot1.R | data<-read.table("household_power_consumption.txt",header=TRUE,sep=";",na.strings = "?")
subsetdata <- data[data$Date %in% c("1/2/2007","2/2/2007"), ]
png("plot1.png", width=480, height=480)
hist(subsetdata$Global_active_power,col="red",xlab="Global Active Power(kilowatts)",ylab = "Frequency",main="Global Active Power")
dev.off() |
12d11fa23cf66cef9fb7a2febef858930d730ba6 | 23e87af56a0d581c32532fadb463913b55d3deb5 | /首日基線/predict.R | fe20814b24f2b5170861f973f5308e33453e1a98 | [] | no_license | ji1ai1/201907-AT | 15fb66ccc4840db6546929892e5dd41dfafaf294 | 22d13287a9b6d9967849021ebf55a2f4eeb7ac23 | refs/heads/master | 2023-01-14T02:47:04.882158 | 2020-11-07T16:04:18 | 2020-11-07T16:04:18 | 232,135,787 | 5 | 1 | null | null | null | null | UTF-8 | R | false | false | 917 | r | predict.R | #R version 3.6.1 (2019-07-05) -- "Action of the Toes"
#R package data.table 1.12.2
#Input:
# Antai_AE_round2_item_attr_20190813.csv
# Antai_AE_round2_test_20190813.csv
# Antai_AE_round2_train_20190813.csv
#Output:
# result.csv
#0.1606
library(data.table)
Testing = fread("Antai_AE_round1_test_20190626.csv")
Testing$si = as.double(as.POSIXct(Testing$create_order_time)) - as.double(as.POSIXct("2018-09-01"))
Items = Testing[, .(item_nrecords = .N), .(item_id)][order(-item_nrecords)]
Prediction = Testing[, .(score = sum(1 / (si / 86400) ** 64)), .(buyer_admin_id, item_id)]
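# With exponent 64 the weight 1 / (si/86400)^64 falls off extremely steeply,
# so each (buyer, item) score is dominated by the record with the smallest
# si (in days). A quick illustration of the decay:
days <- c(1, 1.05, 1.1, 2)
1 / days ** 64   # 1.00  ~0.044  ~0.0022  ~5.4e-20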
Prediction = Prediction[order(-score)]
Prediction = Prediction[, .(item_id = unique(c(item_id, Items$item_id[1:30]))[1:30]), .(buyer_admin_id)]
Prediction = Prediction[, .(prediction_string = paste(item_id, collapse ="," )), .(buyer_admin_id)]
write.table(Prediction, "result.csv", sep = ",", quote = F, row.names = F, col.names = F)
|
f74151c6b0768028646f95cda33b06c5c41a3add | accc3c168b0f9d6973b44a9a19211899b69384a6 | /fcbmmo/R/transform_log.R | 5a049a63069c91117a24aafb4c0a854057afccf9 | [] | no_license | fcbny/fcbmmo | f54d77a34ff3fcd99a15b5176beab553629159c5 | 6bdc4e1742a973fd55c72d4354ea9dc4f0424589 | refs/heads/master | 2021-01-10T19:47:10.368161 | 2014-10-31T18:54:54 | 2014-10-31T18:54:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 98 | r | transform_log.R | transform_log <-
function(x, base = exp(1), add = 0) {
	r <- log(x + add, base = base)
return(r)
}
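# quick check: log10 with an offset of 1 maps c(9, 99) to c(1, 2)
transform_log(c(9, 99), base = 10, add = 1)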
|
a05a337e78fe6895b9ea77bf243e107f32520cc1 | 92e828aeed0eb4203cd1890fbdcee430d6cc9773 | /src/API_pathways_KEGG.R | d1a056da12cb5abbb383cd8d7cb1e1d5bfbf7c5b | [
"MIT"
] | permissive | dy-lin/stat540-project | 46d9ee4f4638dbe933bdb607f3f8232e872a6847 | 20f040d9399ab5b9df0341b317e0f22eefafe5e8 | refs/heads/master | 2022-11-18T03:27:53.049420 | 2020-07-19T22:47:39 | 2020-07-19T22:47:39 | 259,114,940 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,879 | r | API_pathways_KEGG.R | # Script for performance of pathway enrichment analysis in KEGG using PathDIP portal (Rahmati et al., 2016) API
# For more information: http://ophid.utoronto.ca/pathDIP/API.jsp
library(httr)
library(tidyverse)
library(here)
# const values
url <- "http://ophid.utoronto.ca/pathDIP/Http_API"
searchOnGenesymbols <- function(IDs, component, sources) {
parameters <- list(
typeChoice = "Gene Symbol",
IDs = IDs,
TableName = component,
DataSet = sources
)
  # ... send http POST; the httr response object is the function's return value
  POST(url, body = parameters, encode = "form", verbose())
}
# make results-map as keyword - value
makeMap <- function(res) {
ENTRY_DEL = "\001"
KEY_DEL = "\002"
response = content(res, "text")
arr = unlist(strsplit(response, ENTRY_DEL, fixed = TRUE))
  list_map <- list()
  vec_map_names <- character(0)
for (str in arr) {
arrKeyValue = unlist(strsplit(str, KEY_DEL, fixed = TRUE));
if (length(arrKeyValue) > 1) {
list_map[length(list_map) + 1] <- arrKeyValue[2]
vec_map_names[length(vec_map_names) + 1] <- arrKeyValue[1]
}
}
names(list_map) <- vec_map_names
list_map
}
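# Toy illustration of the wire format parsed above: entries are separated
# by "\001" and each entry is "key\002value". (Hypothetical payload, far
# smaller than a real pathDIP response.)
toy <- paste("IDs\002TP53,BRCA1", "SummarySize\00242", sep = "\001")
strsplit(toy, "\001", fixed = TRUE)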
#####################################
# Example of search on Gene Symbols #
#####################################
# Gene Symbols
# - Comma delimited.
# - Mind case.
genes <- read.csv(here("results","final","primary_topGenes_limma2.csv"))
IDs <- toString(genes$gene)
# Data component
# - Uncomment the only one of those five:
# component <- "Literature curated (core) pathway memberships"
component <- "Extended pathway associations. Protein interaction set: Experimentally detected PPIsMinimum confidence level for predicted associations: 0.99"
# component <- "Extended pathway associations. Protein interaction set: Experimentally detected PPIsMinimum confidence level for predicted associations: 0.95"
# component <- "Extended pathway associations. Protein interaction set: Experimentally detected and computationally predicted PPIs (full IID)Minimum confidence level for predicted associations: 0.99"
# component <- "Extended pathway associations. Protein interaction set: Experimentally detected and computationally predicted PPIs (full IID)Minimum confidence level for predicted associations: 0.95"
# Data sources
# - Use some or all of those:
# BioCarta, EHMN, HumanCyc, INOH, IPAVS, KEGG, NetPath, OntoCancro, PharmGKB, PID, RB - Pathways, Reactome, stke, systems - biology.org, Signalink, SIGNOR, SMPDB, Spike, UniProt_Pathways, WikiPathways
# - Comma delimited.
# - Mind exact spelling.
#sources <- "BioCarta,EHMN,HumanCyc,INOH,IPAVS,KEGG,NetPath,OntoCancro,PharmGKB,PID,RB-Pathways,Reactome,stke,systems-biology.org,Signalink,SIGNOR,SMPDB,Spike,UniProt_Pathways,WikiPathways";
# Soureces specifically set to KEGG for our pathway enrichment analysis
sources<- "KEGG"
res <- searchOnGenesymbols(IDs, component, sources)
responseCode = status_code(res)
if (responseCode != 200) {
cat("Error: Response Code : ", responseCode, "\r\n")
} else {
list_map <- makeMap(res)
# print results
cat("\r\n", "Search on Uniprot IDs:", "\r\n")
cat("Generated at: ", unlist(list_map["GeneratedAt"]), "\r\n")
cat("IDs: ", unlist(list_map["IDs"]), "\r\n")
cat("DataComponent: ", unlist(list_map["TableName"]), "\r\n")
cat("Sources: ", unlist(list_map["DataSet"]), "\r\n")
cat("\r\n", "Summary size: ", unlist(list_map["SummarySize"]), "\r\n")
sm <- unlist(list_map["Summary"])
cat("Summary: \r\n", sm, "\r\n") # formatted as tab - delimited spreadsheet
cat("\r\n", "Details size: ", unlist(list_map["DetailsSize"]), "\r\n")
dl <- unlist(list_map["Details"])
cat("Details: \r\n", dl, "\r\n") # formatted as tab - delimited spreadsheet
df.Summary <- read_tsv(sm)
df.Details <- read_tsv(dl)
write.csv(df.Details,here("results","final","pathway_KEGG_primary_genes"))
} |
a7a65fc736fbb1742dbb2ead45c2517b2db66549 | 3d0787d24620c700303ecdc4453325b6235c0e5d | /01-variables_constants_operators/arithmetic.R | 507c7fbe77d819c80c71e52f662c494ddd87aab0 | [] | no_license | lincolnbrito/r-examples | 2244e0160944f65c276a58c9e97b5a4ead8923b4 | ab75c88576026ee839befb0f9acccf52ec6f2e4e | refs/heads/master | 2020-03-08T10:55:10.518422 | 2018-04-05T03:21:00 | 2018-04-05T03:21:00 | 128,084,946 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 190 | r | arithmetic.R | "
+ addition
- subtraction
* multiplication
/ division
^ exponent
%% modulus (remainder from division)
%/% integer division
"
x <- 5
y <- 16
x+y     # 21
x-y     # -11
x*y     # 80
y/x     # 3.2
y%/%x   # 3
y%%x    # 1
y^x |