blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6920f7bed0a6fcd1f70fdbb1281c373cae2eccc8 | 474f7358911271f9911197b3b96a4451b6f8da7e | /Diagrama_de_barras.R | fbd21355e3b8e904151fd4ef4231f8554796a3a0 | [] | no_license | imarcosenciso/BigDataCovid | 923f17945a80748bf427390d01a8ad42f2a9d9af | fea5afb0c1c1e292292d8878f7edcfc43aa21786 | refs/heads/main | 2023-02-15T06:05:43.463761 | 2021-01-10T19:26:13 | 2021-01-10T19:26:13 | 317,830,642 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,123 | r | Diagrama_de_barras.R | rm(list=ls())
# Exploratory script: COVID-19 positives and lethality by age group and sex,
# drawn as paired back-to-back ("population pyramid" style) bar charts.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
getwd()
dir()
# NOTE(review): stray bare `cat` — this just prints the function object;
# probably a leftover from editing.
cat
library(ggplot2)
library(ggpubr)
library(stringr)
theme_set(theme_pubr())
# Acquire data (first 12 rows / 17 columns of the semicolon-separated file)
datos_generales = read.csv("Datos/03.csv",stringsAsFactors = FALSE, sep=';')[1:12,1:17]
# Rename columns: age group plus positives/lethality by sex (M = women, H = men)
names(datos_generales)[1] <- "Edad"
names(datos_generales)[3] <- "PositivosM"
names(datos_generales)[4] <- "PositivosH"
names(datos_generales)[16] <- "LetalM"
names(datos_generales)[17] <- "LetalH"
# Build individual datasets for the ggplots
datos_generoEd = datos_generales[1:10,c(1,3,4)]
muerte_genEd = datos_generales[1:10,c(1,16,17)]
# Convert string to numeric, changing the decimal comma to a dot
muerte_genEd$LetalH = str_replace(muerte_genEd$LetalH, "," , ".")
muerte_genEd$LetalM = str_replace(muerte_genEd$LetalM, "," , ".")
muerte_genEd$LetalH = as.double(muerte_genEd$LetalH)
muerte_genEd$LetalM = as.double(muerte_genEd$LetalM)
summary(muerte_genEd)
######################
# Graphs #
######################
# Left panel: positives in men; y axis reversed so bars grow to the left.
# NOTE(review): geom_bar(stat = "identity") is duplicated in each plot below —
# the identical layer is drawn twice; likely copy-paste.
H=ggplot(datos_generoEd, aes(x=Edad, y=(PositivosH))) +
geom_bar(stat = "identity")+
geom_bar(stat = "identity")+
coord_flip()+
theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
)+
scale_y_reverse(limits=c(10000,0))+
scale_x_discrete(position = "top")
# Right panel: positives in women on the normal axis direction.
M=ggplot(datos_generoEd, aes(x=Edad, y=(PositivosM))) +
geom_bar(stat = "identity")+
geom_bar(stat = "identity")+
coord_flip()+
ylim(0,10000)+
theme(axis.title.y=element_blank())
# Arrange the two halves side by side (nrow = 2 leaves an empty second row).
figure <- ggarrange(H, M,
ncol = 2, nrow = 2)
figure
# Same back-to-back layout for lethality: women (reversed axis) vs men.
MM=ggplot(muerte_genEd, aes(x=Edad, y=(LetalM))) +
geom_bar(stat = "identity")+
geom_bar(stat = "identity")+
coord_flip()+
theme(axis.title.y=element_blank(),
axis.text.y=element_blank(),
)+
scale_y_reverse(limits=c(0.4,0))+
scale_x_discrete(position = "top")
HM=ggplot(muerte_genEd, aes(x=Edad, y=(LetalH))) +
geom_bar(stat = "identity")+
geom_bar(stat = "identity")+
coord_flip()+
ylim(0,0.4)+
theme(axis.title.y=element_blank())
figureM <- ggarrange(MM, HM,
ncol = 2, nrow = 2)
figureM
|
256a743d2b7f56dd6e9b2d1781cdb1322abef5c3 | e6e6768e6be64663485beaf7e5bef7a2f0204046 | /R/Microarray-Stuff/matrix_.R | 745e8ab410b1efe60d4a52d958cbe6daf97fa251 | [] | no_license | BJWiley233/Practical-Computer-Concepts-Files | 9d8a8424c574ca5bfc70658d9b724349d7119524 | f157f2dcc2c9974546ad3939afefbac1100b1c27 | refs/heads/master | 2023-04-27T19:54:42.882383 | 2023-04-18T23:42:09 | 2023-04-18T23:42:09 | 203,980,842 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,201 | r | matrix_.R | library(affy)
# Microarray preprocessing for GSE79196 (B-cell lymphoproliferative disorders):
# build an ExpressionSet from an RMA expression matrix plus phenotype data,
# label samples by diagnostic group, and flag outlier samples.
setwd("/home/coyote/JHU_Fall_2020/Data_Analysis/project")
my.gse <- "GSE79196"
# NOTE(review): file name is paste0("GSE79196", "justRMA_keep_matrix.txt") with
# no separator — confirm the file really is named without an underscore.
mat <- as.matrix(read.table(paste0(my.gse,"justRMA_keep_matrix.txt"),
sep = "\t", header = T))
dim(mat)
annot <- AnnotatedDataFrame(read.table(paste0(my.gse,"_SelectPhenoData.txt"),
sep = "\t", header = T))
colnames(annot)
b.cell.expr.set <- ExpressionSet(assayData = mat,
phenoData = annot)
# Re-reads the phenotype file just to display the sample names (a visual check;
# the result is not assigned).
sampleNames(AnnotatedDataFrame(
read.table(paste0(my.gse,"_SelectPhenoData.txt"),
sep = "\t", header = T)))
# Diagnostic group = first word of the "ertitle" phenotype field.
groups <- gsub(" .*", "", pData(b.cell.expr.set)$ertitle)
## B-CLPD: B-cell chronic lymphoproliferative disorders
## CLL: Chronic Lymphocytic Leukemia
## cMCL: conventional Mantle cell lymphoma (MCL)
## nnMCL: leukemic nonnodal Mantle cell lymphoma (MCL)
## SMXL: Splenic marginal zone lymphoma (SMZL)
table(groups)
df <- exprs(b.cell.expr.set)
# Strip everything after the first "." from sample ids, then prefix the group.
colnames(df) <- gsub("\\..*", "", colnames(df))
colnames(df) <- paste0(groups, "_", colnames(df))
# Per-sample mean and coefficient of variation of expression.
means = apply(df, 2, function(x) mean(x, na.rm=T))
cvs = apply(df, 2, function(x) sd(x)/mean(x))
# Outliers: unusually high mean or unusually low CV (hand-picked cutoffs).
outliers <- c(which(means>5.23), which(cvs<0.407))
6f07e66cc0173b9dcce5220eefa2a90ec95f68cd | 1b27943fccccbe193c36d332ebfdb5cf2cb15bcf | /scatMatLegend.R | 8399fe84f1fcefdea8aee936b788a63ba8516761 | [] | no_license | Shians/bigPint | 5d3dca62351f54b125b25ac9e9ad839ccc5cdd28 | 85eee64c2d1be153833b9161f99248b2f1f72a55 | refs/heads/master | 2021-01-01T16:44:03.859672 | 2017-07-21T03:28:35 | 2017-07-21T03:28:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,916 | r | scatMatLegend.R | library(plotly)
# Exploratory scratch script: experiments with hexbin scatterplot-matrix panels
# and with getting a binned-count legend to survive ggplotly() conversion.
# Several sections below are unfinished notes rather than runnable code.
library(shiny)
library(ggplot2)
library(hexbin)
library(RColorBrewer)
#rm(list=ls())
set.seed(1)
#dat <- data.frame(ID = paste0("ID", 1:1010), A.1 = c(rep(0.5, 1000), abs(rnorm(10))), A.2 = c(rep(0.5, 1000), abs(rnorm(10))), B.1 = c(rep(0.5, 1000), abs(rnorm(10))), B.2 = c(rep(0.5, 1000), abs(rnorm(10))), C.1 = c(rep(0.5, 1000), abs(rnorm(10))), C.2 = c(rep(0.5, 1000), abs(rnorm(10))), C.3 = c(rep(0.5, 1000), abs(rnorm(10))), stringsAsFactors = FALSE
# NOTE(review): A.2 draws only 10 values, recycled to 10000 rows — possibly a
# typo for rnorm(10000); confirm.
dat <- data.frame(ID = paste0("ID", 1:10000), A.1 = abs(rnorm(10000)), A.2 = abs(rnorm(10)), B.1 = abs(rnorm(10000)), B.2 = abs(rnorm(10000)), C.1 = abs(rnorm(10000)), C.2 = abs(rnorm(10000)), stringsAsFactors = FALSE)
# Columns whose name prefix (before the ".") is in group A or C.
sampleIndex <- which(sapply(colnames(dat), function(x) unlist(strsplit(x,"[.]"))[1]) %in% c("A", "C"))
datSel <- dat[,c(1, sampleIndex)]
sampleIndex1 <- which(sapply(colnames(datSel), function(x) unlist(strsplit(x,"[.]"))[1]) %in% c("A"))
sampleIndex2 <- which(sapply(colnames(datSel), function(x) unlist(strsplit(x,"[.]"))[1]) %in% c("C"))
minVal = min(datSel[,-1])
maxVal = max(datSel[,-1])
maxRange = c(minVal, maxVal)
xbins= 8
buffer = (maxRange[2]-maxRange[1])/(xbins/2)
# Stack every A-column against every C-column pairing into two long vectors.
x <- c()
y <- c()
for (i in 1:length(sampleIndex1)){
for (j in 1:length(sampleIndex2)){
x <- c(x, unlist(datSel[,(sampleIndex1[i])]))
y <- c(y, unlist(datSel[,(sampleIndex2[j])]))
}
}
# Hex-bin the pooled pairs and carry the bin id/count into a data frame.
h <- hexbin(x=x, y=y, xbins=xbins, shape=1, IDs=TRUE, xbnds=maxRange, ybnds=maxRange)
hexdf <- data.frame (hcell2xy (h), hexID = h@cell, counts = h@count)
attr(hexdf, "cID") <- h@cID
# Manual count breaks and a blue palette (drop the 2 palest shades).
my_breaks = c(2, 4, 6, 8, 20, 1000)
clrs <- brewer.pal(length(my_breaks)+3, "Blues")
clrs <- clrs[3:length(clrs)]
hexdf$countColor <- cut(hexdf$counts, breaks=c(0, my_breaks, Inf), labels=as.character(c(0, my_breaks)), include.lowest=TRUE, right=FALSE)
# No problem (but not colored into breaks)
# ggplot(hexdf, aes(x=x, y=y, hexID=hexID, fill=countColor)) + geom_hex(stat="identity") + geom_abline(intercept = 0, color = "red", size = 0.25) + labs(x = "A", y = "C") + coord_fixed(xlim = c(-0.5, (maxRange[2]+buffer)), ylim = c(-0.5, (maxRange[2]+buffer))) + theme(aspect.ratio=1)
# Legend shows the wrong label counts
p <- ggplot(hexdf, aes(x=x, y=y, hexID=hexID, counts=counts, fill=countColor)) + geom_hex(stat="identity") + scale_fill_manual(labels = as.character(c(0, my_breaks)), values = rev(clrs), name = "Count") + geom_abline(intercept = 0, color = "red", size = 0.25) + labs(x = "A", y = "C") + coord_fixed(xlim = c(-0.5, (maxRange[2]+buffer)), ylim = c(-0.5, (maxRange[2]+buffer))) + theme(aspect.ratio=1)
# Legend counts are not labelled correctly (but does not do >=, just >)
ggplotly(p)
######################
# Unrelated toy example: bar chart of response proportions with manual colours.
Service <- c("Satisfied", "Dissatisfied", "Neutral", "Satisfied", "Neutral")
Service2 <- c("Dissatisfied", "Dissatisfied", "Neutral", "Satisfied", "Satisfied")
Services <- data.frame(Service, Service2)
ServicesProp <- Services %>% select(Service) %>% group_by(Service) %>%
summarise(count=n()) %>% mutate(percent = count / sum(count))
p <- ggplot(ServicesProp, aes(x = Service, y = percent, fill = Service)) +
geom_bar(stat = "identity", position = "dodge") +
scale_fill_manual(values = c("red", "grey", "seagreen3"))
# scale_fill_distiller and setting fill = counts (SO suggestion)
######################
library(RColorBrewer)
library(ggplot2)
clrs <- brewer.pal(8, "Blues")
# NOTE(review): `d` (and its columns x, y, z) is never defined in this file —
# these two calls will fail unless `d` exists in the workspace.
ggplot(d, aes(x=x, y=y, colour=z)) + geom_point(size=5)
ggplot(d, aes(x=x, y=y, colour=factor(z))) + geom_point(size=5) + scale_colour_manual(values=c("2"=clrs[8], "4"=clrs[7], "8"=clrs[6],"16"=clrs[5], "32"=clrs[4], "64"=clrs[3], "128"=clrs[2], "1024"=clrs[1]))
######################
colfunc <- colorRampPalette(c("blue", "red"))
colfunc(10)
ggplotly(p)
ggplotly(p) %>% layout(height = 200, width = 200)
ggplotly(p, height=400, width=400)
##########
colfunc <- colorRampPalette(c("black", "white"))
# use the scales package color ramp (or brewer.pal), Could also return a longer vector and only choose a subset.
# start by default value of 0 to their minimum values (0-2, 2-4, 4-6, 6-8, 8-20, 20-1000, 1000+)
# boxCox transformation (same as logging, but with p close to 0)
# can do scale_x_log10 (but can't do scale_fill_log10)
colfunc(length(my_breaks))
# NOTE(review): unfinished stub — the loop body is empty and nothing is returned.
getBreakVal <- function(my_breaks, vector){
for (i in 1:length(vector)){
}
}
vector <- c(2,20,50,100,900,1000)
# scale_fill_identity (set column with hex Colors)
# NOTE(review): dangling assignment — the parser will continue past the comment
# lines and take the next expression (require(ggplot2) below) as the value of
# hexdf$scaleColor. Unfinished edit.
hexdf$scaleColor <-
###### Creating legend for hex bins
# https://stackoverflow.com/questions/8069837/is-there-a-built-in-way-to-do-a-logarithmic-color-scale-in-ggplot2
require(ggplot2)
n <- 1e5
df <- data.frame(x = rexp(n), y = rexp(n))
p <- ggplot(df, aes(x = x, y = y)) + stat_binhex()
my_breaks = c(2, 10, 50, 100, 200, 6000)
p <- p + scale_fill_gradient(name = "count", trans = "log", breaks = my_breaks, labels = my_breaks, guide="legend")
ggPS <- ggplotly(p)
ggPS
|
167d9abfd0d514c66fcf80464677428444e63cfc | fa571db675071e4b322f49d0d70702e148c24543 | /cleaning_plots.R | 901dfcc8be89bf252e622b9977e925fc8aace8e4 | [] | no_license | CYGUBICKO/hh | 49fcf28cde43d0908b50db45ebc9fef9acb5b293 | 198dcd66e4d948707c3fa3ebe177e54d647497ed | refs/heads/master | 2021-06-19T13:38:03.177664 | 2021-03-02T02:14:12 | 2021-03-02T02:14:12 | 187,945,920 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,289 | r | cleaning_plots.R | #### ---- Project: APHRC Wash Data ----
#### ---- Task: Cleaning raw data ----
#### ---- Fit switch data ----
#### ---- By: Steve and Jonathan ----
#### ---- Date: 2020 Mar 01 (Sun) ----
library(data.table)
library(dplyr)
library(tidyr)
library(tibble)
library(ggplot2)
# Global plot theme: black-and-white with no spacing between facet panels.
theme_set(theme_bw() +
theme(panel.spacing=grid::unit(0,"lines")))
library(scales)
# Loads the merged WASH dataset; presumably provides `full_df` used by
# summaryFunc() below — confirm the objects stored in the .rda file.
load("mergeWash.rda")
summaryFunc <- function(var){
  # Yearly proportion of "improved" responses for the service column named by
  # `var` (a string). Rows whose value starts with neither "improve" nor
  # "unimprove" are discarded; "improve*" is coded 1, "unimprove*" 0.
  # Returns one row per interview year with columns:
  #   intvwyear, prop, services (= var), overall (mean of the yearly props).
  full_df %>%
    filter(grepl("^improve|^unimprove", .data[[var]], ignore.case = TRUE)) %>%
    mutate(intvwyear = as.numeric(intvwyear)) %>%
    mutate_at(var, function(x)ifelse(grepl("^improve", x, ignore.case = TRUE), 1, 0)) %>%
    select(var, intvwyear) %>%
    rename("temp_var" = var) %>%
    group_by(intvwyear) %>%
    summarise(prop = mean(temp_var, na.rm = TRUE)) %>%
    mutate(services = var, overall = mean(prop))
}
#### ---- Overall service proportion per year ----
## Wash
# Proportion of "improved" responses per year for each WASH-survey service.
wash_prop <- lapply(c("cat_hhwatersource", "cat_hhgarbagedisposal", "cat_hhtoilettype"), summaryFunc)
wash_prop_df <- (bind_rows(wash_prop)
%>% mutate(services = gsub("cat\\_hh", "", services)
, df = "wash"
)
)
## HH
# Same computation for the household-survey variants of the three services,
# normalising their names so both sources share service labels.
hh_prop <- lapply(c("drinkwatersource_new", "garbagedisposal_new", "toilet_5plusyrs_new"), summaryFunc)
hh_prop_df <- (bind_rows(hh_prop)
%>% mutate(services = gsub("drink|\\_new", "", services)
, services = ifelse(grepl("toilet\\_", services), "toilettype", services)
, df = "hh"
)
)
### Combine wash and HH
prop_df <- bind_rows(wash_prop_df, hh_prop_df)
prop_df
## SB -> JD: Should the denominator be the number of HH?
# Trend lines per service (line type distinguishes the two data sources), with
# horizontal lines marking each series' overall mean proportion.
prop_plot <- (ggplot(prop_df, aes(x = intvwyear, y = prop, colour = services, lty=df))
+ geom_point()
+ geom_line()
+ geom_hline(aes(yintercept = overall, colour = services, lty = df))
# + geom_text(aes(x = max(intvwyear)-1, y = overall, label = paste0("Overall = ", scales::percent(overall)))
# , vjust = -1.5
# , show.legend = FALSE
# )
+ labs(x = "Years"
, y = "Proportions of\nimproved services"
, colour = "Services"
)
+ scale_y_continuous(labels = percent, limits = c(0,1))
+ scale_colour_discrete(breaks = c("watersource"
, "garbagedisposal"
, "toilettype"
)
)
# + facet_wrap(~df)
+ theme(legend.position = "bottom"
, plot.title = element_text(hjust = 0.5)
)
)
print(prop_plot)
|
cc412d162fc3853eeddd63e4d44f83409284d736 | e5ebddef173d10c4722c68f0ac090e5ecc626b8b | /IL2/bin/pstat5-cd25-ratio.R | 9791de938b320ace8ba97c804c271b079182137e | [] | no_license | pontikos/PhD_Projects | 1179d8f84c1d7a5e3c07943e61699eb3d91316ad | fe5cf169d4624cb18bdd09281efcf16ca2a0e397 | refs/heads/master | 2021-05-30T09:43:11.106394 | 2016-01-27T15:14:37 | 2016-01-27T15:14:37 | 31,047,996 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 702 | r | pstat5-cd25-ratio.R | #!/usr/bin/env Rscript
# CLI script: for one FCS flow-cytometry file (-f/--fcs), print the median and
# standard deviation of the per-event pSTAT5/CD25 intensity ratio,
# comma-separated, to stdout.
suppressPackageStartupMessages(library("optparse"))
option_list <- list(
make_option(c('-f', "--fcs"), help = "fcsfile to parse")
)
OptionParser(option_list=option_list) -> option.parser
parse_args(option.parser) -> opt
suppressMessages(suppressWarnings(suppressPackageStartupMessages(library(flowCore, quietly=TRUE, verbose=FALSE, warn.conflicts=FALSE))))
# NOTE(review): `trans` is created but never applied to the data below.
trans <- logicleTransform()
suppressWarnings(read.FCS(opt$fcs, alter.names=TRUE)) -> f
# Locate the CD25 and pSTAT5 channels by case-insensitive parameter description.
cd25 <- ( f@exprs[,grep('cd25', f@parameters@data$desc, ignore.case=T)] )
pstat5 <- ( f@exprs[,grep('pstat5', f@parameters@data$desc, ignore.case=T)] )
r <- pstat5/cd25
cat( median(r, na.rm=T), sd(r, na.rm=T), '\n', sep=',' )
|
35bb51a2d72cff3a42f9d991390f00cba7e17950 | 2c88aced697d5ddc87f8421683e20aa99f70f96c | /plot1.R | f14b1b071aa9ad9e4566e0e5ef392b4c59e84017 | [] | no_license | VitorioProject/ExData_Plotting1 | 97c7bd05a3d1ccdc53bc1b6e9b7f5894ad58a377 | fc7444ff1b1db79f325fcc0c1c07b8d3d052c12c | refs/heads/master | 2020-12-03T03:33:01.804893 | 2015-05-07T21:17:26 | 2015-05-07T21:17:26 | 35,238,455 | 0 | 0 | null | 2015-05-07T19:08:56 | 2015-05-07T19:08:56 | null | UTF-8 | R | false | false | 358 | r | plot1.R | ## Reads the data from Internet, filters it and converts it to the format needed
# Load the power-consumption subset only if it is not already in the workspace.
if ( !exists("selData")) source('loadData.R')
## Creates plot1.png
png(filename = "plot1.png", width=480, height=480)
par(mfrow=c(1,1), new=F)
# Red histogram of global active power; the png device is closed afterwards.
hist(selData$Global_active_power, xlab = "Global Active Power (Kilowatts)", main = "Global Active Power", col = "red")
dev.off() |
8945b405ba0b59b84b963d51441a17846d170e22 | 164cfd8fceaf6053f6a32d842c161272a4057dc1 | /R/fusteUnico.R | 41f2328ad7f35e0afcb9386e80ccabcd5f9173d4 | [] | no_license | adalardo/Rppsp | 10e505e33a30912f2d89345c1a0a4a94d04e69c1 | 5fca2cc87f55df930a06016cc84d64dda5352b48 | refs/heads/master | 2022-09-28T16:02:41.270959 | 2022-09-16T12:35:30 | 2022-09-16T12:35:30 | 41,364,041 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 705 | r | fusteUnico.R | fusteUnico <-
function(dados, fuste = "stemtag", tag= "tag", dap= "dbh", censos= c("01","02"), sufixo= ".mm")
{
    # Collapse a multi-stem forest-inventory table into one row per tree:
    # for each tag the stems' basal areas are summed and re-expressed as a
    # single equivalent diameter in cm (via dap2cm), one column per census.
    #
    # dados  - data frame with one row per stem
    # fuste  - name of the stem-number column (the row with value 1 is kept)
    # tag    - name of the tree-identifier column
    # dap    - prefix of the diameter columns
    # censos - census suffixes used to build the diameter column names
    # sufixo - unit suffix of the input diameter columns (mm)
    namedbh <- paste(dap, censos, sufixo, sep = "")     # e.g. "dbh01.mm"
    new_namedbh <- paste(dap, censos, ".cm", sep = "")  # e.g. "dbh01.cm"
    dap.cm <- aggregate(dados[, namedbh], list(tag = dados[, tag]), FUN = dap2cm)
    # A zero equivalent diameter means no measured stem: treat it as missing.
    dap.cm[dap.cm == 0] <- NA
    # Keep the first stem's row as the tree-level record and attach the
    # aggregated per-census diameters by tag.
    dados.cm <- dados[dados[, fuste] == 1, ]
    mtag <- match(dados.cm[, tag], dap.cm[, tag])
    dados.cm[, new_namedbh] <- dap.cm[mtag, namedbh]
    # Drop the per-stem mm columns and the stem-number column.
    # Bug fix: previously the literal column name "fuste" was dropped instead
    # of the column named by the `fuste` argument (default "stemtag").
    dados.cm <- dados.cm[, !(names(dados.cm) %in% c(namedbh, fuste))]
    return(dados.cm)
}
####### calculate dap.cm from multistem dap.mm
dap2cm <- function(x)
{
    # Equivalent single-stem diameter (cm) for a tree whose stem diameters
    # `x` are given in mm: sum the stems' basal areas, then return the
    # diameter of a circle with that total area, rounded to 2 decimals.
    # NAs (unmeasured stems) are ignored.
    basal_area <- sum(pi * (x / 20)^2, na.rm = TRUE)  # mm diameter -> cm radius
    round(sqrt(4 * basal_area / pi), 2)
}
|
815444933cdb53424103d5a4230a046e6f8b75fc | 2b0e7454e2c87076f4f97d35000bf3426b7d9aaa | /man/stockID2name.Rd | 54d915e8e7b190557e102c6541550f9a864106a0 | [] | no_license | raphael210/QDataGet | 52df9d791d7d1d8933555dbdfa9d81e42558a5ee | 83531020e180fe8d07fdfa4a75413fd2b95cd6b4 | refs/heads/master | 2020-04-12T06:29:33.198718 | 2019-02-01T07:50:14 | 2019-02-01T07:50:14 | 64,194,185 | 0 | 5 | null | 2017-03-16T03:29:45 | 2016-07-26T06:00:12 | R | UTF-8 | R | false | true | 629 | rd | stockID2name.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pub02_dataGettingFuncs.R
\name{stockID2name}
\alias{stockID2name}
\title{stockID2name}
\usage{
stockID2name(stockID, datasrc = defaultDataSRC())
}
\arguments{
\item{stockID}{a vector}
}
\value{
a vector
}
\description{
stockID2name
}
\examples{
stockID2name("EQ000527")
}
\seealso{
Other SecuMain functions: \code{\link{SecuCategory}},
\code{\link{SecuMarket}}, \code{\link{stockID2stockID}},
\code{\link{stockID2tradeCode}},
\code{\link{stockName2ID}},
\code{\link{tradeCode2stockID}}, \code{\link{trday.IPO}},
\code{\link{trday.unlist}}
}
|
5dbdec3194668bacb509f1b0b1168c60af130a9d | c24dcd5fc08973fc0a55348293231f17d064d5f5 | /R/data.R | 1380ead207557a357c1770ee7d9283904041507c | [
"MIT"
] | permissive | sebastianueckert/vpc | 2b2ab1bb1186046ba9d0985d120a2f82ef5d882f | 1da3d4dc9a6d46dde36c31cd995f252e4b435798 | refs/heads/master | 2020-12-31T07:19:30.309908 | 2017-08-01T16:34:39 | 2017-08-01T16:34:39 | 86,567,724 | 0 | 0 | null | 2017-03-29T10:13:11 | 2017-03-29T10:13:10 | null | UTF-8 | R | false | false | 240 | r | data.R | #' A small rich dataset
#' @details
#' a list containing the obs and sim data for an example dataset to run a
#' simple vpc.
#' @examples
#' \dontrun{
#' vpc(simple_data$sim, simple_data$obs)
#' }
#' @docType data
#' @name simple_data
NULL |
c43500ee4a1ae55dec7d4638213a2d7d1d4ce678 | 8ee1326bab89b20e1bd63e839a701019f01e29ae | /man/mu.mbl.varknown.Rd | d70b5f59463187475ad774d928c6ab33a4be603d | [] | no_license | cran/SampleSizeMeans | f059c95aa13cb9b1af280f0178acbad1cb1ee766 | 8924ac8d89e8999707b3efd711bdc9336b104bf5 | refs/heads/master | 2023-02-25T10:55:10.170509 | 2023-02-02T12:10:06 | 2023-02-02T12:10:06 | 17,693,632 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,858 | rd | mu.mbl.varknown.Rd | \name{mu.mbl.varknown}
\alias{mu.mbl.varknown}
\title{Bayesian sample size determination for estimating a single normal mean with known variance using the Mixed Bayesian/Likelihood criteria}
\description{The function \code{mu.mbl.varknown} returns the required sample size
to reach a desired posterior credible interval length and coverage probability for a normal mean - using a mixed Bayesian/likelihood approach - when the variance is known.}
\usage{mu.mbl.varknown(len, lambda, level = 0.95)}
\arguments{
\item{len}{The desired total length of the posterior credible interval for the mean}
\item{lambda}{The known precision (reciprocal of variance)}
\item{level}{The desired coverage probability of the posterior credible interval (e.g., 0.95)}
}
\details{Assume that a sample will be collected in order to estimate
the mean of a normally distributed random variable with known precision \emph{lambda} (where the precision is the reciprocal of the variance).
The function \code{mu.mbl.varknown} returns the
required sample size to attain the desired length \emph{len} and
coverage probability \emph{level} for the posterior credible interval for the unknown mean.\cr\cr
This function uses a Mixed Bayesian/Likelihood (MBL) approach.
MBL approaches use the prior information to derive the predictive distribution of the data, but use only the likelihood function for final inferences.
This approach is intended to satisfy investigators who recognize that prior information is important for planning purposes but prefer to base final inferences only on the data.}
\note{The sample size returned by this function is exact.}
\value{The required sample size given the inputs to the function.}
\references{Joseph L, Belisle P.\cr
Bayesian sample size determination for Normal means and differences between Normal means\cr
The Statistician 1997;46(2):209-226.}
\author{Lawrence Joseph \email{lawrence.joseph@mcgill.ca} and Patrick Belisle}
\seealso{\code{\link{mu.mblacc}}, \code{\link{mu.mblalc}}, \code{\link{mu.mblmodwoc}}, \code{\link{mu.varknown}}, \code{\link{mu.acc}}, \code{\link{mu.alc}}, \code{\link{mu.modwoc}}, \code{\link{mu.freq}}, \code{\link{mudiff.mbl.varknown}}, \code{\link{mudiff.mblacc}}, \code{\link{mudiff.mblalc}}, \code{\link{mudiff.mblmodwoc}}, \code{\link{mudiff.mblacc.equalvar}}, \code{\link{mudiff.mblalc.equalvar}}, \code{\link{mudiff.mblmodwoc.equalvar}}, \code{\link{mudiff.varknown}}, \code{\link{mudiff.acc}}, \code{\link{mudiff.alc}}, \code{\link{mudiff.modwoc}}, \code{\link{mudiff.acc.equalvar}}, \code{\link{mudiff.alc.equalvar}}, \code{\link{mudiff.modwoc.equalvar}}, \code{\link{mudiff.freq}}}
\examples{mu.mbl.varknown(len=0.2, lambda=1/4)}
\keyword{design}
\concept{Bayesian sample size}
\concept{Normal mean}
\concept{Mixed Bayesian/Likelihood MBL}
\concept{known variances}
|
3b56e065cbe1373633ce4aaf43a7a0aa6cd614d7 | 2b872ebbe0f8537e41e99ce3c7fb31b4468e2d04 | /plot3.R | 8fe06e5aec66fd8c577af61d595c287cacb5b05f | [] | no_license | marcocaldascruz/ExData_Plotting1 | 025a98424fed045b5e08a3c19d26ad5961a91ff3 | 150569d0c568c0880a9c8d13ae4a95f065526eef | refs/heads/master | 2021-01-17T22:47:36.703041 | 2015-08-10T01:39:48 | 2015-08-10T01:39:48 | 40,441,662 | 0 | 0 | null | 2015-08-09T15:53:51 | 2015-08-09T15:53:49 | null | UTF-8 | R | false | false | 1,298 | r | plot3.R | #Read the data.
#data <- read.table("household_power_consumption.txt", sep=";", header=TRUE);
# NOTE(review): the read.table call above is commented out, so `data` below is
# expected to already exist in the workspace (otherwise the name resolves to
# the base `data` function) — confirm.
data_wk <- data;
# Keep only the two target days (1-2 Feb 2007).
data_wk$Date <- as.Date(data_wk$Date, "%d/%m/%Y");
filter_date <- (data_wk$Date == "2007-02-01" | data_wk$Date == "2007-02-02");
data_wk <- data_wk[filter_date, ];
#Convert to numeric the column of the Global Active Power.
data_wk$Global_active_power = as.numeric(data_wk$Global_active_power);
data_wk$Sub_metering_1 = as.numeric(data_wk$Sub_metering_1);
data_wk$Sub_metering_2 = as.numeric(data_wk$Sub_metering_2);
data_wk$Sub_metering_3 = as.numeric(data_wk$Sub_metering_3);
# Combined date-time column used for the x axis.
data_wk <- cbind(data_wk, DateTime=strptime(paste(data_wk$Date, data_wk$Time), "%Y-%m-%d %H:%M:%S"));
#Plot 3.
png(file="plot3.png", width=480, height=480);
Sub_metering_2_trf <- data_wk$Sub_metering_2;
# NOTE(review): recodes the value 14 to 4 before plotting — the reason is not
# documented; confirm this adjustment is intentional.
Sub_metering_2_trf[Sub_metering_2_trf==14] <- 4;
par(lab=c(3,4,10));
plot(data_wk$DateTime, data_wk$Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering", fg="black", ylim=c(0, 35));
lines(data_wk$DateTime, Sub_metering_2_trf, type="l", col="red", xlab="")
lines(data_wk$DateTime, data_wk$Sub_metering_3, type="l", col="blue", xlab="")
legend('topright', c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lty=1, text.col="black");
dev.off();
|
a9df8c790d6b3bc40ea93ae855d884be9d4083b3 | 67efbed42d64fd5df2b3a9fb0242ff6c9205365f | /man/rdrobust_RDsenate.Rd | c760d775bbc83517e3c05ab714759f4d08826ef7 | [] | no_license | cran/rdrobust | 673a77c95a2af57e36d1aa695e732067d8bbe5a2 | 9ae83e4bb8f685d43cb19d604a6400eac2d33b38 | refs/heads/master | 2022-12-14T13:15:25.214049 | 2022-12-05T07:10:02 | 2022-12-05T07:10:02 | 20,614,735 | 4 | 5 | null | 2017-06-02T14:05:17 | 2014-06-08T10:44:29 | R | UTF-8 | R | false | false | 1,213 | rd | rdrobust_RDsenate.Rd | \name{rdrobust_RDsenate}
\alias{rdrobust_RDsenate}
\docType{data}
\title{RD Senate Data}
\description{
Extract of the dataset constructed by Cattaneo, Frandsen, and Titiunik (2015), which include measures of incumbency advantage in the U.S. Senate for the period 1914-2010.
}
\usage{data(rdrobust_RDsenate)}
\format{
A data frame with 1390 observations on the following 2 variables.
\describe{
\item{\code{margin}}{a numeric vector.}
\item{\code{vote}}{a numeric vector.}
}
}
\source{
Cattaneo, M. D., Frandsen, B., and R. Titiunik. 2015. \href{https://rdpackages.github.io/references/Cattaneo-Frandsen-Titiunik_2015_JCI.pdf}{Randomization Inference in the Regression Discontinuity Design: An Application to the Study of Party Advantages in the U.S. Senate.} \emph{Journal of Causal Inference} 3(1): 1-24.
}
\references{
Cattaneo, M. D., Frandsen, B., and R. Titiunik. 2015. \href{https://rdpackages.github.io/references/Cattaneo-Frandsen-Titiunik_2015_JCI.pdf}{Randomization Inference in the Regression Discontinuity Design: An Application to the Study of Party Advantages in the U.S. Senate.} \emph{Journal of Causal Inference} 3(1): 1-24.
}
\keyword{datasets}
|
0dea17f70dc66d4e825661dcbdba7b5a24a8abaa | fb21f877d876327542d0cee3fe9c625953cc30e5 | /man/dmix.Rd | c2dbf948f6ba5e9afbd78b93acd584187fbd49c1 | [] | no_license | cran/fitmix | 5553338ebcafeec3be109ac9c78b3b2634c3ef48 | 29879e03d17a407f3e3870e82bf5ace314d876ed | refs/heads/master | 2023-04-12T00:03:45.486129 | 2021-04-19T07:50:04 | 2021-04-19T07:50:04 | 359,513,558 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 879 | rd | dmix.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dmix.R
\name{dmix}
\alias{dmix}
\title{The mixture distribution}
\usage{
dmix(lifespan, model, K, param)
}
\arguments{
\item{lifespan}{Vector of samples}
\item{model}{choice of one of the mixture models; \code{gompertz}, \code{log-logistic}, \code{log-normal}, and \code{weibull}.}
\item{K}{number of components}
\item{param}{Vector of weight \eqn{\omega}, shape \eqn{\alpha}, and scale \eqn{\beta} parameters.}
}
\value{
A vector of the same length as lifespan data, given the pdf of the one of the mixture models computed at \code{lifespan}.
}
\description{
Computing probability density function for the well-known mixture models.
}
\examples{
lifespan<-seq(0,30,0.2)
K<-2
weight<-c(0.6,0.4)
alpha<-c(0.5,1)
beta<-c(1,0.5)
param<-c(weight,alpha,beta)
dmix(lifespan, "log-logistic", K, param)
}
|
1e017531e8b773a6155e6a8a41f6d08ca43f65f5 | 7fea3e7e2c7fa26b16d4580167207660e1f0c1fb | /Cost Savings plot script.R | 8bb01d059a3ef6255023fe320313ff0ed1096a1d | [] | no_license | CuceDough/Aim-3 | 4d003c144f827adc40e262996673714729fa1a48 | 6ade4016e26c07490ab1cd002759b23d5c777f27 | refs/heads/master | 2020-04-05T22:52:04.859321 | 2015-03-17T04:50:56 | 2015-03-17T04:50:56 | 32,021,240 | 0 | 0 | null | 2015-03-11T21:28:17 | 2015-03-11T14:12:10 | R | UTF-8 | R | false | false | 1,557 | r | Cost Savings plot script.R |
# Plot projected cost savings against the vaccination rate, by group.
require(reshape2)
require(ggplot2)
# NOTE(review): the palette package on CRAN is "RColorBrewer" (capitalised);
# "colorbrewer" will not load — confirm the intended package.
require(colorbrewer)
### Plotting Cost savings
# NOTE(review): data.table() is used below but the data.table package is not
# loaded here — confirm it is attached elsewhere.
cost.dt <- data.table(read.csv("cost savings.csv")) #input
cost.melt <- melt(cost.dt, id.vars = "Vax", variable.name = "type" ) #melted
# NOTE(review): incomplete expression — the trailing "==" makes the parser
# consume the next non-comment expression (the first ggplot chain below) as
# the right-hand side of the comparison. Likely an unfinished edit.
cost.melt$type <- cost.melt$type ==
#graph
ggplot(data=cost.melt, aes(x=Vax, y=value, colour=type)) + geom_line() + geom_point() + #
theme(axis.text.x = element_text(colour = 'black', angle = 90, size = 13, hjust = 0.5, vjust = 0.5),axis.title.x=element_blank()) +
ylab("Cost Savings in Millions, $)") + theme(axis.text.y = element_text(colour = 'black', size = 12), axis.title.y = element_text(size = 12, hjust = 0.5, vjust = 0.2)) +
theme(strip.text.y = element_text(size = 11, hjust = 0.5, vjust = 0.5, face = 'bold'))
# Final version of the plot: thicker lines, bold labels, cleaned-up legend.
ggplot(data=cost.melt, aes(x=Vax, y=value, colour=type)) + theme_bw() + geom_line(aes(group=factor(type)),size=2) + geom_point() +
ylab("Cost Savings in Millions, $") + xlab("Vaccination Rate in 5-18 year olds") +
theme(axis.title.y = element_text(size = rel(1.3), angle = 90, face = "bold")) + #works making font larger
theme(axis.title.x = element_text(size = rel(1.3), angle = 00, face = "bold")) + #works
theme(axis.text = element_text(size = rel(1.2), colour = "black")) + # text size for axis numbers
theme(legend.title = element_text(colour="Black", size=12, face="bold"))+
scale_color_discrete(name="Group") + #names legend
theme(legend.text=element_text(size=10, face= "bold")) + # makes size and font for legend
theme(legend.key = element_blank()) # gets rid of borders
|
d6adedc564dff98b7342547f279f7ed86d3f0c77 | c6391f5061c1e6f37d2d67bb636e61703a1da5e7 | /get_clean_data/run_analysis.R | 6007e97c5457e306ca161cc7e29f034804205e41 | [] | no_license | shukohChin/datasciencecoursera | d53ac9fd7e2e41d44671b38ffe0b4597266d117b | faffffaf8fb64bd62b06b4c47ba01f8d8b13a1bb | refs/heads/master | 2021-01-15T18:50:48.848961 | 2014-09-26T13:52:43 | 2014-09-26T13:52:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,739 | r | run_analysis.R | RunAnalysis <- function() {
# assume the UCI HAR Dataset and R script to be in the working directory
# create a txt file of tidy data required
# Step 1: read the raw train/test splits (measurements, subjects, activities).
x.train <- read.table(paste0(getwd(), '/UCI HAR Dataset/train/X_train.txt'))
subject.train <- read.table(paste0(getwd(), '/UCI HAR Dataset/train/subject_train.txt'))
y.train <- read.table(paste0(getwd(), '/UCI HAR Dataset/train/Y_train.txt'))
x.test <- read.table(paste0(getwd(), '/UCI HAR Dataset/test/X_test.txt'))
subject.test <- read.table(paste0(getwd(), '/UCI HAR Dataset/test/subject_test.txt'))
y.test <- read.table(paste0(getwd(), '/UCI HAR Dataset/test/Y_test.txt'))
# Step 2: stack train and test (rows kept in train-then-test order).
data <- rbind(x.train, x.test)
subject <- rbind(subject.train, subject.test)
activity <- rbind(y.train, y.test)
# Step 3: name the measurement columns after the feature list, with the
# characters ( ) , - stripped so the names are syntactically clean.
features <- read.table(paste0(getwd(), '/UCI HAR Dataset/features.txt'))
features$V3 <- gsub("[(),-]", "", features$V2)
colnames(data) <- features$V3
colnames(subject) <- c("subject")
colnames(activity) <- c("activity")
# Step 4: keep only the mean/std features.
# NOTE(review): the pattern "std|mean" also matches any feature whose name
# merely contains "mean" (e.g. "meanFreq") — confirm that is intended.
tidy.features <- grep("std|mean", features$V2)
tidy.features.data <- subset(data, select = names(data)[tidy.features])
result.data <- cbind(subject, activity, tidy.features.data)
# Step 5: replace the numeric activity code with its descriptive label.
activity.label <- read.table(paste0(getwd(), '/UCI HAR Dataset/activity_labels.txt'))
act.name.merged.data <- merge(activity.label, result.data, by.x = "V1", by.y = "activity", all = TRUE)
tidy.data <- subset(act.name.merged.data, select = -c(V1))
names(tidy.data)[names(tidy.data) == 'V2'] <- 'activity'
# Step 6: average every feature per subject/activity pair, write tidy file.
library(reshape2)
melt.tidy.data <- melt(tidy.data, id = c("subject","activity"))
cast.data <- dcast(melt.tidy.data, subject + activity ~ variable, fun.aggregate = mean)
write.table(cast.data, "tidyData.txt", sep = "\t")
}
|
d227ae1b52297da074fd6e7ef7cca99fd14c70a1 | c4539e51bf7495d84473e11e4a05ad80644c5335 | /ayesha_hargey/Biostatistics_Assignment_Code.R | 3d9d994c91aa055c3f8614169e968790af588d21 | [] | no_license | ahargey/Intro_R_UWC | a905b96e2d869def399e1d3976f26e5f57fbc65c | 33975eb73b968b2d471a2073cf0ca1a87067aa13 | refs/heads/master | 2020-04-19T09:20:05.965394 | 2019-05-22T11:45:43 | 2019-05-22T11:45:43 | 168,107,215 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,246 | r | Biostatistics_Assignment_Code.R | #AYESHA HARGEY
#3650393
#Biostatistics Assignment
#13 May 2019
#Egg morphology in the common cuckoo (Cuculus canorus)
# Exploratory analysis of the DAAG `cuckoos` egg-measurement data:
# package setup, distribution/normality checks, and variance by species.
#Load libraries
library(fitdistrplus)
library(tidyverse) #loaded second to prevent masking of the select function
library(ggpubr)
library(RColorBrewer) #for palettes
library(ggthemes)
library(logspline)
library(e1071)
library(corrplot)
library(forcats)
library(reshape2)
#Dataset chosen
if (!require(DAAG)) install.packages('DAAG')
library(DAAG) #datasets and functions used in the book
#"Data Analysis and Graphics Using R"
cuckoos <- DAAG::cuckoos #naming and selecting the dataset
#Explore the data
glimpse(cuckoos) #overall preview of data, shows every column
head(cuckoos) #first six rows
tail(cuckoos) #last six rows
nrow(cuckoos) #number of rows
ncol(cuckoos) #number of columns
any(is.na(cuckoos)) #is there any missing data?
summary(cuckoos) #gives a summary of the mean, median, quartiles and min/max values
skewness(cuckoos$length) #skewness
#left-skewed / negative skewness
kurtosis(cuckoos$length) %>%
round(0)
#kurtosis is 0 which means data is normal
skewness(cuckoos$breadth) #skewness
#left-skewed / negative skewness
kurtosis(cuckoos$breadth) %>%
round(0)
#kurtosis is 0 which means data is normal
ggqqplot(cuckoos, x = "length") #checks for normality visually
ggqqplot(cuckoos, x = "breadth") #checks for normality visually
descdist(cuckoos$length, discrete = FALSE, boot = 100) #data is normal
descdist(cuckoos$breadth, discrete = FALSE, boot = 100) #data is normal
cuckoos %>% #test for homoscedasticity
group_by(species) %>%
summarise(length_var = var(length),
breadth_var = var(breadth))
#variance is not 2-4 times greater
#data is homoscedastic
#Per-species descriptive statistics for egg length.
cuckoos_stats <- cuckoos %>% #breakdown of general statistics
  group_by(species) %>%
  summarise(mean_len = mean(length, na.rm = TRUE), #mean
            count = n(), #n
            sd_len = sd(length, na.rm = TRUE), #standard deviation
            sum_len = sum(length), #sum
            min_len = min(length), #minimum
            qrt1_len = quantile(length, p = 0.25), #q1 (25th percentile)
            med_len = median(length), #median
            #bug fix: the original used median(length, p = 0.75); median()
            #ignores `p` (it falls into `...`), so Q3 just repeated the median
            qrt3_len = quantile(length, p = 0.75), #q3 (75th percentile)
            max_len = max(length)) #max
cuckoos_stats
#Boxplot for egg length
#Fix: fct_reorder()'s argument is `.fun`, not `fun`; the original `fun = median`
#was silently swallowed by `...` (the default .fun happens to be median, so the
#plot looked right by accident).
#NOTE(review): species are ordered by *breadth* here and by *length* in the
#breadth plot below — presumably deliberate so the two panels share an ordering,
#but worth confirming.
cuckoo_boxplot_length <- ggplot(cuckoos, aes(x = fct_reorder(species, breadth, .fun = median, .desc = TRUE), y = length)) +
  geom_boxplot(aes(fill = fct_reorder(species, breadth, .fun = median, .desc = TRUE))) + #reordered for efficiency
  scale_fill_manual(values = brewer.pal(6, "Accent"), guide = guide_legend(title = "Species"),
                    labels = c("Hedge Sparrow", "Meadow Pipit", "Tree Pipit","Pied Wagtail", "Robin","Wren")) + #palette
  geom_jitter(position = position_jitter(0.2)) +
  labs(x = "Species", y = "Length (mm)", title = "Cuckoo Egg Length") +
  theme(axis.text.x = element_blank(), #custom theme
        axis.text.y = element_text(hjust = 1, colour = "black", size = 12),
        plot.background = element_rect(fill = "#f0eae8"),
        panel.background = element_rect(fill = "#ffffff", colour = "#C0C0C0",
                                        size = 2, linetype = "solid"),
        plot.title = element_text(size = 16, face = "bold", hjust = 0.5))
cuckoo_boxplot_length #displays graph

#Boxplot for egg breadth
cuckoo_boxplot_breadth <- ggplot(cuckoos, aes(x = fct_reorder(species, length, .fun = median, .desc = TRUE), y = breadth)) +
  geom_boxplot(aes(fill = fct_reorder(species, length, .fun = median, .desc = TRUE))) +
  scale_fill_manual(values = brewer.pal(6, "Accent"), guide = guide_legend(title = "Species"),
                    labels = c("Hedge Sparrow", "Meadow Pipit", "Tree Pipit","Pied Wagtail", "Robin","Wren")) +
  geom_jitter(position = position_jitter(0.2)) +
  labs(x = "Species", y = "Breadth (mm)", title = "Cuckoo Egg Breadth") +
  theme(axis.text.x = element_blank(), #custom theme
        axis.text.y = element_text(hjust = 1, colour = "black", size = 12),
        plot.background = element_rect(fill = "#f0eae8"),
        panel.background = element_rect(fill = "#ffffff", colour = "#C0C0C0",
                                        size = 2, linetype = "solid"),
        plot.title = element_text(size = 16, face = "bold", hjust = 0.5))
cuckoo_boxplot_breadth

#Side-by-side panel of both boxplots sharing one legend.
boxplots <- ggarrange(cuckoo_boxplot_length, cuckoo_boxplot_breadth, common.legend = TRUE, legend = "top")
boxplots #the combined figure was built but never displayed in the original
#STATISTICAL ANALYSIS OF LENGTH
#ONE-WAY ANOVA (one factor, species; "two-sided" was a misnomer)
#HYPOTHESIS
#H0: Species of host bird does NOT have an effect on the length of cuckoo eggs
#H1: Species of host bird does have an effect on the length of cuckoo eggs
cuckoo_anova_length <- aov(length ~ species, data = cuckoos)
summary(cuckoo_anova_length) #summary of anova results
#p is smaller than 0.05 which means there is a significant difference;
#a Tukey test is done to determine where the difference lies
#(the original called TukeyHSD() twice and immediately overwrote the first,
#unordered result, so the redundant call has been removed)
TK_length <- TukeyHSD(cuckoo_anova_length, "species", ordered = TRUE) #Tukey analysis
#turning the results into a dataframe to be visually analyzed
TKdatalength <- as.data.frame(TK_length$species, rownames = FALSE)
TKdatalength <- cbind(species = rownames(TKdatalength), TKdatalength)
rownames(TKdatalength) <- seq_len(nrow(TKdatalength)) #making the index into a column
#Visual depiction of the Tukey pairwise differences.
#Bug fix: base-graphics title() cannot be added to a ggplot chain with `+`
#(it errors at run time); the title now goes through labs().
length_tk_bar <- ggplot(TKdatalength, aes(x = species, y = diff, fill = species)) +
  geom_bar(stat = "identity") +
  labs(x = "Species", y = "Difference in Length", title = "Tukey Analysis") +
  theme(legend.position = "top",
        axis.text.x = element_blank(),
        axis.text.y = element_text(hjust = 1, colour = "black", size = 12),
        plot.background = element_rect(fill = "#f0eae8"),
        panel.background = element_rect(fill = "#ffffff", colour = "#C0C0C0",
                                        size = 2, linetype = "solid"))
length_tk_bar
#STATISTICAL ANALYSIS OF BREADTH
#ONE-WAY ANOVA (one factor, species; "two-sided" was a misnomer)
#HYPOTHESIS
#H0: Species of host bird does NOT have an effect on the breadth of cuckoo eggs
#H1: Species of host bird does have an effect on the breadth of cuckoo eggs
cuckoo_anova_breadth <- aov(breadth ~ species, data = cuckoos)
summary(cuckoo_anova_breadth)
#p is smaller than 0.05 which means there is a significant difference;
#a Tukey test is done to determine where the difference lies
TK_breadth <- TukeyHSD(cuckoo_anova_breadth, "species", ordered = TRUE)
TKdatabreadth <- as.data.frame(TK_breadth$species, rownames = FALSE)
TKdatabreadth <- cbind(species = rownames(TKdatabreadth), TKdatabreadth)
rownames(TKdatabreadth) <- seq_len(nrow(TKdatabreadth)) #making the index into a column
#Visual depiction of the Tukey pairwise differences.
#Bug fix: base-graphics title() cannot be added to a ggplot chain with `+`
#(it errors at run time); the title now goes through labs().
breadth_tk_bar <- ggplot(TKdatabreadth, aes(x = species, y = diff, fill = species)) +
  geom_bar(stat = "identity") +
  labs(x = "Species", y = "Difference in Breadth", title = "Tukey Analysis") +
  theme(legend.position = "top",
        axis.text.x = element_blank(),
        axis.text.y = element_text(hjust = 1, colour = "black", size = 12),
        plot.background = element_rect(fill = "#f0eae8"),
        panel.background = element_rect(fill = "#ffffff", colour = "#C0C0C0",
                                        size = 2, linetype = "solid"))
breadth_tk_bar
TK_full <- ggarrange(length_tk_bar, breadth_tk_bar, common.legend = TRUE, legend = "top") #combined graph of Tukey analysis
TK_full
#CORRELATION between egg length and breadth (Pearson)
pearson_cuckoos <- cor.test(x = cuckoos$length, cuckoos$breadth) #correlation test
pearson_cuckoos #r ~ 0.5, a moderately strong positive correlation
#Build the annotation from the actual estimate instead of a hard-coded "0.5",
#so the label stays correct if the data change.
r_print <- paste0("r = ", round(unname(pearson_cuckoos$estimate), 2))
correlation_cuckoos <- ggplot(data = cuckoos, aes(x = length, y = breadth)) +
  geom_smooth(method = "lm", colour = "slategray2", se = FALSE) +
  geom_point(colour = "tomato2") +
  geom_label(x = 20, y = 17.3, label = r_print) +
  labs(x = "Egg length (mm)", y = "Egg breadth (mm)") +
  theme_pubclean()
correlation_cuckoos #visual depiction of correlation
#CUCKOO EGG MATCHING
#Companion table from the same study (host species, egg counts, matching info).
cuckoohosts <- DAAG::cuckoohosts #using dataframe from same study
cuckoohosts <- cbind(species = rownames(cuckoohosts), cuckoohosts) #promote rownames to a column
rownames(cuckoohosts) <- 1:nrow(cuckoohosts)
cuckoohosts <- cuckoohosts %>%
  slice(1:6) #delete other rows as they involve species not being discussed
#Explore the data
glimpse(cuckoohosts) #overall preview of data, shows every column
head(cuckoohosts) #first six rows
tail(cuckoohosts) #last six rows
nrow(cuckoohosts) #number of rows
ncol(cuckoohosts) #number of columns
any(is.na(cuckoohosts)) #is there any missing data? YES.
summary(cuckoohosts) #gives a summary of the mean, median, quartiles and min/max values
#CUCKOO HOST MATCHING: did the cuckoo egg match the host clutch?
cuckoohosts1 <- cuckoohosts #making a dataframe for graph creation
cuckoohosts1 <- cuckoohosts %>%
  select(1, 12, 13)
bar_cuckoohosts <- melt(cuckoohosts1, id.vars='species') #long format for a dodged bar graph
bar_match <- ggplot(bar_cuckoohosts, aes(x = species, y = value, fill = variable)) +
  geom_bar(stat = 'identity', position = 'dodge') + #bar plot of variables
  #brewer.pal() requires n >= 3 (n = 2 warns); request 3 colours and keep two
  scale_fill_manual(values = brewer.pal(3, "Accent")[1:2], guide = guide_legend(title = "Egg Matching"),
                    labels = c("Match", "Not a Match")) +
  labs(x = "Species", y = "Amount of Eggs", title = "Matched Cuckoo Eggs") +
  scale_x_discrete(labels = c("meadow.pipit" = "Meadow Pipit", "tree.pipit" = "Tree Pipit",
                              "hedge.sparrow" = "Hedge Sparrow", "wagtails" = "Pied Wagtail", "robin" = "Robin", "wren" = "Wren")) +
  theme(axis.text.x = element_text(angle = 40, hjust = 1, colour = "black", size = 12), #custom theme
        axis.text.y = element_text(hjust = 1, colour = "black", size = 12),
        plot.background = element_rect(fill = "#f0eae8"),
        panel.background = element_rect(fill = "#ffffff", colour = "#C0C0C0",
                                        size = 2, linetype = "solid"),
        plot.title = element_text(size = 16, face = "bold", hjust = 0.5))
bar_match
#EGG NUMBERS: cuckoo eggs vs host eggs per species
cuckoohosts2 <- cuckoohosts #making a dataframe for graph creation
cuckooeggnumbers <- cuckoohosts2 %>%
  select(1, 6, 11) %>%
  slice(1:5) #wren is deselected as there is an 'na' value
bar_cuckooeggs <- melt(cuckooeggnumbers, id.vars='species') #long format for a dodged bar graph
bar_eggnumber <- ggplot(bar_cuckooeggs, aes(x = species, y = value, fill = variable)) +
  geom_bar(stat = 'identity', position = 'dodge') +
  #brewer.pal() requires n >= 3 (n = 2 warns); request 3 colours and keep two
  scale_fill_manual(values = brewer.pal(3, "Set2")[1:2], guide = guide_legend(title = "Egg Numbers"),
                    labels = c("Cuckoo Eggs", "Host Eggs")) +
  labs(x = "Species", y = "Amount of Eggs", title = "Cuckoo Eggs") +
  scale_x_discrete(labels = c("meadow.pipit" = "Meadow Pipit", "tree.pipit" = "Tree Pipit",
                              "hedge.sparrow" = "Hedge Sparrow", "wagtails" = "Pied Wagtail", "robin" = "Robin")) +
  theme(axis.text.x = element_text(angle = 40, hjust = 1, colour = "black", size = 12), #custom theme
        axis.text.y = element_text(hjust = 1, colour = "black", size = 12),
        plot.background = element_rect(fill = "#f0eae8"),
        panel.background = element_rect(fill = "#ffffff", colour = "#C0C0C0",
                                        size = 2, linetype = "solid"),
        plot.title = element_text(size = 16, face = "bold", hjust = 0.5))
bar_eggnumber
### END ###
|
3fa92e94bfc1f3ab595dc2796e8ffd020cbe25df | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/coxrt/R/coxrt.R | a2a12db89ab0f389296810d48915cbcf62c61674 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,659 | r | coxrt.R | #' A Package to Fit the Cox Regression from Right Truncated Data
#'
#' The method assumes that truncation is independent of covariates,
#' and of lifetime, and that there is no censoring.
#' The method uses Inverse-Probability-Weighting estimating equations with stabilized weights,
#' IPW-S and IPW-SA, as described in Vakulenko-Lagun et al. (2018).
#' Currently the code allows only time-independent covariates.
#'
#' The \pkg{coxrt} package provides two functions:
#' \code{\link{coxph.RT}} (IPW-S) that assumes positivity
#' and \code{\link{coxph.RT.a0}} (IPW-SA) that allows
#' for adjustment of estimation using plugged-in \code{a0}.
#' The illustrative examples in these functions include analysis of AIDS
#' latency data with age as a covariate, where the AIDS cases were retrospectively
#' ascertained at June 30, 1986, and only those who developed AIDS by that time were
#' included in the analysis (Kalbfleisch and Lawless, 1989).
#'
#'
#' @references Vakulenko-Lagun, B., Mandel, M., Betensky, R.A. Inverse probability weighting methods for Cox regression with right-truncated data. 2019, submitted to \emph{Biometrics}
#' @references Kalbfleisch, J.D. and Lawless, J.F. Inference based on retrospective ascertainment: an analysis
#' of the data on transfusion-related AIDS. Journal of the American Statistical Association,
#' 84 (406):360-372, 1989.
#' @import survival
#' @import BB
#' @import inline
#' @import gss
#' @import ggplot2
#' @importFrom stats pnorm qnorm quantile sd var
#' @importFrom Rcpp evalCpp
#' @useDynLib coxrt, .registration = TRUE
#'
#' @docType package
#' @name coxrt
NULL
|
8ef60806e30f31ee041736e118b47b3b0383106d | 432f99ef4741d77e26ce625ab107de3116932314 | /avaliacao_risco_de_credito.R | 8ba8ce5d3af6c1409577019adc4fa43e833c2796 | [] | no_license | fthara/Avaliacao_de_risco_de_credito | 10b258872fa853538428620bea40bd4f409bce45 | 148cd148b183aed2c8bf0e210f0b5ce598415882 | refs/heads/master | 2022-11-13T05:15:46.030649 | 2020-07-11T18:28:04 | 2020-07-11T18:28:04 | 278,918,985 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 29,323 | r | avaliacao_risco_de_credito.R | getwd()
# Working directory is machine-specific; consider relative paths or
# here::here() if this script must run elsewhere.
setwd("/Users/fernando/Google Drive/DSA/BigDataRAzure/Projetos/Risco_de_Credito")

# Load the credit dataset.
df <- read.csv("credit_dataset.csv", header = TRUE, sep = ",")
str(df)
# The data frame was read with every variable as numeric, but most of them
# are categorical; they are converted to factors throughout the exploratory
# analysis below.

# Checking for missing values.
# Bug fix: `df[, is.na(df) == TRUE]` tried to index the columns with an
# nrow-by-ncol logical matrix, which is not a valid column selector;
# any()/sum() over is.na() is the idiomatic check.
any(is.na(df)) # TRUE if at least one value is missing
sum(is.na(df)) # how many values are missing

# Analysing the target variable (credit.rating)
library(ggplot2)
df$credit.rating <- as.factor(df$credit.rating)
ggplot(df, aes(x=credit.rating)) +
  geom_bar(stat="count", width=0.7, fill=c("#9c0000", "#0000a2"))+
  labs(x = "Análise de Crédito", y = "Quantidade",
       title = "Concessão de Análise de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
table(df$credit.rating)
prop.table(table(df$credit.rating))
# The target classes are quite imbalanced: ~30% credit denied vs ~70%
# granted. They should be rebalanced towards ~50/50, but only after the
# train/test split is created.
# Analysis of the predictor variables.
# account.balance (status of the checking account)
# Converting the variable from numeric to factor
df$account.balance <- as.factor(df$account.balance)
# Count of each level of this variable
table(df$account.balance)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(account.balance, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2") ) +
  labs(x = "Status da Conta", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Status da Conta x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# The chart suggests that people with some balance in their checking account
# are more likely to be granted credit, and that people with an open but
# empty account still fare better than people with no account at all.
# credit.duration.months
# Histogram of the loan duration split by credit granted (yes/no).
ggplot(df, aes(credit.duration.months, fill = credit.rating)) +
  geom_histogram(alpha = 0.5, aes(y = ..density..), position = 'identity',
                 bins=7) +
  scale_fill_manual(values = c("#9c0000", "#0000a2") )
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(as.factor(credit.duration.months), ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2") ) +
  labs(x = "Duração do Empréstimo", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Duração do Empréstimo x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Both charts show the decision changes with loan duration: basically, the
# longer the loan, the harder it is to get credit.
# A new variable is derived below, binning durations into groups, which
# should help the predictive model.
# Creating the new variable (bin index per interval boundary)
df$fact.credit.duration.months<-findInterval(df$credit.duration.months,
                                             c(0,6,12,18,24,30,36))
# Naming the levels of the new variable.
library(dplyr)
df<-df %>%
  mutate(fact.credit.duration.months=as.factor(fact.credit.duration.months))
levels(df$fact.credit.duration.months)<-c("Menos de 6","6 a 12", "12 a 18",
                                          "18 a 24", "24 a 30", "30 a 36",
                                          "mais de 36")
# Previewing the first rows of both columns.
head(df[, c("credit.duration.months", "fact.credit.duration.months")])
# Graphical analysis of the binned variable
ggplot(df, aes(fact.credit.duration.months, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2") ) +
  labs(x = "Duração do Empréstimo", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Duração do Empréstimo x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# The binned view makes the conclusion above much easier to see.
# previous.credit.payment.status
# Converting the variable from numeric to factor
df$previous.credit.payment.status <- as.factor(df$previous.credit.payment.status)
# Count of each level of this variable
table(df$previous.credit.payment.status)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(previous.credit.payment.status, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2") ) +
  labs(x = "Histórico de Emprétimos", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Histórico de Emprétimos x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# As expected, applicants with a worse loan-payment history tend to have
# less chance of being granted new credit.
# credit.purpose
# Converting the variable from numeric to factor
df$credit.purpose <- as.factor(df$credit.purpose)
# Count of each level of this variable
table(df$credit.purpose)
# Bar chart of the variable split by credit granted or not.
# NOTE(review): the axis/title text ("Valor Emprestado") looks copy-pasted
# from the credit.amount section below; this plot is about the loan purpose.
ggplot(df, aes(credit.purpose, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Valor Emprestado", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Valor Emprestado x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# This variable also seems to influence the credit decision, although the
# relationship does not look very strong.
# credit.amount
# Histogram of the loan amount split by credit granted (yes/no).
ggplot(df, aes(credit.amount, fill = credit.rating)) +
  geom_histogram(alpha = 0.5, aes(y = ..density..), position = 'identity',
                 bins=30) +
  scale_fill_manual(values = c("#9c0000", "#0000a2") )
# The histogram alone is not very conclusive, but larger amounts appear to
# make a small difference as well.
# As with the duration, the amounts are binned into groups for a clearer view.
# Creating the fact.credit.amount variable
df$fact.credit.amount<-findInterval(df$credit.amount,
                                    c(0, 2500, 5000, 10000))
df<-df %>%
  mutate(fact.credit.amount=as.factor(fact.credit.amount))
levels(df$fact.credit.amount) <- c("Menos de 2500","2500 a 5000",
                                   "5000 a 10000", "mais de 10000")
# Previewing the first rows of both columns.
head(df[, c("credit.amount", "fact.credit.amount")])
# Graphical analysis of the binned variable
ggplot(df, aes(fact.credit.amount, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Valor Emprestado", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Valor Emprestado x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Now the picture is clearer: applicants asking for smaller amounts are more
# likely to have the credit granted.
# savings
# Converting the variable from numeric to factor
df$savings <- as.factor(df$savings)
# Count of each level of this variable
table(df$savings)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(savings, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Valor na Poupança", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Valor na Poupança x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# As expected, applicants with more money in savings are more likely to be
# granted credit.
# employment.duration
# Converting the variable from numeric to factor
df$employment.duration <- as.factor(df$employment.duration)
# Count of each level of this variable
table(df$employment.duration)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(employment.duration, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Condição no Emprego", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Condição no Emprego x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# This variable also influences the target directly: unemployed applicants,
# or those employed for a short time, have less chance of getting credit
# than long-time employees.
# installment.rate
# Converting the variable from numeric to factor
df$installment.rate <- as.factor(df$installment.rate)
# Count of each level of this variable
table(df$installment.rate)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(installment.rate, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Taxa de Renda Disponível", y = "Quantidade de Crédito Concedido",
       fill = "Crédito",
       title = "Taxa de Renda Disponível x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Another interesting variable: applicants with more disposable income are
# more likely to have the credit granted.
# marital.status
# Converting the variable from numeric to factor
df$marital.status <- as.factor(df$marital.status)
# Count of each level of this variable
table(df$marital.status)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(marital.status, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Estado Civil", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Estado Civil x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# The chart suggests that married/widowed men are more likely to be granted
# credit than the other categories.
# guarantor
# Converting the variable from numeric to factor
df$guarantor <- as.factor(df$guarantor)
# Count of each level of this variable
table(df$guarantor)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(guarantor, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Fiador", y = "Quantidade de Crédito Concedido", fill = "Crédito", title = "Fiador x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# This variable does not look useful for the model: level 2 has very few
# observations and the grant/deny ratio is almost identical for both levels.
# Percentage of each credit outcome within each guarantor level
perct.guarantor <- group_by(df, guarantor) %>%
  mutate(group_size = n()) %>%
  group_by(guarantor, credit.rating) %>%
  summarise(perc = (n()/max(group_size)*100))
perct.guarantor
# The percentage table above confirms the point: the distribution is nearly
# the same in both groups.
# residence.duration
# Converting the variable from numeric to factor
df$residence.duration <- as.factor(df$residence.duration)
# Count of each level of this variable
table(df$residence.duration)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(residence.duration, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Tempo de Mordia", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Tempo de Mordia x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Percentage of each credit outcome within each level
perct.residence.duration <- group_by(df, residence.duration) %>%
  mutate(group_size = n()) %>%
  group_by(residence.duration, credit.rating) %>%
  summarise(perc = (n()/max(group_size)*100))
perct.residence.duration
# As the chart and table show, the grant/deny percentage is almost the same
# regardless of how long the applicant has lived at their residence, so this
# variable will not be useful in the model.
# current.assets
# Converting the variable from numeric to factor
df$current.assets <- as.factor(df$current.assets)
# Count of each level of this variable
table(df$current.assets)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(current.assets, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Recursos Disponíveis", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Recursos Disponíveis x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Percentage of each credit outcome within each level
perct.current.assets <- group_by(df, current.assets) %>%
  mutate(group_size = n()) %>%
  group_by(current.assets, credit.rating) %>%
  summarise(perc = (n()/max(group_size)*100))
perct.current.assets
# Apparently home owners have less chance of getting a loan than the other
# groups; perhaps an outstanding mortgage is an impediment.
# age
# Histogram of age split by credit granted (yes/no).
ggplot(df, aes(age, fill = credit.rating)) +
  geom_histogram(alpha = 0.5, aes(y = ..density..), position = 'identity',
                 bins=10) +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Idade", y = "Frequência", fill = "Crédito",
       title = "Histograma da Idade por Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Boxplot of age by credit outcome
ggplot(df, aes(x=credit.rating, y=age)) +
  geom_boxplot() +
  labs(x = "Idade", y = "Frequência",
       fill = "Crédito", title = "Boxplot da Idade por Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Both the histogram and the boxplot suggest that younger applicants usually
# have less chance of getting a loan.
# As before, an age-group column is created to make the pattern clearer.
# Creating the fact.age variable
summary(df$age)
df$fact.age<-findInterval(df$age, c(18, 25, 33, 38, 45, 55))
df<-df %>%
  mutate(fact.age=as.factor(fact.age))
levels(df$fact.age) <- c("Menos de 25", "25 a 33", "33 a 38", "38 a 45", "45 a 55",
                         "mais de 55")
# Previewing the first rows of both columns.
head(df[, c("age", "fact.age")])
# Graphical analysis of the binned variable
ggplot(df, aes(fact.age, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Idade", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Idade x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# The relationship between age and the credit decision is much clearer now.
# other.credits
# Converting the variable from numeric to factor
df$other.credits <- as.factor(df$other.credits)
# Count of each level of this variable
table(df$other.credits)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(other.credits, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Outros Créditos", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Outros Créditos x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Applicants with outstanding credit at other banks have a harder time
# getting new credit.
# apartment.type
# Converting the variable from numeric to factor
df$apartment.type <- as.factor(df$apartment.type)
# Count of each level of this variable
table(df$apartment.type)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(apartment.type, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Tipo de Moradia", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Tipo de Moradia x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# This variable will also be included in the predictive model.
# bank.credits
# Converting the variable from numeric to factor
df$bank.credits <- as.factor(df$bank.credits)
# Count of each level of this variable
table(df$bank.credits)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(bank.credits, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Quantidade de Créditos ja Concedidos", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Quantidade de Créditos ja Concedidos x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Percentage of each credit outcome within each level
perct.bank.credits <- group_by(df, bank.credits) %>%
  mutate(group_size = n()) %>%
  group_by(bank.credits, credit.rating) %>%
  summarise(perc = (n()/max(group_size)*100))
perct.bank.credits
# The difference between the levels is so small that this variable is not
# worth using in the predictive model.
# occupation
# Converting the variable from numeric to factor
df$occupation <- as.factor(df$occupation)
# Count of each level of this variable
table(df$occupation)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(occupation, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Ocupação", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Ocupação x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Percentage of each credit outcome within each level
perct.occupation <- group_by(df, occupation) %>%
  mutate(group_size = n()) %>%
  group_by(occupation, credit.rating) %>%
  summarise(perc = (n()/max(group_size)*100))
perct.occupation
# The percentage difference between the categories is quite small, so this
# variable is probably not worth including in the algorithm.
# dependents
# Converting the variable from numeric to factor
df$dependents <- as.factor(df$dependents)
# Count of each level of this variable
table(df$dependents)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(dependents, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Dependentes", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Dependentes x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Percentage of each credit outcome within each level
perct.dependents <- group_by(df, dependents) %>%
  mutate(group_size = n()) %>%
  group_by(dependents, credit.rating) %>%
  summarise(perc = (n()/max(group_size)*100))
perct.dependents
# This variable is also not worth using in the predictive model.
# telephone
# Converting the variable from numeric to factor
df$telephone <- as.factor(df$telephone)
# Count of each level of this variable
table(df$telephone)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(telephone, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Telefone", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Telefone x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Percentage of each credit outcome within each level
perct.telephone <- group_by(df, telephone) %>%
  mutate(group_size = n()) %>%
  group_by(telephone, credit.rating) %>%
  summarise(perc = (n()/max(group_size)*100))
perct.telephone
# Owning a telephone also makes little difference to the credit decision.
# foreign.worker
# Converting the variable from numeric to factor
df$foreign.worker <- as.factor(df$foreign.worker)
# Count of each level of this variable
table(df$foreign.worker)
# Bar chart of the variable split by credit granted or not.
ggplot(df, aes(foreign.worker, ..count..)) +
  geom_bar(aes(fill = credit.rating), position = "dodge") +
  scale_fill_manual(values = c("#9c0000", "#0000a2")) +
  labs(x = "Trabalha Fora", y = "Quantidade de Crédito Concedido",
       fill = "Crédito", title = "Trabalha Fora x Concessão de Crédito") +
  theme(plot.title = element_text(hjust = 0.5))
# Percentage of each credit outcome within each level
perct.foreign.worker <- group_by(df, foreign.worker) %>%
  mutate(group_size = n()) %>%
  group_by(foreign.worker, credit.rating) %>%
  summarise(perc = (n()/max(group_size)*100))
perct.foreign.worker
# People who live and work in the same city seem more likely to get a loan,
# but the two groups have very different sample sizes; the variable is kept
# in the first model anyway.
# Train/test split (library() errors on failure; require() only returns FALSE)
library(caTools)
# Fixing a seed for reproducible sampling
set.seed(123)
# Splitting the data frame into train and test sets.
# Bug fix: sample.split() expects the vector of outcome labels so the split
# is stratified by class and has one entry per row; passing the whole data
# frame produced a length-ncol logical that was silently recycled over rows.
sample <- sample.split(df$credit.rating, SplitRatio = 0.70)
train <- subset(df, sample == TRUE)
test <- subset(df, sample == FALSE)
# Checking the number of rows in each data frame
nrow(train)
nrow(test)
# Time to train the predictive models.
# Start with the variables that looked most informative during the
# exploratory analysis above.
formula_v1 <- as.formula('credit.rating ~ account.balance +
fact.credit.duration.months +
previous.credit.payment.status + credit.purpose +
fact.credit.amount + savings + employment.duration +
installment.rate + marital.status + current.assets +
fact.age + other.credits + apartment.type +
foreign.worker')
# Fit a logistic regression (binomial GLM)
model_glm_v1 <- glm(formula = formula_v1, data = train, family = "binomial")
# Coefficient / deviance summary of the fitted model
summary(model_glm_v1)
# Predicted probabilities on the test set
pred_glm_v1 <- predict(model_glm_v1, test, type="response")
# Threshold at 0.5 by rounding to 0 or 1
pred_glm_v1 <- round(pred_glm_v1)
# Confusion matrix of the prediction (data/reference explicitly named here)
library(caret)
confusionMatrix(table(data = pred_glm_v1, reference = test$credit.rating),
positive = '1')
# Logistic regression gave a good result; compare the same feature set
# against other algorithms.
## Decision tree (C5.0)
library(C50)
modelo_tree_v1 <- C5.0(formula_v1, data = train)
# Predictions on the test data
pred_tree_v1 <- predict(modelo_tree_v1, test, type = 'class')
# Confusion matrix.
# FIX: confusionMatrix(data, reference) expects the PREDICTIONS first and the
# observed labels second; the original passed them the other way round, which
# silently swaps sensitivity/specificity and PPV/NPV (accuracy is symmetric
# and therefore unaffected).
confusionMatrix(data = pred_tree_v1, reference = test$credit.rating, positive = '1')
# Slightly worse performance than the logistic regression model.
# Support Vector Machine
library(e1071)
modelo_svm_v1 <- svm(formula_v1, data = train,
type = 'C-classification', kernel = 'radial')
# Predictions on the test data
pred_svm_v1 <- predict(modelo_svm_v1, test)
# Confusion matrix (data = predictions, reference = observed labels)
confusionMatrix(data = pred_svm_v1, reference = test$credit.rating, positive = '1')
# Logistic regression is still ahead so far.
# Recursive-partitioning decision tree (CART).
# NOTE: the original comment called this "Random Forest", but rpart fits a
# single tree, not an ensemble (randomForest is used later for feature
# selection).
library(rpart)
modelo_rf_v1 <- rpart(formula_v1, data = train, control = rpart.control(cp = .0005))
# Predictions on the test data
pred_rf_v1 <- predict(modelo_rf_v1, test, type = 'class')
# Confusion matrix (data = predictions, reference = observed labels)
confusionMatrix(data = pred_rf_v1, reference = test$credit.rating, positive = '1')
# Worst result so far.
# Naive Bayes
model_nb_v1 <- naiveBayes(formula_v1, data = train)
# Predictions on the test data
pred_nb_v1 <- predict(model_nb_v1, newdata = test)
# Confusion matrix (data = predictions, reference = observed labels)
confusionMatrix(data = pred_nb_v1, reference = test$credit.rating, positive = '1')
# Best result so far.
# Normalize (z-score) the numeric variables that have not been used yet.
#
# Args:
#   df:  a data frame.
#   var: character vector with the names of the columns to standardize.
# Returns: the data frame with each listed column centered and scaled.
normaliza_dados <- function(df, var){
  for(v in var) {
    # FIX: scale() returns a 1-column matrix; wrapping it in as.numeric()
    # stores a plain numeric vector in the data frame. The original kept the
    # matrix, which downstream code then had to undo with as.numeric().
    df[[v]] <- as.numeric(scale(df[[v]], center = TRUE, scale = TRUE))
  }
  df
}
# Raw numeric variables to standardize before re-fitting the models
var <- c('credit.duration.months', 'credit.amount', 'age')
df<- normaliza_dados(df, var)
# Refresh train/test using the same `sample` mask created for the first split
train = subset(df, sample ==TRUE)
test = subset(df, sample==FALSE)
# Feature selection with Random Forest variable importance
require(randomForest)
model_rf_imp_var <- randomForest(credit.rating ~ ., data = df, ntree = 100,
nodesize = 10, importance = TRUE)
# Plot the variables ranked by importance
varImpPlot(model_rf_imp_var)
# The plot shows the most relevant variables for the prediction.
# Retrain using the top 10 variables from the random-forest ranking.
formula_v2 <- as.formula('credit.rating ~ account.balance +
previous.credit.payment.status + savings +
fact.credit.duration.months + credit.duration.months +
age + credit.amount + bank.credits + fact.credit.amount +
other.credits')
# Naive Bayes on the top-10 random-forest variables (formula_v2)
model_nb_v2 <- naiveBayes(formula_v2, data = train)
# Predictions on the test data
pred_nb_v2 <- predict(model_nb_v2, newdata = test)
# Confusion matrix.
# FIX: confusionMatrix(data, reference) takes the PREDICTIONS first and the
# observed labels second; the original swapped them, inverting
# sensitivity/specificity and PPV/NPV (accuracy is unaffected).
confusionMatrix(data = pred_nb_v2, reference = test$credit.rating, positive = '1')
# The first model still performs best so far.
# Now with the top 15 variables.
formula_v3 <- as.formula('credit.rating ~ account.balance +
previous.credit.payment.status + savings +
fact.credit.duration.months + credit.duration.months +
age + credit.amount + bank.credits + fact.credit.amount +
other.credits + guarantor + employment.duration +
installment.rate + current.assets + residence.duration')
# Naive Bayes on formula_v3
model_nb_v3 <- naiveBayes(formula_v3, data = train)
# Predictions on the test data
pred_nb_v3 <- predict(model_nb_v3, newdata = test)
# Confusion matrix (data = predictions, reference = observed labels)
confusionMatrix(data = pred_nb_v3, reference = test$credit.rating, positive = '1')
# Same accuracy as the first model; next, use every available variable.
formula_v4 <- as.formula('credit.rating ~ .')
# Naive Bayes on all predictors
model_nb_v4 <- naiveBayes(formula_v4, data = train)
# Predictions on the test data
pred_nb_v4 <- predict(model_nb_v4, newdata = test)
# Confusion matrix (data = predictions, reference = observed labels)
confusionMatrix(data = pred_nb_v4, reference = test$credit.rating, positive = '1')
# Before balancing, coerce the normalized columns back to plain numeric
# vectors: scale() wrapped them in 1-column matrices, which ROSE does not
# accept. (No-op if normaliza_dados already returned plain vectors.)
train$credit.duration.months <- as.numeric(train$credit.duration.months)
train$credit.amount <- as.numeric(train$credit.amount )
train$age <- as.numeric(train$age )
test$credit.duration.months <- as.numeric(test$credit.duration.months)
test$credit.amount <- as.numeric(test$credit.amount )
test$age <- as.numeric(test$age )
library(ROSE)
# Balance the class distribution with ROSE (synthetic resampling)
# ROSE on the training data
rose_train <- ROSE(credit.rating ~ ., data = train, seed = 1)$data
prop.table(table(rose_train$credit.rating))
# ROSE on the test data
rose_test <- ROSE(credit.rating ~ ., data = test, seed = 1)$data
prop.table(table(rose_test$credit.rating))
# Naive Bayes trained on the ROSE-balanced training set (same formula as v3)
model_nb_v5 <- naiveBayes(formula_v3, data = rose_train)
# Predictions on the balanced test data
pred_nb_v5 <- predict(model_nb_v5, newdata = rose_test)
# Confusion matrix.
# FIX: data = predictions, reference = observed labels (the original swapped them).
confusionMatrix(data = pred_nb_v5, reference = rose_test$credit.rating, positive = '1')
# ROC curve for model v2, fitted on the ORIGINAL (unbalanced) data.
# NOTE: the original comments had the balanced/unbalanced labels swapped.
roc.curve(test$credit.rating, pred_nb_v2, plotit = TRUE, col = "red")
# ROC curve for model v5, fitted on the ROSE-BALANCED data, overlaid in green
roc.curve(rose_test$credit.rating, pred_nb_v5, plotit = TRUE,
col = "green", add.roc = TRUE)
|
69ae9c9fc1fcc32f3484bdc7b7c9de007ccc272b | 85593c2102ef58eb8e6d5e34c94695e0e80f7fd0 | /Data_Cleaning/dataset.R | b233b8feb68fe03e53676391150fedd27228bfce | [] | no_license | Gilda95/Intersectionality-Bayesian-Analysis | 84b0b1e7b545954b14dece1c7521eacec953dfb8 | 226044692b24bd7a312b8936891f20ebeb1c057f | refs/heads/main | 2023-03-06T15:12:21.962042 | 2021-02-17T00:30:35 | 2021-02-17T00:30:35 | 338,579,364 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,460 | r | dataset.R | # - - - - - - - - - - DATASET - - - - - - - - - - #
#### loading libraries ####
library(haven)
#### loading dataset ####
# You should download this data from https://www.europeansocialsurvey.org/
# (European Social Survey round 9, year 2018)
dataset <- read_sas('ESS9e02/ess9e02.sas7bdat', NULL)
#### selecting variables of interest ####
# DEMOGRAPHIC - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# idno
# agea: Age
# gndr: Gender
# cntry: Country
# ctzcntr: Citizen of country
# ctzshipd: Citizenship
# cntbrthd: Country of birth
# brncntr: Born in country
# blgetmg: Belong to minority ethnic group in country
# FAMILY & EDUCATION - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# isco08: Occupation, ISCO08
# isco08p Occupation partner, ISCO08
# domicil: Domicile, respondent's description
# nbthcld: Number of children ever given birth to/ fathered
# fcldbrn: Year (first) child was born
# eisced: Highest level of education, ES - ISCED
# edulvlb: Highest level of education
# edulvlpb: Highest level of education for partner
# WORK - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# pdwrk: Doing last 7 days: paid work
# pdwrkp: Partner doing last 7 days: paid work (selected below, see var.sel)
# fvgabc
# infqbst: Your [pay/pensions/social benefits], which frequency do you know best
# grspnum: What is your usual [weekly/monthly/annual] gross pay
# grsplet: Which letter describes your gross pay
# mnactic: Main activity, last 7 days. All respondents. Post coded
# wkhct: Total contracted hours per week in main job overtime excluded
# DATASET INFO - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# anweight: Analysis weight
# stratum: Sampling stratum
# psu Primary: sampling unit
var.sel <- c('idno', 'agea', 'gndr', 'cntry', 'ctzcntr', 'ctzshipd', 'cntbrthd', 'brncntr', 'blgetmg',
'isco08', 'isco08p', 'domicil', 'nbthcld', 'fcldbrn', 'edulvlb', 'edulvlpb', 'pdwrkp',
'pdwrk', 'fvgabc', 'infqbst', 'grspnum', 'grsplet', 'mnactic', 'wkhct',
'anweight', 'psu', 'stratum')
data.sel <- dataset[, var.sel]
# keeping only respondents aged between 25 and 55 (working-age sample)
data.sel <- data.sel[(data.sel$agea<=55) & (data.sel$agea>=25), ]
rm(var.sel)
#### uniformizing special codes 6, 7, 8, 9 (not applicable / refusal / don't know / no answer) ####
# The ESS encodes the special answers with a repeated digit whose width depends
# on the variable (6/66/6666/... = not applicable, 7... = refusal,
# 8... = don't know, 9... = no answer). Recode every variant to a common
# 10-digit sentinel so all variables can be treated uniformly later.
# (Replaces 16 near-identical four-statement groups from the original.)
recode_missing <- function(x, width) {
  # Map each special code (digit d repeated `width` times) onto the same digit
  # repeated 10 times, e.g. width = 4: 6666 -> 6666666666, ..., 9999 -> 9999999999.
  # Comparison/assignment works for numeric and character columns alike, with
  # the same coercion behaviour as the original per-variable statements.
  for (d in 6:9) {
    x[x == as.numeric(strrep(d, width))] <- as.numeric(strrep(d, 10))
  }
  x
}
# Width of the special codes of each selected variable (see the ESS9 codebook).
missing.widths <- c(
  ctzcntr = 1, ctzshipd = 4, cntbrthd = 4, brncntr = 1, blgetmg = 1,
  isco08 = 5, isco08p = 5, domicil = 1, nbthcld = 2, fcldbrn = 4,
  fvgabc = 1, infqbst = 1, grspnum = 9, grsplet = 2, mnactic = 2, wkhct = 3
)
for (v in names(missing.widths)) {
  data.sel[[v]] <- recode_missing(data.sel[[v]], missing.widths[[v]])
}
rm(recode_missing, missing.widths, v)
#### make income annual ####
# TRUE for rows with a real reported income, i.e. none of the 10-digit
# sentinel missing-value codes set above. `cond` is reused by the currency
# conversion below and removed afterwards.
cond <- (data.sel$grspnum != 6666666666) & (data.sel$grspnum != 7777777777) &
(data.sel$grspnum != 8888888888) & (data.sel$grspnum != 9999999999)
data.sel$grspnum[(data.sel$infqbst == 1) & cond] <- data.sel$grspnum[(data.sel$infqbst == 1) & cond]*52 # weekly pay -> annual
data.sel$grspnum[(data.sel$infqbst == 2) & cond] <- data.sel$grspnum[(data.sel$infqbst == 2) & cond]*12 # monthly pay -> annual
#### convert income in euro ####
# Mean 2018 exchange rates (local currency units per euro)
curr.cz <- (25.855 + 25.701 + 25.477) / 3
# BUG FIX: mean(a, b, c, ...) averages only its FIRST argument (the remaining
# values are matched to `trim`/`na.rm`), so the original silently returned
# 4.3060 instead of the mean of the five PLN rates. The values must be
# combined with c() before calling mean().
curr.pl <- mean(c(4.3060, 4.3029, 4.2905, 4.2954, 4.3157))
curr.rs <- 100/0.87 # mean of the currency exchange in 2018
data.sel$grspnum[(data.sel$cntry == 'BG') & cond] <- data.sel$grspnum[(data.sel$cntry == 'BG') & cond]/1.95583 # BULGARIA BG
data.sel$grspnum[(data.sel$cntry == 'HR') & cond] <- data.sel$grspnum[(data.sel$cntry == 'HR') & cond]/7.44 # CROATIA HR
data.sel$grspnum[(data.sel$cntry == 'CZ') & cond] <- data.sel$grspnum[(data.sel$cntry == 'CZ') & cond]/curr.cz # CZECHIA CZ
data.sel$grspnum[(data.sel$cntry == 'HU') & cond] <- data.sel$grspnum[(data.sel$cntry == 'HU') & cond]/322 # HUNGARY HU
data.sel$grspnum[(data.sel$cntry == 'NO') & cond] <- data.sel$grspnum[(data.sel$cntry == 'NO') & cond]/9.63 # NORWAY NO
data.sel$grspnum[(data.sel$cntry == 'PL') & cond] <- data.sel$grspnum[(data.sel$cntry == 'PL') & cond]/curr.pl # POLAND PL
data.sel$grspnum[(data.sel$cntry == 'RS') & cond] <- data.sel$grspnum[(data.sel$cntry == 'RS') & cond]/curr.rs # SERBIA RS
data.sel$grspnum[(data.sel$cntry == 'SE') & cond] <- data.sel$grspnum[(data.sel$cntry == 'SE') & cond]/10.17 # SWEDEN SE
data.sel$grspnum[(data.sel$cntry == 'CH') & cond] <- data.sel$grspnum[(data.sel$cntry == 'CH') & cond]/1.14 # SWITZERLAND CH
data.sel$grspnum[(data.sel$cntry == 'GB') & cond] <- data.sel$grspnum[(data.sel$cntry == 'GB') & cond]/1.12 # UNITED KINGDOM GB
rm(curr.cz, curr.pl, curr.rs, cond)
#### relabel fvgabc (income source whose frequency is best known) ####
# Assigning a character label coerces the whole column to character; the
# 10-digit sentinel codes are left untouched.
data.sel$fvgabc[data.sel$fvgabc == 1] <- 'Pay'
data.sel$fvgabc[data.sel$fvgabc == 2] <- 'Pension'
data.sel$fvgabc[data.sel$fvgabc == 3] <- 'Social benefits'
#### relabel gender #####
data.sel$gndr[data.sel$gndr == 1] <- 'male'
data.sel$gndr[data.sel$gndr == 2] <- 'female'
#### aggregation of ISCO-08 codes into broad occupation groups ####
# The first digit of a 4-digit ISCO-08 code identifies the major occupation
# group. Collapse the ten major groups into six broader categories; the
# 10-digit sentinel codes (recoded above) are > 9999 and are left untouched,
# ending up in their character form exactly as with the original digit-range
# loops. (Replaces two duplicated loops + 20 duplicated label assignments.)
isco_to_group <- function(x) {
  # Major group = thousands digit for real codes; sentinels/NA pass through.
  major <- ifelse(!is.na(x) & x < 10000, floor(x / 1000), x)
  groups <- c('military',                     # 0 armed forces occupations
              'manager',                      # 1
              'professionals and employees',  # 2
              'professionals and employees',  # 3
              'professionals and employees',  # 4
              'services and sales workers',   # 5
              'skilled manual workers',       # 6
              'skilled manual workers',       # 7
              'skilled manual workers',       # 8
              'elementary occupations')       # 9
  out <- as.character(major)
  idx <- !is.na(major) & major >= 0 & major <= 9
  out[idx] <- groups[major[idx] + 1]
  out
}
data.sel$isco08 <- isco_to_group(data.sel$isco08)
data.sel$isco08p <- isco_to_group(data.sel$isco08p)
rm(isco_to_group)
#### relabel ethnic-minority membership ####
data.sel$blgetmg[data.sel$blgetmg == 1] <- 'yes'
data.sel$blgetmg[data.sel$blgetmg == 2] <- 'no'
#### create citizenship/origin variable ####
# Combines citizenship (ctzcntr), country of birth (brncntr) and
# ethnic-minority membership (blgetmg). Rows matching none of the four
# conditions (e.g. sentinel codes) remain NA and are dropped at export.
data.sel$ctzmod <- NA
# citizen, born in the country, not an ethnic minority
data.sel$ctzmod[(data.sel$ctzcntr == 1) & (data.sel$brncntr == 1) & (data.sel$blgetmg == 'no')] <- 'autochthonous'
# citizen born abroad, not an ethnic minority
data.sel$ctzmod[(data.sel$ctzcntr == 1) & (data.sel$brncntr == 2) & (data.sel$blgetmg == 'no')] <- 'native'
# non-citizen born in the country, or citizen belonging to an ethnic minority
data.sel$ctzmod[((data.sel$ctzcntr == 2) & (data.sel$brncntr == 1)) | ((data.sel$ctzcntr == 1) & (data.sel$blgetmg == 'yes'))] <- 'ethnic minorities'
# non-citizen born abroad
data.sel$ctzmod[(data.sel$ctzcntr == 2) & (data.sel$brncntr == 2)] <- 'immigrant'
#### create variable for country/continent of birth ####
data.sel$brnmod <- NA
# Respondents born in the interview country get that country as country of birth
data.sel$cntbrthd[data.sel$brncntr == 1] <- data.sel$cntry[data.sel$brncntr == 1]
# aggregating country of origin into macro-regions
rich <- c('AU', '53', 'AT', 'BE', 'CA', 'CL', 'CO', 'CZ', 'DK', 'EE', 'FI', 'FR',
'DE', 'GR', 'HU', 'IS', 'IE', 'IL', 'IT', 'JP', 'KP', 'KR', 'LV', 'LT',
'LU', 'MX', 'NL', 'NZ', 'NO', 'PL', 'PT', 'SK', 'SI', 'ES', 'SE', 'CH',
'TR', 'GB', 'US', 'MC', '1000', '3000')
africa <- c("11", "14", "15", "17", "18", "2", "202", "AO", "BF", "BI", "BJ", "BW",
"CD", "CF", "CG", "CI", "CM", "CV", "DJ", "DZ", "EG", "EH", "ER", "ET",
"GA", "GH", "GM", "GN", "GQ", "GW", "KE", "KM", "LR", "LS", "LY", "MA",
"MG", "ML", "MR", "MU", "MW", "MZ", "NA", "NE", "NG", "RE", "RW", "SC",
"SD", "SH", "SL", "SN", "SO", "SS", "ST", "SZ", "TD", "TG", "TN", "TZ",
"UG", "YT", "ZA", "ZM", "ZW")
america <- c("13", "19", "21", "29", "419", "5", "AG", "AI", "AR", "AW", "BB", "BL",
"BM", "BO", "BQ", "BR", "BS", "BZ", "CR", "CU", "CW", "DM", "DO", "EC",
"FK", "GD", "GF", "GL", "GP", "GT", "GY", "HN", "JM", "KN", "KY", "LC",
"MF", "MQ", "MS", "NI", "PA", "PE", "PM", "PR", "PY", "SR", "SV", "SX",
"TC", "TT", "UM", "UY", "VC", "VE", "VG", "VI")
asia <- c("142", "143", "145", "30", "34", "35", "5000", "AE", "AF", "AM", "BD",
"BH", "BN", "BT", "CC", "CN", "CX", "HK", "HT", "ID", "IN", "IO", "IQ",
"IR", "JO", "KG", "KH", "KW", "LA", "LB", "LK", "MM", "MN", "MO", "MV",
"MY", "NP", "OM", "PH", "PK", "PS", "QA", "SA", "SG", "SY", "TH", "TJ",
"TL", "TM", "TW", "UZ", "VN", "YE")
europe <- c("150", "151", "154", "155", "39", "4000", "6000", "AD", "AL", "AX", "BA",
"BG", "BY", "CY", "FO", "GG", "GI", "HR", "IM", "JE", "LI", "MD", "ME",
"MK", "MT", "RO", "RS", "SJ", "SM", "UA", "VA", "XK")
oceania <- c("54", "57", "61", "9", "AS", "CK", "FJ", "FM", "GU", "KI", "MH", "MP", "NC",
"NF", "NR", "NU", "PF", "PG", "PN", "PW", "SB", "TK", "TO", "TW", "VU", "WF",
"WS")
antartica <- c('AQ', 'GS', 'HM', 'TF')
# Vectorized replacement of the original per-country loops. Assignment order
# is preserved, so 'TW' — listed under BOTH asia and oceania, probably a data
# entry slip — still ends up as 'Oceania', exactly as before. Unmatched codes
# (including the 10-digit sentinels) keep brnmod = NA.
data.sel$brnmod[data.sel$cntbrthd %in% rich] <- 'Rich'
data.sel$brnmod[data.sel$cntbrthd %in% africa] <- 'Africa'
data.sel$brnmod[data.sel$cntbrthd %in% america] <- 'America'
data.sel$brnmod[data.sel$cntbrthd %in% asia] <- 'Asia'
data.sel$brnmod[data.sel$cntbrthd %in% europe] <- 'Europe'
data.sel$brnmod[data.sel$cntbrthd %in% oceania] <- 'Oceania'
data.sel$brnmod[data.sel$cntbrthd %in% antartica] <- 'Antartica'
# table(data.sel$brnmod)
# Africa America Asia Europe Oceania Rich
# 348 184 328 3941 4 16794
#
# Note that there are only 4 people from Oceania and 0 from Antarctica
data.sel$ctzcntr <- data.sel$ctzshipd <- data.sel$cntbrthd <- data.sel$brncntr <- NULL
rm(africa, america, antartica, asia, europe, oceania, rich)
#### relabel domicil (respondent's description of the area they live in) #####
data.sel$domicil[data.sel$domicil == 1] <- 'Big City'
data.sel$domicil[data.sel$domicil == 2] <- 'Suburbs or Outskirts of Big City'
data.sel$domicil[data.sel$domicil == 3] <- 'Small City'
data.sel$domicil[data.sel$domicil == 4] <- 'Country Village'
data.sel$domicil[data.sel$domicil == 5] <- 'Farm or Countryside'
# table(data.sel$domicil)
# Big City Country Village Farm or Countryside Small City Suburbs or Outskirts of Big City
# 4729 6795 1050 6810 2415
#### create 9-level education variables ####
# For the subject: edulvlb holds 3-digit ISCED-based codes whose hundreds
# digit (0..8) is the education level; the special codes
# (5555/7777/8888/9999) fall outside every [i*100, (i+1)*100) range covered
# by the loop and are mapped to NA below.
data.sel$edumod <- NA
for (i in 0:8) {
data.sel$edumod[(data.sel$edulvlb >= i*100) & (data.sel$edulvlb < (i+1)*100)] <- i
}
data.sel$edumod[data.sel$edulvlb == 7777] <- NA
data.sel$edumod[data.sel$edulvlb == 5555] <- NA
data.sel$edumod[data.sel$edulvlb == 8888] <- NA
data.sel$edumod[data.sel$edulvlb == 9999] <- NA
data.sel$edulvlb <- NULL
# For the partner: same scheme, except 6666 ("not applicable" = no partner)
# is kept as the 10-digit sentinel rather than set to NA, so respondents
# without a partner are not dropped at export.
data.sel$edupmod <- NA
for (i in 0:8) {
data.sel$edupmod[(data.sel$edulvlpb >= i*100) & (data.sel$edulvlpb < (i+1)*100)] <- i
}
data.sel$edupmod[data.sel$edulvlpb == 5555] <- NA
data.sel$edupmod[data.sel$edulvlpb == 6666] <- 6666666666
data.sel$edupmod[data.sel$edulvlpb == 7777] <- NA
data.sel$edupmod[data.sel$edulvlpb == 8888] <- NA
data.sel$edupmod[data.sel$edulvlpb == 9999] <- NA
data.sel$edulvlpb <- NULL
# table(data.sel$edupmod)
# 0 1 2 3 4 5 6 7 8
# 87 372 1530 6020 990 936 1899 2219 204
#### create variables counting the respondent's children by age bracket ####
dataset1 <- dataset[(dataset$agea<=55) & (dataset$agea>=25), ]
# Birth years and relationship-to-respondent codes of household members 2..15
data.eta <- dataset1[,c('yrbrn2', 'yrbrn3', 'yrbrn4', 'yrbrn5', 'yrbrn6', 'yrbrn7', 'yrbrn8',
'yrbrn9', 'yrbrn10', 'yrbrn11', 'yrbrn12', 'yrbrn13', 'yrbrn14', 'yrbrn15')]
data.rel <- dataset1[,c('rshipa2', 'rshipa3', 'rshipa4', 'rshipa5', 'rshipa6', 'rshipa7', 'rshipa8',
'rshipa9', 'rshipa10', 'rshipa11', 'rshipa12', 'rshipa13', 'rshipa14', 'rshipa15')]
data.year <- dataset1$inwyys # interview year
# Vectorized replacement of the original nested loops.
# Age of each household member at the interview; the per-row interview year
# recycles down the matrix columns, pairing data.year[i] with row i.
member.age <- data.year - as.matrix(data.eta)
# TRUE where the household member is a son/daughter (rshipa* == 2)
is.child <- as.matrix(data.rel) == 2
# Count children per age bracket. NA in either the relationship or the birth
# year makes the conjunction NA, which na.rm = TRUE drops — the same rows the
# original !is.na() guards excluded.
# NOTE(review): special birth-year codes (e.g. 7777) yield large negative
# ages that pass the <= tests; the original loop behaved identically.
data.sel$chld3 <- rowSums(is.child & member.age <= 3, na.rm = TRUE) # children under 3
data.sel$chld10 <- rowSums(is.child & member.age <= 10, na.rm = TRUE) # children under 10
data.sel$chld14 <- rowSums(is.child & member.age <= 14, na.rm = TRUE) # children under 14
rm(dataset1, data.rel, data.eta, data.year, member.age, is.child)
#### create binary variable for partner/no partner ####
dataset1 <- dataset[(dataset$agea<=55) & (dataset$agea>=25), ]
part.cont <- dataset1[,c('rshipa2', 'rshipa3', 'rshipa4', 'rshipa5', 'rshipa6', 'rshipa7', 'rshipa8',
'rshipa9', 'rshipa10', 'rshipa11', 'rshipa12', 'rshipa13', 'rshipa14', 'rshipa15')]
# prtnr = 1 if any household member is recorded as the respondent's
# husband/wife/partner (rshipa* == 1), else 0. Vectorized replacement of the
# original cell-by-cell double loop; NA codes never match, as before.
data.sel$prtnr <- as.numeric(rowSums(part.cont == 1, na.rm = TRUE) > 0)
rm(dataset1, part.cont)
#### excluding subjects ####
# Drop low-frequency categories and non-answers before export.
# military from isco08 and isco08p (very few cases)
data.sel <- data.sel[data.sel$isco08 != 'military',]
data.sel <- data.sel[data.sel$isco08p != 'military',]
# Oceania from brnmod (only 4 respondents)
data.sel <- data.sel[data.sel$brnmod != 'Oceania',]
# NOTE(review): `!=` evaluates to NA for NA values, so rows with NA in these
# columns become all-NA rows here; they are removed later by na.omit(data.exp).
# Drop partnered respondents whose partner's education is "not applicable"
# (sentinel 6666666666); unpartnered respondents are always kept.
data.sel <- data.sel[(data.sel$prtnr == 0) | (data.sel$prtnr == 1 & data.sel$edupmod != 6666666666), ]
#### create a variable for region ####
# BUG FIX: removed the stray `data.exp <- na.omit(data.exp)` that was here —
# data.exp is only created in the export section below, so that line failed
# with "object 'data.exp' not found" on a fresh run.
OVEST = c( "AT" , "BE", "CH", "DE" , "FR" , "NL")
EST = c("EE", "LT" , "LV" ,"BG", "CZ" , "HU" , "PL" , "SK")
NORD = c( "FI" , "GB" , "IE","NO" , "SE")
SUD = c("CY", "ES" ,"IT", "PT", "SI" )
BALK = c("ME" , "HR", "RS")
# Country -> region lookup (replaces the original quintuple-nested loops).
# Countries in none of the lists, and NA countries, keep rgn = NA exactly as
# before (e.g. DK is in no list and is dropped by na.omit at export).
region.map <- c(
setNames(rep('East Europe', length(EST)), EST),
setNames(rep('West Europe', length(OVEST)), OVEST),
setNames(rep('North Europe', length(NORD)), NORD),
setNames(rep('South Europe', length(SUD)), SUD),
setNames(rep('Balkans', length(BALK)), BALK)
)
data.sel$rgn <- unname(region.map[data.sel$cntry])
rm(region.map)
#### collapse education into 3 levels ####
# BUG FIX: the three derived columns below were "initialised" with <- NULL,
# which REMOVES a column rather than creating one; the first indexed
# assignment then builds a vector only as long as the last matching row,
# which errors (or misaligns) whenever the frame is longer. Initialise with
# NA instead, consistent with how ctzmod/edumod are created above.
# for subject: 1 = low (ISCED 0-2), 2 = medium (3-4), 3 = high (5-8)
data.sel$edutre <- NA
data.sel$edutre[data.sel$edumod == 0 | data.sel$edumod == 1 | data.sel$edumod == 2 ] <- 1
data.sel$edutre[data.sel$edumod == 3 | data.sel$edumod == 4] <- 2
data.sel$edutre[data.sel$edumod == 5 | data.sel$edumod == 6 | data.sel$edumod == 7 | data.sel$edumod == 8 ] <- 3
# for partner (the "no partner" sentinel is carried over)
data.sel$eduptre <- NA
data.sel$eduptre[data.sel$edupmod == 0 | data.sel$edupmod == 1 | data.sel$edupmod == 2 ] <- 1
data.sel$eduptre[data.sel$edupmod == 3 | data.sel$edupmod == 4] <- 2
data.sel$eduptre[data.sel$edupmod == 5 | data.sel$edupmod == 6 | data.sel$edupmod == 7 | data.sel$edupmod == 8 ] <- 3
data.sel$eduptre[data.sel$edupmod == 6666666666] <- 6666666666
#### create a column for educational homogamy ####
# omogamia: 1 = both partners in the same 3-level band, 0 = different bands,
# 6666666666 = no partner (eduptre carries the sentinel, making the gap huge;
# that case is assigned last, overriding the 0 set by the != 0 rule).
data.sel$omogamia <- NA
edu.gap <- data.sel$edutre - data.sel$eduptre
data.sel$omogamia[edu.gap == 0] <- 1
data.sel$omogamia[edu.gap != 0] <- 0
data.sel$omogamia[abs(edu.gap) > 10] <- 6666666666
#### export data for the analysis ####
# Variables kept for the modelling stage (survey-design variables
# anweight/psu/stratum included)
var.sel1 <- c('gndr', 'agea', 'cntry', 'rgn' , 'ctzmod', 'brnmod', 'domicil', 'prtnr', 'pdwrkp',
'isco08p', 'edumod', 'edutre', 'edupmod', 'eduptre' , 'omogamia' , 'chld3', 'chld10', 'chld14',
'anweight', 'psu', 'stratum', 'pdwrk')
data.exp <- data.sel[, var.sel1]
# Refusal / don't know / no answer become NA and are dropped below; the
# "not applicable" sentinel 6666666666 (no partner) is deliberately kept.
data.exp[data.exp == 7777777777] <- NA
data.exp[data.exp == 8888888888] <- NA
data.exp[data.exp == 9999999999] <- NA
data.exp <- na.omit(data.exp)
#write.csv(data.exp, file = "Data_Cleaning/data_work.csv", row.names = FALSE)
|
ddc81e63ae70677bc45c443c97662e698f8819f1 | 6fbb5b1e55e155897b7198f1960c231b24f3148d | /Exercise9.R | 7908f1b0cdfec8ec053e2b8bbb01fdbbf8d4f9d0 | [] | no_license | xfeng3/ICB2019_Exercise09 | aa2b57fe26544339fd784a4b8190fbb5f889769f | 276fd35b434f74052073b646c5beaa7e169c914f | refs/heads/master | 2020-09-15T13:36:39.875474 | 2019-12-02T05:07:32 | 2019-12-02T05:07:32 | 223,462,026 | 0 | 0 | null | 2019-11-22T18:27:03 | 2019-11-22T18:27:02 | null | UTF-8 | R | false | false | 960 | r | Exercise9.R | #Question 1
library(ggplot2)
# Question 1: scatter plot of Mass against Temperature with a linear fit
performance <- read.table(
"KCS-1 catalytic performance.txt",
header = TRUE, sep = "\t", stringsAsFactors = FALSE
)
p <- ggplot(data = performance, aes(x = Temperature, y = Mass))
p +
geom_point() +
stat_smooth(method = "lm") +
theme_bw()
#Question 2
library(ggplot2)
# Read comma-separated data with 'region' and 'observations' columns
datafile = read.table("data.txt",header=TRUE,sep=",", stringsAsFactors=FALSE)
# Barplot of the mean observation in each of the four populations.
# NOTE(review): `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`;
# kept as-is for compatibility with the ggplot2 version this was written for.
ggplot(data =datafile,aes(x = region, y = observations)) + xlab('region') + ylab("means of observations") + stat_summary(geom="bar", fun.y = "mean",fill="red") + theme_bw()
# Scatter (jitter) plot of all individual observations, per region
ggplot(data=datafile,aes(x=region,y=observations)) + xlab("region") + ylab("observations") + geom_jitter(alpha=0.1) + theme_bw()
# Interpretation:
# the barplot shows the means of the four populations are quite similar
# the scatterplot suggests the distributions of the populations differ
# so the mean value alone cannot represent the underlying data
696e5ad22d05538c9c5fc7fad602136e3d583a3b | c3bd69562a080767188a3df5c202a5b600e2c81a | /man/ldNe.Rd | b338405384b401ecf0796eb73b43a831932061d1 | [] | no_license | EricArcher/strataG | 007bde64c4f999ddea609e0e18333c9bfe87d155 | d89348cb390379522202beed20be49fa77cd5eae | refs/heads/master | 2023-02-27T16:01:24.665136 | 2023-02-09T21:11:11 | 2023-02-09T21:11:11 | 34,808,785 | 25 | 16 | null | 2020-04-21T16:36:11 | 2015-04-29T17:46:44 | HTML | UTF-8 | R | false | true | 2,229 | rd | ldNe.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ldNe.R
\name{ldNe}
\alias{ldNe}
\title{ldNe}
\usage{
ldNe(
g,
maf.threshold = 0,
by.strata = FALSE,
ci = 0.95,
drop.missing = FALSE,
num.cores = 1
)
}
\arguments{
\item{g}{a \linkS4class{gtypes} object.}
\item{maf.threshold}{smallest minimum allele frequency permitted to include
a locus in calculation of Ne.}
\item{by.strata}{apply the `maf.threshold` by strata. If `TRUE`
then loci that are below this threshold in any strata will be removed
from the calculation of Ne for all strata. Loci below `maf.threshold`
within a stratum are always removed for calculations of Ne for that
stratum.}
\item{ci}{central confidence interval.}
\item{drop.missing}{drop loci with missing genotypes? If `FALSE`, a slower
procedure is used where individuals with missing genotypes are removed
in a pairwise fashion.}
\item{num.cores}{The number of cores to use to distribute computations over.
If set to \code{NULL}, the value will be what is reported
by \code{\link[parallel]{detectCores} - 1}.}
}
\value{
a data.frame with one row per strata and the following columns:
\describe{
\item{\code{stratum}}{stratum being summarized}
\item{\code{S}}{harmonic mean of sample size across pairwise comparisons of
loci}
\item{\code{num.comp}}{number of pairwise loci comparisons used}
\item{\code{mean.rsq}}{mean r^2 over all loci}
\item{\code{mean.E.rsq}}{mean expected r^2 over all loci}
\item{\code{Ne}}{estimated Ne}
\item{\code{param.lci, param.uci}}{parametric lower and upper CIs}
}
}
\description{
Estimate Ne from linkage disequilibrium based on Pearson
correlation approximation following Waples et al 2016. Adapted from code
by R. Waples and W. Larson.
}
\references{
Waples, R.S. 2006. A bias correction for estimates of effective
population size based on linkage disequilibrium at unlinked gene loci.
Conservation Genetics 7:167-184. \cr
Waples RK, Larson WA, and Waples RS. 2016. Estimating contemporary
effective population size in non-model species using linkage
disequilibrium across thousands of loci. Heredity 117:233-240;
doi:10.1038/hdy.2016.60
}
\author{
Eric Archer \email{eric.archer@noaa.gov}
}
|
f5698ea83a52394fc5914158ee0d0074493d79cc | cd2a7d03dee24e0245b89370e2c1e497959b9b2b | /deprecated/Location Finder/Received/1. R script/4.0_flood_impact_analysis_script.R | bdd2ec3ff66b5351c648bb62d0e30e15ad0a8aa9 | [] | no_license | rodekruis/text_mining | 5fc8f916182ceb7e538081fc7056a2b37849de16 | fce6a3b092cd5fecdde18f1d6b05c3cd05b9c107 | refs/heads/master | 2022-02-23T21:11:12.542976 | 2022-02-21T14:13:13 | 2022-02-21T14:13:13 | 154,344,621 | 5 | 2 | null | 2021-02-16T09:00:15 | 2018-10-23T14:40:27 | Python | UTF-8 | R | false | false | 10,897 | r | 4.0_flood_impact_analysis_script.R | # Author: Michael Osunga
# Date: 7th, May 2019
# Task: Historical flood data analysis
# Table of contents
# 1. House keeping
# 2. Loading dataframes
# 3. Data cleaning
# 4. Parsing county, subcounty and ward names on 'Comments' strings
## 1. Housekeeping ------------------------------------------------------------
## clear workspace
rm(list = ls(all = TRUE))
# installing and reading multiple packages
packages <- c("tidyverse", "dplyr","ggplot2", "anytime", "lubridate", "rgdal")
ipak <- function(pkg){
new.pkg <- pkg[!(pkg %in% installed.packages()[, "Package"])]
if (length(new.pkg))
install.packages(new.pkg, dependencies = TRUE)
sapply(pkg, require, character.only = TRUE)
}
ipak(packages)
# setting working directory
setwd("E:/ICHA/Projects/flood_impact/data/raw_data")
## 2. Loading flood dataframes and Kenya shapefile ----------------------------
floodWRA<- read.csv("Flood Impact Database Main.csv")
floodEoC<- read.csv("Floods Historical data.csv")
desinventar <- read.csv("Desinventar_kenya_impact_data_v1.csv",stringsAsFactors = FALSE)
# loading shapefiles
KENadmin<-readOGR(dsn="E:/ICHA/Projects/flood_impact/data/raw_data/shapefiles/KenAdminData", layer="cooksKE_data_v6")
KENadmin.old <- readOGR(dsn="E:/ICHA/Geospatial Data/KEN_adm", layer = "KEN_adm5")
# cleaning new admin shapefile
KENadmin@data <- KENadmin@data %>%
dplyr::select(1:3) %>%
mutate_if(is.factor,as.character)
# cleaning old admin shapefile
KENadmin.old@data<-KENadmin.old@data %>%
dplyr::select(matches("NAME")) %>%
mutate_if(is.factor, as.character) %>%
mutate_if(is.character, str_trim)
# coercing old official administrative names to vectors
admin5.names<-as.character(unique(KENadmin.old@data$NAME_5))
admin4.names<-as.character(unique(KENadmin.old@data$NAME_4))
admin3.names<-as.character(unique(KENadmin.old@data$NAME_3))
# coercing official administrative names to vectors
county.names<-as.character(unique(KENadmin@data$countis))
subcounty.names<-as.character(unique(KENadmin@data$subcnts))
ward.names<-as.character(unique(KENadmin@data$wards))
## 3. Data cleaning -----------------------------------------------------------
# cleaning WRA data
# The WRA sheet was exported with junk header rows; the real column names
# live in row 10 of the cleaned frame (see `my.names` below).
floodWRA.clean <- floodWRA %>%
  dplyr::select(1:14) %>%
  mutate_if(is.factor,as.character) %>%
  mutate_if(is.character, str_trim) %>%
  filter(!X24.04.18=="") %>%
  mutate(X24.04.18.1=as.character(X24.04.18.1),
         X24.04.18.1=lubridate::dmy(X24.04.18.1),
         Kiaoni..Makueni.County.=as.character(Kiaoni..Makueni.County.)) %>%
  rename(date=X24.04.18.1,
         Location=Kiaoni..Makueni.County.,
         Houses.Destroyed=X150)
# setting variable names based on value names in row 10
my.names <- floodWRA.clean[10,]
my.names<-as.character(my.names)
colnames(floodWRA.clean) <- my.names
# further cleaning
# drop the header row itself, keep only flood-type events, parse the start
# date, and rename columns to the analysis vocabulary
floodWRA.cleaned<-floodWRA.clean[-10,] %>%
  mutate_if(is.character, str_to_title) %>%
  filter(`Type (Riverine, flash, fluctuating lake levels, Dam failure)` %in% c("Flash",
                                                                               "Flash Floods",
                                                                               "Flash Flood",
                                                                               "Flood", "Riverine/Flash Flood")) %>%
  mutate(Start=lubridate::dmy(Start)) %>%
  rename(Date=Start,
         Event=`Type (Riverine, flash, fluctuating lake levels, Dam failure)`,
         Deaths=`Human losses (Dead)`,
         Directly.affected=`Total Affected Residents`,
         Lost.Livestock=`Livestock losses`,
         Affected.Area=`Affected Area (km2)`,
         Losses..Local= `Estimated Damage (Ksh)`)
# exporting dataframe as .csv
write.csv(floodWRA.cleaned, "floodWRA_data.csv", row.names = F)
# cleaning Desinventar data
# Strip descriptive prefixes/suffixes ("Floods In ...", "... Sub-County" etc.)
# from Location so only the bare place name remains, parse dates, tag the
# source, and keep only digits in the affected/relocated counts.
desinventar.clean <- desinventar %>%
  dplyr::select(Date,County,Event,Location,Deaths:Damages.in.roads.Mts,Comments) %>%
  mutate_if(is.character,str_trim) %>%
  mutate_if(is.character, str_to_title) %>%
  mutate(Location=gsub("Flash Reported In |Floods |Floods In |Flash Floods In| Sub-County| Area|At |Flash Floods At| District| Division| Town| Village| village| Sub County| Subcounty| County| Location|Sub-Location| Sub-Location","",Location)) %>%
  mutate(Date=lubridate::dmy(Date)) %>%
  filter(!Location=="") %>%
  mutate_if(is.character,str_trim) %>%
  mutate_if(is.character, str_to_title) %>%
  mutate(source="Desinventar") %>%
  mutate_if(is.numeric,replace_na, replace = NA) %>%
  mutate(Directly.affected=gsub("[^0-9]","",Directly.affected),
         Relocated=gsub("[^0-9]","",Relocated))
# cleaning EoC data
# Same treatment for the EoC log: derive Injured from minor+critical
# casualties, rename to the shared vocabulary, strip narrative text from
# Location, and add zero-filled columns so it can be row-bound with the
# Desinventar data below.
floodEoC.clean <- floodEoC %>%
  mutate(Injured=MINOR_CAS+CRITICAL_CAS) %>%
  dplyr::select(DATE_TIME,COUNTIES,TYPE,SCENE,DEAD,Injured,MISSING,HH.AFFECTED,DAMAGE,RESCUED,SITUATION) %>%
  rename(Date=DATE_TIME,
         County=COUNTIES,
         Event=TYPE,
         Location=SCENE,
         Deaths=DEAD,
         Missing=MISSING,
         Houses.Destroyed=HH.AFFECTED,
         Houses.Damaged=DAMAGE,
         Evacuated=RESCUED,
         Comments=SITUATION) %>%
  mutate_if(is.factor, as.character) %>%
  mutate_if(is.character,str_trim) %>%
  mutate_if(is.character,str_to_title) %>%
  mutate(Date=lubridate::dmy(Date)) %>%
  mutate(Location=gsub("Rainfall Situation | Constituency| Coun|Situation Update|Mudslide |Mudslide Incident |Landslides |Landslide |Lightning Strikes |Lightening Strike | Sub-County| Sub Counties| Flash Floods|In |Flash Flood In |Floods |Floods In |Flash Floods |Flash Floods In |At | County| Sub County| Sub Location| Sub -County| Sub-Location| Flash Floods In","",Location)) %>%
  mutate(source="EoC") %>%
  mutate(Houses.Damaged=gsub("[^0-9]"," ",Houses.Damaged),
         Houses.Destroyed=gsub("[^0-9]"," ",Houses.Destroyed)) %>%
  # keep only the first number when several were typed into the damage cell
  tidyr::separate(col = Houses.Damaged,
                  into = "Houses.Damaged",
                  sep = " ",
                  remove = FALSE) %>%
  mutate(Directly.affected=0,
         Indirectly.Affected=0,
         Relocated=0,
         Losses..Local=0,
         Education.centers=0,
         Damages.in.crops.Ha.=0,
         Lost.Cattle=0,
         Lost.Goats=0,
         Lost.Livestock=0)
# selecting desinventar variables based on variable names in EoC data
desinventar.clean2 <- desinventar.clean %>%
  dplyr::select(names(floodEoC.clean))
# appending EoC and desinventar dataframes
EocDesinventar<-rbind(floodEoC.clean,desinventar.clean2) %>%
  mutate_if(is.character,str_trim) %>%
  mutate_if(is.numeric,replace_na, replace = NA) %>%
  dplyr::select(1:10,13:21,12:13,11)
# Note that geocoding was done in python
# 4. Parsing county, subcounty and ward names on 'Comments' strings -----------
# loading the geocoded dataframe
EocDesinventarGeocoded<-read.csv("EocDesinventar_data3_geocoded.csv", stringsAsFactors = F)
# further cleaning of county names
# Dates arrive in mixed formats: anydate() first, lubridate::dmy() as a
# fallback where anydate() failed. County names are normalised to the
# official spellings used in the shapefile.
EocDesinventar.admin.coded<-EocDesinventarGeocoded %>%
  mutate(Date1=anydate(Date),
         Date3=if_else(is.na(Date1),lubridate::dmy(Date),Date1)) %>%
  dplyr::select(-Date, -Date1,-X) %>%
  rename(Date=Date3) %>%
  dplyr::select(23,1:22) %>%
  mutate(County=if_else(County=="Taita-Taveta","Taita Taveta",
                        if_else(County=="Keiyo-Marakwet","Elgeyo-Marakwet",
                                if_else(County=="Tana-River","Tana River",
                                        if_else(str_detect(County,"Muran"),"Muranga",
                                                if_else(County=="Uasin-Gishu","Uasin Gishu",County)))))) %>%
  mutate(Comments=str_to_title(Comments),
         Country="Kenya") %>%
  # parsing county, subcounty and ward names on 'Comments' strings
  # (str_extract with an alternation regex built from the official name lists;
  # returns the first match or NA)
  mutate(Wards.comments=str_extract(Comments, paste(ward.names, collapse = "|"))) %>%
  mutate(Subcounty.comments=str_extract(Comments, paste(subcounty.names, collapse = "|"))) %>%
  mutate(County.comments=str_extract(Comments, paste(county.names, collapse = "|")))
# checking for NAs in newly created admin variables
sapply(EocDesinventar.admin.coded, function(x) sum(is.na(x)))
# subsetting flood events with 0 lats and lons
flood_events_lats0<-EocDesinventar.admin.coded %>%
  filter(Long==0.0000000)
# subsetting flood events where County name is None
# These records were not geocoded; give them the same county normalisation
# and name parsing, with Lat/Long forced to 0.
EocDesinventarGeocoded_None <- EocDesinventar %>%
  filter(County=="None") %>%
  mutate(County=if_else(County=="Taita-Taveta","Taita Taveta",
                        if_else(County=="Keiyo-Marakwet","Elgeyo-Marakwet",
                                if_else(County=="Tana-River","Tana River",
                                        if_else(str_detect(County,"Muran"),"Muranga",
                                                if_else(County=="Uasin-Gishu","Uasin Gishu",County)))))) %>%
  mutate(Comments=str_to_title(Comments),
         Country="Kenya",
         Lat=0,
         Long=0) %>%
  # parsing county, subcounty and ward names on 'Comments' strings
  mutate(Wards.comments=str_extract(Comments, paste(ward.names, collapse = "|"))) %>%
  mutate(Subcounty.comments=str_extract(Comments, paste(subcounty.names, collapse = "|"))) %>%
  mutate(County.comments=str_extract(Comments, paste(county.names, collapse = "|")))
# appending the 2 dataframes
EocDesinventarGeocoded.master<-rbind(EocDesinventarGeocoded_None,EocDesinventar.admin.coded)
# checking for NAs amongst variables
sapply(EocDesinventarGeocoded.master, function(x) sum(is.na(x)))
# checking for wards that did not parse by source
(wards.x<-EocDesinventarGeocoded.master %>%
    filter(is.na(Wards.comments)) %>%
    dplyr::select(Wards.comments,source) %>%
    group_by(source) %>%
    count(source))
# checking for sub-counties that did not parse by source
(county.x<-EocDesinventarGeocoded.master %>%
    filter(is.na(Subcounty.comments)) %>%
    dplyr::select(Subcounty.comments,source) %>%
    group_by(source) %>%
    count(source))
# exporting dataframe to .csv
write.csv(EocDesinventarGeocoded.master,"E:/ICHA/Projects/flood_impact/data/clean_data/EocDesinventar_master.csv", row.names = FALSE)
# NOTE(review): these three vectors were already built in section 2 above;
# the re-assignment here is redundant (but harmless).
admin5.names<-as.character(unique(KENadmin.old@data$NAME_5))
admin4.names<-as.character(unique(KENadmin.old@data$NAME_4))
admin3.names<-as.character(unique(KENadmin.old@data$NAME_3))
# Geocode the WRA records by parsing administrative names (both the new and
# the old admin hierarchies) out of the free-text Location column.
floodWRA.cleaned.geocoded <- floodWRA.cleaned %>%
  mutate(Location=str_to_title(Location)) %>%
  # parsing county, subcounty and ward names from the Location strings
  mutate(Wards=str_extract(Location, paste(ward.names, collapse = "|"))) %>%
  mutate(Subcounty=str_extract(Location, paste(subcounty.names, collapse = "|"))) %>%
  mutate(County=str_extract(Location, paste(county.names, collapse = "|"))) %>%
  mutate(admin3=str_extract(Location,paste(admin3.names, collapse = "|"))) %>%
  mutate(admin4=str_extract(Location,paste(admin4.names, collapse = "|"))) %>%
  mutate(admin5=str_extract(Location,paste(admin5.names, collapse = "|"))) %>%
  dplyr::select(Location,County,Subcounty,Wards,admin3,admin4,admin5)
#### END OF SCRIPT
|
e5750a45acf49931fdf08a1c96f0792f4cb6ed94 | cc2d0590bb9b7733d4875c3dac25f9ba5b4bf513 | /inst/example_8.R | b4d9d19c72f6d3ae1fa4d2eadfab1a99c6bf055b | [] | no_license | korterling/jsReact | 2dd66c329fc07468b044413f2af63f8841c13eee | 304439317bca56a1078cfc6d24654f57ca0f824e | refs/heads/master | 2021-08-12T05:34:15.623422 | 2017-11-14T13:25:25 | 2017-11-14T13:25:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,135 | r | example_8.R | # In example 2, we learnt about using slider with plotly. What good does it do us?
# In this example, we reproduce Han Rosling's famous data visualisation.
rm(list = ls())
library(jsReact)
library(magrittr)
library(tweenr)
df <- tweenr_example()
plot_df <- tween_elements(df, 'time', 'id', 'ease', nframes = 500)
plot_df$alpha <- plot_df$alpha / max(plot_df$alpha) # cater for plotly
# Create web interface (html)
my_html <- create_html() %>%
add_js_library("plotly") %>%
add_title("Frame slider") %>%
add_slider(type = "range", id = "slide_input", min = "0", max = "500", value = "0",
oninput = "send_value(this.Value)") %>%
add_title("Plotly plot") %>%
#add_div(id = "output") %>%
add_div(id = "plotly_plot") %>%
add_style("input#slide_input { width: 30em; }")
my_html %<>% add_script("
function send_value(input) {
// this function sends the frame value to R.
var input = document.getElementById('slide_input');
ws.send(input.value);
}
ws.onmessage = function(msg) {
// this function gets the plot data from R.
var data0 = JSON.parse(msg.data);
var trace1 = {
x: data0.x, y: data0.y,
marker: { size: data0.size, opacity: data0.alpha },
mode: 'markers', type: 'scatter'
};
var layout = {
xaxis: {range: [Math.min(...data0.plot_range.xmin), Math.max(...data0.plot_range.xmax)]},
yaxis: {range: [Math.min(...data0.plot_range.ymin), Math.max(...data0.plot_range.ymax)]},
margin: {t:10, l:30}
};
Plotly.newPlot('plotly_plot', [trace1], layout);
}
")
write_html_to_file(my_html, file = "inst/sample.html")
# Create the R function to handle the interaction calculation
my_r_fun <- function(msg) {
# use function binding if one doesn't like calling to global variable
to_js <- as.list(plot_df %>% dplyr::filter(.frame == msg)) %>%
append(list(plot_range = list(
xmin = min(plot_df$x) - 0.5, xmax = max(plot_df$x) + 0.5,
ymin = min(plot_df$y) - 1, ymax = max(plot_df$y) + 1
)))
to_js
}
# Create and start app
my_app <- create_app("inst/sample.html", user_function = my_r_fun)
start_app(my_app)
|
7a03ab96e182f76fca7df21adb44ee059c40e0d1 | 98dc55859093633e1ad5aaed6a16d8513ce0aa80 | /R functions/choose_qat_sensors_CL.r | 5012ef41a76e0b00f29b46e6bdd20fe862784aa0 | [] | no_license | reidsteele2/Fixed-Stations-BioChem-Reload | 7ca1fbf637f92802a22f9c4c5ba4134635291d5f | 912931734402745c8fa4c09136162b27cefcb65b | refs/heads/master | 2022-12-03T16:31:51.005131 | 2020-08-19T13:09:49 | 2020-08-19T13:09:49 | 288,531,120 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,354 | r | choose_qat_sensors_CL.r | # QAT file usualy has primary and secondary sensors for oxy, temp, sal, and cond, named oxy1, oxy2 etc.
# This function allows user to choose between primary or secondary sensor for each parameter.
#
# Input: qat data frame with all columns and sensors
#
# Output: data frame containing ID, pressure, and sensors of choice.
# The name of each parameter does not contain number, just namre (oxy, temp etc.)
#
# Gordana lazin, 13 April 2016
# choose_qat_sensors: interactively pick primary (1) or secondary (2) sensors
# from a QAT data frame.
#
# qat -- data frame with an `id` column plus numbered sensor columns
#        (oxy1/oxy2, sal1/sal2, temp1/temp2, cond1/cond2, fluor1/fluor2)
#        and un-numbered columns pressure, ph, par.
#
# Returns a list with:
#   qf               -- data frame of id + chosen sensor columns, with the
#                       sensor number stripped from the column names
#   original_sensors -- the numbered names (e.g. "temp1") that were selected
#
# NOTE(review): gconfirm()/ginput() come from the gWidgets toolkit, which is
# not loaded anywhere in this file -- confirm the caller attaches it.
choose_qat_sensors <- function(qat) {
  # CHOOSE BETWEEN MULTIPLE SENSORS (primary or secondary)
  # define qat parameters that have primary and secondary sensor
  sensors=c("oxy","sal","temp","cond","fluor")
  op=gconfirm("Would you like to use sensor 1 for all parameters?: ")
  # OPTION 1: if you want to use primary sensors only
  if (op==T) {
    sn=paste0(sensors,1) # name of the sensors to be loaded (like oxy1)
    cat("-> Retreiving primary sensors only.")
  }
  # OPTION 2: if you want to use combination of primary and secondary sensors you have to choose for each
  if (op==F) {
    n=NULL
    # choose sensors for each parameter
    for (i in 1:length(sensors)) {
      n[i]=ginput(paste("Which sensor would you like to use for", sensors[i],"(1 or 2)?:") )
    }
    sn=paste0(sensors,n)
    cat(paste("-> Retreiving following sensors:"),paste(sn,collapse=", "))
  }
  # add the rest of the sensors to the sensor list
  sna=c(sn,"pressure","ph","par")
  # check if all possible sensors are there
  cl=which(names(qat) %in% sna)
  if (length(cl)==length(sna)) {
    cat("All sensors found in QAT file")
  }
  if (length(cl)<length(sna)) {
    missing_sensors=setdiff(sna,names(qat)[cl])
    cat("\n","\n")
    cat(paste("-> Sensors not found in in QAT file:", paste(missing_sensors,collapse=", ")))
    cat("\n","\n")
  }
  # qat data frame containing only fields to be loaded
  qf=qat[,c("id",names(qat)[cl])]
  # names of the original sensors assigned for each measurements
  # (columns whose name still contains a digit after letters are removed;
  # as.numeric on non-numeric remainders yields NA with a coercion warning)
  original_sensors=names(qf)[which(!is.na(as.numeric(gsub("[[:alpha:]]","",names(qf)))))]
  # remove sensor number from the column header (replace temp1 with temp)
  names(qf)=gsub("[[:digit:]]","",names(qf))
  l=list(qf=qf,original_sensors=original_sensors)
  return(l)
}
d52af9287a3b5bf0f5708b2d295ed26b33482ce9 | 0322546b83d82d604784d71ce4eda7531879a3ae | /cvsc_welcome_mail.R | 2ed2fc772fc0d9e13c6191cfa00d647244493277 | [] | no_license | apawlik/cvcs-mailer | 658ad532f684fdbf507b048b09bd117700d6b713 | c608c82a46812f59fde49f1248af4f8e3ff92f4a | refs/heads/master | 2020-06-20T09:58:23.124375 | 2019-07-28T00:13:12 | 2019-07-28T00:13:12 | 197,086,333 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,643 | r | cvsc_welcome_mail.R | # (c) Aleksandra Pawlik, 2019
# Short script to help automate sending out welcome email to the CVSC guests
# The script reads in the file with all bookings (manually downloaded through the booking portal on )
library(tidyverse)
library(ggplot2)
library(filesstrings)
current_date <- as.Date(Sys.Date(), "%d-%m-%Y")
date_str <- gsub("-", "_", as.character(current_date))
filename <- paste("BookingRecords_", date_str, ".csv", sep="")
bookings <- read.csv(filename)
date2 <- format(current_date+1, "%d-%m-%Y")
b1 <- bookings %>%
select(Email, Check.In.Date, ) %>%
filter(as.character(Check.In.Date) == as.character(date2))
write_lines(b1$Email, path = paste("email_guests_on", date_str, ".csv", sep=""),sep=",")
## Linen report
# Bookings that need linen (Haka Tours groups always do), with stay length,
# sorted by check-in date.
linen <- bookings %>%
  filter( Linen.Required == "Yes" | Last.Name =="Haka Tours") %>%
  mutate(Guest = paste(First.Name, Last.Name)) %>%
  mutate(Staying.Days = (as.Date(Check.Out.Date, "%d-%m-%Y") - as.Date(Check.In.Date, "%d-%m-%Y") )) %>%
  select(Check.In.Date, Check.Out.Date, Staying.Days, Guest, Room.Type) %>%
  arrange(date = as.Date(Check.In.Date, "%d-%m-%Y"))
# header line first, then the table appended below it
write_lines(paste("Linen requirements as of ", date_str),path = paste("linen_report_", date_str, ".csv", sep=""))
write.table(linen, file = paste("linen_report_", date_str, ".csv", sep=""), sep=",", row.names = F, append=TRUE)
# Kitchen report
# Section 1: guests arriving today, with stay length and dietary flag.
new_guests <- bookings %>%
  filter(as.character(Check.In.Date) == as.character(format(current_date, "%d-%m-%Y")) ) %>%
  mutate(Guest = paste(First.Name, Last.Name)) %>%
  mutate(Staying.Days = (as.Date(Check.Out.Date, "%d-%m-%Y") - as.Date(Check.In.Date, "%d-%m-%Y") )) %>%
  select(Guest, Staying.Days, Vegetarian)
write_lines(paste("Guests arriving today - ", date_str),path = paste("kitchen_report_", date_str, ".csv", sep=""))
write_lines("Guest name , Staying days , vegetarian ",path = paste("kitchen_report_", date_str, ".csv", sep=""), append=TRUE)
write_lines("- - - - - - - - - -",path = paste("kitchen_report_", date_str, ".csv", sep=""), append=TRUE)
write.table(new_guests, col.names = F, file = paste("kitchen_report_", date_str, ".csv", sep=""), row.names=F, sep=",", append=TRUE)
write_lines(paste("New guests total ",nrow(new_guests)),path = paste("kitchen_report_", date_str, ".csv", sep=""), append=TRUE)
write_lines("----------",path = paste("kitchen_report_", date_str, ".csv", sep=""), append=TRUE)
write_lines("----------",path = paste("kitchen_report_", date_str, ".csv", sep=""), append=TRUE)
# Section 2: guests already in house (checked in before today, out after).
current_guests <- bookings %>%
  mutate(Guest = paste(First.Name, Last.Name)) %>%
  filter(as.Date(Check.In.Date, "%d-%m-%Y") < as.Date(current_date, "%d-%m-%Y") & as.Date(current_date, "%d-%m-%Y") < as.Date(Check.Out.Date, "%d-%m-%Y") ) %>%
  select(Guest, Vegetarian)
write_lines(paste("Current guests "),path = paste("kitchen_report_", date_str, ".csv", sep=""), append=TRUE)
write_lines("Guest name , vegetarian ",path = paste("kitchen_report_", date_str, ".csv", sep=""), append=TRUE)
write_lines("- - - - - - - - - -",path = paste("kitchen_report_", date_str, ".csv", sep=""), append=TRUE)
write.table(current_guests, col.names = F, file = paste("kitchen_report_", date_str, ".csv", sep=""), row.names=F, sep=",", append=TRUE)
write_lines(paste("Current guests total", nrow(current_guests)),path = paste("kitchen_report_", date_str, ".csv", sep=""), append=TRUE)
# Cleanup
# Move every .csv file that is not part of today's run into Archive/.
# Bug fix: the original destination used paste("Archive/", x), whose default
# separator is a space, so archived files ended up named "Archive/ <file>"
# (with a leading space in the filename). file.path() builds the path
# correctly. The filename pattern is also anchored so only real .csv files
# (not e.g. "foo.csv.bak") are touched.
todays_files <- paste0(
  c("linen_report_", "kitchen_report_", "BookingRecords_", "email_guests_on"),
  date_str, ".csv"
)
report_files <- list.files(pattern = "\\.csv$")
# invisible() suppresses the list of TRUE/FALSE rename results when the
# script is run with Rscript
invisible(lapply(report_files, function(x) {
  if (!(x %in% todays_files)) {
    file.rename(from = x, to = file.path("Archive", x))
  }
}))
## Special requests
# Upcoming bookings (check-in after today) that carry free-text notes,
# excluding the boilerplate linen and work-party-voucher notes; sorted by
# check-in date so the soonest requests appear first.
special_requests <- bookings %>%
  filter(Other.Information != "" ) %>%
  filter(Other.Information != "Linen required") %>%
  filter( !grepl("work party vouchers", Other.Information) ) %>%
  filter(as.Date(Check.In.Date, "%d-%m-%Y") > as.Date(current_date, "%d-%m-%Y")) %>%
  select(Booking.No., First.Name, Email, Check.In.Date, Other.Information) %>%
  arrange(date = as.Date(Check.In.Date, "%d-%m-%Y"))
write.table(special_requests, col.names = F, file = paste("special_requests", date_str, ".csv", sep=""), row.names=F, sep=",")
## TO DO
# check if already emailed for a given booking
# schedule automated emails
# download the bookings csv
1def381218e9d4bcafde3c74a94cd2e8a26cd3fc | 311ae82a6efaf9c9cac8d1b517ba992815c88128 | /Production/DSM/pH/digital_soil_mapping/spatialprediction/d3/statistical_summaries/spatialise_ph_ss_4a1_d3_.R | 2fb377b6554d5a910fdb4a60446d4d5f2cb03803 | [
"CC0-1.0"
] | permissive | AusSoilsDSM/SLGA | 8c77f0ad24a49e05f00c8a71b452214e401d6a3f | 41d8e2c009c1595c87bdd805a8ba6c4a3f45cbd1 | refs/heads/main | 2023-03-18T13:18:33.073555 | 2023-03-07T23:54:51 | 2023-03-07T23:54:51 | 555,090,777 | 7 | 2 | null | 2023-03-07T21:55:39 | 2022-10-20T23:51:23 | R | UTF-8 | R | false | false | 3,846 | r | spatialise_ph_ss_4a1_d3_.R | ### TERN LANDSCAPES
# Soil pH 4A1
# Author: Brendan Malone
# Email: brendan.malone@csiro.au
# created: 19.5.22
# modified: 19.5.22
# CODE PURPOSE
# calculate mean and statistical moments from prediction iterations
##
### variables
vart<- "pH_4a1"
depth<- "d3"
batch<- 2
# tile counter
i_cnt<- 1
## libraries
library(sp);library(rgdal);library(raster)
# root directories
model.out<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/models/"
root.tiles<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/predictions/tiles/"
root.slurm<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/rcode/digital_soil_mapping/spatialprediction/slurm/"
#r.code<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/rcode/digital_soil_mapping/spatialprediction/d1/statistical_summaries/spatialise_ph_ss_4a1_d1_ALL.R"
#slurms<- "/datasets/work/af-tern-mal-deb/work/projects/ternlandscapes_2019/soil_pH/rcode/digital_soil_mapping/spatialprediction/slurm/pH_4a1/d1/"
# average model error summaries
avg.mse.dat<- read.csv(file = paste0(model.out, "ranger_VAL_diogs_pH_summary_alldepths_90m.csv"))
avg.mse.dat
avgMSE<- avg.mse.dat$MSE[which(avg.mse.dat$depth == depth)]
avgMSE
### Folders where the predictions are
fols<- as.numeric(list.files(root.tiles, full.names = FALSE))
length(fols)
#select the folder
sfol<- fols[i_cnt]
nm1<- paste0(root.tiles,sfol, "/",depth, "/",vart, "/")
## get the covariates
files<- list.files(path = nm1, pattern="pred_iteration", full.names=TRUE, recursive = T)
#files
#stack rasters
s1<- stack()
for (j in 1:length(files)){
s1<- stack(s1, raster(files[j]))}
#names(s1)
# STATISTICAL MOMENTS
# Calculate mean
meanFile<- paste0(nm1, "pred_",vart,"_mean_",depth, ".tif")
bootMap.mean<- mean(s1)
bootMap.mean.r<- signif(bootMap.mean, digits = 3)
writeRaster(bootMap.mean.r,filename = meanFile, format = "GTiff", overwrite = TRUE)
# add kriging estimate
res.raster<- raster(paste0(nm1,"pred_residual_",depth,".tif"))
#plot(res.raster)
meanFile<- paste0(nm1, "pred_",vart,"_mean_RK_",depth, ".tif")
bootMap.mean <- bootMap.mean + res.raster
bootMap.mean.r<- signif(bootMap.mean, digits = 3)
writeRaster(bootMap.mean.r,filename = meanFile, format = "GTiff", overwrite = TRUE)
#Calculate variance
bootMap.var <- calc(s1, var)
#plot(bootMap.var)
#Overall prediction variance (adding avgGMSE)
varFile2<- paste0(nm1, "pred_",vart,"_VAR_",depth, ".tif")
bootMap.varF<- bootMap.var + avgMSE
writeRaster(bootMap.varF ,filename = varFile2, format = "GTiff", overwrite = TRUE)
#plot(bootMap.varF)
#Standard deviation
sdFile<- paste0(nm1, "pred_",vart,"_STDEV_",depth, ".tif")
bootMap.sd<- sqrt(bootMap.varF)
writeRaster(bootMap.sd ,filename = sdFile, format = "GTiff", overwrite = TRUE)
#standard error
seFile<- paste0(nm1, "pred_",vart,"_STERR_",depth, ".tif")
bootMap.se<- bootMap.sd * qnorm(0.95)
writeRaster(bootMap.se ,filename = seFile, format = "GTiff", overwrite = TRUE)
#upper prediction limit
uplFile<- paste0(nm1, "pred_",vart,"_UPL_",depth, ".tif")
bootMap.upl<- bootMap.mean + bootMap.se
bootMap.upl.r<- signif(bootMap.upl, digits = 3)
writeRaster(bootMap.upl.r ,filename = uplFile, format = "GTiff", overwrite = TRUE)
#plot(bootMap.upl)
#lower prediction limit
lplFile<- paste0(nm1, "pred_",vart,"_LPL_",depth, ".tif")
bootMap.lpl<- bootMap.mean - bootMap.se
bootMap.lpl.r<- signif(bootMap.lpl, digits = 3)
writeRaster(bootMap.lpl.r ,filename = lplFile, format = "GTiff", overwrite = TRUE)
#plot(bootMap.lpl)
# slurm file output
itOuts<- c(i_cnt,as.character(Sys.time()))
nmz<- paste0(root.slurm, vart, "/",depth, "/",batch, "/slurmckeck_", i_cnt, "_",sfol, ".txt")
write.table(itOuts, file = nmz, row.names = F, col.names = F, sep=",")
# unlink iteration files
unlink(files)
##END
|
d3655f59f1b9e9334a5b4f2a6e6329b243eeb2c6 | d0cc0897a6a34a064588001b1aae74839c85ba23 | /man/GetCNVClass.Rd | f869dfc9e69f9fc5c7d9537df3e02941a1dffab5 | [] | no_license | rxseadew/iPsychCNV | 5d32ab6587f69d18558da9a7c70e764f2019ec6d | 1c8fa20a6440fa686917ddabbac57d13007fa44e | refs/heads/master | 2022-04-07T07:22:34.497169 | 2017-06-27T11:59:19 | 2017-06-27T11:59:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 588 | rd | GetCNVClass.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GetCNVClass.R
\name{GetCNVClass}
\alias{GetCNVClass}
\title{GetCNVClass}
\source{
\url{http://biopsych.dk/iPsychCNV}
}
\usage{
GetCNVClass(Variable)
}
\arguments{
\item{Variable:}{Numeric, Unknown.}
}
\value{
return data in data frame.
}
\description{
GetCNVClass: Read data.
}
\details{
Specifically designed to handle noisy data from amplified DNA on phenylketonuria (PKU) cards. The function is a pipeline using many subfunctions.
}
\examples{
Unknown.
}
\author{
Marcelo Bertalan, Louise K. Hoeffding.
}
|
0f56be58015ae78e7d2f9cc7426944ceba68bcfe | 71101f28de94b8c189ce9b7f7a14cb236dfcfd6e | /man/getProtAtomsNearWater.Rd | 002c55e2ef307701b02426ab7f69cf09e387e5e3 | [] | no_license | Pawansit/vanddraabe | e5434b72fcf771cd28b9dfa6c88a328b140a40ef | 5eccfec7ed511e4eb2e38a21d2218106d7fb2c42 | refs/heads/master | 2023-05-12T18:53:44.718393 | 2021-06-10T20:58:26 | 2021-06-10T20:58:26 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,427 | rd | getProtAtomsNearWater.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HydrophilicityEvaluation.R
\name{getProtAtomsNearWater}
\alias{getProtAtomsNearWater}
\title{Number of Solvent Accessible/Exposed Protein Atoms Near a Water}
\usage{
getProtAtomsNearWater(h2o.oi, h2o.idc, atoms.oi, h2o.prot.dists,
h2o.prot.dists.tf)
}
\arguments{
\item{h2o.oi}{The index of the water of interest}
\item{h2o.idc}{The indices of the waters within the protein structure}
\item{atoms.oi}{The protein \code{data.frame} with the \code{SASA} and \code{SASA lost}
values for each atom within the protein.}
\item{h2o.prot.dists}{Distance \code{matrix} for all water-protein through space
distances}
\item{h2o.prot.dists.tf}{The \code{TRUE}/\code{FALSE} matrix indicating if the protein-
water distances are less than or equal to the user defined cutoff value
denoted by the \code{h2o.prot.dist.max} parameter for
\code{\link[=HydrophilicityEvaluation]{HydrophilicityEvaluation()}}. From \code{\link[=HydrophilicityEvaluation]{HydrophilicityEvaluation()}}: the
maximum distance between the water oxygen atoms and the protein for
consideration in the determination for hydrophilicity values; default: 6.0}
}
\value{
This function returns a \code{data.frame} with:
\itemize{
\item \strong{nearby.prot.atoms}: protein atoms within the user specified distance
of a water's oxygen atom
\item \strong{distances}: The distance -- in Angstroms -- from the water to the
closest solvent accessible protein atom so long as the distance is
equal to or less than the user provided value; see \code{h2o.prot.dists.tf}
above
\item \strong{dist.is.min}: ; see \code{h2o.prot.dists.tf}
above
\item \strong{SASA.and.minDist}: \code{TRUE}/\code{FALSE} indicating if the protein atom is
\emph{\strong{BOTH}} solvent accessible and at least the user defined number
of Angstroms from a water's oxygen atom; see \code{h2o.prot.dists.tf}
above
\item \strong{h2o.atom.ids}: Unique water atom ID
\item \strong{h2o.x}: Atom coordinate \code{X} for the water's oxygen atom
\item \strong{h2o.y}: Atom coordinate \code{Y} for the water's oxygen atom
\item \strong{h2o.z}: Atom coordinate \code{Z} for the water's oxygen atom
}
These values are returned in \code{df.nearby.prot.atoms} of the results of
\code{\link[=HydrophilicityEvaluation]{HydrophilicityEvaluation()}}
}
\description{
Calculate the number of solvent exposed protein atoms near a
water.
}
\details{
This function is called within \code{\link[=HydrophilicityEvaluation]{HydrophilicityEvaluation()}} to
determine protein atoms near each water oxygen.
This function is designed to work with the \code{\link[base:lapply]{base::lapply()}} function and
thus each \code{h2o.oi} is independently evaluated
}
\examples{
\dontrun{
getProtAtomsNearWater(h2o.oi = PDB.1hai.h2o.oi,
h2o.idc = PDB.1hai.clean.h2o.idc,
atoms.oi = PDB.1hai.aoi.clean.SASA,
h2o.prot.dists = PDB.1hai.h2o.prot.dists,
h2o.prot.dists.tf = PDB.1hai.h2o.prot.dists.tf)
}
}
\seealso{
Other "Hydrophilicity Evaluation" "Bound Water Environment": \code{\link{HydrophilicityEvaluation}},
\code{\link{calcAtomClassHydrophilicity}},
\code{\link{calcAtomHydrationEstimate}},
\code{\link{getResidueData}}
}
\author{
Emilio Xavier Esposito \email{emilio@exeResearch.com}
}
\concept{"Hydrophilicity Evaluation" "Bound Water Environment"}
|
1cd5455b61e1cb6633b510bd4502b409052c6618 | 5cb4a8af988878b929e9a34330d32150ec82eb0a | /server.R | 73928f5a1566979fc8eb6f4a26c2de811c9e9d2f | [] | no_license | gosaldar/data-product-project | 607aebd27018180f25baa1e886985d6c63a50c7a | 98a0a16484e8425b548e80660fa2a92e89f2f5d6 | refs/heads/master | 2021-01-10T10:57:00.363384 | 2015-09-27T10:53:18 | 2015-09-27T10:53:18 | 43,241,072 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,149 | r | server.R | library(shiny)
## Libraries needed for plotting, data processing and maps
library(ggplot2)
library(ggvis)
library(rCharts)
library(data.table)
library(dplyr)
library(reshape2)
library(markdown)
library(maps)
library(mapproj)
## Create helper functions
# Round a numeric vector to two decimal places (used for the damage columns).
round2 <- function(x) {
    round(x, digits = 2)
}
# Replace missing values with zero, element-wise. Implemented with ifelse()
# so the coercion behaviour matches the original for any vector type.
replace_na <- function(x) {
    missing_mask <- is.na(x)
    ifelse(missing_mask, 0, x)
}
# Total yearly impact for the selected event types within [year_min, year_max]:
# sums COUNT through CROPDMG per year, rounds the damage columns to 2 d.p.
# and renames columns to display labels.
# NOTE(review): summarise_each()/mutate_each() are deprecated in modern dplyr
# (superseded by across()) -- they work on the dplyr version this app was
# written against.
aggregate_by_year <- function(dt, year_min, year_max, evtypes) {
    dt %>% filter(YEAR >= year_min, YEAR <= year_max, EVTYPE %in% evtypes) %>%
        group_by(YEAR) %>% summarise_each(funs(sum), COUNT:CROPDMG) %>%
        mutate_each(funs(round2), PROPDMG, CROPDMG) %>%
        rename(
            Year = YEAR, Count = COUNT,
            Fatalities = FATALITIES, Injuries = INJURIES,
            Property = PROPDMG, Crops = CROPDMG
        )
}
# Per-state totals for the selected event types within [year_min, year_max].
# A left join against the full state list keeps states with no matching
# events; their resulting NA totals are zeroed with replace_na() and the
# damage columns rounded to 2 d.p.
aggregate_by_state <- function(dt, year_min, year_max, evtypes) {
    states <- data.table(STATE=sort(unique(dt$STATE)))
    aggregated <- dt %>% filter(YEAR >= year_min, YEAR <= year_max, EVTYPE %in% evtypes) %>%
        group_by(STATE) %>%
        summarise_each(funs(sum), COUNT:CROPDMG)
    left_join(states, aggregated, by = "STATE") %>%
        mutate_each(funs(replace_na), FATALITIES:CROPDMG) %>%
        mutate_each(funs(round2), PROPDMG, CROPDMG)
}
# Append a `Damages` column to `dt`, chosen by `category`:
#   'both'        -> property + crop damage
#   'property'    -> property damage only
#   anything else -> crop damage only
compute_damages <- function(dt, category) {
    if (category == 'both') {
        dt %>% mutate(Damages = PROPDMG + CROPDMG)
    } else if (category == 'property') {
        dt %>% mutate(Damages = PROPDMG)
    } else {
        dt %>% mutate(Damages = CROPDMG)
    }
}
# Append an `Affected` column to `dt`, chosen by `category`:
#   'both'        -> injuries + fatalities
#   'injuries'    -> injuries only
#   anything else -> fatalities only
compute_affected <- function(dt, category) {
    if (category == 'both') {
        dt %>% mutate(Affected = INJURIES + FATALITIES)
    } else if (category == 'injuries') {
        dt %>% mutate(Affected = INJURIES)
    } else {
        dt %>% mutate(Affected = FATALITIES)
    }
}
# Choropleth of per-state impact.
#
# dt         -- output of aggregate_by_state() (STATE = lowercase state name)
# states_map -- map_data("state") polygons
# fill       -- name (string) of the column to colour states by
# title      -- sprintf template whose placeholders receive year_min/year_max
# low/high   -- gradient endpoint colours
plot_impact_by_state <- function (dt, states_map, year_min, year_max, fill, title, low = "#fef4ea", high = "#d84701") {
    title <- sprintf(title, year_min, year_max)
    p <- ggplot(dt, aes(map_id = STATE))
    # fill arrives as a string, hence aes_string() rather than aes()
    p <- p + geom_map(aes_string(fill = fill), map = states_map, colour='black')
    p <- p + expand_limits(x = states_map$long, y = states_map$lat)
    p <- p + coord_map() + theme_bw()
    p <- p + labs(x = "Longitude", y = "Latitude", title = title)
    p + scale_fill_gradient(low = low, high = high)
}
# Stacked-area rCharts (NVD3) plot of yearly impact.
#
# dt   -- wide data frame: a Year column plus one column per series
# dom  -- id of the target DOM element
# desc -- when TRUE, stack the melted series in reverse order
plot_impact_by_year <- function(dt, dom, yAxisLabel, desc = FALSE) {
    # melt() to long form; arrange() fixes the stacking order of the series
    impactPlot <- nPlot(
        value ~ Year, group = "variable",
        data = melt(dt, id="Year") %>% arrange(Year, if (desc) { desc(variable) } else { variable }),
        type = "stackedAreaChart", dom = dom, width = 650
    )
    impactPlot$chart(margin = list(left = 100))
    impactPlot$xAxis(axisLabel = "Year", width = 70)
    # tickFormat is a JS snippet evaluated client-side (d3 number formatting)
    impactPlot$yAxis(tickFormat = "#! function(d) {return d3.format(',0f')(d)} !#", axisLabel = yAxisLabel, width = 80)
    impactPlot
}
# Simple line chart of the number of events per year.
# NOTE(review): nPlot/renderChart appear to come from the rCharts package - confirm.
plot_events_by_year <- function(dt, dom = "eventsByYear", yAxisLabel = "Count") {
    eventsByYear <- nPlot(
        Count ~ Year,
        data = dt,
        type = "lineChart", dom = dom, width = 650
    )
    eventsByYear$chart(margin = list(left = 100))
    eventsByYear$xAxis(axisLabel = "Year", width = 70)
    # d3 number format for the y-axis tick labels.
    eventsByYear$yAxis(tickFormat = "#! function(d) {return d3.format('n')(d)} !#", axisLabel = yAxisLabel, width = 80)
    eventsByYear
}
# Prepare the per-state aggregate for the data table / CSV download:
# human-readable column names, and state names converted to two-letter
# USPS abbreviations (STATE is stored lower-case, hence tolower(state.name)).
downloads <- function(dt) {
    dt %>% rename(
        State = STATE, Count = COUNT,
        Injuries = INJURIES, Fatalities = FATALITIES,
        Property.damage = PROPDMG, Crops.damage = CROPDMG
    ) %>% mutate(State=state.abb[match(State, tolower(state.name))])
}
## Load event and map data
# Events table (one row per storm event); EVTYPE is lower-cased so the UI
# checkbox values match regardless of source-file capitalisation.
dt <- fread('data/events.csv') %>% mutate(EVTYPE = tolower(EVTYPE))
evtypes <- sort(unique(dt$EVTYPE))  # choices for the event-type checkboxes
states_map <- map_data("state")  # polygon outlines for the US choropleths
## Shiny server
shinyServer(function(input, output, session) {
    # Per-session reactive state: the currently selected event types.
    values <- reactiveValues()
    values$evtypes <- evtypes
    # Event-type checkbox group, rendered server-side so the
    # select-all/clear-all buttons below can reset the selection.
    output$evtypeControls <- renderUI({
        checkboxGroupInput('evtypes', 'Event types', evtypes, selected=values$evtypes)
    })
    # "Clear all" button: deselect every event type (the == 0 guard skips
    # the initial render, when the button has not been clicked yet).
    observe({
        if(input$clear_all == 0) return()
        values$evtypes <- c()
    })
    # "Select all" button: reselect every event type.
    observe({
        if(input$select_all == 0) return()
        values$evtypes <- evtypes
    })
    # Datasets preparation (recomputed when the year range or types change)
    dt.agg <- reactive({
        aggregate_by_state(dt, input$range[1], input$range[2], input$evtypes)
    })
    dt.agg.year <- reactive({
        aggregate_by_year(dt, input$range[1], input$range[2], input$evtypes)
    })
    # Display/download-ready version of the per-state aggregate.
    dataTable <- reactive({
        downloads(dt.agg())
    })
    # Events by year (line chart)
    output$eventsByYear <- renderChart({
        plot_events_by_year(dt.agg.year())
    })
    # Population impact by year (stacked area: injuries + fatalities)
    output$populationImpact <- renderChart({
        plot_impact_by_year(
            dt = dt.agg.year() %>% select(Year, Injuries, Fatalities),
            dom = "populationImpact",
            yAxisLabel = "Affected",
            desc = TRUE
        )
    })
    # Population impact by state (choropleth)
    output$populationImpactByState <- renderPlot({
        print(plot_impact_by_state (
            dt = compute_affected(dt.agg(), input$populationCategory),
            states_map = states_map,
            year_min = input$range[1],
            year_max = input$range[2],
            title = "Population Impact %d - %d (People Affected)",
            fill = "Affected"
        ))
    })
    # Economic impact by year (stacked area: crops + property damage)
    output$economicImpact <- renderChart({
        plot_impact_by_year(
            dt = dt.agg.year() %>% select(Year, Crops, Property),
            dom = "economicImpact",
            yAxisLabel = "Total Damage (Million US$)"
        )
    })
    # Economic impact by state (choropleth)
    output$economicImpactByState <- renderPlot({
        print(plot_impact_by_state(
            dt = compute_damages(dt.agg(), input$economicCategory),
            states_map = states_map,
            year_min = input$range[1],
            year_max = input$range[2],
            title = "Economic Impact %d - %d (Million US$)",
            fill = "Damages"
        ))
    })
    # Render data table and create download handler
    output$table <- renderDataTable(
        {dataTable()}, options = list(searching = TRUE, pageLength = 10))
    output$downloadData <- downloadHandler(
        filename = 'stormdata.csv',
        content = function(file) {
            write.csv(dataTable(), file, row.names=FALSE)
        }
    )
})
|
4a17d1a5ee4e819bc1d8a907ea6ce755411b7ddb | 06b4a0acf81dca602eeee3fbc28429a135b4f868 | /man/plotReward.Rd | 0c52c0a6886fc39da81ff79daee7ba0124e9e71a | [] | no_license | rferrali/banditr | 54767888a89de1b6246b48a92d0120d70bf2afc2 | ccdc05a563a02f8fdf6a4838aa60ffb9456a1281 | refs/heads/master | 2021-01-23T06:14:33.679523 | 2017-07-26T13:06:02 | 2017-07-26T13:06:02 | 86,344,791 | 4 | 1 | null | 2018-02-20T03:13:14 | 2017-03-27T14:22:31 | R | UTF-8 | R | false | true | 785 | rd | plotReward.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plotReward}
\alias{plotReward}
\title{Plot the reward of a bandit object}
\usage{
plotReward(object, data = FALSE, cumulative = TRUE, expectedReward = TRUE,
maxReward, ...)
}
\arguments{
\item{object}{an object inheriting from class \code{"bandit"}}
\item{data}{logical: should the raw plot data be returned?}
\item{cumulative}{logical: should the cumulative reward be plotted?}
\item{expectedReward}{logical: should the expected reward be plotted?}
\item{maxReward}{logical: should the maximum reward be plotted? (only supported for
binomial models).}
\item{...}{additional parameters passed to \code{\link[graphics]{plot.default}}}
}
\description{
Plot the reward of a bandit object
}
|
a8803a01ecf49cb01228a0ac0974f0e4543dd2b7 | aa511de45582edbd995b479ca88e5352444dca3d | /src/chapt2/graphics.R | e628be867faf00533ceff02de0908fd8ced1006d | [] | no_license | nickbuker/ISL_labs | 8f81116c1616f60cf754839ab717b0642c5c27b5 | fd057bcc731699846f50a9cc3a24479716aacf55 | refs/heads/master | 2020-05-21T10:42:03.872220 | 2017-08-01T23:46:35 | 2017-08-01T23:46:35 | 84,619,389 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 932 | r | graphics.R | # plot() used to make scatter plots
x=rnorm(100)
y=rnorm(100)
plot(x, y, xlab='this is the x-axis',
ylab='this is the y-axix', main='Plot of x vs y')
# Save image as png, pdf etc, dev.off() indicates done plotting
png('../images/chapter2.1.png')
plot(x, y, xlab='this is the x-axis', col='green',
ylab='this is the y-axix', main='Plot of x vs y')
dev.off()
# seq() creates vector and n1:n2 is shorthand
x = seq(1, 10)
x = 1:10
y = seq(-pi, pi, length=50)
# contour() and image() used to create 3d plots (the latter is colored)
x = y
f = outer(x, y, function(x, y)cos(y)/(1+x^2))
contour(x, y, f)
# add layers the plots
contour(x, y, f, nlevels=45,add=TRUE)
fa = (f-t(f))/2
contour(x, y, fa, nlevels=15)
image(x, y, fa)
persp(x, y, fa)
persp(x, y, fa, theta=30)
png('../images/chapter2.2.png')
persp(x, y, fa, theta=30, phi=20)
dev.off()
persp(x, y, fa, theta=30, phi=70, col='blue')
persp(x, y, fa, theta=30, phi=40)
|
29764920eeb3ec0c027ce59efe7d19ec1f9928f4 | c2f658101b90bfdc466003f025113018f5d6344f | /install_some_pkgs_2.R | 79647e84d0650e213a7d1fd33848693483f71c79 | [] | no_license | miguel-conde/ds_pantry | 56902ac2ded9df6c69217f2d234ed8edf5fa008f | 6a9eb453cf0691f03daf4f7dcce2867a9388c222 | refs/heads/master | 2023-08-31T05:16:50.702374 | 2023-08-22T16:05:52 | 2023-08-22T16:05:52 | 244,453,925 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 685 | r | install_some_pkgs_2.R |
# https://stackoverflow.com/questions/66450454/how-to-update-all-packages-for-a-new-r-version-quickly-and-easily
mypks <- pacman::p_lib()
saveRDS(mypks, "~/tmp/mypks_R_3_6_3.rds")
mypks <- readRDS("~/tmp/mypks_R_3_6_3.rds")
install.packages(mypks)
# https://discourse.mc-stan.org/t/new-error-cleanup-makevar-old-argument-rmu-is-missing-with-no-default/18137/63
remove.packages(c("StanHeaders", "rstan"))
install.packages("StanHeaders", repos = c("https://mc-stan.org/r-packages/", getOption("repos")))
install.packages("rstan", repos = c("https://mc-stan.org/r-packages/", getOption("repos")))
devtools::install_version("withr", version="2.2.0", repo = "https://cran.rediris.es/")
|
d0103f15fd186d2afaf6806e7a10e831364d76a3 | 043a1e148a3c928f2dd84fd02370ab5a5a5d3406 | /server.R | 17fce5637e91e976a5da5a5e3797d2f9658145c7 | [] | no_license | ghettocounselor/googleBubbleChart | 8bd6e74657d2e09692bd82e995a6ef543217dfa4 | 4028d76520f3fb6377a267ffd627f93bc0a12f3c | refs/heads/master | 2020-03-23T21:11:42.393111 | 2019-03-25T02:25:19 | 2019-03-25T02:25:19 | 142,088,484 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,103 | r | server.R | # setwd("~/Data/shinny/gitHub_GoogleBubbleChart")
library(dplyr)
shinyServer(function(input, output, session) {
# Provide explicit colors for regions, so they don't get recoded when the
# different series happen to be ordered differently from year to year.
# http://andrewgelman.com/2014/09/11/mysterious-shiny-things/
defaultColors <- c("#3366cc", "#dc3912", "#ff9900", "#109618", "#990099", "#0099c6", "#dd4477")
series <- structure(
lapply(defaultColors, function(color) { list(color=color) }),
names = levels(data$Region)
# I'd really like to work out how to make this an input into the display, as in choose a region and have the graph be impacted.
)
# region selector
output$choose_region <- renderUI(
selectInput("data", "Region", choices = data.Regions, selected = NULL, multiple = TRUE)
)
# like this idea but not sure how to use it - basically if nothing selected, as is the begining state then use all fields for chart
if (is.null("choose_region"))
return(data.Regions)
yearData <- reactive({
# Filter to the desired year, and put the columns
# in the order that Google's Bubble Chart expects
# them (name, x, y, color, size). Also sort by region
# so that Google Charts orders and colors the regions
# consistently.
# HOVERCARD is defined by the 1st 4 options in the list
df <- data %>%
filter(Year == input$year) %>%
select(Country, Percent.ExpChange, Unit.ExpChange,
Region, Population, Health.Expenditure, Life.Expectancy ) %>%
arrange(Region)
})
output$chart <- reactive({
# Return the data and options
list(
data = googleDataTable(yearData()),
options = list(
title = sprintf(
"TITLE OF PLAYGROUND GRAPH",
input$year),
series = series
# stuff from UI
# fluidRow(
# shiny::column(4, offset = 4,
# sliderInput("year", "Year",
# min = min(data$Year), max = max(data$Year),
# value = min(data$Year), animate = TRUE)
)
)
})
}) |
90527446b65d4bf8859ddb285d902cd517e3653d | ef424746a3ea4ed6e167f03d359b39da48a0fc21 | /man/oneStep_MicroEL4PSite.Rd | 2b27bcd4d0fc9a97464e3ac6ceea018bf6590db7 | [] | no_license | smitdave/MASH | 397a1f501c664089ea297b8841f2cea1611797e4 | b5787a1fe963b7c2005de23a3e52ef981485f84c | refs/heads/master | 2021-01-18T18:08:25.424086 | 2017-08-17T00:18:52 | 2017-08-17T00:18:52 | 86,845,212 | 0 | 3 | null | 2017-08-17T00:18:52 | 2017-03-31T17:42:46 | R | UTF-8 | R | false | true | 693 | rd | oneStep_MicroEL4PSite.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AQUA-Micro-EL4P-Methods.R
\name{oneStep_MicroEL4PSite}
\alias{oneStep_MicroEL4PSite}
\title{MICRO \code{\link{AquaticSite}} Method: EL4P One Day Dynamics}
\usage{
oneStep_MicroEL4PSite(tNow)
}
\arguments{
\item{tNow}{current global time of \code{\link{MicroTile}}}
}
\description{
Collect eggs from the \code{\link{EggQ}} in this site and move \code{\link{EL4P}}, and run the one step difference equation dynamics for aquatic development,
finally, move emerging adults from the pool into the \code{\link{ImagoQ}} for all genotypes.
\itemize{
\item This method is bound to \code{AquaticSite$oneStep_EL4PSite()}.
}
}
|
362f55632ed8d5db0fe2a06f3c02038e9950e403 | 8863faa181d81a42965abaebe5cc1f03302935e5 | /ui.R | 8bb6224d0b492326f608607260dffcb7201c1613 | [] | no_license | gagejustins/hipsterLocator | 0b43c97ccc86641da0854ab4346f29b13defc353 | 3bc6bd704cc118cf87b5b529c069cd17eda38c82 | refs/heads/master | 2021-05-05T19:01:33.630140 | 2018-01-17T00:00:18 | 2018-01-17T00:00:18 | 117,757,816 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 610 | r | ui.R | # Use a fluid Bootstrap layout
fluidPage(
# Give the page a title
titlePanel("Elements of a Hipster State"),
# Generate a row with a sidebar
sidebarLayout(
# Define the sidebar with one input
sidebarPanel(
selectInput("data", "Dataset:",
choices=c("Population", "Electric Cars", "Farmers Markets")),
hr(),
helpText("Data from NYC Open Data Project. Thanks to Julia Silge for the general idea and choroplethR syntax.")
),
# Create a spot for the choropleth map
mainPanel(
plotOutput("main_plot")
)
)
) |
64e1e7f555818e08f673375f75f58a51b0f209a0 | cfb3751c5614a1b512265fb80e49e18f614bb18e | /Multiple_factor_EdgeR_analysis.R | c293a9c087d3d6a7b17ae7cca995b603f293900a | [] | no_license | jamestaylorjr/Trichoderma_Transcriptome_Metabolome | bfdc764655b612b343470d2f77a06a93515b2da7 | 247fe5c2dda904744ae94f4c9ae37261736be634 | refs/heads/master | 2020-03-23T11:13:16.648885 | 2018-11-01T14:36:43 | 2018-11-01T14:36:43 | 141,491,715 | 0 | 0 | null | 2018-11-01T14:36:44 | 2018-07-18T21:23:09 | Python | UTF-8 | R | false | false | 3,751 | r | Multiple_factor_EdgeR_analysis.R | #Multiple factor EdgeR analysis
#James T. Taylor, Jr 2018
setwd("C:/Users/jimta/Desktop/")
library(edgeR)
library(readxl)
path_to_phenotypes = "C:/Users/jimta/Desktop/jim_ballgown/pheno_data.csv"
counts <- read.csv(file="transcript_count_matrix.csv", header=TRUE, sep=",") #Then create the count matrix using read.csv()
cmat <- counts[ , -c(1,ncol(counts)) ] #format the data
rownames(cmat) <- counts[ , 1 ] # add gene names to new object
libsize <- colSums(cmat) # calculate library size in each sample
libmr <- libsize/1e06 #library size in millions of reads
cmat <- cmat[rowSums(cmat > 10) >= 3,] #keep only those rows where there are at least 10 counts in at least 3 samples
sample.description <- data.frame(sample=colnames(cmat))
genotype=regmatches(colnames(cmat),regexpr("fungus|fungus_and_maize",colnames(cmat))) #define genotypes for each column
treatment=regmatches(colnames(cmat),regexpr("6|12|15|24|36",colnames(cmat))) #define treatment variable for each column
#time to build the object for edgeR
batches <- genotype
conditions <- treatment
#The pipeline could look like this:
dge <- DGEList(cmat, group = batches ) # Create object
dge <- calcNormFactors(dge, method='TMM') # Normalize library sizes using TMM
dge <- dge[rowSums(1e+06 * dge$counts/expandAsMatrix(dge$samples$lib.size, dim(dge)) > 1) >= 6, ] #keep only those genes that have at least 1 read per million in at least 6 samples.
targets <- read.csv(file = path_to_phenotypes,sep = ',')
Group <- factor(paste(targets$treatment,targets$time,sep = '.'))
targets <- cbind(targets,Group=Group)
design <- model.matrix(~0+Group)
colnames(design) <- levels(Group)
dge <- estimateGLMCommonDisp(dge, design) # Estimate common dispersion
dge <- estimateGLMTrendedDisp(dge,design) #Estimate Trended dispersion
dge <- estimateGLMTagwiseDisp(dge, design)
my.contrasts <- makeContrasts(
fungus6hvs.maize6h = negative.6-positive.6,
fungus12hvs.maize12h = negative.12-positive.12,
fungus15hvs.maize15h = negative.15-positive.15,
fungus24hvs.maize24h = negative.24-positive.24,
fungus36hvs.maize36h = negative.36-positive.36,
maize12hvs.maize6h = positive.12-positive.6,
maize15hvs.maize12h = positive.15-positive.12,
maize24hvs.maize15h = positive.24-positive.15,
maize36hvs.maize24h = positive.36-positive.24,
fungus12hvs.fungus6h = negative.12-negative.6,
fungus15hvs.fungus12h = negative.15-negative.12,
fungus24hvs.fungus15h = negative.24-negative.15,
fungus36hvs.fungus24h = negative.36-negative.24,
levels=design
)
fit <- glmQLFit(dge, design)
lrt <- glmLRT(fit,contrast=my.contrasts)
out <- topTags(lrt,n=Inf)
mart_export <-read_excel("C:/Users/jimta/Downloads/mart_export.xls")
out$table$JGI <- mart_export$GeneID[match(row.names(out$table),mart_export$transcriptNames)]
#import signalp data from jgi
signalp <- read.delim("C:/Users/jimta/Downloads/Tvirens_v2.signalp_FrozenGeneCatalog_20100318.tab")
#fix gene numbers so they match current format
signalp$proteinid <- paste("TRIVIDRAFT_",signalp$proteinid, sep="")
#add signalp data to results file
out$table$signalp <- signalp$hmm_signalpep_probability[match(out$table$JGI,signalp$proteinid)]
kogg <- read.delim("C:/Users/jimta/Downloads/Tvirens_v2.koginfo_FrozenGeneCatalog_20100318.tab")
goinfo <- read.delim("C:/Users/jimta/Downloads/Tvirens_v2.goinfo_FrozenGeneCatalog_20100318.tab")
kogg$proteinId <- paste("TRIVIDRAFT_",kogg$proteinId,sep = "")
out$table$kogg <- kogg$kogdefline[match(out$table$JGI,kogg$proteinId)]
out$table$koggcat <- kogg$kogClass[match(out$table$JGI,kogg$proteinId)]
goinfo$proteinId <- paste("TRIVIDRAFT_",goinfo$proteinId,sep = "")
out$table$goinfo <- goinfo$goName[match(out$table$JGI,goinfo$proteinId)]
write.csv(out,file="edgeR_with_kog_unfiltered.csv")
|
bd5114b9212d35a5dd9a291ceb47b7aa98d457d1 | 5adc0dfe6cae8f90cc20cd149bf03852b0396e34 | /R/fertilizer_measurements.R | 56877f8cf1f4bdd9a65df4f33413fd01563b1ca7 | [
"MIT"
] | permissive | AGROFIMS/ragrofims | 43664011980affa495c949586bde192d08d4b48e | bc560a62c19c30bbc75615a19a4b9f8a235f7ddf | refs/heads/master | 2023-02-21T08:49:34.989861 | 2021-01-20T16:22:48 | 2021-01-20T16:22:48 | 277,626,238 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,791 | r | fertilizer_measurements.R | #' Get fertilizer product data from AgroFIMS platform
#'
#' @param expsiteId experiment-site Id or expsiteId
#' @param format type of data structure format
#' @param serverURL database server URL
#' @param version api version
#' @examples \dontrun{
#' library(ragapi)
#' library(ragrofims)
#' out<- get_agrofims_fertproducts(25, "data.frame")
#' }
#' @importFrom ragapi ag_get_cropmea_expsiteId ag_get_phenomea_expsiteId ag_get_soil_expsiteId ag_get_soil_expsiteId
#' @export
#'
get_agrofims_fertproducts <- function(expsiteId=NULL,
format = "data.frame",
serverURL = "https://research.cip.cgiar.org/agrofims/api/dev",
version = "/0291/r"
)
{
fertproduct <- ag_get_fertmea_expsiteId(
expsiteDbId = expsiteId,
format = format,
serverURL = serverURL,
version = version)
if(nrow(fertproduct)==0){
out <- data.frame()
return(out)
}
fertproduct <- fertproduct %>% dplyr::filter(typefertilizer=="Product")
if(nrow(fertproduct)>0){
#Calculation will ommit mising values.
crop <- unique(fertproduct$cropcommonname)
out <- vector("list",length = length(crop))
vars <- vector("list",length = length(crop))
#Separate and mutate elementlist columns in multiple nutrient columns
fertproduct <- ragrofims::mutate_crop_names(fertproduct)
fertproduct <- mutate_fertprod_nutelement(fertproduct)
#Transform to numeric
fertproduct[,"unitvalue"] <- as.numeric(fertproduct[,"unitvalue"])
#fertilizer <- calc_nutamount(fertilizer)
out <- fertproduct %>% dplyr::filter(unitvalue!="")
} else {
out <- data.frame()
}
return(out)
}
#' Mutate nutrient element list
#' @description AgroFIMS retrieves all nutrient elements concatenated by pipes
#' \code{|} in a single column (\code{elementlist}). This function splits that
#' column into one column per nutrient element and coerces each to numeric.
#' @param fertilizer data.frame table of fertilizer
#' @return the input data.frame with one numeric column per nutrient element
#' @importFrom purrr map_at
#' @importFrom tidyr separate
#' @export
#'
mutate_fertprod_nutelement <- function(fertilizer){
  # Single source of truth for the element columns; the order must match the
  # pipe-separated order used by AgroFIMS in "elementlist".
  nutrient_cols <- c("N", "P", "K", "Ca", "Mg", "S", "Mb", "Zn", "B",
                     "Cu", "Fe", "Mn", "Ni", "Cl")
  fertilizer <- fertilizer %>%
    tidyr::separate(col = "elementlist", into = nutrient_cols, sep = "\\|")
  # map_at() returns a plain list, so rebuild the data.frame afterwards.
  fertilizer %>%
    purrr::map_at(.at = nutrient_cols, .f = as.numeric) %>%
    as.data.frame(stringsAsFactors = FALSE)
}
#' Calculation of nutrient amounts based on fertilizer products
#' @param fertilizer data.frame fertilizer table which includes fertilizer products and nutrient elements
#' @return a data.frame with the metadata columns plus per-element nutrient
#'   amounts (\code{unitvalue * element\%/100}); empty data.frame if no rows
#' @importFrom dplyr mutate
#' @export
#'
calc_nutamount <- function(fertilizer){
  if (nrow(fertilizer) > 0) {
    meta_attributes <- c("indexorder", "productvalue", "unit")
    nut_names <- c("N", "P", "K", "Ca", "Mg", "S", "Mb", "Zn", "B",
                   "Cu", "Fe", "Mn", "Ni", "Cl")
    # Convert each element's percentage into an applied nutrient amount.
    # NOTE(review): the original code never scaled Cl (it is returned as the
    # raw percentage); that behaviour is preserved here - confirm intent.
    scaled <- setdiff(nut_names, "Cl")
    fertilizer <- fertilizer %>%
      dplyr::mutate(dplyr::across(dplyr::all_of(scaled),
                                  ~ (unitvalue * .x) / 100))
    fertilizer <- fertilizer[, c(meta_attributes, nut_names)]
  } else {
    fertilizer <- data.frame()
  }
  fertilizer
}
# test(names(calc_nutamount(out)), "calcnutamount")
# test(names(calc_nutamount(out)), c(12,23,NA,NA))
# test(names(calc_nutamount(out)), c(12,23,12,12))
#' Get fertilizer product table by crop
#' @param fertproducts data.frame fertilizer product table
#' @param crop character crop name
#' @export
#'
get_fertproducts_crop <- function(fertproducts, crop){
  # An empty table can never contain the crop.
  if (nrow(fertproducts) == 0) {
    return(data.frame())
  }
  matched <- fertproducts %>% filter(cropcommonname == crop)
  # The requested crop may simply not be present in the table.
  if (nrow(matched) == 0) {
    return(data.frame())
  }
  matched
}
|
0d1bb0e0f01fec1e38e34e5da4797f60d2cd78b4 | 2cd4292520d1376bbd909c030ae9b4e1ffcf9014 | /data/create_alc.R | 7edb144c2c5aa7ed0885e95ba35dc2e1c8b1ae88 | [] | no_license | Sirke/IODS-project | 29b06e6688d12814f28779ba53d53fe85e878c46 | 94163a4c36242d6cb029f1b8aafb298ffb22adf5 | refs/heads/master | 2020-04-02T16:43:14.904162 | 2018-12-06T19:43:18 | 2018-12-06T19:43:18 | 154,625,518 | 0 | 0 | null | 2018-10-25T07:07:01 | 2018-10-25T07:07:01 | null | UTF-8 | R | false | false | 2,630 | r | create_alc.R | #Sirke Piirainen
#12.11.2018
#exercise 3, creating alc
#data from: https://archive.ics.uci.edu/ml/datasets/Student+Performance
#read in the data
mat<-read.csv("student-mat.csv",header = T, sep = ";")
por<-read.csv("student-por.csv",header = T, sep = ";")
#check structure and dimensions of the data
str(mat)
str(por)
#in Rstudio I can see dimensions of the data from the environment window also
dim(mat)
dim(por)
##############################################################################################
#joining datasets
# access the dplyr library
library(dplyr)
# common columns to use as identifiers
join_by <- c("school","sex","age","address","famsize","Pstatus","Medu","Fedu","Mjob","Fjob","reason","nursery","internet")
# join the two datasets by the selected identifiers
math_por <- inner_join(mat, por, by = join_by,suffix=c(".math",".por"))
#check structure
str(math_por)
#and dimensions
dim(math_por)
#####################################################################################
#combining duplicated answers
# print out the column names of 'math_por'
colnames(math_por)
# create a new data frame with only the joined columns
alc <- select(math_por, one_of(join_by))
# the columns in the datasets which were not used for joining the data
notjoined_columns <- colnames(mat)[!colnames(mat) %in% join_by]
# print out the columns not used for joining
notjoined_columns
# for every column name not used for joining...
for(column_name in notjoined_columns) {
# select two columns from 'math_por' with the same original name
two_columns <- select(math_por, starts_with(column_name))
# select the first column vector of those two columns
first_column <- select(two_columns, 1)[[1]]
# if that first column vector is numeric...
if(is.numeric(first_column)) {
# take a rounded average of each row of the two columns and
# add the resulting vector to the alc data frame
alc[column_name] <- round(rowMeans(two_columns))
} else { # else if it's not numeric...
# add the first column vector to the alc data frame
alc[column_name] <- first_column
}
}
# glimpse at the new combined data
glimpse(alc)
###################################################################################
#creating new variables
# define a new column alc_use by combining weekday and weekend alcohol use
alc <- mutate(alc, alc_use = (Dalc + Walc) / 2)
# define a new logical column 'high_use' which is TRUE if alc_use is greater than 2.
alc <- mutate(alc, high_use = alc_use > 2)
#check that everything is correct
glimpse(alc)
#save the data in csv
write.csv(alc,"alc.csv",row.names = F)
|
df32779feb10d7b2bb903aba60a709c387d51961 | f81ac43a1d02013a9cb9eebc2a7d92da4cae9169 | /tests/testthat/test_with_data.table.R | c56fe2c4372d4500191ce72e9574dbef0eef01bd | [] | no_license | gdemin/expss | 67d7df59bd4dad2287f49403741840598e01f4a6 | 668d7bace676b555cb34d5e0d633fad516c0f19b | refs/heads/master | 2023-08-31T03:27:40.220828 | 2023-07-16T21:41:53 | 2023-07-16T21:41:53 | 31,271,628 | 83 | 15 | null | 2022-11-02T18:53:17 | 2015-02-24T17:16:42 | R | UTF-8 | R | false | false | 379 | r | test_with_data.table.R | context("data.table vec ops")
dt2 = data.table(
zz = 42,
b_3 = NA,
aa = 10 %r% 5,
b_ = 20 %r% 5,
b_1 = 11 %r% 5,
b_2 = 12 %r% 5,
b_4 = 14 %r% 5,
b_5 = 15 %r% 5
)
expect_identical(dt2 %n_i% "b_", dt2[,"b_"])
expect_identical(dt2 %n_d% "b_", dt2[,-"b_"])
expect_identical(dt2 %i% anyNA, dt2[,"b_3"])
expect_identical(dt2 %d% anyNA, dt2[,-"b_3"]) |
208664e9b203e32eb4dbfbe99bf7bbc08171876e | 15596649781b0ab5a15a95e7757fee4294b4ea4f | /homework03/code/problem1.r | 398678b3438fe933e7d9f4caa23e96d4f6ec3916 | [] | no_license | kjoyce/applied_linear_models | 422979ddd9765b1d5a21bfef49b55605c5da8813 | 2f8f7d324e7f6cb1f289e80baae8b6d6a1b490fe | refs/heads/master | 2021-01-22T13:41:45.143842 | 2013-12-10T19:11:16 | 2013-12-10T19:11:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,818 | r | problem1.r | library(faraway)
library(ellipse)
data(prostate)
attach(prostate)
mdl = lm(lpsa~lcavol+lweight+age+lbph+svi+lcp+gleason+pgg45,data=prostate)
# a
print(sprintf("(%.4f,%.4f)",confint(mdl,level=.9)[4,1],confint(mdl,level= .9)[4,2]) )
print(sprintf("(%.4f,%.4f)",confint(mdl,level=.95)[4,1],confint(mdl,level= .95)[4,2]) )
print(sprintf("(%.4f,%.4f)",confint(mdl,level=1-.1/2)[4,1],confint(mdl,level= 1-.1/2)[4,2]) )
print(sprintf("(%.4f,%.4f)",confint(mdl,level=1-.05/2)[4,1],confint(mdl,level=1-.05/2)[4,2]) )
# b
plot(ellipse(mdl,c(4,5)),type="l",lwd=2, # Plots the conf. region
cex.axis=1.5,xlab="Beta3 (Age)",
ylab="Beta4 (lbph)",cex.lab=1.6,mgp=c(2.7,1,0)#,xlim=c(-.005,.01)
)
title("Joint Confidence Region for Age and LBPH")
points(0,0,pch=16,cex=2) # Plot the origin
text(0,0,'(0,0)',cex=1.6,pos=2)
dev.copy(pdf,'conf_region_1b.pdf')
dev.off()
# c
n = length(mdl$residuals)
p = length(mdl$coefficients)
x_0 = c(1, 1.44692 , 3.62301 , 65.00000 , 0.30010 , 0.00000 , -0.79851 , 7.00000 , 15.00000)
xtx = t(model.matrix(mdl))%*%model.matrix(mdl)
(yhat = (mdl$coefficients%*%x_0))
(me = qt(.975,n-p)*sqrt(deviance(mdl)/(n-p)*t(x_0)%*%solve(xtx,x_0)))
cat("(",sprintf("%.3f",yhat + c(-1,1)*me),")")
# d
x_1 = c(1, 1.44692 , 3.62301 , 20 , 0.30010 , 0.00000 , -0.79851 , 7.00000 , 15.00000)
(yhat_1 = (mdl$coefficients%*%x_1))
(me_1 = qt(.975,n-p)*sqrt(deviance(mdl)/(n-p)*c(x_1)%*%solve(xtx,x_1)))
cat("(",sprintf("%.3f",yhat_1 + c(-1,1)*me_1),")\n")
hist(age,cex.axis=1.6,cex.lab=1.5)
dev.copy(pdf,'age_hist.pdf')
dev.off()
# e
nperm = 4999
F1 = summary(mdl)$coefficients[4,3]^2 # Numerator to F test
numerator = sum(replicate(nperm, summary(lm(lpsa~lcavol+lweight+sample(age)+lbph+svi+lcp+gleason+pgg45))$coef[4,3]^2 > F1))
(p = (numerator+1)/(nperm+1))
|
005b535d5cdcba76ed2f45bd616a09cd7e44b6ff | 33e69ff9d17fd65a39e6f388085dca4fe4703126 | /man/transformestimates.exp.Rd | 74c69896670d5ed3f452331432fad0dd0aad9e4b | [] | no_license | ssouyris/spatsurv | 3451d1cbf8a931a9dbcaa06c20d79a118feb6f53 | 97d16c9a8262a3d19e11bd9c1c98553f71bcb8c4 | refs/heads/master | 2020-12-24T11:52:36.642276 | 2014-06-13T10:46:15 | 2014-06-13T10:46:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 451 | rd | transformestimates.exp.Rd | \name{transformestimates.exp}
\alias{transformestimates.exp}
\title{transformestimates.exp function}
\usage{
transformestimates.exp(x)
}
\arguments{
\item{x}{a vector of parameters}
}
\value{
the transformed parameters. For the exponential model this
is just the identity.
}
\description{
A function to transform estimates of the parameters of the
exponential baseline hazard function, so they are
commensurate with R's inbuilt density functions.
}
|
eb579a0c7afb40395c377c662171fbe3764774a2 | 1bfadb58b266929c359a1a4c3afd9f9ce694228d | /man/state.carto.Rd | 5222a621ee07b1da7b877226c695bf30e3530abb | [] | no_license | adeckmyn/maps | 25c3396c94adfb70262449f29f1fd77ff9583f29 | dc1eefb14d155fca33ace2333c490f5fd52eeb34 | refs/heads/master | 2022-11-11T17:56:03.194633 | 2022-11-01T10:28:10 | 2022-11-01T10:28:10 | 40,031,227 | 29 | 10 | null | 2021-09-24T14:20:18 | 2015-08-01T01:27:16 | R | UTF-8 | R | false | false | 1,424 | rd | state.carto.Rd | \name{state.carto}
\docType{data}
\alias{state.carto}
\alias{state.cartoMapEnv}
\alias{state.carto.center}
\title{
United States State Population Cartogram Map
}
\usage{
data(stateMapEnv)
data(state.carto.center)
}
\description{
This database produces a cartogram of the states of the United States
mainland based on CartoDraw, roughly proportional to population (see
references).
\code{state.carto.center} are coordinates of the state centers
for annotation purposes.
}
\format{
The data file is merely a character string which
specifies the name of an environment variable which contains the
base location of the binary files used by the map drawing functions.
This environment variable (\code{R_MAP_DATA_DIR} for the datasets in the
maps package) is set at package load time \emph{if it does not
already exist}. Hence setting the environment variable before loading
the package can override the default location of the binary datasets.
}
\seealso{
\code{\link{map}}.
}
\examples{
map('state.carto', fill = TRUE, col = palette())
}
\references{
Richard A. Becker, and Allan R. Wilks,
"Maps in S",
\emph{AT&T Bell Laboratories Statistics Research Report [93.2], 1993.}
Richard A. Becker, and Allan R. Wilks,
"Constructing a Geographical Database",
\emph{AT&T Bell Laboratories Statistics Research Report [95.2], 1995.}
CartoDraw,
\url{http://www.computer.org/csdl/trans/tg/2004/01/v0095-abs.html}
}
\keyword{datasets}
|
9dc5f0db6d9bfdcc4f072b113c2639de37eaa36a | b8c40ff8d1fc1b0e66aaf4e7a7e70da22b711cef | /man/make_clinical_events_db.Rd | 15acf828ad01bc9df59b54b181299a781f2561b7 | [
"MIT"
] | permissive | rmgpanw/ukbwranglr | 4f25d74583bd260b90431fde32435742458e9591 | a6136c0188eb53066a2ced5da9f110212847c411 | refs/heads/main | 2023-05-23T19:49:36.244874 | 2023-03-29T11:08:10 | 2023-03-29T11:08:10 | 308,650,225 | 9 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,209 | rd | make_clinical_events_db.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sqlite_db.R
\name{make_clinical_events_db}
\alias{make_clinical_events_db}
\title{Create a SQLite database with a \code{clinical_events} table}
\usage{
make_clinical_events_db(
ukb_main_path,
ukb_db_path,
ukb_main_delim = "auto",
gp_clinical_path = NULL,
gp_scripts_path = NULL,
ukb_data_dict = get_ukb_data_dict(),
ukb_codings = get_ukb_codings(),
overwrite = FALSE,
chunk_size = 5e+05
)
}
\arguments{
\item{ukb_main_path}{Path to the main UKB dataset file.}
\item{ukb_db_path}{Path to the SQLite database file. The file name must end
with '.db'. If no file with this name exists then one will be created.}
\item{ukb_main_delim}{Delimiter for \code{ukb_main_path}. Default value is
\code{"auto"}.}
\item{gp_clinical_path}{(Optional) path to the UKB primary care clinical
events file (\code{gp_clinical.txt}).}
\item{gp_scripts_path}{(Optional) path to the UKB primary care prescriptions
file (\code{gp_scripts.txt}).}
\item{ukb_data_dict}{The UKB data dictionary (available online at the UK
Biobank
\href{https://biobank.ndph.ox.ac.uk/showcase/exinfo.cgi?src=AccessingData}{data
showcase}. This should be a data frame where all columns are of type
\code{character}.}
\item{ukb_codings}{The UKB codings file (available online at the UK Biobank
\href{https://biobank.ndph.ox.ac.uk/showcase/exinfo.cgi?src=AccessingData}{data
showcase}. This should be a data frame where all columns are of type
\code{character}.}
\item{overwrite}{If \code{TRUE}, then tables \code{clinical_events} and
\code{gp_clinical_values} will be overwritten if they already exist in the
database. Default value is \code{FALSE}.}
\item{chunk_size}{The number of rows to include in each chunk when processing
primary care datasets.}
}
\value{
Returns \code{ukb_db_path} invisibly.
}
\description{
Adds tables named \code{clinical_events}, and optionally 'gp_clinical_values' and
'gp_scripts_names_and_quantities' to a SQLite database file (the latter 2 are
only added if \code{gp_clinical_path} and/or \code{gp_scripts_path} respectively are
provided). This is a long format table combining all clinical events data
from a UK Biobank main dataset and the UK Biobank primary care clinical
events dataset. Use \code{\link[=clinical_events_sources]{clinical_events_sources()}} to see a list of all
currently included clinical events sources. Expect this to take ~1 hour to
finish running.
}
\details{
See the \href{https://dbplyr.tidyverse.org/articles/dbplyr.html}{introduction to dbplyr} vignette for
getting started with databases and \link[dplyr:dplyr-package]{dplyr::dplyr}.
Indexes are set on the \code{source}, \code{code} and \code{eid} columns in the
\code{clinical_events} table for faster querying.
}
\examples{
# dummy UKB data dictionary and codings
dummy_ukb_data_dict <- get_ukb_dummy("dummy_Data_Dictionary_Showcase.tsv")
dummy_ukb_codings <- get_ukb_dummy("dummy_Codings.tsv")
# file paths to dummy UKB main and primary care datasets
dummy_ukb_main_path <- get_ukb_dummy(
"dummy_ukb_main.tsv",
path_only = TRUE
)
dummy_gp_clinical_path <- get_ukb_dummy(
"dummy_gp_clinical.txt",
path_only = TRUE
)
dummy_gp_scripts_path <- get_ukb_dummy(
"dummy_gp_scripts.txt",
path_only = TRUE
)
# file path where SQLite database will be created
dummy_ukb_db_path <- file.path(tempdir(), "ukb.db")
# build database
suppressWarnings(make_clinical_events_db(
  ukb_main_path = dummy_ukb_main_path,
  gp_clinical_path = dummy_gp_clinical_path,
  gp_scripts_path = dummy_gp_scripts_path,
  ukb_db_path = dummy_ukb_db_path,
  ukb_data_dict = dummy_ukb_data_dict,
  ukb_codings = dummy_ukb_codings
))
# connect to database
con <- DBI::dbConnect(
RSQLite::SQLite(),
dummy_ukb_db_path
)
ukbdb <- db_tables_to_list(con)
# table names
names(ukbdb)
# view tables
ukbdb$clinical_events
ukbdb$gp_clinical_values
ukbdb$gp_scripts_names_and_quantities
# close database connection
DBI::dbDisconnect(con)
}
\seealso{
Other clinical events:
\code{\link{clinical_events_sources}()},
\code{\link{example_clinical_codes}()},
\code{\link{extract_phenotypes}()},
\code{\link{tidy_clinical_events}()}
}
\concept{clinical events}
|
fd7e1b348eec0dde89fe7ef3f8b8a9ee120ac615 | 12619e5af30c95993c1470a1d16b1a508a9d4204 | /mapuloh/R/coord_lookup.R | ee15682e18b72fc0f1d70249dc8041910c72cdbb | [] | no_license | nusrathfatima/advanced-r-lab-5-liu | ab0ec87ced19c0cbfd211a1b129d7679838292fd | f0dab20b9dac8f7010912b5c41ab1cb4b0f10009 | refs/heads/master | 2021-06-04T14:53:49.684430 | 2016-09-29T14:43:10 | 2016-09-29T14:43:10 | 104,630,369 | 1 | 0 | null | 2017-09-24T08:40:33 | 2017-09-24T08:40:32 | null | UTF-8 | R | false | false | 1,661 | r | coord_lookup.R | #' @title Coordinate finder
#'
#' @description Looks up the geographic coordinates of a given address using
#' the Google Geocoding API.
#'
#' @param address A single character string containing address components.
#'
#' @return A data frame containing the formatted address names together with
#' their latitude (\code{lat}) and longitude (\code{lng}) coordinates.
#'
#' @examples
#' coord_lookup(address = "Mäster Mattias väg, Linköping")
#'
#'
#' @export
coord_lookup <- function(address = NULL){
  # Fail fast if the dependency is missing. require() inside a function only
  # returns FALSE on failure and needlessly attaches the whole package; all
  # calls below are already namespaced with httr::.
  if (!requireNamespace("httr", quietly = TRUE)) {
    stop("Package 'httr' is required for coord_lookup()")
  }
  # Input validation: exactly one character string.
  # (|| instead of | — this is a scalar condition, not a vectorised mask.)
  if (is.null(address) || !is.character(address) || length(address) != 1) {
    stop("Use character values")
  }
  # Build the query URL. URLencode(reserved = TRUE) percent-encodes spaces AND
  # reserved/diacritic characters (e.g. "Mäster"), which the previous
  # gsub(space -> "+") substitution did not handle.
  address <- utils::URLencode(address, reserved = TRUE)
  url <- "https://maps.googleapis.com/maps/api/geocode/json?address="
  # SECURITY NOTE: the API key is hard-coded only as a backward-compatible
  # fallback; prefer supplying it via the GOOGLE_GEOCODE_API_KEY env variable.
  key <- paste0("&key=", Sys.getenv("GOOGLE_GEOCODE_API_KEY",
                                    "AIzaSyCcRPdN_sAcwiovz7EPAq31l5cFIxp-aW4"))
  url <- paste0(url, address, key)
  get_res <- httr::GET(url)
  # Abort with the server's own status message on any HTTP error (>= 400).
  # The old grepl("^2", status_code) + stop(message_for_status()) combination
  # misused message_for_status(), which signals a message instead of
  # returning an error string.
  httr::stop_for_status(get_res)
  # Flatten the JSON response into a data frame of matches.
  content_res <- httr::content(get_res)
  geometry_res <- lapply(content_res[["results"]], "[[", "geometry")
  coord <- as.data.frame(t(sapply(geometry_res, "[[", "location")))
  full_adress <- unlist(lapply(content_res[["results"]], "[[", "formatted_address"))
  res_df <- data.frame("Full_address" = full_adress,
                       "lat" = unlist(coord[, 1]),
                       "lng" = unlist(coord[, 2]),
                       stringsAsFactors = FALSE)
  rownames(res_df) <- NULL
  if (nrow(res_df) == 0) {
    stop("No match found")
  }
  res_df
}
|
7ce52cf1d66106581b6b4c19e556d3080342f7ce | 34a18d8886baac7cd57ef9ae7977142ec016546c | /opdracht_10/opdracht_10.R | 5c92500d6b86c935293fe45a2af2878ef431c3a6 | [] | no_license | EddyvonB/project_simulaties | 81a9e18f6256d3414e9d726075649c905a90f1c8 | b9a8f247222e0ec10564fac201ccc1e1f67ff960 | refs/heads/master | 2020-04-15T16:56:33.023138 | 2016-06-27T13:16:43 | 2016-06-27T13:16:43 | 35,145,338 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 575 | r | opdracht_10.R | ##Opdracht 10a)
Z(t+alpha(t)) = Z(t) - 5/1000 * Z(t) * delta(t)
opgave 10c
dZ/dt = (-d/1000) * Z(t)
dZ/Z(t) = (-d*t)/1000
Primitieveren -->
ln|Z| = (-t*d)/1000 + C
Z(0) = 0,050
Ln(0,050)
C = ln(0,050)
ln(Z) = (-t*d)/1000 + ln(0,050)
Z = e^(-t*d/1000) + 0,050
Uitstroomsnelheid = 13
opgave 10e
Z(t) = zoutconcentratie
Gewicht van zout dat overblijft = (1000-2,5*(t+delta(t))) * Z(t+delta(t))
Uitstroom = 7,5*Z(t) * delta(t)
Instroom = (1000-2,5*t)*Z(t)
Z(t+delta(t)) =( (1000-2,5*t)*Z(t) -7,5 *Z(t)*delta(t))/ (1000- 2,5*(t+delta(t)))
|
f2538e0c10896e3ed5457d5faa2d3d6b325d0e87 | 16d4b34e90bddcaa18e11b6145dd8845ca36cd1e | /source code/Frees - PRIDIT - Book.R | b8faeb6661c0747b270daaf5c0b84640fbbcde56 | [] | no_license | daclapp/iCASexam | 5deb91d2bb751abe7ab5d4700cfaab83a1abb00c | c2bfa596739a8ad7e12601c27a8a8043ecb0b970 | refs/heads/master | 2021-03-30T20:35:01.117931 | 2019-10-27T16:57:28 | 2019-10-27T16:57:28 | 124,601,888 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,810 | r | Frees - PRIDIT - Book.R | #https://instruction.bus.wisc.edu/jfrees/jfreesbooks/PredictiveModelingVol1/unsupervised-predictive-modeling-methods/v2-chapter-7.html
# Code to calculate RIDITs and PRIDITs on Questionable Claims Data
# Read in the questionable claims data.
# This version of the data carries the dependent variable and has 1500 rows.
mydata1<-read.csv("data/SimPIP.csv",header=TRUE)
# get variable names
names(mydata1)
# get number of rows and columns
nrow(mydata1)
ncol(mydata1)
# BUG FIX: Suspicion and legalrep are columns of mydata1, not free-standing
# objects — the script never calls attach(), so the bare names stopped with
# "object not found". Reference them through the data frame instead.
table(mydata1$Suspicion, mydata1$legalrep)
# Keep only the predictor columns (3..27) for the RIDIT computation.
mydata=mydata1[,3:27]
mydata[1:5,]
####### PRIDIT #######
# Convert every column of `mydata` into scaled RIDIT scores in [-1, 1].
totalcols = ncol(mydata)
totalrows = nrow(mydata)
# create data frame to hold the RIDITs
riditsheet = data.frame(matrix(nrow=totalrows,ncol=totalcols))
# use same variable names as for original data
names(riditsheet) = names(mydata)
for (i in seq_len(totalcols)) {
  # BUG FIX: the original body re-assigned `i = 1` on its first line, so every
  # iteration recomputed column 1 and columns 2..totalcols were never filled.
  worksheet = data.frame(table(mydata[,i])) # frequency table as a data frame
  temp = cumsum(worksheet$Freq)             # cumulative frequencies
  # BUG FIX: c(), not union() — union() removes duplicates, which would
  # misalign CumSum whenever two cumulative counts coincide (zero-frequency
  # categories). The intent is simply a shift-by-one with a leading 0.
  worksheet$CumSum = c(0, temp[-length(temp)]) # cumulative freq of all prior categories
  # compute RIDIT: (half own frequency + all lower categories) / n
  worksheet$Ridit = (0.5*worksheet$Freq + worksheet$CumSum)/totalrows
  worksheet$Ridit_scaled = 2 * worksheet$Ridit - 1 # rescale [0,1] -> [-1,1]
  # hash table mapping each original category to its scaled RIDIT value
  hashtable = new.env(hash=TRUE)
  for (x in seq_len(nrow(worksheet))) {
    hashtable[[toString(worksheet$Var1[x])]] = worksheet$Ridit_scaled[x]
  }
  # look up the RIDIT value for every observation in this column
  for (j in seq_len(totalrows)) {
    riditsheet[j,i] = hashtable[[toString(mydata[j,i])]]
  }
}
# get principal components with prcomp (on the scaled RIDIT sheet)
pca_out = prcomp(riditsheet, scale.=TRUE)
plot(pca_out, main="Scree Plot of PRIDITs")
loadings1<-as.matrix(pca_out$rotation)
write.csv(loadings1,file="c:/ClusterData/PcaLoad.csv")
biplot(pca_out)
# NOTE(review): prcomp() has no "cor" argument — cor.=TRUE is silently
# absorbed by '...' and ignored, so this runs an UNSCALED PCA.
# princomp(..., cor=TRUE) below is presumably what was intended.
pca_out2 = prcomp(riditsheet, cor.=TRUE)
# NOTE(review): prcomp objects store the scores in $x, not $scores, so
# Pcascores is NULL here and the next writes are empty.
Pcascores<-pca_out2$scores
Pcascores
scoresout<-data.frame(Pcascores)
scoresout[1:10,]
write.csv(scoresout,file="c:/ClusterData/Pcascores.csv")
# use princomp to get the PRIDIT (princomp results DO provide $scores)
pca_out2 = princomp(riditsheet, cor=TRUE,scores=TRUE)
plot(pca_out2, main="Scree Plot of PRIDITs")
# get loadings of variables on principal components
loadings2<-as.matrix(pca_out2$loadings)
loadings2
summary(pca_out2)
# PRIDIT score = each record's score on the first principal component
PRIDIT<-pca_out2$scores[,1:1]
#Top2[,1:2]
# NOTE(review): Top2 is never defined anywhere in this script, so the next
# four lines cannot run as-is; rpart()/rpart.plot() are also called without
# library(rpart)/library(rpart.plot) being loaded.
Top2[1:10,]
Suspicion<-mydata1$Suspicion
TestPCA<-data.frame(Suspicion,Top2)
TestPCA[1:10,]
TreePridit<-rpart(Suspicion~PRIDIT,data=TestPCA)
rpart.plot(TreePridit)
# write the RIDITs to file
write.csv(riditsheet,file="c:/ClusterData/PIPriditSim1.csv")
# Write the PRIDIT
write.csv(PRIDIT,file="c:/ClusterData/PIPRIDIT.csv")
# NOTE(review): Totalscore is also undefined in this script.
write.csv(Totalscore,file="c:/ClusterData/PIPScoreSim1.csv")
#-----------------------------------------
|
c805586bd9fa5d90fa89357ebe7563ab1290d0f1 | e7c7e8b21ab45ccf91c01f8faa4d11641606ba12 | /R/20200511/other_functions/piumet_code.R | da1f9bca1720d38b3a528ad8c5ca9d006a28eda0 | [
"Apache-2.0"
] | permissive | mohanbabu29/precision_exposome | 09120274ebb7103ca3c73c002497406709a372e3 | 600c20db7eff1ddfc7b2656ddc538153b1044961 | refs/heads/main | 2023-03-18T02:42:36.202085 | 2021-03-11T17:24:59 | 2021-03-11T17:24:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,772 | r | piumet_code.R | ###----------------------------------------------------------------------------
# Read PIUMet network-inference output from `path`, attach the putative
# metabolite annotations to the input m/z marker table, assemble the union
# network as a tidygraph object, save node/edge/annotation objects plus a PDF
# under <path>/Result, and return a ggraph plot of the network.
#
# Arguments:
#   path         directory containing the PIUMet result files
#   marker_table data frame of m/z markers with `name`, `mz` and `polarity`
#   text         draw node labels when TRUE
#   layout       ggraph layout name (default "kk", Kamada-Kawai)
#   size_range   node-size scale range
#   width_range  edge-width scale range
#   marker_name  optional subset of marker names (see NOTE in the body)
readPIUMet <-
  function(path = ".",
           marker_table,
           text = TRUE,
           layout = "kk",
           size_range = c(3, 8),
           width_range = c(0.2, 0.5),
           marker_name = NULL) {
    # Putative metabolite annotations produced by PIUMet for each m/z peak.
    annotation_result <-
      read.table(
        file.path(
          path,
          "peaks_putative_metabolites_w10.0_b2.0_mu0.0005_R1.txt"
        ),
        sep = "\t",
        header = TRUE
      )
    if (nrow(annotation_result) == 0) {
      cat("No result.\n")
      return(NULL)
    }
    # Strip the "m/z=" prefix; keep a rounded numeric m/z and a character
    # key (mz2) used for joining against the marker table below.
    annotation_result <-
      annotation_result %>%
      dplyr::mutate(mz =
                      stringr::str_replace(mz.peak, "m/z=", "") %>%
                      as.numeric() %>%
                      round(4)) %>%
      dplyr::mutate(mz2 = as.character(mz))
    if (!is.null(marker_name)) {
      # NOTE(review): this branch builds `marker` from a global
      # `variable_info` object that is not defined in this function, and
      # `marker` is never used afterwards — the join below still uses
      # `marker_table`. Verify whether this branch is dead or buggy.
      marker <-
        variable_info %>%
        dplyr::filter(name %in% marker_name) %>%
        dplyr::mutate(polarity = case_when(
          stringr::str_detect(name, "POS") ~ "positive",
          stringr::str_detect(name, "NEG") ~ "negative"
        )) %>%
        dplyr::mutate(
          mz2 = case_when(
            polarity == "positive" ~ as.character(round(mz, 4) - 1),
            polarity == "negative" ~ as.character(round(mz, 4) + 1)
          )
        )
    } else{
      # Shift each measured m/z by one unit (subtract for positive mode, add
      # for negative) to build the join key — presumably converting measured
      # m/z to the neutral mass reported by PIUMet; TODO confirm.
      marker_table <-
        marker_table %>%
        dplyr::mutate(
          mz2 = case_when(
            polarity == "positive" ~ as.character(round(mz, 4) - 1),
            polarity == "negative" ~ as.character(round(mz, 4) + 1)
          )
        )
    }
    # Join marker info onto the annotations; keep the annotation-side mz.
    annotation_result <-
      annotation_result %>%
      dplyr::left_join(marker_table, by = "mz2") %>%
      dplyr::select(-c(mz.y)) %>%
      dplyr::rename(mz = mz.x)
    # Edge frequencies across PIUMet runs.
    edge_attr <-
      read.table(
        file.path(path, "result_edge_frequency_w10.0_b2.0_mu0.0005_R1.txt"),
        sep = "\t",
        header = FALSE
      ) %>%
      dplyr::rename(edge = V1)
    # Union network edges, keyed as "from (pp) to" to match edge_attr.
    edge_data <-
      read.table(
        file.path(path, "result_union_net_w10.0_b2.0_mu0.0005_R1.txt"),
        sep = "\t",
        header = FALSE
      ) %>%
      dplyr::rename(from = V1, to = V2) %>%
      dplyr::mutate(edge = paste(from, "(pp)", to, sep = " ")) %>%
      dplyr::left_join(edge_attr, by = "edge")
    # Node frequencies plus node class and HMDB id; annotate metabolite
    # nodes with their super class from annotation_result.
    node_data <-
      read.table(
        file.path(path, "result_node_frequency_w10.0_b2.0_mu0.0005_R1.txt"),
        sep = "\t",
        header = FALSE
      ) %>%
      dplyr::rename(node = V1,
                    node_class = V3,
                    HMDB_ID = V4) %>%
      dplyr::left_join(annotation_result[, c("name", "Metabolite.Name", "super.class")],
                       by = c("node" = "Metabolite.Name"))
    # Replace "m/z=..." node labels with the marker names from marker_table.
    node <-
      node_data$node %>%
      stringr::str_replace("m/z=", "") %>%
      as.numeric() %>%
      round(4) %>%
      as.character()
    node <- marker_table$name[match(node, marker_table$mz2)]
    # Lookup table: original node label -> marker name (only matched nodes).
    rename <-
      data.frame(name1 = node_data$node[!is.na(node)],
                 name2 = node[!is.na(node)])
    node_data$node <-
      sapply(node_data$node, function(x) {
        temp_idx <- match(x, rename$name1)
        if (is.na(temp_idx)) {
          return(x)
        } else{
          rename$name2[temp_idx]
        }
      }) %>%
      unname()
    # Apply the same renaming to both edge endpoints, then rebuild the key.
    edge_data$from <-
      sapply(edge_data$from, function(x) {
        temp_idx <- match(x, rename$name1)
        if (is.na(temp_idx)) {
          return(x)
        } else{
          rename$name2[temp_idx]
        }
      }) %>%
      unname()
    edge_data$to <-
      sapply(edge_data$to, function(x) {
        temp_idx <- match(x, rename$name1)
        if (is.na(temp_idx)) {
          return(x)
        } else{
          rename$name2[temp_idx]
        }
      }) %>%
      unname()
    edge_data$edge <-
      paste(edge_data$from, "(pp)", edge_data$to, sep = " ")
    node_data <-
      node_data %>%
      dplyr::select(-name) %>%
      dplyr::distinct()
    # Collapse all "Metabolite*" subtypes into a single class label.
    node_data$node_class[grep("Metabolite", node_data$node_class)] <-
      "Metabolite"
    # Undirected tidygraph with node degree stored as an attribute.
    graph <-
      tidygraph::tbl_graph(nodes = node_data,
                           edges = edge_data,
                           directed = FALSE) %>%
      dplyr::mutate(Degree = tidygraph::centrality_degree(mode = 'all'))
    # Fixed fill / outline colours and point shapes per node class.
    fill <-
      c(
        "m/z Peak" = ggsci::pal_aaas()(10)[9],
        "Metabolite" = ggsci::pal_aaas()(10)[1],
        "Protein" = ggsci::pal_aaas(alpha = 0.1)(10)[2]
        # "Protein" = "tomato"
      )
    col <-
      c(
        "m/z Peak" = ggsci::pal_aaas()(10)[9],
        "Metabolite" = ggsci::pal_aaas()(10)[1],
        "Protein" = ggsci::pal_aaas()(10)[2]
      )
    shape = c("m/z Peak" = 24,
              "Metabolite" = 22,
              # "Metabolite_others" = 22,
              "Protein" = 21)
    require(ggraph)
    # Edge width encodes edge frequency (V3); node size encodes node
    # frequency (V2). The two branches differ only in the geom_node_text()
    # layer that draws node labels when text = TRUE.
    if (text) {
      plot <-
        ggraph(graph,
               layout = layout) +
        geom_edge_link(
          aes(edge_width = V3),
          alpha = 1,
          color = "black",
          show.legend = TRUE
        ) +
        geom_node_point(
          aes(
            size = V2,
            fill = node_class,
            shape = node_class
          ),
          alpha = 1,
          show.legend = TRUE
        ) +
        scale_shape_manual(values = shape) +
        guides(color = guide_legend(override.aes = list(size = 3))) +
        ggraph::geom_node_text(aes(label = node,
                                   color = node_class),
                               repel = TRUE,
                               size = 3) +
        ggraph::scale_edge_width(range = width_range) +
        scale_size_continuous(range = size_range) +
        scale_fill_manual(values = fill) +
        scale_color_manual(values = col) +
        ggraph::theme_graph() +
        theme(
          plot.background = element_rect(fill = "transparent", color = NA),
          panel.background = element_rect(fill = "transparent", color = NA),
          legend.position = "right",
          legend.background = element_rect(fill = "transparent", color = NA)
        )
    } else{
      plot <-
        ggraph(graph,
               layout = layout) +
        geom_edge_link(
          aes(edge_width = V3),
          alpha = 1,
          color = "black",
          show.legend = TRUE
        ) +
        geom_node_point(
          aes(
            size = V2,
            fill = node_class,
            shape = node_class
          ),
          alpha = 1,
          show.legend = TRUE
        ) +
        scale_shape_manual(values = shape) +
        guides(color = guide_legend(override.aes = list(size = 3))) +
        ggraph::scale_edge_width(range = width_range) +
        scale_size_continuous(range = size_range) +
        scale_fill_manual(values = fill) +
        scale_color_manual(values = col) +
        ggraph::theme_graph() +
        theme(
          plot.background = element_rect(fill = "transparent", color = NA),
          panel.background = element_rect(fill = "transparent", color = NA),
          legend.position = "right",
          legend.background = element_rect(fill = "transparent", color = NA)
        )
    }
    # Persist intermediate objects and a PDF of the plot under <path>/Result.
    # (File name "graph_plog.pdf" — typo for "plot" — kept for compatibility.)
    output_path <- file.path(path, "Result")
    dir.create(output_path)
    save(edge_data, file = file.path(output_path, "edge_data"))
    save(node_data, file = file.path(output_path, "node_data"))
    save(graph, file = file.path(output_path, "graph"))
    save(annotation_result, file = file.path(output_path, "annotation_result"))
    ggsave(
      plot,
      filename = file.path(output_path, "graph_plog.pdf"),
      width = 7,
      height = 7
    )
    # Return the ggraph object (also the last evaluated expression).
    plot
  } |
613b1e7b89962632e9f606d0709d01cfe1c2f74e | 280979ed9c1f0238a6b6081688bad512a9c0834c | /get_state_polling_data.R | f3233710eed1cc8207016f2d95ff4dfea18266c5 | [] | no_license | tylerreny/polling_data | 26dfdc3121c456a34bcb6b32bf9b9da79b7f0f7a | 15df3129d05cb26c36d12f908b75d03b4dbe3816 | refs/heads/master | 2021-01-24T07:32:18.379848 | 2017-06-05T00:50:41 | 2017-06-05T00:50:41 | 93,350,838 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 473 | r | get_state_polling_data.R | setwd('/Users/treny/Dropbox/_projects/googleTrends/polling')
# Combine the per-state polling files ("<state>_poll_response.tsv") found in
# the working directory into one table with a `state` column, then write the
# result out as a single CSV. read_delim() comes from readr, which is
# attached at the top of this script.
files <- list.files()
variables <- c('Trump','Clinton','Other','Undecided',
               'start_date','end_date','observations')
# Read each file once and bind the results in a single step rather than
# growing `master` with rbind() inside the loop (quadratic copying); lapply
# also behaves correctly when the directory is empty, unlike 1:length(files).
per_state <- lapply(files, function(f) {
  df <- read_delim(f, delim='\t')
  df <- df[,variables]
  # The state name is the file name minus the fixed suffix.
  df$state <- gsub('_poll_response.tsv','',f)
  df
})
master <- do.call(rbind, per_state)
write.csv(master,row.names=FALSE,'all_polling_data_by_state.csv')
|
911a95a93ee647870a17c713e73acb9bcca26f3d | cdc506805e99d4281bfb2b5bf15fc34cdb358fce | /3_estructura de datos.R | bda0c0676f305fe639618eb4acee97d5b99e1bd0 | [] | no_license | anabella005/Holando | 881c9279df052668da4fa36ec84a83900ce23ed6 | 4181b715f08a0d64212024eb637060b772ac4ea4 | refs/heads/master | 2020-08-16T19:08:03.771023 | 2019-10-29T18:28:37 | 2019-10-29T18:28:37 | 215,540,308 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,419 | r | 3_estructura de datos.R | library(tidyverse)
# Exploratory analysis of the EBV (estimated breeding value) data set, which
# must already exist in the global environment. SEXOLETRA = sex letter,
# FNACNUMERO = birth date as yyyymmdd number, ISOP/ISOM = sire/dam ids.
machos <- filter(EBV, SEXOLETRA == "M")   # males
str(machos)
summary(EBV$SEXOLETRA)
hembras <- filter(EBV, SEXOLETRA == "F")  # females
summary(EBV$FNACNUMERO)
# Subsets by birth period (numeric yyyymmdd ranges).
year1936<-EBV %>%
  filter(FNACNUMERO %in% c(19360206:19370000))
decada1930<-EBV %>%
  filter(FNACNUMERO %in% c(19360206:19400000))
decada1940<-EBV %>%
  filter(FNACNUMERO %in% c(19400000:19500000))
# Summary by origin (mean, sd and n for milk).
# sd() cannot be computed for groups with a single record (returns NA).
decada1930 %>%
  group_by(ORI) %>%
  summarise(LECHE_mean = mean(LECHE),
            LECHE_sd = sd(LECHE),
            LECHE_n = length(LECHE))
decada1930 %>%
  group_by(ORI) %>%
  summarise(FNAC_min = min (FNACNUMERO),
            FNAC_max = max (FNACNUMERO),
            FNAC_mean = mean(FNACNUMERO),
            hijos = length(FNACNUMERO))
# NOTE(review): the next line is a bare expression that just prints the
# function object — likely a leftover from looking the function up.
forcats::fct_explicit_na
fct_explicit_na(EBV$ISOM)
library(forcats)
# Offspring counts per sire/dam; the grouping column is literally named
# "fct_explicit_na(ISOP)" — consider naming it explicitly inside group_by().
padres<-EBV %>%
  group_by(fct_explicit_na(ISOP)) %>%
  summarise(hijos = length(FNACNUMERO))
madres<-EBV %>%
  group_by(fct_explicit_na(ISOM)) %>%
  summarise(hijos = length(FNACNUMERO))
# Export the data set from R to a tab-separated txt file:
write.table(EBV, "C:/Users/Ana/Desktop/tesis FG/R/EBV.txt", sep = "\t", col.names = TRUE, row.names = FALSE, quote = FALSE,
            na = "NA")
# Most influential sires:
# largest number of offspring?
# NOTE(review): the next two assignments recompute `padres`/`madres` exactly
# as above — duplicated code kept as-is.
padres<-EBV %>%
  group_by(fct_explicit_na(ISOP)) %>%
  summarise(hijos = length(FNACNUMERO))
madres<-EBV %>%
  group_by(fct_explicit_na(ISOM)) %>%
  summarise(hijos = length(FNACNUMERO))
# highest EBV values?
# Per-sire summary of offspring count and min/mean/max of milk (LECHE),
# protein (PROT) and fat (GRASA) breeding values.
EBV_padres<-EBV %>%
  group_by(fct_explicit_na(ISOP)) %>%
  summarise(hijos = length(FNACNUMERO),
            ORI = mean(ORI),
            min.leche = min(LECHE),
            mean.leche = mean(LECHE),
            max.leche = max(LECHE),
            min.prot = min(PROT),
            mean.prot = mean(PROT),
            max.prot = max(PROT),
            min.grasa = min(GRASA),
            mean.grasa = mean(GRASA),
            max.grasa = max(GRASA))
# Same summary per dam.
EBV_madres<-EBV %>%
  group_by(fct_explicit_na(ISOM)) %>%
  summarise(hijos = length(FNACNUMERO),
            min.leche = min(LECHE),
            mean.leche = mean(LECHE),
            max.leche = max(LECHE),
            min.prot = min(PROT),
            mean.prot = mean(PROT),
            max.prot = max(PROT),
            min.grasa = min(GRASA),
            mean.grasa = mean(GRASA),
            max.grasa = max(GRASA))
|
9282ed1c380f1b9eab6a071f05ab454ed3d4669b | 0c32a1c89eab6e27a62c782500c926098709ab81 | /tests/testthat/test-alpha.R | 2a819fc554b5bd116d0a0a2906f7f1ea5a74cd2e | [] | no_license | vis-jusuE404/colourvalues | 3e4b457fc3d6c4fda953e2085566e5c85cbf97c8 | 00e18ca3f1e957ae702b7442916dea389c635a00 | refs/heads/master | 2022-05-20T15:57:42.893685 | 2019-04-25T23:18:36 | 2019-04-25T23:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,238 | r | test-alpha.R | context("alpha")
test_that("[0,1) alpha values treated as percentage", {
  # expect_equal() reports the differing values on failure, unlike
  # expect_true(a == b) which only reports "isn't TRUE".
  expect_equal(colour_values(1, alpha = 0.5), colour_values(1, alpha = 255 / 2))
  expect_equal(colour_values(1, alpha = 1), colour_values(1, alpha = 255 / 255))
})
test_that("alpha values > 255 are scaled", {
  # expect_equal() gives an informative diff on failure, unlike
  # expect_true(a == b) which only reports "isn't TRUE".
  expect_equal(colour_values(1, alpha = 255), colour_values(1, alpha = 255 * 2 + 1))
  expect_equal(colour_values(1, alpha = 255), colour_values(1, alpha = 255 * 3 + 2))
  expect_equal(colour_values(1, alpha = 255), colour_values(1, alpha = 255 * 10 + 9))
})
test_that("alpha vector scaled to [0-255]", {
  # Comparing the full vectors directly replaces expect_true(all(a == b))
  # and reports which elements differ on failure.
  expect_equal(colour_values(1:10, alpha = 0:9), colour_values(1:10, alpha = 256:265))
})
test_that("alpha not updated by reference", {
  # Scalar input: the alpha argument must be left untouched by the call.
  vals <- 1
  opacity <- 1
  colour_values(x = vals, alpha = opacity)
  expect_equal(opacity, 1)

  # Length-two vector.
  vals <- c(1, 1)
  opacity <- c(1, 1)
  colour_values(x = vals, alpha = opacity)
  expect_equal(opacity, c(1, 1))

  # Integer sequence.
  vals <- 1:5
  opacity <- 1:5
  colour_values(x = vals, alpha = opacity)
  expect_equal(opacity, 1:5)

  # Constant alpha vector.
  vals <- 1:5
  opacity <- rep(5, 5)
  colour_values(x = vals, alpha = opacity)
  expect_equal(opacity, rep(5, 5))

  # A data frame column passed as alpha must also remain unchanged.
  tbl <- data.frame(id = 1:5, o = rep(5, 5))
  palette <- colour_values(x = tbl$id, alpha = tbl$o)
  expect_equal(tbl$o, rep(5, 5))
})
|
0e21bb0895e5c3a1366630519484d1edd01bf1b8 | ce9e36c47bd8342e17649b3cad0d8ba93ce28cdf | /ProjetStatistique_CodeR_Biernacki-Bruno_Camiat_Deze.r | 37e18f903e8018aabcd7baea0b748a5796318cfa | [] | no_license | florian-deze/Statistical-Food-Analysis | f5e7ee2a19f61f1fb5839d6a710ee549f034e4e5 | 76c4304f9ac12f25546411370ef0b8ad1a2dfdec | refs/heads/master | 2020-12-02T21:18:26.533188 | 2017-07-05T08:05:14 | 2017-07-05T08:05:14 | 96,293,289 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,303 | r | ProjetStatistique_CodeR_Biernacki-Bruno_Camiat_Deze.r | # PROJET STAT
###########################
# Data import             #
###########################
# NOTE(review): hard-coded, machine-specific working directory — the script
# only runs on this exact machine; prefer project-relative paths.
setwd("C:/Users/User/Documents/Polytech/GIS Semestre 8/Projet Statistique")
food = read.csv("food.csv", header=TRUE, dec=".", sep=";", row.names=1)
# Check that the import went well
head(food)
str(food)
# NOTE(review): attach() puts a COPY of the columns on the search path, so
# later edits to food$... (outliers set to NA below) do not propagate to the
# bare names such as Energy / Protein used throughout the script.
attach(food)
# foodQ will hold the qualitative (binned) version of the same variables.
foodQ = food
str(foodQ)
##########################################
# Univariate (descriptive) statistics    #
##########################################
summary(food)
# Standard deviations
apply(food, 2, sd)
# variable Energy
par(mfrow=c(1,2))
boxPlotEnergy = boxplot(Energy, main="Variable Energy"); boxPlotEnergy
#text(x=rep(1:5,each=5)+0.3,y=as.vector(boxPlotEnergy$stats),labels=as.vector(boxPlotEnergy$stats),col=c("green","blue","red","blue","green",cex=0.7))
plot(Energy,main="Variable Energy")
table(Energy)
par(mfrow=c(1,1))
# variable Protein
par(mfrow=c(1,2))
boxPlotProtein = boxplot(Protein, main="Variable Protein"); boxPlotProtein
table(Protein)
plot(Protein, main="Variable Protein")
# the value "350" is almost certainly an outlier / data-entry error:
# flag it as missing in both data sets.
# NOTE(review): only food$Protein and foodQ$Protein change — the attached
# copy `Protein` used above still contains the 350.
which(food$Protein == 350)
food$Protein[which(food$Protein == 350)] = NA
foodQ$Protein[which(foodQ$Protein == 350)] = NA
par(mfrow=c(1,1))
# variable Fat
par(mfrow=c(1,2))
boxPlotFat = boxplot(Fat, main="Variable Fat"); boxPlotFat
table(Fat)
plot(Fat, main="Variable Fat") # many values close to 0
par(mfrow=c(1,1))
# variable Water
par(mfrow=c(1,2))
boxPlotWater = boxplot(Water, main="Variable Water"); boxPlotWater
#text(x=rep(1:5,each=5)+0.3,y=as.vector(boxPlotWater$stats),labels=as.vector(boxPlotWater$stats),col=c("green","blue","red","blue","green",cex=0.7))
table(Water)
plot(Water, main="Variable Water") # very evenly spread
par(mfrow=c(1,1))
# variable VitaminA
par(mfrow=c(1,2))
boxPlotVitA = boxplot(VitaminA, main="Variable Vitamin A"); boxPlotVitA # hard to read at this scale
plot(VitaminA, main="Variable Vitamin A") # values concentrated near 0, 2 extreme values: check whether they are outliers
table(VitaminA) # 319 zeros: 68% of the observations
par(mfrow=c(1,1))
# variable VitaminB1
par(mfrow=c(1,2))
boxPlotVitB1 = boxplot(VitaminB1, main="Variable Vitamin B1"); boxPlotVitB1
plot(VitaminB1, main="Variable Vitamin B1")
table(VitaminB1)
quantile(food$VitaminB1)
par(mfrow=c(1,1))
# variable VitaminB2
par(mfrow=c(1,2))
boxPlotVitB2 = boxplot(VitaminB2, main="Variable Vitamin B2"); boxPlotVitB2
plot(VitaminB2, main="Variable Vitamin B2")
table(VitaminB2)
which(food$VitaminB2 == 20) # outlier: flag as missing
food$VitaminB2[which(food$VitaminB2 == 20)] = NA
foodQ$VitaminB2[which(foodQ$VitaminB2 == 20)] = NA
par(mfrow=c(1,1))
# variable VitaminC
par(mfrow=c(1,2))
boxPlotVitC = boxplot(VitaminC, main="Variable Vitamin C"); boxPlotVitC
plot(VitaminC, main="Variable Vitamin C")
table(VitaminC)
par(mfrow=c(1,1))
###############################################
# Conversion of the variables to qualitative  #
###############################################
# Each class needs at least 24 individuals (5% of the 469 foods)
## ---- Variable Energy ----
outE = food$Energy
table(outE)
quantile(outE)
# Bins chosen from the quantiles inspected above; cut() intervals are then
# renamed with descriptive French labels.
foodQ$Energy = cut(foodQ$Energy, breaks = c(0,65, 200, 320, 900))
levels(foodQ$Energy) = c("pasEnergique", "pauveEnEnergie", "energique", "richeEnEnergie")
table(foodQ$Energy)
foodQ$Energy
## ---- Variable Protein ----
# Zeros are excluded when inspecting the quantiles, but kept as their own
# bin (-1, 0] in the cut() below.
outP = food$Protein[-which(food$Protein == 0)]
table(na.omit(outP))
quantile(na.omit(outP))
foodQ$Protein = cut(foodQ$Protein, breaks = c(-1, 0, 2, 8, 19, 45))
levels(foodQ$Protein) = c("pasProteine", "peuProteine", "proteine", "bienProteine", "tresProteine")
table(foodQ$Protein)
foodQ$Protein
## ---- Variable Fat ----
outF = food$Fat[-which(food$Fat == 0)]
table(outF)
quantile(outF)
foodQ$Fat = cut(foodQ$Fat, breaks = c(-1, 0,2.5, 10, 22, 100))
levels(foodQ$Fat) = c("pasGras", "peuGras", "gras", "bienGras", "tresGras")
table(foodQ$Fat)
foodQ$Fat
## ---- Variable Water ----
outW = food$Water
table(outW)
quantile(outW)
foodQ$Water = cut(foodQ$Water, breaks = c(-1, 29.75, 63, 80, 100))
levels(foodQ$Water) = c("pasDEau", "pauveEnEau", "eau", "RicheEnEeau")
table(foodQ$Water)
foodQ$Water
## ---- Variable VitaminA ----
outA = food$VitaminA[-which(food$VitaminA == 0)]
table(outA)
quantile(outA)
foodQ$VitaminA = cut(foodQ$VitaminA, breaks = c(-1,0,40,80,300,12000))
levels(foodQ$VitaminA) = c("pasVitamine", "peuVitamine", "vitamine", "bienVitamine","tresVitamine")
table(foodQ$VitaminA)
foodQ$VitaminA
## ---- Variable VitaminB1 ----
outB1 = food$VitaminB1[-which(food$VitaminB1 == 0)]
table(outB1)
quantile(outB1)
foodQ$VitaminB1 = cut(foodQ$VitaminB1, breaks = c(-1, 0, 0.05, 0.10, 0.20, 1.50))
levels(foodQ$VitaminB1) = c("pasVitamine", "peuVitamine", "vitamine", "bienVitamine","tresVitamine")
table(foodQ$VitaminB1)
foodQ$VitaminB1
## ---- Variable VitaminB2 ----
outB2 = food$VitaminB2[-which(food$VitaminB2 == 0)]
table(na.omit(outB2))
quantile(na.omit(outB2))
foodQ$VitaminB2 = cut(foodQ$VitaminB2, breaks = c(-1, 0,0.06, 0.1, 0.20, 4.20))
levels(foodQ$VitaminB2) = c("pasVitamine", "peuVitamine", "vitamine", "bienVitamine","tresVitamine")
table(foodQ$VitaminB2)
foodQ$VitaminB2
## ---- Variable VitaminC ----
outC = food$VitaminC[-which(food$VitaminC == 0)]
table(outC)
quantile(outC)
foodQ$VitaminC = cut(foodQ$VitaminC, breaks = c(-1, 0, 15, 25, 200))
levels(foodQ$VitaminC) = c("pasVitamine", "peuVitamine", "vitamine", "tresVitamine")
table(foodQ$VitaminC)
foodQ$VitaminC
## Rename the columns of the qualitative data set
names(foodQ) = c("EnergyQ", "ProteinQ", "FatQ", "WaterQ", "VitaminAQ", "VitaminB1Q", "VitaminB2Q", "VitaminCQ")
##########################
# Bivariate statistics   #
##########################
# Correlation matrix (pairwise deletion so the NAs introduced above are
# tolerated)
corfood = cor(food, use="pairwise.complete.obs")
corfood
# Boxplots of Energy, Protein, Water and Fat against the vitamin classes
par(mfrow=c(2,2))
boxplot(food$Energy~foodQ$VitaminA,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Energy en fonction de la VitaminA")
boxplot(food$Energy~foodQ$VitaminB1,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Energy en fonction de la VitaminB1")
boxplot(food$Energy~foodQ$VitaminB2,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Energy en fonction de la VitaminB2")
boxplot(food$Energy~foodQ$VitaminC,col=c("red", "orange" , "yellow", "green"), main="Energy en fonction de la VitaminC")
par(mfrow=c(2,2))
boxplot(food$Protein~foodQ$VitaminA,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Protein en fonction de la VitaminA")
boxplot(food$Protein~foodQ$VitaminB1,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Protein en fonction de la VitaminB1")
boxplot(food$Protein~foodQ$VitaminB2,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Protein en fonction de la VitaminB2")
boxplot(food$Protein~foodQ$VitaminC,col=c("red", "orange" , "yellow", "green"), main="Protein en fonction de la VitaminC")
par(mfrow=c(2,2))
boxplot(food$Water~foodQ$VitaminA,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Water en fonction de la VitaminA")
boxplot(food$Water~foodQ$VitaminB1,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Water en fonction de la VitaminB1")
boxplot(food$Water~foodQ$VitaminB2,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Water en fonction de la VitaminB2")
boxplot(food$Water~foodQ$VitaminC,col=c("red", "orange" , "yellow", "green"), main="Water en fonction de la VitaminC")
par(mfrow=c(2,2))
boxplot(food$Fat~foodQ$VitaminA,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Fat en fonction de la VitaminA")
boxplot(food$Fat~foodQ$VitaminB1,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Fat en fonction de la VitaminB1")
boxplot(food$Fat~foodQ$VitaminB2,col=c("red", "orange" , "yellow", "green", "darkgreen"), main="Fat en fonction de la VitaminB2")
boxplot(food$Fat~foodQ$VitaminC,col=c("red", "orange" , "yellow", "green"), main="Fat en fonction de la VitaminC")
par(mfrow=c(1,1))
##############
# PCA + HAC  #
##############
library(FactoMineR)
# Run the PCA after replacing the 3 extreme VitaminA values (12000, 12000 and
# 8000) by 1500 so they do not dominate the components and the clustering.
food.corrige = food
food.corrige$VitaminA[food.corrige$VitaminA==12000]=1500
food.corrige$VitaminA[food.corrige$VitaminA==8000]=1500
res.pca = PCA(na.omit(food.corrige), ncp=3, scale.unit=TRUE, graph=TRUE)
res.pca$eig # keep 3 principal components (mean inertia > 1)
# Hierarchical clustering on the principal components.
res.pca.hcpc = HCPC(res.pca,nb.clust=3) # we want 3 clusters
res.pca.hcpc$desc.var # description by variables
res.pca.hcpc$desc.axes # description by axes
res.pca.hcpc$desc.ind # description by individuals
# Cluster sizes
table(res.pca.hcpc$data.clust$clust)
# Individuals in each cluster
row.names(res.pca.hcpc$data.clust[which(res.pca.hcpc$data.clust$clust==2),])
row.names(res.pca.hcpc$data.clust[which(res.pca.hcpc$data.clust$clust==3),])
row.names(res.pca.hcpc$data.clust[which(res.pca.hcpc$data.clust$clust==1),])
##############
# MCA + HAC  #
##############
# MCA (multiple correspondence analysis on the binned/qualitative variables)
res.mca = MCA(na.omit(foodQ), ncp=16, graph=FALSE)
res.mca$eig
# Hierarchical clustering on the MCA coordinates
res.mca.hcpc = HCPC(res.mca,nb.clust=3) # we want 3 clusters
res.mca.hcpc$desc.var # description by variables
res.mca.hcpc$desc.axes # description by axes
res.mca.hcpc$desc.ind # description by individuals
# Cluster sizes
table(res.mca.hcpc$data.clust$clust)
# Individuals in each cluster
row.names(res.mca.hcpc$data.clust[which(res.mca.hcpc$data.clust$clust==1),])
row.names(res.mca.hcpc$data.clust[which(res.mca.hcpc$data.clust$clust==2),])
row.names(res.mca.hcpc$data.clust[which(res.mca.hcpc$data.clust$clust==3),])
##################
# Rand index     #
##################
# Relabel the clusters so that cluster i from the PCA solution corresponds to
# cluster i from the MCA solution. Temporary labels 4, 5 and 6 are used to
# avoid collisions while the mapping is applied.
classes.acp = res.pca.hcpc$data.clust$clust
classes.acm = res.mca.hcpc$data.clust$clust
classes.acp.renomme = as.vector(classes.acp)
classes.acp.renomme[classes.acp==1]=6
classes.acp.renomme[classes.acp==2]=4
classes.acp.renomme[classes.acp==3]=5
classes.acp.renomme
classes.acm.renomme = as.vector(classes.acm)
classes.acm.renomme[classes.acm==1]=4
classes.acm.renomme[classes.acm==2]=5
classes.acm.renomme[classes.acm==3]=6
classes.acm.renomme
# Rand index = share of pairs classified identically by the two clusterings.
# NOTE(review): correct=FALSE gives the raw (unadjusted) Rand index, which
# tends to be optimistic; correct=TRUE would give the chance-corrected ARI.
library(flexclust)
indice.rand = randIndex(table(classes.acp.renomme,classes.acm.renomme), correct=FALSE)
indice.rand # high value: the two classifications give similar results
##########################
# Discriminant analysis  #
##########################
library(MASS)
# Fit an LDA on the 8 quantitative variables (column 9 of data.clust is the
# cluster label itself), using the HCPC clusters as the grouping factor.
res.lda = lda(na.omit(res.pca.hcpc$data.clust[,-9]),grouping = res.pca.hcpc$data.clust$clust)
# Example: predict the cluster of a new food item.
# BUG FIX: predict.lda expects `newdata` to be a data frame whose column
# names match the training variables; a bare numeric vector is rejected.
predict(res.lda,
        newdata = data.frame(Energy = 350, Protein = 5, Fat = 15, Water = 30,
                             VitaminA = 10, VitaminB1 = 0.1, VitaminB2 = 0.1,
                             VitaminC = 5))
|
0555a4325e7072500bcd26b48f5940b57d0925e5 | 184180d341d2928ab7c5a626d94f2a9863726c65 | /valgrind_test_dir/threshold_multilevel-test.R | 1c205b333a62240769a0c5b4a212bbde7d2b8ab6 | [] | no_license | akhikolla/RcppDeepStateTest | f102ddf03a22b0fc05e02239d53405c8977cbc2b | 97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5 | refs/heads/master | 2023-03-03T12:19:31.725234 | 2021-02-12T21:50:12 | 2021-02-12T21:50:12 | 254,214,504 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 253 | r | threshold_multilevel-test.R | function (im, thresvals)
{
    # Auto-generated RcppDeepState/valgrind test-harness body: it records the
    # arguments of every call in the global `data.env` environment before
    # delegating to the compiled routine.
    e <- get("data.env", .GlobalEnv)
    # Append this call's arguments to the per-function log list (grows by one
    # entry per invocation).
    e[["threshold_multilevel"]][[length(e[["threshold_multilevel"]]) +
        1]] <- list(im = im, thresvals = thresvals)
    # Forward to the native C++ implementation; its value is the result.
    .Call("_imagerExtra_threshold_multilevel", im, thresvals)
}
|
2c6df0b17ef338d7a068bba08d1a768352fbf489 | 90958e0e0b598f7f789dae0318f2d7f4eb58f374 | /ui.R | b80946a21c4bc47e2c988f9f17cb029a5429b429 | [] | no_license | elan-kumaran/Word-Predictor | 3c3bef7e8aa770bf6f74c75de15ee4f91d3225f7 | 30c38c97be77cfd0ff8358bc70773530552c9142 | refs/heads/master | 2021-04-26T23:08:23.823215 | 2018-03-05T16:46:39 | 2018-03-05T17:19:41 | 123,936,084 | 0 | 0 | null | 2018-03-05T15:10:01 | 2018-03-05T15:03:34 | null | UTF-8 | R | false | false | 2,534 | r | ui.R | shinyUI(pageWithSidebar(
  # Page header shown at the top of the app.
  headerPanel("SwiftKey Predict Next word"),
  # Left-hand panel: free-text input plus usage instructions. The input id
  # "textInp" is read by server.R to compute the predictions.
  sidebarPanel(
    textInput("textInp", "Please Type Text here","sample text"),
    h6('The next word will be predicted based on the text you type here'),
    # h6('Please type and see.'),
    # textInput("textInp1", "Please Type Text here","of"),
    strong(h3('Instructions')),
    h5('1. Please type a text .'),
    h5('2. As and when you type a text, the next word will be predicted'),
    h5('3. The top prediction is shown and other possible words are also displayed .')
  ),
  # Commented-out second sidebar kept from an earlier iteration of the UI.
  # sidebarPanel(
  #   textInput("textInp1", "Please Type Text here","of"),
  #   h6('The next word will be predicted based on the text you type here'),
  #   h6('Please type and see.')
  #
  #
  # ),
  # Main area: three tabs — predictions, algorithm description, and reports.
  mainPanel(
    tabsetPanel(type = "tabs",
                tabPanel("Predictions",
                         # Outputs nextWord1..nextWord5 are rendered server-side.
                         strong(h4('The top predicted word is:')),
                         em(h3(textOutput("nextWord1"))),
                         br(),
                         h4('Other possible predictions are:'),
                         em(h3(textOutput("nextWord2"))),
                         # h4('The predicted word 3 is'),
                         em(h3(textOutput("nextWord3"))),
                         em(h3(textOutput("nextWord4"))),
                         # h4('The predicted word 3 is'),
                         em(h3(textOutput("nextWord5")))),
                #
                #
                tabPanel("Model Algorithm",
                         mainPanel(
                           includeMarkdown("About.md"))),
                tabPanel("Reports",
                         # HTML("<hr>"), # Add a line
                         h4("The milestone report is available at",
                            a("Rpubs Report",
                              href="http://www.rpubs.com/elankumaran/SwiftKey"))
                )
    )
  )
))
|
bbbfda5da841efbf97f90b322427c6fe7b8c9487 | f69fcac40ce65dc4c0e3e0202d0c0dfab63c3c6a | /07-ngrams.R | cc35782d76a06669c5054d75577a658b2508e6d9 | [
"MIT"
] | permissive | d4tagirl/uruguayan_parliamentary_session_diary | 863545ea161277db7036fc8a3e86d5e21ef3b8a5 | 83b6c04af8603fcdcebd2fd0680be0d0b1047eab | refs/heads/master | 2021-01-25T11:49:06.115262 | 2019-09-19T19:13:33 | 2019-09-19T19:13:33 | 93,946,454 | 16 | 3 | null | 2018-04-19T18:15:58 | 2017-06-10T15:00:59 | R | UTF-8 | R | false | false | 2,647 | r | 07-ngrams.R | library(readr)
library(ggplot2)
library(purrr)
library(dplyr)
library(tidyr)
library(tidytext)
# FIX: stringi (stri_replace_all below) and stringr (str_detect in the plot
# pipelines further down) were used without ever being attached, so the
# script failed at run time. (readr is attached on the script's first line.)
library(stringi)
library(stringr)
# FIX: ggplot2 was loaded twice; the duplicate library() call was removed.
library(viridis)
library(lubridate)
library(rcorpora)

diputados <- readRDS("data/pdf_diputados")
senadores <- readRDS("data/pdf_senadores")

# Drop the line breaks, which are only needed in later analyses.
# NOTE(review): the pattern "\\\n" is a backslash character followed by a
# newline; confirm it removes exactly the intended characters.
diputados <- diputados %>%
  mutate(pdf = stri_replace_all(pdf, replacement = "", regex = "\\\n"))
senadores <- senadores %>%
  mutate(pdf = stri_replace_all(pdf, replacement = "", regex = "\\\n"))
# ------------ ngrams ------------
# Caveat carried over from the original notes: pdftools::read_pdf() reads
# each page's lines left to right, so pages laid out in two columns produce
# interleaved, incoherent lines. For order-sensitive analyses such as n-grams
# this can pair words from different columns as if they were adjacent. We
# proceed anyway, assuming such accidental word pairs are rare enough not to
# reach the top-n once trigrams are ranked by frequency.
# Top-10 trigrams containing "sendic" in the deputies' session diaries.
trigramas_sendic <- diputados %>%
  tidytext::unnest_tokens(ngram, pdf, token = "ngrams", n = 3) %>%
  filter(str_detect(ngram, "sendic")) %>%
  count(ngram, sort = TRUE) %>%
  top_n(10)

ggplot(trigramas_sendic, aes(reorder(ngram, n), n)) +
  geom_col(fill = "red", show.legend = FALSE) +
  xlab(NULL) +
  ylab(NULL) +
  coord_flip() +
  theme_minimal()
# miro trigramas que tienen la palabra ANCAP
diputados %>%
tidytext::unnest_tokens(ngram, pdf, token = "ngrams", n = 3) %>%
filter(str_detect(ngram, "ancap")) %>%
count(ngram, sort = TRUE) %>%
top_n(10) %>%
mutate(ngram = reorder(ngram, n)) %>%
ggplot(aes(ngram, n)) +
geom_col(fill = "red", show.legend = FALSE) +
xlab(NULL) +
ylab(NULL) +
coord_flip() +
theme_minimal()
# miro trigramas que tienen la palabra renuncia
diputados %>%
tidytext::unnest_tokens(ngram, pdf, token = "ngrams", n = 3) %>%
filter(str_detect(ngram, "renuncia")) %>%
count(ngram, sort = TRUE) %>%
top_n(10) %>%
mutate(ngram = reorder(ngram, n)) %>%
ggplot(aes(ngram, n)) +
geom_col(fill = "red", show.legend = FALSE) +
xlab(NULL) +
ylab(NULL) +
coord_flip() +
theme_minimal()
|
a5ae43b7fc277ffec8ecdfe669a5f0aa496c9fbb | d603d1e09427ca593fb0ceb74c15a21b1b307330 | /tests/testthat/test-cleaning.R | 5d31c854eac125a6f17e173a6eae0bf2b9bb863a | [] | no_license | ixpantia/lacrmr | 16fe93d5160906e6a24b8365409e07032a13e5dc | 17940efabe8c69d134f10177f9d7e0b291bb06b3 | refs/heads/master | 2022-07-14T07:34:42.349779 | 2022-07-11T20:16:46 | 2022-07-11T20:16:46 | 169,632,410 | 1 | 3 | null | 2022-07-11T20:16:47 | 2019-02-07T19:49:41 | R | UTF-8 | R | false | false | 1,226 | r | test-cleaning.R | context("Cleaning")
testthat::test_that("List are removed from the pipeline report", {
  # Stub the JSON helpers so the unit under test never touches the network:
  # validation always succeeds and parsing yields the canned fixture.
  mockery::stub(where = get_pipeline_report,
                what  = "jsonlite::validate",
                how   = TRUE)
  mockery::stub(where = get_pipeline_report,
                what  = "jsonlite::fromJSON",
                how   = pipeline_data)

  report <- get_pipeline_report(user_code  = "user_code_test",
                                api_token  = "token_api_test",
                                pipelineid = "pipeline_test")

  # Flattening the list-columns must leave a plain 7-row, 33-column table.
  expect_equal(ncol(report), 33)
  expect_equal(nrow(report), 7)
})
testthat::test_that("Lists are removed from the contacts information", {
  # Same stubbing pattern as above: bypass HTTP/JSON and return the canned
  # contact fixture from the parsing layer.
  mockery::stub(where = get_contact_information,
                what  = "jsonlite::validate",
                how   = TRUE)
  mockery::stub(where = get_contact_information,
                what  = "jsonlite::fromJSON",
                how   = contact_information)

  contact <- get_contact_information(user_code  = "user_code_test",
                                     api_token  = "token_api_test",
                                     contact_id = "123")

  # A single contact must flatten to one row with 20 scalar columns.
  expect_equal(ncol(contact), 20)
  expect_equal(nrow(contact), 1)
})
|
dd8a99c97aa44b82b1e7f7e164105ea93e83cd7e | 2c80e92261d10e6573dcf216fbd69c6a3db10bb6 | /CSTC/conn_cluster.R | 1fa176eba983de8d570da71672205e6155a67136 | [] | no_license | Tonyfer/Working_scripts | 95ef63fd8e06e192052a34a2f5669929824e7982 | 89f29ae0ade52fc9ea0f67d45e7ae4098ee6d9f3 | refs/heads/master | 2020-04-28T23:14:19.297778 | 2019-09-27T14:35:02 | 2019-09-27T14:35:02 | 175,647,685 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,672 | r | conn_cluster.R | conn_l = list()
# Per-network lists of connectivity tables, split into three scan-site groups
# using the group code in hcp_si$group (0 = SI, 1 = RU, 2 = CBIC).
# NOTE(review): `hcp_si` is read here but not defined in this script -- it
# must already exist in the workspace; confirm.  The `conn_l` list initialised
# just above appears unused.
si_l = list()
ru_l = list()
cbic_l = list()
for (i in c(1:7)){
  # Drop the first column (row index written by the CSV exporter -- confirm).
  conn = read.csv(paste0('~/Desktop/CSTC/122_conn',i,'_after.csv'), as.is = T)[,-1]
  si_l[[i]] = conn[hcp_si$group == 0,]
  ru_l[[i]] = conn[hcp_si$group == 1,]
  cbic_l[[i]] = conn[hcp_si$group == 2,]
}
# Common age grid (5 to 21 years, step 0.1) on which all fitted GAM curves
# below are evaluated; the column must be named `age` to match the formula.
age_vec = seq(5,21,0.1)
new_age = data.frame(age_vec)
colnames(new_age) = 'age'
##########################################################################
#####################PREDICTION###########################################
##########################################################################
# For each of the 7 networks, fit one GAM per connectivity column and per
# site: value ~ s(age) with automatic smoothing-parameter selection (sp = -1),
# then evaluate the fitted curve on the shared age grid `new_age`.
# Collected per site:
#   d_*_l        -- matrix of fitted curves (rows = connections, cols = ages)
#   r_square_*_l -- adjusted R^2 of each fit
# The last 6 columns of every table are excluded from the fitting loop
# (presumably demographic covariates such as `age` -- confirm).
# The large commented-out sections wrote one diagnostic plot per fit to PDF,
# ordered by decreasing R^2.
r_square_si_l = list()
r_square_ru_l = list()
r_square_cbic_l = list()
d_si_l = list()
d_ru_l = list()
d_cbic_l = list()
for (i in c(1:7)){
  # ---------------- SI site ----------------
  d_si = double()
  r_square_si = c()
  ssp_si=c()
  for (c in colnames(si_l[[i]])[1:(dim(si_l[[i]])[2]-6)]){
    gami = mgcv::gam(si_l[[i]][,c]~s(age,sp=-1), data = si_l[[i]])
    # NOTE(review): `predict` shadows stats::predict inside this loop.
    predict = predict(gami, newdata = new_age)
    d_si = rbind(d_si,predict)
    r_square_si = c(r_square_si, summary(gami)$r.sq)
    ssp_si = c(ssp_si,gami['sp'][[1]])
  }
  rownames(d_si) = colnames(si_l[[i]])[1:(dim(si_l[[i]])[2]-6)]
  names(r_square_si) = colnames(si_l[[i]])[1:(dim(si_l[[i]])[2]-6)]
  names(ssp_si) = colnames(si_l[[i]])[1:(dim(si_l[[i]])[2]-6)]
  d_si_l[[i]] = d_si
  r_square_si_l[[i]] = r_square_si
  #ooo = rownames(d_si)[order(r_square_si, decreasing = T)]
  #pdf(paste0('~/Desktop/Jae_fucking_loser/after_444_conn',i,'_si_auto.pdf'))
  #for (o in ooo){
  # df_pre = data.frame(age = age_vec, value = d_si[o,])
  # df_data = data.frame(age = si_l[[i]][,'age'], value = si_l[[i]][,o])
  # p2 = ggplot() +
  #   geom_line(df_pre, mapping = aes(x=age, y=value), col='Red') +
  #   #geom_point(df, mapping = aes(x=age, y=v)) +
  #   geom_point(df_data, mapping = aes(x = age, y =value), alpha = 0.5)+
  #   ggtitle(paste('SI ',o,' r_square ', round(r_square_si[o],5), ' sp = ', round(ssp_si[o],3),sep=''))+
  #   theme_bw() +
  #   theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))
  # print(p2)
  # }
  # dev.off()
  # ---------------- RU site (same procedure) ----------------
  d_ru = double()
  r_square_ru = c()
  ssp_ru=c()
  for (c in colnames(ru_l[[i]])[1:(dim(ru_l[[i]])[2]-6)]){
    gami = mgcv::gam(ru_l[[i]][,c]~s(age,sp=-1), data = ru_l[[i]])
    predict = predict(gami, newdata = new_age)
    d_ru = rbind(d_ru,predict)
    r_square_ru = c(r_square_ru, summary(gami)$r.sq)
    ssp_ru = c(ssp_ru,gami['sp'][[1]])
  }
  rownames(d_ru) = colnames(ru_l[[i]])[1:(dim(ru_l[[i]])[2]-6)]
  names(r_square_ru) = colnames(ru_l[[i]])[1:(dim(ru_l[[i]])[2]-6)]
  names(ssp_ru) = colnames(ru_l[[i]])[1:(dim(ru_l[[i]])[2]-6)]
  d_ru_l[[i]] = d_ru
  r_square_ru_l[[i]] = r_square_ru
  #ooo = rownames(d_ru)[order(r_square_ru, decreasing = T)]
  #pdf(paste0('~/Desktop/Jae_fucking_loser/after_444_conn',i,'_ru_auto.pdf'))
  # for (o in ooo){
  # df_pre = data.frame(age = age_vec, value = d_ru[o,])
  # df_data = data.frame(age = ru_l[[i]][,'age'], value = ru_l[[i]][,o])
  # p2 = ggplot() +
  #  geom_line(df_pre, mapping = aes(x=age, y=value), col='Red') +
  #  #geom_point(df, mapping = aes(x=age, y=v)) +
  #  geom_point(df_data, mapping = aes(x = age, y =value), alpha = 0.5)+
  #  ggtitle(paste('RU ',o,' r_square ', round(r_square_ru[o],5), ' sp = ', round(ssp_ru[o],3),sep=''))+
  #  theme_bw() +
  #  theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))
  # print(p2)
  # }
  # dev.off()
  # ---------------- CBIC site (same procedure) ----------------
  d_cbic = double()
  r_square_cbic = c()
  ssp_cbic=c()
  for (c in colnames(cbic_l[[i]])[1:(dim(cbic_l[[i]])[2]-6)]){
    gami = mgcv::gam(cbic_l[[i]][,c]~s(age,sp=-1), data = cbic_l[[i]])
    predict = predict(gami, newdata = new_age)
    d_cbic = rbind(d_cbic,predict)
    r_square_cbic = c(r_square_cbic, summary(gami)$r.sq)
    ssp_cbic = c(ssp_cbic,gami['sp'][[1]])
  }
  rownames(d_cbic) = colnames(cbic_l[[i]])[1:(dim(cbic_l[[i]])[2]-6)]
  names(r_square_cbic) = colnames(cbic_l[[i]])[1:(dim(cbic_l[[i]])[2]-6)]
  names(ssp_cbic) = colnames(cbic_l[[i]])[1:(dim(cbic_l[[i]])[2]-6)]
  d_cbic_l[[i]] = d_cbic
  r_square_cbic_l[[i]] = r_square_cbic
  # ooo = rownames(d_cbic)[order(r_square_cbic, decreasing = T)]
  # pdf(paste0('~/Desktop/Jae_fucking_loser/after_444_conn',i,'_cbic_auto.pdf'))
  #
  #
  # for (o in ooo){
  # df_pre = data.frame(age = age_vec, value = d_cbic[o,])
  # df_data = data.frame(age = cbic_l[[i]][,'age'], value = cbic_l[[i]][,o])
  # p2 = ggplot() +
  #  geom_line(df_pre, mapping = aes(x=age, y=value), col='Red') +
  #  #geom_point(df, mapping = aes(x=age, y=v)) +
  #  geom_point(df_data, mapping = aes(x = age, y =value), alpha = 0.5)+
  #  ggtitle(paste('CBIC ',o,' r_square ', round(r_square_cbic[o],5), ' sp = ', round(ssp_cbic[o],3),sep=''))+
  #  theme_bw() +
  #  theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black"))
  # print(p2)
  # }
  # dev.off()
  #
}
library(dendextend)
library(colorspace)
library(ggplot2)
# Hierarchical clustering of the fitted age-curves, per network and per site.
# Distance between two curves = 1 - Pearson correlation across the age grid
# (cor of t(d) correlates rows, i.e. connections); average linkage.
# The commented-out *_t_* lines clustered only rows 383:444, apparently from
# an earlier 444-region version of the pipeline.
hclust_c_si_l = list()
hclust_c_ru_l = list()
hclust_c_cbic_l = list()
for (i in c(1:7)){
  dist_mat_si <- cor(t(d_si_l[[i]]))
  dist_mat_si = 1 - dist_mat_si
  dist_c_si = as.dist(dist_mat_si)
  #dist_t_si = as.dist(dist_mat_si[383:444, 383:444])
  hclust_c_si_l[[i]] <- hclust(dist_c_si, method = 'average')
  #hclust_t_si <- hclust(dist_t_si, method = 'average')
  dist_mat_ru <- cor(t(d_ru_l[[i]]))
  dist_mat_ru = 1 - dist_mat_ru
  dist_c_ru = as.dist(dist_mat_ru)
  #dist_t_ru = as.dist(dist_mat_ru[383:444, 383:444])
  hclust_c_ru_l[[i]] <- hclust(dist_c_ru, method = 'average')
  #hclust_t_ru <- hclust(dist_t_ru, method = 'average')
  dist_mat_cbic <- cor(t(d_cbic_l[[i]]))
  dist_mat_cbic = 1 - dist_mat_cbic
  dist_c_cbic = as.dist(dist_mat_cbic)
  #dist_t_cbic = as.dist(dist_mat_cbic[383:444, 383:444])
  hclust_c_cbic_l[[i]] <- hclust(dist_c_cbic, method = 'average')
  #hclust_t_cbic <- hclust(dist_t_cbic, method = 'average')
}
find_match = function(matric1, matric2){
  # Greedily pair each row (curve) of `matric1` with the most similar,
  # not-yet-taken row of `matric2`, similarity being the Pearson correlation
  # of the two rows' profiles.  Returns a character vector: element i is the
  # row index (as a string) of `matric2` matched to row i of `matric1`.
  rownames(matric1) = seq_len(nrow(matric1))
  rownames(matric2) = seq_len(nrow(matric2))
  # Rows become columns under t(), so cr[i, j] = cor(row i of matric1,
  # row j of matric2), computed on complete observations only.
  cr = as.data.frame(cor(t(matric1), t(matric2), use = 'complete.obs'))
  # Sentinel column of -1: guarantees something pickable even when matric1
  # has more rows than matric2 (every real correlation is >= -1).
  cr = cbind(cr, rep(-1, nrow(cr)))
  taken = c()
  for (row_i in seq_len(nrow(cr))) {
    remaining = cr[row_i, !colnames(cr) %in% taken]
    taken = c(taken, colnames(remaining)[which.max(remaining)])
  }
  taken
}
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  # Draw several ggplot objects on one page in a grid.
  # Plots may be passed directly via `...` or as a list via `plotlist`.
  # `layout` (a matrix of plot indices) overrides `cols`; `file` is accepted
  # for call compatibility but never used.
  library(grid)

  all_plots <- c(list(...), plotlist)
  n_plots <- length(all_plots)

  # Default layout: fill a ceiling(n/cols) x cols grid, column by column.
  if (is.null(layout)) {
    n_rows <- ceiling(n_plots / cols)
    layout <- matrix(seq(1, cols * n_rows), ncol = cols, nrow = n_rows)
  }

  if (n_plots == 1) {
    print(all_plots[[1]])
  } else {
    # One viewport cell per layout slot; each plot is printed into the
    # cell(s) whose layout entry equals its index.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    for (plot_idx in 1:n_plots) {
      cell <- as.data.frame(which(layout == plot_idx, arr.ind = TRUE))
      print(all_plots[[plot_idx]],
            vp = viewport(layout.pos.row = cell$row,
                          layout.pos.col = cell$col))
    }
  }
}
# Main output loop: for every network (1..7) and every cut height k = 2..5:
#   * cut the SI/RU/CBIC dendrograms into k clusters,
#   * match each clustering to the reference clustering (network 1, SI) via
#     find_match() so cluster indices/colours agree across panels,
#   * draw colour-matched dendrograms into cluster_hcp.pdf,
#   * write per-cluster mean +/- sd curve plots and member lists to disk.
# NOTE(review): setwd() is called without restoring the previous directory,
# so the working directory keeps moving between iterations (the dir.create /
# setwd paths are absolute, so output still lands in the intended place).
for (net in c(1:7)){
  dir.create(paste0('~/clusters_analysis/cluster_ana_122_conn', net, '_2'))
  setwd(paste0('~/clusters_analysis/cluster_ana_122_conn', net, '_2'))
  pdf('cluster_hcp.pdf')
  for (c_number in c(2:5)){
    # Reference clustering: SI dendrogram of network 1 cut into c_number
    # clusters; its row-scaled per-cluster mean curves define the canonical
    # cluster identities the other clusterings are matched against.
    cut_t_si <- cutree(hclust_c_si_l[[1]], k=c_number)
    mean_t_si = double()
    for(i in c(1:c_number)){
      # Singleton clusters need the transpose handled differently because
      # the single-row subset drops to a vector.
      if (sum(cut_t_si==i) ==1) {
        after_scale = t(scale(d_si_l[[1]][cut_t_si==i, ]))
      } else{
        after_scale = t(scale(t(d_si_l[[1]][cut_t_si==i, ])))
      }
      mean = apply(after_scale, 2, mean, na.rm = T)
      mean_t_si = rbind(mean_t_si, mean)
    }
    clu_t_si = unique(cut_t_si[labels(hclust_c_si_l[[1]])])
    # Current network, SI: cluster, compute mean curves, map onto reference.
    cut_c_si <- cutree(hclust_c_si_l[[net]], k=c_number)
    mean_c_si = double()
    for(i in c(1:c_number)){
      if (sum(cut_c_si==i)==1) {
        after_scale = t(scale(d_si_l[[net]][cut_c_si==i, ]))
      } else{
        after_scale = t(scale(t(d_si_l[[net]][cut_c_si==i, ])))
      }
      mean = apply(after_scale, 2, mean, na.rm = T)
      mean_c_si = rbind(mean_c_si, mean)
    }
    ind_c_si = as.integer(find_match(mean_c_si, mean_t_si))
    clu_c_si = unique(cut_c_si[labels(hclust_c_si_l[[net]])])
    # Current network, RU.
    cut_c_ru <- cutree(hclust_c_ru_l[[net]], k=c_number)
    mean_c_ru = double()
    for(i in c(1:c_number)){
      if (sum(cut_c_ru==i)==1) {
        after_scale = t(scale(d_ru_l[[net]][cut_c_ru==i, ]))
      } else{
        after_scale = t(scale(t(d_ru_l[[net]][cut_c_ru==i, ])))
      }
      mean = apply(after_scale, 2, mean, na.rm = T)
      mean_c_ru = rbind(mean_c_ru, mean)
    }
    ind_c_ru = as.integer(find_match(mean_c_ru, mean_t_si))
    clu_c_ru = unique(cut_c_ru[labels(hclust_c_ru_l[[net]])])
    # Current network, CBIC.
    cut_c_cbic <- cutree(hclust_c_cbic_l[[net]], k=c_number)
    mean_c_cbic = double()
    for(i in c(1:c_number)){
      if (sum(cut_c_cbic==i)==1) {
        after_scale = t(scale(d_cbic_l[[net]][cut_c_cbic==i, ]))
      } else{
        after_scale = t(scale(t(d_cbic_l[[net]][cut_c_cbic==i, ])))
      }
      mean = apply(after_scale, 2, mean, na.rm = T)
      mean_c_cbic = rbind(mean_c_cbic, mean)
    }
    ind_c_cbic = as.integer(find_match(mean_c_cbic, mean_t_si))
    clu_c_cbic = unique(cut_c_cbic[labels(hclust_c_cbic_l[[net]])])
    # One fixed palette so matched clusters share a colour across panels.
    col_all = rainbow_hcl(10,c=90)
    # NOTE(review): the first col_si assignment is immediately overwritten.
    col_si = col_all[clu_t_si]
    col_si = col_all[ind_c_si[clu_c_si]]
    col_ru = col_all[ind_c_ru[clu_c_ru]]
    col_cbic = col_all[ind_c_cbic[clu_c_cbic]]
    dir.create(paste(c_number,'_cluster',sep=''))
    par(mfrow=c(3,1))
    avg_dend_obj <- as.dendrogram(hclust_c_si_l[[net]])
    avg_col_dend <- color_branches(avg_dend_obj, k=c_number, col = col_si)
    plot(avg_col_dend, main = 'dendrogram for SI')
    avg_dend_obj <- as.dendrogram(hclust_c_ru_l[[net]])
    avg_col_dend <- color_branches(avg_dend_obj, k=c_number, col = col_ru)
    plot(avg_col_dend, main = 'dendrogram for RU')
    avg_dend_obj <- as.dendrogram(hclust_c_cbic_l[[net]])
    avg_col_dend <- color_branches(avg_dend_obj, k=c_number, col = col_cbic)
    plot(avg_col_dend, main = 'dendrogram for CBIC')
    dir.create(paste(c_number,'_cluster/','SI',sep = ''))
    dir.create(paste(c_number,'_cluster/','RU',sep = ''))
    dir.create(paste(c_number,'_cluster/','CBIC',sep = ''))
    # plot_list is filled so each matched cluster i occupies one triple:
    # slots 3i-2 (SI), 3i-1 (RU), 3i (CBIC); multiplot() lays them out below.
    plot_list=list()
    # Relabel SI cluster ids into the reference numbering.
    cut_c_si_v = rep(0, length(cut_c_si))
    for(i in 1:c_number){
      cut_c_si_v[cut_c_si==i] = ind_c_si[i]
    }
    for(i in unique(cut_c_si_v)){
      dir.create(paste(c_number,'_cluster/','SI/cluster_',i,sep = ''))
      if (sum(cut_c_si_v==i)==1) {
        after_scale = t(scale(d_si_l[[net]][cut_c_si_v==i, ]))
      } else{
        after_scale = t(scale(t(d_si_l[[net]][cut_c_si_v==i, ])))
      }
      mean = apply(after_scale, 2, mean, na.rm = T)
      sd = apply(after_scale, 2, sd, na.rm = T)
      df = data.frame(age = age_vec, mean = mean, sd = sd)
      p = ggplot(df, aes(x=age, y=mean)) +
        geom_line(col=col_all[i]) +
        geom_point(col=col_all[i])+
        geom_errorbar(aes(ymin=mean-sd, ymax=mean+sd), width=0.1,alpha = 0.1, col=col_all[i])+
        ylab('value')+labs(title = paste('cluster',i,'\n N =', sum(cut_c_si_v==i)))+
        theme(axis.text=element_text(size=6), axis.title=element_text(size=6))
      plot_list[[3*i-2]]=p
      write(rownames(d_si_l[[net]])[cut_c_si_v==i],paste(c_number,'_cluster/','SI/cluster_',i,'/variable.txt',sep = ''))
    }
    # Relabel RU cluster ids into the reference numbering.
    cut_c_ru_v = rep(0, length(cut_c_ru))
    for(i in 1:c_number){
      cut_c_ru_v[cut_c_ru==i] = ind_c_ru[i]
    }
    #col = rainbow_hcl(length(unique(cut_avg)),c=90)
    for(i in unique(cut_c_ru_v)){
      dir.create(paste(c_number,'_cluster/','RU/cluster_',i,sep = ''))
      if (sum(cut_c_ru_v==i)==1) {
        after_scale = t(scale(d_ru_l[[net]][cut_c_ru_v==i, ]))
      } else{
        after_scale = t(scale(t(d_ru_l[[net]][cut_c_ru_v==i, ])))
      }
      mean = apply(after_scale, 2, mean, na.rm = T)
      sd = apply(after_scale, 2, sd, na.rm = T)
      df = data.frame(age = age_vec, mean = mean, sd = sd)
      p = ggplot(df, aes(x=age, y=mean)) +
        geom_line(col=col_all[i]) +
        geom_point(col=col_all[i])+
        geom_errorbar(aes(ymin=mean-sd, ymax=mean+sd), width=0.1,alpha = 0.1, col=col_all[i])+
        ylab('value')+labs(title = paste('cluster',i,'\n N =', sum(cut_c_ru_v==i)))+
        theme(axis.text=element_text(size=6), axis.title=element_text(size=6))
      plot_list[[3*i-1]]=p
      write(rownames(d_ru_l[[net]])[cut_c_ru_v==i],paste(c_number,'_cluster/','RU/cluster_',i,'/variable.txt',sep = ''))
    }
    # Relabel CBIC cluster ids into the reference numbering.
    cut_c_cbic_v = rep(0, length(cut_c_cbic))
    for(i in 1:c_number){
      cut_c_cbic_v[cut_c_cbic==i] = ind_c_cbic[i]
    }
    #col = rainbow_hcl(length(unique(cut_avg)),c=90)
    for(i in unique(cut_c_cbic_v)){
      dir.create(paste(c_number,'_cluster/','CBIC/cluster_',i,sep = ''))
      if (sum(cut_c_cbic_v==i)==1) {
        after_scale = t(scale(d_cbic_l[[net]][cut_c_cbic_v==i, ]))
      } else{
        after_scale = t(scale(t(d_cbic_l[[net]][cut_c_cbic_v==i, ])))
      }
      mean = apply(after_scale, 2, mean, na.rm = T)
      sd = apply(after_scale, 2, sd, na.rm = T)
      df = data.frame(age = age_vec, mean = mean, sd = sd)
      p = ggplot(df, aes(x=age, y=mean)) +
        geom_line(col=col_all[i]) +
        geom_point(col=col_all[i])+
        geom_errorbar(aes(ymin=mean-sd, ymax=mean+sd), width=0.1,alpha = 0.1, col=col_all[i])+
        ylab('value')+labs(title = paste('cluster',i,'\n N =', sum(cut_c_cbic_v==i)))+
        theme(axis.text=element_text(size=6), axis.title=element_text(size=6))
      plot_list[[3*i]]=p
      write(rownames(d_cbic_l[[net]])[cut_c_cbic_v==i],paste(c_number,'_cluster/','CBIC/cluster_',i,'/variable.txt',sep = ''))
    }
    multiplot(plotlist = plot_list, cols = c_number)
  }
  dev.off()
}
212b2674191443f90e8421c7ba60d564e2f2e1b4 | d746fef241f9a0e06ae48cc3b1fe72693c43d808 | /tesseract/rotate/d7xk57-012.r | 94ec25e80569d0d6563ae8fb9219f823097f6dc4 | [
"MIT"
] | permissive | ucd-library/wine-price-extraction | 5abed5054a6e7704dcb401d728c1be2f53e05d78 | c346e48b5cda8377335b66e4a1f57c013aa06f1f | refs/heads/master | 2021-07-06T18:24:48.311848 | 2020-10-07T01:58:32 | 2020-10-07T01:58:32 | 144,317,559 | 5 | 0 | null | 2019-10-11T18:34:32 | 2018-08-10T18:00:02 | JavaScript | UTF-8 | R | false | false | 199 | r | d7xk57-012.r | r=358.59
https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7xk57/media/images/d7xk57-012/svc:tesseract/full/full/358.59/default.jpg Accept:application/hocr+xml
|
e6c4eca6f2dec254644cba7754a9c1d5f7ab47f1 | d4ca19e932de004240920f1a6728eea3244d4295 | /Import.r | e0c20e201409c6ca8bf8aa3a2517f2ee998c1818 | [] | no_license | ejjunju/wbm | 5b23c6a0239cca75c08faa4c369da17c5b8cc2c3 | c08f09d6bdffa8e9a1375bc0d788fddbf563d60f | refs/heads/master | 2016-09-06T05:06:29.814320 | 2013-10-01T09:14:05 | 2013-10-01T09:14:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 561 | r | Import.r | %load('testData.mat') Load data if it exists
% Calibration driver for the Rusumo water-balance model (wbm): loads observed
% discharge, meteorological forcing, fixed grid data and free parameters,
% then plots the observed-flow time series.
% NOTE(review): this is MATLAB code despite the .r file extension; the
% import* helpers are project functions defined elsewhere -- confirm their
% output signatures against the repository.
cd 'C:\DATA\STATIONS\wbm_Rusumo\Calib\matlab'
%DATA IMPORT
%Control/initialising
[nm,nc,petype,wbmtype] = importInit('init.txt');%Time & Qobs
[Time,Qobs] = importTimeQobs('Qobs.txt');
[prec,tmpr,epot,d,h] = importMet('met.txt');%met data (nrows=nm*nC)[prec,tmpr,epot,d,h]
[ir,ic,z,lake,cellarea,Lat] = importFxd('fxd.txt');%Fixed
[cp,params,lower1,upper1,FLG] = importPar('Free_par.txt');%Parameters
% Wrap discharge into a timeseries object for dated plotting.
Qobs=timeseries(Qobs,Time);
Qobs.TimeInfo.Format = 'mmm dd, yy'; % Set format for display on x-axis.
plot(Qobs);
17995833d93eb0f92bf706135f61413b4117483a | 0e2383401e7648f7715711f74b8d3f0af5d58960 | /analysis/heuristic_analysis_setup_compare.R | 7ba4ee70c6c20f1f9803253ac126f74c02693e8e | [] | no_license | nareshram256/grid_robust_optimization | 44928c27b08350bf10361439c73941f7f2c075fa | 8d4c0c4b04c51c70efc8ed064f3de4eef8c081ce | refs/heads/master | 2022-11-22T17:48:41.077704 | 2020-07-23T08:06:57 | 2020-07-23T08:06:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,856 | r | heuristic_analysis_setup_compare.R | # Analysis for the heuristic
# Compare search setup parameters
# Input: one heuristic log record per row; columns include the run's setup
# (upgrade_selection_bias, min_neighborhoods, min_neighbors_per_neighborhood),
# search progress (neighborhoods_searched, current_supply) and a wall-clock
# timestamp (current_time).
heuristic <- read_csv("c:/temp/grid_cascade_output/heuristic_results.csv")

# Earliest timestamp per (instance, budget, setup) combination -- the start
# time from which per-record runtimes (in minutes) are measured below.
heuristic.compare.starttime <- heuristic %>%
  group_by(args.instance_location, budget, upgrade_selection_bias, min_neighborhoods, min_neighbors_per_neighborhood) %>%
  summarize(starttime = min(current_time))

# First record of every searched neighborhood: marks when the heuristic
# "jumped" to a new neighborhood, with supply/time at that moment.
neighborhood.jumps <- heuristic %>%
  mutate(rundescription = paste0("(", upgrade_selection_bias*100, "%, ", min_neighborhoods,
                                 ", ", min_neighbors_per_neighborhood, ")")) %>%
  group_by(args.instance_location, budget, upgrade_selection_bias,
           min_neighborhoods, min_neighbors_per_neighborhood,
           neighborhoods_searched, rundescription) %>%
  left_join(heuristic.compare.starttime) %>%
  mutate(runtime = (current_time - starttime)/60) %>%
  select(-starttime) %>%
  summarize(neighborhood.jump.time = min(runtime),
            current_time = min(current_time),
            current_supply = min(current_supply))

# Full trace with runtimes relative to each run's start.
heuristic.compare <- heuristic %>%
  mutate(rundescription = paste0("(", upgrade_selection_bias*100, "%, ", min_neighborhoods,
                                 ", ", min_neighbors_per_neighborhood, ")")) %>%
  left_join(heuristic.compare.starttime) %>%
  mutate(runtime = (current_time - starttime)/60) %>%
  select(-starttime)

# Supply-vs-runtime curves per setup, with neighborhood jumps overlaid as
# points; rendered interactively via plotly.
hr.comp.plot <- ggplot(heuristic.compare, aes(y = current_supply, x = runtime)) +
  geom_line(aes(color = rundescription), size = 2, alpha = 0.5) +
  guides(color = guide_legend("(Selection bias,\nmin neighborhoods,\nmin neighbors per neighborhood)")) +
  geom_point(data = neighborhood.jumps, aes(x = neighborhood.jump.time,
                                            y = current_supply,
                                            color = rundescription), size = 2)

plotly::ggplotly(hr.comp.plot)
d4a13e97b64a1a611fa0a55089e0467956e33386 | b445f8db1b80b6ffdf752312e738bc2bc392c310 | /Project_Classification_Jay.R | c3886e67cb7ea3aed9d04f03dca577388c4e21df | [] | no_license | jay953010/Rule-Based-Classifier-Algorithms-in-R | 136fe4429437833e123bf9457633daa965942ae1 | 86b21d6ef40ed467ae473751b880a0c3025a9b91 | refs/heads/master | 2021-07-09T22:10:34.091733 | 2017-10-10T03:17:37 | 2017-10-10T03:17:37 | 106,363,294 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,554 | r | Project_Classification_Jay.R |
library(caret) #Library for data tuning and data preprocessing
library(class) #Library to use for KNN Algorithm installtion package install.packages("class)
library(RWeka) #Package for C45 rule
library(party)
library(e1071) #Package to use for Support Vector Machine
library(xlsx)
library(gmodels)
#Taking Input Data
#Reads the life-expectancy CSV from the working directory and returns a data
#frame containing only columns 3-6, the fields used by every classifier below.
InputData <- function() {
  raw <- read.csv("Life_Expectancy_Dataset.csv")
  # Only columns 3 to 6 are relevant for classification
  keep <- raw[, 3:6]
  data.frame(keep)
}
#Dividing the Input Dataset
#Returns one label per row of DataFrame: 1 = training (~80%), 2 = test
#(~20%).  `x` seeds the RNG, so the same seed always yields the same split.
divideDataset <- function(DataFrame, x) {
  set.seed(x)
  sample(2, size = nrow(DataFrame), replace = TRUE, prob = c(0.8, 0.2))
}
#KNN algorithm
#Classifies TestData by k-nearest-neighbours on the first three feature
#columns; column 4 of TrainingData supplies the class labels.  Returns the
#factor of predicted labels; prob=TRUE attaches the winning-vote proportion
#as an attribute.
myKNN <- function(TrainingData,TestData) {
  CLabel <- TrainingData[,4]
  # NOTE(review): k = 14.5 is fractional, but class::knn expects an integer
  # neighbour count -- confirm whether 14 or 15 was intended.
  return(knn(train = TrainingData[,1:3], test = TestData[,1:3],cl = CLabel, k=14.5,prob=TRUE))
}
#Support Vector Machine Algorithm
#Fits a linear-kernel C-classification SVM predicting Continent from the
#remaining columns of TrainingData (cost = 10, no feature scaling).
mySVM <- function(TrainingData) {
  # Fix: the original passed `Scale = FALSE`, but e1071::svm()'s argument is
  # lower-case `scale`; the capitalised name fell into `...` and was ignored,
  # so the features were silently scaled anyway.  Use the real argument name.
  svm(Continent ~ ., data = TrainingData, type = "C-classification",
      kernel = "linear", cost = 10, scale = FALSE)
}
#Predicts continent labels for TestData with a fitted svm model `svmfit1`;
#returns the factor of predicted classes (the function's last expression).
mySVMPredict <- function(svmfit1,TestData) {
  # NOTE(review): predict.svm has no `type` argument -- "C-classification"
  # goes into `...` and appears to be ignored.  Passing all four columns
  # (label included) presumably works because the formula interface selects
  # the predictors it needs by name; confirm both points against e1071 docs.
  predict(svmfit1, TestData[,1:4], type = "C-classification")
}
#C45 Decision Tree Algorithm
#Fits a Weka J48 (C4.5) tree predicting Continent from the other columns;
#see RWeka::WOW("J48") for the meaning of the R/C/M control options.
myC45 <- function(TrainingData) {
  tree_ctrl <- Weka_control(R = FALSE, C = 0.5, M = 0)
  J48(Continent ~ ., data = TrainingData, control = tree_ctrl)
}
#Ripper Decision Tree Algorithm
#Fits a Weka JRip (RIPPER) rule learner predicting Continent; see
#RWeka::WOW("JRip") for the meaning of the O/F/N control options.
myRipper <- function(TrainingData) {
  rule_ctrl <- Weka_control(O = 0, F = 7, N = 1)
  JRip(Continent ~ ., data = TrainingData, control = rule_ctrl)
}
#Prediction Algorithm for test data for rule based classifier algorithm
#Returns predict()'s class predictions for model `m` on the first three
#(feature) columns of `TestData`.
myC45RipPredict <- function(m,TestData){
  # Fix: the original predicted on the global `TestData1` instead of the
  # `TestData` argument, so whatever the caller passed in was ignored.
  # All existing call sites pass TestData1, so their behaviour is unchanged.
  predict(m, newdata = TestData[,1:3])
}
# Evaluate all four classifiers on five different random 80/20 splits
# (seeds 32234, 1234, 2018, 1264, 1784).  The five numbered sections below
# are identical apart from the seed -- a natural candidate for a loop.
# NOTE(review): the bare confusionMatrix(...) calls only print their results
# when this script is run interactively; under source() they are discarded.
DataFrame <- InputData() #Taking input data
#1
#Dividing Data into Training (80%) and Test (20%) set
CreatePartition <- divideDataset(DataFrame,32234)
TrainingData1 <- DataFrame[CreatePartition==1,1:4]
TestData1 <- DataFrame[CreatePartition==2,1:4]
#Implementation and Result of KNN
Test_Prediction <- myKNN(TrainingData1,TestData1)
confusionMatrix(Test_Prediction,TestData1[,4]) #Result and analysis of KNN
#Implementation and Result of Support Vector Machine
svmfit <- mySVM(TrainingData1)
p<-mySVMPredict(svmfit,TestData1)
confusionMatrix(p,TestData1[,4]) #Result and analysis of SVM
#print(svmfit)
#plot(p)
#Implementation and result of C4.5
m1 <- myC45(TrainingData1)
p1<- myC45RipPredict(m1,TestData1)
confusionMatrix(p1,TestData1[,4]) #Result and analysis of C45
#Implementation and result of Ripper
m2 <- myRipper(TrainingData1)
p2<- myC45RipPredict(m2,TestData1)
confusionMatrix(p2,TestData1[,4]) #Result and analysis of Ripper
#2
#Dividing Data into Training (80%) and Test (20%) set
CreatePartition <- divideDataset(DataFrame,1234)
TrainingData1 <- DataFrame[CreatePartition==1,1:4]
TestData1 <- DataFrame[CreatePartition==2,1:4]
#Implementation and Result of KNN
Test_Prediction <- myKNN(TrainingData1,TestData1)
confusionMatrix(Test_Prediction,TestData1[,4]) #Result and analysis of KNN
#Implementation and Result of Support Vector Machine
svmfit <- mySVM(TrainingData1)
p<-mySVMPredict(svmfit,TestData1)
confusionMatrix(p,TestData1[,4]) #Result and analysis of SVM
#print(svmfit)
#plot(p)
#Implementation and Result of C4.5
m1 <- myC45(TrainingData1)
p1<- myC45RipPredict(m1,TestData1)
confusionMatrix(p1,TestData1[,4]) #Result and analysis of C45
#Implementation and Result of Ripper
m2 <- myRipper(TrainingData1)
p2<- myC45RipPredict(m2,TestData1)
confusionMatrix(p2,TestData1[,4]) #Result and analysis of Ripper
#3)Dividing Data into Training (80%) and Test (20%) set
CreatePartition <- divideDataset(DataFrame,2018)
TrainingData1 <- DataFrame[CreatePartition==1,1:4]
TestData1 <- DataFrame[CreatePartition==2,1:4]
#Implementation and Result of KNN
Test_Prediction <- myKNN(TrainingData1,TestData1)
confusionMatrix(Test_Prediction,TestData1[,4]) #Result and analysis of KNN
#Implementation and Result of Support Vector Machine
svmfit <- mySVM(TrainingData1)
p<-mySVMPredict(svmfit,TestData1)
confusionMatrix(p,TestData1[,4]) #Result and analysis of SVM
#print(svmfit)
#plot(p)
#Implementation and Result of C4.5
m1 <- myC45(TrainingData1)
p1<- myC45RipPredict(m1,TestData1)
confusionMatrix(p1,TestData1[,4]) #Result and analysis of C45
#Implementation and Result of Ripper
m2 <- myRipper(TrainingData1)
p2<- myC45RipPredict(m2,TestData1)
confusionMatrix(p2,TestData1[,4]) #Result and analysis of Ripper
#4
#Dividing Data into Training (80%) and Test (20%) set
CreatePartition <- divideDataset(DataFrame,1264)
TrainingData1 <- DataFrame[CreatePartition==1,1:4]
TestData1 <- DataFrame[CreatePartition==2,1:4]
#Implementation and Result of KNN
Test_Prediction <- myKNN(TrainingData1,TestData1)
confusionMatrix(Test_Prediction,TestData1[,4]) #Result and analysis of KNN
#Implementation and Result of Support Vector Machine
svmfit <- mySVM(TrainingData1)
p<-mySVMPredict(svmfit,TestData1)
confusionMatrix(p,TestData1[,4]) #Result and analysis of SVM
#print(svmfit)
#plot(p)
#Implementation and Result of C4.5
m1 <- myC45(TrainingData1)
p1<- myC45RipPredict(m1,TestData1)
confusionMatrix(p1,TestData1[,4]) #Result and analysis of C45
#Implementation and Result of Ripper
m2 <- myRipper(TrainingData1)
p2<- myC45RipPredict(m2,TestData1)
confusionMatrix(p2,TestData1[,4]) #Result and analysis of Ripper
#5
#Dividing Data into Training (80%) and Test (20%) set
CreatePartition <- divideDataset(DataFrame,1784)
TrainingData1 <- DataFrame[CreatePartition==1,1:4]
TestData1 <- DataFrame[CreatePartition==2,1:4]
#Implementation and Result of KNN
Test_Prediction <- myKNN(TrainingData1,TestData1)
confusionMatrix(Test_Prediction,TestData1[,4]) #Result and analysis of KNN
#Implementation and Result of Support Vector Machine
svmfit <- mySVM(TrainingData1)
p<-mySVMPredict(svmfit,TestData1)
(confusionMatrix(p,TestData1[,4])) #Result and analysis of SVM
#print(svmfit)
#plot(p)
#Implementation and Result of C4.5
m1 <- myC45(TrainingData1)
p1<- myC45RipPredict(m1,TestData1)
confusionMatrix(p1,TestData1[,4]) #Result and analysis of C45
#Implementation and Result of Ripper
m2 <- myRipper(TrainingData1)
p2<- myC45RipPredict(m2,TestData1)
confusionMatrix(p2,TestData1[,4]) #Result and analysis of Ripper
|
79dcc9717ccd37098d73c9d88ae250110dcdc77c | 45b5d3662e656932a9a2a5eac26dc16749d952de | /man/write32.dta.Rd | 70cf25220c71125467d5c6536b47949d269be10f | [] | no_license | muschellij2/processVISION | 9dbff7ccb6a4598d3cba7cab080e3c5deb86d0d9 | 7fa7327996a5e0e3ac5872bc2b1aeb2b61f76596 | refs/heads/master | 2020-06-01T10:47:53.914160 | 2017-08-21T20:17:13 | 2017-08-21T20:17:13 | 15,747,358 | 0 | 1 | null | 2014-03-31T23:03:57 | 2014-01-08T20:36:19 | R | UTF-8 | R | false | true | 1,053 | rd | write32.dta.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write32.dta.R
\name{write32.dta}
\alias{my.write.dta}
\alias{write32.dta}
\alias{write32.dta,}
\title{Process a VISION XML file from their EDC}
\usage{
write32.dta(dataframe, file, version = 7L, convert.dates = TRUE,
tz = "GMT", convert.factors = c("labels", "string", "numeric", "codes"),
remove.nullstrings = TRUE)
}
\arguments{
\item{dataframe}{a data frame.}
\item{file}{character string giving filename.}
\item{version}{integer: Stata version: 6, 7, 8 and 10 are supported, and 9 is mapped to 8, 11 to 10.}
\item{convert.dates}{Convert Date and POSIXt objects to Stata dates?}
\item{tz}{timezone for date conversion}
\item{convert.factors}{how to handle factors}
\item{remove.nullstrings}{logical (TRUE) Replace "" with NA}
}
\description{
Essentially a direct copy of \code{\link{write.dta}} from
the \code{foreign} package, except that it accepts variable names of
up to 32 characters (\code{<=} 32) instead of strictly fewer than 32.
}
\examples{
\dontrun{
}
}
\seealso{
\code{\link{write.dta}}
}
|
4bdf52f575895f9be4b7951dbcdf5eb5e05ecb0c | a137a1a2290233912f91bd9bdf4401e63f5c7499 | /run_analysis.R | cceee4dbe503d45039e12b1e3c8354067c3c56be | [] | no_license | Phileytan/CleaningData | b5a4d5755f1827807c120a326154cd29233afd02 | 2973a6fa42d6ee3216f76295a89c6f1053c22843 | refs/heads/master | 2020-03-07T18:53:09.381861 | 2018-04-01T17:35:47 | 2018-04-01T17:35:47 | 127,655,164 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,558 | r | run_analysis.R | getandclean <- function() {
        #### Download the files from the web
        # NOTE(review): the archive is downloaded on every call but never
        # unzipped here -- the "UCI HAR Dataset" folder must already exist;
        # `dateDownloaded` is recorded but unused.
        adresseURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
        download.file(adresseURL, destfile = "./Dataset.zip")
        dateDownloaded<-date()
        #### Read the tables
        X_test<-read.table("./UCI HAR Dataset/test/X_test.txt")
        y_test<-read.table("./UCI HAR Dataset/test/y_test.txt")
        subject_test<-read.table("./UCI HAR Dataset/test/subject_test.txt")
        X_train<-read.table("./UCI HAR Dataset/train/X_train.txt")
        y_train<-read.table("./UCI HAR Dataset/train/y_train.txt")
        subject_train<-read.table("./UCI HAR Dataset/train/subject_train.txt")
        activity_labels<-read.table("./UCI HAR Dataset/activity_labels.txt")
        features<-read.table("./UCI HAR Dataset/features.txt")
        #### 1- Merges the training and the test sets
        y<-rbind(y_test,y_train)
        X<-rbind(X_test,X_train)
        subject<-rbind(subject_test,subject_train)
        #### 2- Extracts only the measurements on the mean and standard deviation for each measurement.
        names(X)<-tolower(features[,2])
        selection<-grep("mean|std", features$V2)
        Xmeanstd<- X[,selection]
        #### 3- Uses descriptive activity names to name the activities in the data set
        firstlevel=min(activity_labels$V1)
        lastlevel=max(activity_labels$V1)
        y$V1 <- factor(y$V1,
                       levels = c(firstlevel:lastlevel),
                       labels = c(as.character(activity_labels$V2)))
        #### 4- Appropriately labels the data set with descriptive variable names.
        ## Add literal names (Xmeanstd already changed before for selection)
        names(subject)<-"numsubject"
        names(y)<-"activity"
        ## Subject is more a factor than an integer
        subject$numsubject<-as.factor(subject$numsubject)
        #### Tidy variable names: strip "-" and "()"
        names(Xmeanstd)=gsub("-","",names(Xmeanstd))
        names(Xmeanstd)=gsub("\\()","",names(Xmeanstd))
        #### Final merge in one unique table set of tidy data
        data<-cbind(subject, y, Xmeanstd)
        #### 5- From the data set in step 4, creates a second, independent tidy data
        #### set with the average of each variable for each activity and each subject.
        # NOTE(review): `row.name` relies on R's partial argument matching of
        # write.table's `row.names`; spelling it out would be clearer.
        meandata<-aggregate(data[ ,3:ncol(data)], by=data[ , 1:2], mean)
        write.table(meandata,file = "./meandata.txt", row.name=FALSE)
}
|
f061b1639863ed564e1c0041831e33ecadae96c0 | 91021e4ef3da3604739b4ed54684c4605233973c | /RSkripts/Extract_DataMerge.R | b371abc7575cd3365e9a8cd5e657d93c86b664c4 | [] | no_license | michaelbly/CampProfiles_Iraq_2020 | 1e8aedbafbe37c7ff859dcc543773202acc40992 | fc2ad62e6d8500f3497266b51705bf4ad68839fb | refs/heads/master | 2022-02-24T17:51:50.674133 | 2019-09-27T15:57:47 | 2019-09-27T15:57:47 | 211,353,860 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,097 | r | Extract_DataMerge.R | ################
#LOAD AND ATTACH ALL RELEVANT PACKAGES
###################################
#library(tidyverse)
#library(car)
#library(sandwich)
library(texreg)
library(stringr)
library(vars)
#library(zoo)
library(strucchange)
library(data.table)
library(openxlsx)
library(plm)
library(Formula)
#library(gmm)
library(reshape2)
library(gdata)
library(foreign)
library(plyr)
library(dplyr)
#library(AER)
#library(systemfit)
#library(datasets)
library("readxl")
#library("xlsx")
#######################################################################################################################
# 1. Import Data Previous Round March 2019
#######################################################################################################################
########################################################################################################################
# Read the "Data merge March 2019" sheet from the camp-profiling workbook.
DataMerge<-read.xlsx("dataset/(DRAFT) REACH_IRQ_CCCM_Sources of data_Camp profiling_SEPT2019.xlsx", sheet="Data merge March 2019")
# (interactive check of the imported column names)
names(DataMerge)
# Keep only the indicator columns used downstream.
# NOTE(review): the name "subset" shadows base::subset() for the remainder
# of this script -- consider renaming.
subset <- c("Camp", "SSID", "611EnrolmentCurrent", "1217EnrolmentCurrent", "PDSAccessCurrent", "HealthServicesCurrent", "CCCMCurrent",
            "HHDocumentationCurrent", "AvgShelterCovgPPCurrent", "AvgNumperShelterCurrent", "PersonsPerLatrineCurrent",
            "PersonsPerShowerCurrent", "FreqSolidWasteCurrent", "DateOpened")
DataMerge <- DataMerge[subset]
#DUPLICATE THE JAD'AH CAMP ROWS 5-TIMES TO MERGE IT WITH THE OTHER DATASETS
# Replicate `x` as `n` identical rows of a matrix.
rep.row <- function(x, n) {
  expanded <- rep(x, each = n)
  matrix(expanded, nrow = n)
}
# Duplicate the Jad'ah camp record (row 40) into rows 49-52 so that each
# Qayyarah sub-camp gets its own row for merging with the other datasets.
DataMerge[c(49:52), ] <- as.data.frame(rep.row(DataMerge[c(40), ],4))
# Coerce every retained column to character in one pass. This replaces the
# original 14 copy-pasted `DataMerge$col <- as.character(as.vector(...))`
# statements (one per column of the subset) with identical results.
DataMerge[] <- lapply(DataMerge, function(col) as.character(as.vector(col)))
#Rename the Qayarah-Jad'ah Camps
DataMerge[40, 1] <- "Qayyarah-Jad'ah 1-2"
DataMerge[49, 1] <- "Qayyarah-Jad'ah 3"
DataMerge[50, 1] <- "Qayyarah-Jad'ah 4"
DataMerge[51, 1] <- "Qayyarah-Jad'ah 5"
DataMerge[52, 1] <- "Qayyarah-Jad'ah 6"
|
eb308c99f487827d95c2c63ef02dee6a95a7e8fa | fc6ba5dd242c2043ec8c32c96890c7be0a0d260c | /wordcloud2/TextMining_WordCloud2.R | acf3f1e0eacecfd0b9f50c2cc45c4e84fdc2604e | [] | no_license | naind/R_crawling | b63d2393314e17ae2dd474a941692e2d89ceacfd | e3b48afbad23dcdda1550f20f29164c1832700f5 | refs/heads/master | 2022-12-15T08:14:31.381605 | 2020-09-10T09:43:56 | 2020-09-10T09:43:56 | 287,299,417 | 0 | 0 | null | null | null | null | UHC | R | false | false | 822 | r | TextMining_WordCloud2.R | setwd("C:/Users/94050/Desktop/git/R_Project") # 내 워킹 디렉토리 변경
list.files() # list the files inside the working directory
library(KoNLP)
useNIADic()
# Add a word that is missing from the dictionary ("ncn" = common-noun tag).
mergeUserDic(data.frame(c("청취"), c("ncn")))
text1 <- readLines("ANSI.txt")
text2 <- extractNoun(text1)
# (interactive inspection of the extracted nouns)
text2
# extractNoun() returns a list of lists, so flatten it with unlist().
text3 <- unlist(text2)
# Stop-character cleanup: replace "ㅎ" with "".
text4 <- gsub("ㅎ", "", text3)
# Keep only words of 2 to 4 characters.
text5 <- text4[nchar(text4) < 5]
text5 <- text5[nchar(text5) > 1]
# Sort word frequencies in decreasing order (default is increasing).
text6 <- sort(table(text5), decreasing = T)
# Top 300 words.
text7 <- head(text6, 300)
install.packages("wordcloud2")
library(wordcloud2)
wordcloud2(text7, size = 1)
|
0f6345a95711e393a208c0bb407bb2d670c6481e | 991c97b4f8697f7c4635b76f4449d83871dcefe2 | /R/show_source.R | e5aea716cdd96def2f1346c18f2b0fe79cc6e96e | [
"MIT"
] | permissive | r-lib/pillar | 803706f4fa4e7f039005f5a8ad27945869fe7d02 | 92095fcc0fffa6d2b2d42c3ec6017e7501c0c99b | refs/heads/main | 2023-06-08T05:12:41.813170 | 2023-03-26T01:56:15 | 2023-03-26T01:56:15 | 91,374,446 | 146 | 40 | NOASSERTION | 2023-04-05T01:30:09 | 2017-05-15T19:04:32 | R | UTF-8 | R | false | false | 1,588 | r | show_source.R | set_show_source_hooks <- function() {
  # NOTE(review): `width` is passed to the opts hook, which forces it into
  # its closure but does not otherwise use it in the visible code.
  width <- 40
  # Register the two knitr hooks that together implement the `show_source`
  # chunk option (see the definitions below).
  set_show_source_opts_hook(width)
  set_show_source_source_hook()
}
# Defines a `show_source` knitr option. If this chunk option is set,
# the code should be a single function (use ::: for internal functions).
# The code will be replaced by the function definition, and not evaluated.
set_show_source_opts_hook <- function(width) {
  # `width` is captured by the closure but is not referenced by the hook
  # body below.
  force(width)
  # Chunk-option hook: runs for every chunk that sets `show_source`.
  show_source_opts_hook <- function(options) {
    # The chunk body is expected to be a single (possibly :::-qualified)
    # function name, e.g. "pkg:::fun".
    qualified_fun_name <- options$code
    # Strip any "pkg:::" prefix to obtain the bare function name.
    fun_name <- gsub("^.*:::", "", qualified_fun_name)
    # Look the function up and deparse it, keeping the original source text.
    fun <- eval(parse(text = fun_name))
    code <- deparse(fun, control = "useSource")
    # Replace the chunk code with the full definition; do not evaluate it.
    options$code <- paste0(fun_name, " <- ", paste0(code, collapse = "\n"))
    options$eval <- FALSE
    # Store for later reuse by source hook
    options$show_source <- qualified_fun_name
    options
  }
  knitr::opts_hooks$set(show_source = show_source_opts_hook)
}
# show_source chunks are included in a <details> tag.
# Installs a knitr "source" hook that wraps the rendered source of chunks
# flagged by the `show_source` chunk option in a collapsible HTML
# <details>/<summary> element, daisy-chaining the previously installed hook.
set_show_source_source_hook <- function() {
  # Need to use a closure here to daisy-chain hooks and to keep state
  old_source_hook <- knitr::knit_hooks$get("source")
  show_source_source_hook <- function(x, options) {
    # Let the previously installed hook render the chunk source first.
    x <- old_source_hook(x, options)
    # Chunks without the show_source option pass through unchanged.
    if (is.null(options$show_source)) {
      return(x)
    }
    qualified_fun_name <- options$show_source
    # (A dead `fun_name <- gsub("^.*:::", "", ...)` assignment was removed
    # here; the stripped name was never used in this hook.)
    paste0(
      "<details><summary>Source code of <code>",
      qualified_fun_name, "()</code></summary>",
      x, "</details>\n"
    )
  }
  knitr::knit_hooks$set(source = show_source_source_hook)
}
|
d628da3c0037a24a731795684dca4a1c849f7c52 | 4a2cd4bcbf96ac29449cd1a785f0cdf757ae26c9 | /shotspotter.R | 90b466d3c0a0deb9376b0379fc2f52bacf9e1a41 | [] | no_license | taehwank15/shotspotter | a87d9b6713a46f6610655b0d6c0077e10042589c | a4edf88c8257a1690ea9cc947aa018aaddcae097 | refs/heads/master | 2020-05-15T06:53:15.120229 | 2019-04-23T03:52:42 | 2019-04-23T03:52:42 | 182,131,644 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,634 | r | shotspotter.R | library(tidyverse)
library(tidycensus)
library(sf)
library(tigris)
library(ggthemes)
library(lubridate)
# Validate
# Read the Oakland ShotSpotter export with an explicit column spec, then
# parse the timestamp column ("month/day/year hour:minute") with lubridate.
data <- read_csv("http://justicetechlab.org/wp-content/uploads/2017/08/OakShots_latlong.csv",
                 col_types = cols(
                   OBJECTID = col_double(),
                   CAD_ = col_character(),
                   BEAT = col_character(),
                   DATE___TIM = col_character(),
                   ADDRESS = col_character(),
                   CALL_ID = col_character(),
                   DESCRIPTIO = col_character(),
                   Xrough = col_double(),
                   Yrough = col_double(),
                   XCOORD = col_double(),
                   YCOORD = col_double()
                 )) %>%
  mutate(DATE___TIM = mdy_hm(DATE___TIM))
# Turn df into shape file
# east_paloalto <- st_as_sf(data)
# has data for all the urban areas, just want data for oakland - filter
raw_shapes <- urban_areas(class = "sf")
shapes <- raw_shapes %>%
  filter(NAME10 == "San Francisco--Oakland, CA")
# Convert 5 random shot records to sf points (EPSG:4326 = WGS84 lon/lat).
shot_locations <- st_as_sf(data, coords = c("XCOORD", "YCOORD"), crs = 4326) %>%
  sample_n(5)
# Overlay the sampled shot locations on the urban-area outline.
ggplot(data = shapes) +
  geom_sf() +
  geom_sf(data = shot_locations) +
  theme_map()
# animation ideas
# select an address, then show the shots fired at that address over time
# select a date range, then show the shots fired in oakland within that range
#
# Drop outlier points recorded far outside Oakland, then convert to an sf
# object. In the original script the two filter() calls (plus a stray
# duplicate condition) were left dangling below this assignment, which
# errors at run time because they were not attached to any pipeline.
shot_locations2 <- data %>%
  # Takes out outlier data points from outside of Oakland
  filter(floor(XCOORD) != -142) %>%
  filter(floor(YCOORD) != 30) %>%
  st_as_sf(coords = c("XCOORD", "YCOORD"), crs = 4326)
|
684b1a9711d7da9c73d3eecac671688fc694b8de | 6cbc6e80ae07b8fb1fff0a5cad4ddcd29c358c0a | /R/ezr_gainslift.R | a088b33e28f1f05140feea600ff586f14d179e70 | [] | no_license | lenamax2355/easyr | d99638b84fd9768774fa7ede84d257b10e0bacf6 | 37ab2fe5c28e83b9b5b3c0e3002f2df45708016b | refs/heads/master | 2022-01-09T20:43:17.801623 | 2019-05-13T02:49:48 | 2019-05-13T02:49:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,343 | r | ezr_gainslift.R | #' Gains, Lift, and KS Calculator
#'
#' Outputs a dataframe that has the cumulative capture rate, gains, and KS metrics at each 1% increment of the score. Also produces the lower threshold of the score.
#'
#' @param df Dataframe of prediction results
#' @param binary_target Should work for either numeric or factor. Must be 0s and 1s though.
#' @param prediction The prediction value. Higher values = higher chance of obtaining a 1
#' @param higher_morelikely Default is TRUE. If a higher prediction value is more likely to be a 1. Set to false if higher is going to be a 0.
#' @param round_value Default is 6. Rounds values to 6 decimal places for a nicer user experience.
#' @param concise_result Default is TRUE. Won't print every record of gainslift table.
#' @return Returns a dataframe with various metrics at each progression into the scorecard.
#'
#' @examples ezr.gainslift(df = dataset_telco_churn_from_kaggle, binary_target = 'Churn', prediction ='TotalCharges',higher_morelikely = FALSE )
ezr.gainslift = function (df, binary_target, prediction, higher_morelikely = TRUE,
    round_value = 6, concise_result = TRUE) {
    ## H2O frames are first converted to a plain data.frame, keeping only
    ## the target and prediction columns.
    if (class(df)[1] == "H2OFrame") {
        retain_vars = c(binary_target, prediction)
        df = as.data.frame(df[retain_vars])
    }
    df = df %>% dplyr::select(c(binary_target, prediction))
    ## Coerce the target column to numeric 0/1 (handles factor input).
    df = df %>% mutate(`:=`(!!binary_target, readr::parse_number(as.character(!!rlang::sym(binary_target)))))
    ## Sort so the records "most likely to be a 1" come first.
    if (higher_morelikely == TRUE) {
        df = df %>% arrange(desc(!!rlang::sym(prediction)))
    }
    else {
        df = df %>% arrange(!!rlang::sym(prediction))
    }
    ## Cumulative counts of positives ("bads") walking down the sorted score.
    total_records = nrow(df)
    df$rowid = seq(1, total_records, 1)
    total_bads = sum(df[binary_target], na.rm = TRUE)
    df["cum_bads"] = cumsum(df[binary_target])
    df = df %>% mutate(cum_response_rate = cum_bads/rowid, cum_capture_rate = cum_bads/total_bads,
        cum_expected_bads_baseline = (rowid/total_records) *
            total_bads, cum_lift = cum_bads/cum_expected_bads_baseline,
        cum_gain = cum_bads - cum_expected_bads_baseline)
    ## Evaluate the metrics at each 1% increment of the sorted data,
    ## joining on the row closest to each fraction ("min_score" records the
    ## lower score threshold at that depth).
    gains_lift_table = data.frame(cumulative_data_fraction = seq(0.01,
        1, 0.01))
    gains_lift_table = gains_lift_table %>% mutate(n_records = base::floor(cumulative_data_fraction *
        total_records))
    gains_lift_table = gains_lift_table %>% inner_join(df, by = c(n_records = "rowid")) %>%
        dplyr::rename(min_score = !!prediction)
    ## ks_split = cumulative capture rate of positives minus cumulative rate
    ## of negatives; response_rate is the per-increment (non-cumulative)
    ## rate, falling back to the cumulative rate for the first row.
    ## NOTE(review): funs() is soft-deprecated in current dplyr; this code
    ## targets the dplyr version it was written against.
    gains_lift_table = gains_lift_table %>% mutate(cum_nonresp_rate = (n_records -
        cum_bads)/(total_records - total_bads), ks_split = cum_capture_rate -
        cum_nonresp_rate, new_records = n_records - lag(n_records,
        n = 1), new_bads = cum_bads - lag(cum_bads, n = 1), response_rate = round(new_bads/new_records,
        2), new_bads = NULL, new_records = NULL, response_rate = ifelse(is.na(response_rate) ==
        TRUE, cum_response_rate, response_rate)) %>% select(-!!rlang::sym(binary_target)) %>% mutate_all(.funs = funs(parse_number(as.character(.)))) %>%
        mutate_all(.funs = funs(round(., round_value)))
    ## Optionally thin the output to a standard set of fractions.
    if (concise_result == TRUE) {
        gains_lift_table = gains_lift_table %>% filter(cumulative_data_fraction %in%
            c(0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08,
                0.09, 0.1, 0.15, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6,
                0.7, 0.8, 0.9, 1))
    }
    return(gains_lift_table)
}
|
0ed5f361cd1360d26fc085ac129609bad2b58f33 | ce055040549214f56f9c603eb0dd67d6b7ad196e | /R/value.R | dc7045e20dfc311e8a10cd2f03f6f4a9a00138b1 | [] | no_license | HenrikBengtsson/future | d3e103cf60bd8bcd9e7e1c45d239ea0e9f3dd18e | 30715de064db5cc0927fc71201f7866e5f45137e | refs/heads/develop | 2023-08-28T20:12:44.689930 | 2023-08-18T11:59:09 | 2023-08-18T11:59:09 | 37,042,109 | 971 | 104 | null | 2023-06-17T07:33:21 | 2015-06-08T02:37:06 | R | UTF-8 | R | false | false | 7,958 | r | value.R | #' The value of a future or the values of all elements in a container
#'
#' Gets the value of a future or the values of all elements (including futures)
#' in a container such as a list, an environment, or a list environment.
#' If one or more futures is unresolved, then this function blocks until all
#' queried futures are resolved.
#'
#' @param future,x A [Future], an environment, a list, or a list environment.
#'
#' @param stdout If TRUE, standard output captured while resolving futures
#' is relayed, otherwise not.
#'
#' @param signal If TRUE, \link[base]{conditions} captured while resolving
#' futures are relayed, otherwise not.
#'
#' @param \dots All arguments used by the S3 methods.
#'
#' @return
#' `value()` of a Future object returns the value of the future, which can
#' be any type of \R object.
#'
#' `value()` of a list, an environment, or a list environment returns an
#' object with the same number of elements and of the same class.
#' Names and dimension attributes are preserved, if available.
#' All future elements are replaced by their corresponding `value()` values.
#' For all other elements, the existing object is kept as-is.
#'
#' If `signal` is TRUE and one of the futures produces an error, then
#' that error is produced.
#'
#' @rdname value
#' @export
# S3 generic; the actual work is done by the class-specific methods below.
value <- function(...) UseMethod("value")
#' @rdname value
#' @export
value.Future <- function(future, stdout = TRUE, signal = TRUE, ...) {
  ## Launch lazy futures on demand so that a value can always be collected.
  if (future$state == "created") {
    future <- run(future)
  }
  ## Collect the FutureResult; per the documentation above, this is where
  ## the call blocks until the future is resolved.
  result <- result(future)
  stop_if_not(inherits(result, "FutureResult"))
  value <- result$value
  visible <- result$visible
  if (is.null(visible)) visible <- TRUE
  ## Always signal immediateCondition:s and as soon as possible.
  ## They will always be signaled if they exist.
  signalImmediateConditions(future)
  ## Output captured standard output?
  if (stdout) {
    if (length(result$stdout) > 0 &&
        inherits(result$stdout, "character")) {
      out <- paste(result$stdout, collapse = "\n")
      if (nzchar(out)) {
        ## AD HOC: Fix captured UTF-8 output on MS Windows?
        if (!isTRUE(result$r_info$captures_utf8) && getOption("future.stdout.windows.reencode", TRUE)) {
          out <- adhoc_native_to_utf8(out)
        }
        cat(out)
      }
    }
    ## Drop captured stdout to save memory?
    if (isTRUE(attr(future$stdout, "drop"))) {
      result$stdout <- NULL
      future$result <- result
    }
  }
  ## Were there any variables added to the global environment?
  if (length(result$globalenv$added) > 0L) {
    onMisuse <- getOption("future.globalenv.onMisuse", "ignore")
    if (onMisuse != "ignore") {
      if (onMisuse == "error") {
        cond <- GlobalEnvFutureError(globalenv = result$globalenv, future = future)
      } else if (onMisuse == "warning") {
        cond <- GlobalEnvFutureWarning(globalenv = result$globalenv, future = future)
      } else {
        cond <- NULL
        warnf("Unknown value on option 'future.globalenv.onMisuse': %s",
              sQuote(onMisuse))
      }
      if (!is.null(cond)) {
        ## FutureCondition to stack of captured conditions
        new <- list(condition = cond, signaled = FALSE)
        conditions <- result$conditions
        n <- length(conditions)
        ## An existing run-time error takes precedence
        if (n > 0L && inherits(conditions[[n]]$condition, "error")) {
          conditions[[n + 1L]] <- conditions[[n]]
          conditions[[n]] <- new
        } else {
          conditions[[n + 1L]] <- new
        }
        result$conditions <- conditions
        future$result <- result
      }
    }
  }
  ## Was RNG used without requesting RNG seeds?
  ## (Checked at most once per future; see the .rng_checked flag below.)
  if (!isTRUE(future$.rng_checked) && isFALSE(future$seed) && isTRUE(result$rng)) {
    ## BACKWARD COMPATIBILITY: Until higher-level APIs set future()
    ## argument 'seed' to indicate that RNGs are used. /HB 2019-12-24
    if (any(grepl(".doRNG.stream", deparse(future$expr), fixed = TRUE))) {
      ## doFuture w/ doRNG, e.g. %dorng%
    } else if (is_lecyer_cmrg_seed(future$globals$...future.seeds_ii[[1]])) {
      .Defunct(msg = "Please upgrade your 'future.apply' or 'furrr' (type 1)")
    } else if (is_lecyer_cmrg_seed(future$envir$...future.seeds_ii[[1]])) {
      .Defunct(msg = "Please upgrade your 'future.apply' or 'furrr' (type 2)")
    } else {
      onMisuse <- getOption("future.rng.onMisuse", "warning")
      if (onMisuse != "ignore") {
        if (onMisuse == "error") {
          cond <- RngFutureError(future = future)
        } else if (onMisuse == "warning") {
          cond <- RngFutureWarning(future = future)
        } else {
          cond <- NULL
          warnf("Unknown value on option 'future.rng.onMisuse': %s",
                sQuote(onMisuse))
        }
        if (!is.null(cond)) {
          ## RngFutureCondition to stack of captured conditions
          new <- list(condition = cond, signaled = FALSE)
          conditions <- result$conditions
          n <- length(conditions)
          ## An existing run-time error takes precedence
          if (n > 0L && inherits(conditions[[n]]$condition, "error")) {
            conditions[[n + 1L]] <- conditions[[n]]
            conditions[[n]] <- new
          } else {
            conditions[[n + 1L]] <- new
          }
          result$conditions <- conditions
          future$result <- result
        }
      }
    }
  }
  future$.rng_checked <- TRUE
  ## Check for non-exportable objects in the value?
  onReference <- getOption("future.globals.onReference", "ignore")
  if (onReference %in% c("error", "warning")) {
    new <- tryCatch({
      assert_no_references(value, action = onReference, source = "value")
      NULL
    }, FutureCondition = function(cond) {
      list(condition = cond, signaled = FALSE)
    })
    if (!is.null(new)) {
      ## Append FutureCondition to the regular condition stack
      conditions <- result$conditions
      n <- length(conditions)
      ## An existing run-time error takes precedence
      if (n > 0L && inherits(conditions[[n]]$condition, "error")) {
        conditions[[n + 1L]] <- conditions[[n]]
        conditions[[n]] <- new
      } else {
        conditions[[n + 1L]] <- new
      }
      result$conditions <- conditions
      future$result <- result
    }
  }
  ## Signal captured conditions?
  conditions <- result$conditions
  if (length(conditions) > 0) {
    if (signal) {
      mdebugf("Future state: %s", sQuote(future$state))
      ## Will signal an (eval) error, iff exists
      signalConditions(future, exclude = getOption("future.relay.immediate", "immediateCondition"), resignal = TRUE)
    } else {
      ## Return 'error' object, iff exists, otherwise NULL
      error <- conditions[[length(conditions)]]$condition
      if (inherits(error, "error")) {
        value <- error
        visible <- TRUE
      }
    }
  }
  ## Honor the visibility captured at evaluation time.
  if (visible) value else invisible(value)
}
#' @rdname value
#' @export
value.list <- function(x, stdout = TRUE, signal = TRUE, ...) {
  ## Resolve every future in the list, relaying output/conditions as
  ## requested, then substitute each future element with its value.
  futs <- futures(x)
  futs <- resolve(futs, result = TRUE, stdout = stdout, signal = signal, force = TRUE)
  for (idx in seq_along(futs)) {
    fut <- futs[[idx]]
    if (inherits(fut, "Future")) {
      val <- value(fut, stdout = FALSE, signal = FALSE, ...)
      if (signal && inherits(val, "error")) stop(val)
      if (is.null(val)) {
        ## `futs[[idx]] <- NULL` would delete the element, so assign a
        ## wrapped NULL to keep the slot.
        futs[idx] <- list(NULL)
      } else {
        futs[[idx]] <- val
        val <- NULL
      }
    }
  }
  futs
}
#' @rdname value
#' @export
# A list environment is processed element-wise exactly like a list, so the
# list method is reused directly.
value.listenv <- value.list
#' @rdname value
#' @export
value.environment <- function(x, stdout = TRUE, signal = TRUE, ...) {
  ## Resolve every future in the environment, relaying output/conditions as
  ## requested, then substitute each future entry with its value.
  envir <- futures(x)
  envir <- resolve(envir, result = TRUE, stdout = stdout, signal = signal, force = TRUE)
  for (name in ls(envir = envir, all.names = TRUE)) {
    obj <- envir[[name]]
    if (inherits(obj, "Future")) {
      val <- value(obj, stdout = FALSE, signal = FALSE, ...)
      if (signal && inherits(val, "error")) stop(val)
      ## Environments can hold NULL directly, so no list(NULL) wrapping is
      ## needed here (unlike the list method).
      envir[[name]] <- val
    }
  }
  envir
}
|
6fea5b4deacdacc6cdcbb07ed4dfdb8531dbc461 | e4ad2398aa4b2d308ba0ec11803d58e36bba43d5 | /R/qcs.R.r | a3947c7944d69fa66f16156d06b0dea9de72e531 | [] | no_license | mflores72000/qcr | 2204b2810a24a91bee75ef68094feaf6198746bd | 4b07dcc8bdc2293ed0504d438e835b9562746612 | refs/heads/main | 2023-06-08T04:46:35.286754 | 2023-05-30T16:06:10 | 2023-05-30T16:06:10 | 387,871,922 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,864 | r | qcs.R.r | #-----------------------------------------------------------------------------#
# #
# QUALITY CONTROL STATISTICS IN R #
# #
# An R package for statistical in-line quality control. #
# #
# Written by: Miguel A. Flores Sanchez #
# Professor of Mathematic Department #
# Escuela Politecnica Nacional, Ecuador #
# miguel.flores@epn.edu.ec #
# #
#-----------------------------------------------------------------------------#
#-------------------------------------------------------------------------
# R chart
#-------------------------------------------------------------------------
##' Function to plot Shewhart R chart
##'
##' This function is used to compute statistics required by the R chart.
##'
##' @param x An object of class "qcd".
##' @param ... Arguments passed to or from methods.
##' @export
##' @examples
##'
##' ##
##' ## Continuous data
##' ##
##'library(qcr)
##'data(pistonrings)
##'str(pistonrings)
##'pistonrings.qcd<-qcd(pistonrings)
##'
##'class(pistonrings.qcd)
##'
##'res.qcs <- qcs.R(pistonrings.qcd)
##'class(res.qcs)
##'plot(res.qcs,title="Control Chart R for pistonrings")
##'summary(res.qcs)
##'
# S3 generic for the Shewhart R chart; dispatches on the class of `x`.
qcs.R <- function(x, ...) UseMethod("qcs.R")
##' @rdname qcs.R
##' @method qcs.R default
##' @inheritParams qcd
##' @param center A value specifying the center of group statistics or the
##' ''target'' value of the process.
##' @param std.dev A value or an available method specifying the within-group standard
##' deviation(s) of the process. Several methods are available for estimating the
##' standard deviation in case of a continuous process variable.
##' @param conf.nsigma A numeric value used to compute control limits, specifying the
##' number of standard deviations (if \code{conf.nsigma} > 1) or the confidence level (if 0
##' < \code{conf.nsigma} < 1).
##' @param limits A two-values vector specifying control limits.
##' @param plot Logical value. If \code{TRUE} a R chart should be plotted.
## @details
## In the default method \code{qcs.R.default} parameter \code{x} is a matrix
## or data-frame where it should contain data, index sample and, optionally, covariate(s).
##' @seealso
##' \code{\link{qcs}}, \code{\link{qcd}}
##' @export
qcs.R.default <- function(x, var.index = 1, sample.index = 2,
                          covar.index = NULL, covar.names = NULL,
                          data.name = NULL,
                          sizes = NULL,
                          center = NULL,
                          std.dev = c("UWAVE-R", "MVLUE-R"),
                          conf.nsigma = 3, limits = NULL, plot = FALSE, ...)
{
  # Resolve the estimator name unless a numeric sigma was supplied.
  if (!is.numeric(std.dev))
    std.dev <- match.arg(std.dev)
  # Wrap the raw data in a quality-control data ("qcd") object, then
  # delegate all chart computations to the qcd method.
  qc_data <- qcd(data = x, var.index = var.index, sample.index = sample.index,
                 covar.index = covar.index, covar.names = covar.names,
                 data.name = data.name, sizes = sizes)
  qcs.R.qcd(x = qc_data, center = center, std.dev = std.dev,
            conf.nsigma = conf.nsigma,
            limits = limits, plot = plot)
} # qcs.R.default
#.........................................................................
##' @rdname  qcs.R
##' @method qcs.R qcd
# @inheritParams qcs.S.default
##' @export
##'
qcs.R.qcd <- function(x, center = NULL,
                      std.dev  = c("UWAVE-R", "MVLUE-R"),
                      conf.nsigma  =  3, limits = NULL, plot = FALSE, ...)
#.........................................................................
{
#.........................................................................
  # Resolve the estimator name unless a numeric sigma was supplied.
  if (!is.numeric(std.dev))
    std.dev <- match.arg(std.dev)
  # This method only accepts quality-control data ("qcd") objects.
  if(is.null(x) || !inherits(x, "qcd"))
    stop("data must be an objects of class (or extending) 'qcd'")
  sizes <- x$sizes
  type.data <- "continuous"
  # Compute the R-chart statistics, center line, and control limits.
  qcs<-qcs(x$x, x$sample, sizes, type = "R",
           center, std.dev, conf.nsigma, limits, type.data)
  center <- qcs$center
  R <- qcs$statistics
  std.dev <- qcs$std.dev
  sizes <- qcs$sizes
  limits <- qcs$limits
  violations <- qcs$violations
  statistics <- data.frame(R)
  m <- length(x)
  sample <- x$sample
  # If covariates are present (more than the 3 standard qcd columns),
  # collapse each covariate to one unique value per sample and append it
  # to the per-sample statistics table.
  if (m > 3) {
    new.x <- x[, -c(1, 2, length(x))]
    cov <- apply(new.x, 2, function(x) unlist(lapply(split(x, sample), unique)))
    statistics <- data.frame(R, cov)
  }
  row.names(statistics) <- unique(x$sample)
  data.name <- attr(x, "data.name")
  # Assemble the "qcs.R" result object consumed by plot()/summary() methods.
  result <- list(qcd = x, type = "R", statistics = statistics,
                 center = center, std.dev = std.dev,
                 limits = limits, conf.nsigma = conf.nsigma,
                 sizes = sizes, data.name = data.name,
                 violations = violations)
  oldClass(result) <- c("qcs.R", "qcs")
  if(plot) plot(result, ...)
  return(result)
#.........................................................................
} # qcs.R.qcd
#.........................................................................
|
1f66a69c1d6abdf3a0e4e1b96baa75b1d177e305 | 406b5b48c4c62b54fb028df3e0e2f54c865c5c06 | /Bingo_enrich.R | 82d1c046e71584afd79140c206ab16aeec7cf0cd | [] | no_license | wenliangz/RNA-seq-shared-Rscripts | e80c7e646a079890d70a9e4f5be94ba63d5d5c7b | 64d061829373a8590c7ddb01ee6e541a42648cdc | refs/heads/master | 2020-06-16T04:14:12.963355 | 2016-12-01T02:10:12 | 2016-12-01T02:10:12 | 75,246,254 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,742 | r | Bingo_enrich.R | library(plyr)
library(dplyr)
library(magrittr)
library(stringr)
library(ggplot2)
source('multiplot.R')
# Read BiNGO enrichment result files (*.bgo) from the project directory.
path <- 'y:/BlueBirdBio/OneDrive for Business/MyProjects/RNAseq_Mike_mRNATransfection/pathway'
# Alternate path used on another machine:
# path <- 'C:/Users/Wen.Zhang/OneDrive for Business/MyProjects/RNAseq_Mike_mRNATransfection/pathway/'
# NOTE(review): `pattern` is a regular expression, not a glob; the intended
# form is "\\.bgo$" -- confirm "*bgo$" matches as expected on this platform.
fls <- list.files(path=path,pattern="*bgo$")
# Read one BiNGO output file (.bgo): skip the 19-line header, parse the
# tab-separated enrichment table, and add a -log10 corrected p-value column.
#
# Args:
#   path:     directory holding the .bgo files (the file name is appended
#             directly, so `path` must end with a separator).
#   filename: name of the .bgo file inside `path`.
# Returns a data.frame with an extra `log10corr.p` column.
read_bingo <- function(path, filename)
{
  # (The original first line assigned str_split(filename, ...)[1] to `df`
  # and immediately overwrote it; that dead assignment has been removed.)
  df <- read.table(paste0(path, filename), header = TRUE, sep = '\t',
                   skip = 19, fill = TRUE, stringsAsFactors = FALSE)
  df %<>% mutate(log10corr.p = -log10(corr.p.value))
  return(df)
}
# Load every .bgo file, naming each list element after the file's base name
# (the text before the first "."). seq_along() replaces the original
# 1:length(fls), which would wrongly iterate over c(1, 0) when no files
# are found.
bingos <- list()
for (i in seq_along(fls))
{
  bingos[[i]] <- read_bingo(path, fls[i])
  names(bingos)[i] <- str_split(fls[i], pattern = '\\.')[[1]][1]
}
# Build one horizontal bar chart per BiNGO result table (at most the 10 most
# significant terms each) and draw them stacked in one column via multiplot().
#
# Args:
#   dfs: named list of data.frames as produced by read_bingo(); each must
#        contain Description and log10corr.p columns, sorted with the most
#        significant terms first.
# Returns the value of multiplot(), which draws the grid as a side effect.
plot_enrichhis <- function(dfs)
{
  plots <- vector("list", length(dfs))
  for (i in seq_along(dfs))
  {
    df <- dfs[[i]]
    # Keep at most the first 10 rows; min()/seq_len() replace the original
    # scalar ifelse() and the 1:n indexing, which misbehaved on an empty
    # table (1:0 yields c(1, 0)).
    n <- min(nrow(df), 10L)
    df <- df[seq_len(n), ]
    # Order bars by -log10(corrected p) and freeze that order as the factor
    # levels so ggplot draws them in rank order after coord_flip().
    df <- df[order(df$log10corr.p), ]
    df$Description <- factor(df$Description, levels = df$Description)
    # (A dead bare `gg` statement and a redundant second reordering of the
    # already-sorted data frame were removed; the rendered plot is the same.)
    plots[[i]] <- ggplot(df, aes(x = Description, y = log10corr.p, fill = Description)) +
      geom_bar(stat = 'identity') +
      coord_flip() +
      theme(legend.position = "none",
            axis.text = element_text(size = 16),
            axis.title = element_text(size = 16)) +
      labs(x = '', y = '-log10corr.p',
           title = names(dfs)[i])
  }
  gg_multi <- multiplot(plotlist = plots, cols = 1)
  return(gg_multi)
}
plot_enrichhis(bingos)
|
a9ee3c932320e52e60162cf415cecf89649a9d6b | 16ee8d77216b80bb4e6662b89cb914bc1a09f330 | /plot4.R | 7e57bb6c296b355c7dcd014998825ead93144d68 | [] | no_license | AnannyaU/ExData_Plotting1 | b4bcb68f306ad6ad04e5d11325cdd14e05e1401e | 9aecc38d4868753008f3c790b30f8b7a4b5cc992 | refs/heads/master | 2021-01-15T09:51:07.202785 | 2016-08-08T06:35:33 | 2016-08-08T06:35:33 | 65,170,902 | 0 | 0 | null | 2016-08-08T03:51:50 | 2016-08-08T03:51:49 | null | UTF-8 | R | false | false | 1,886 | r | plot4.R | # Converted the text file to csv before reading it
electricData <- read.csv("household_power_consumption.csv", header = TRUE, sep = ";")
# Created subsets corresponding to the two dates used for analysis: Feb 1, 2007 and Feb 2, 2007
electricDataFeb1 <- subset(electricData, Date == "1/2/2007")
electricDataFeb2 <- subset(electricData, Date == "2/2/2007")
# Combined the two subsets
electricDataFeb <- rbind.data.frame(electricDataFeb1, electricDataFeb2)
# Extracted the Date and Time columns into a new data frame and created a new column called DtTm using strptime() to convert factor into DateTime
DT <- cbind.data.frame(electricDataFeb$Date, electricDataFeb$Time)
electricDataFeb$DtTm <- strptime(paste(DT[,1],DT[,2]), "%d/%m/%Y %H:%M:%OS")
# Open the PNG device and lay out a 2x2 grid of panels.
png(filename = "plot4.png", width = 480, height = 480)
par(mfrow = c(2,2), mar = c(4, 4, 2, 2))
x4 <- electricDataFeb$DtTm
# Measurement columns were read as factors; as.numeric(levels(f))[f] decodes
# a factor back to the numeric values of its labels.
y41 <- as.numeric(levels(electricDataFeb$Global_active_power))[electricDataFeb$Global_active_power]
plot(x4, y41, type = "l", ylab = "Global Active Power", xlab = "")
y42 <- as.numeric(levels(electricDataFeb$Voltage))[electricDataFeb$Voltage]
plot(x4, y42, type = "l", ylab = "Voltage", xlab = "datetime")
y431 <- as.numeric(levels(electricDataFeb$Sub_metering_1))[electricDataFeb$Sub_metering_1]
y432 <- as.numeric(levels(electricDataFeb$Sub_metering_2))[electricDataFeb$Sub_metering_2]
# NOTE(review): Sub_metering_3 is coerced directly (no levels() decode),
# implying it was read as numeric, unlike its siblings -- confirm.
y433 <- as.numeric(electricDataFeb$Sub_metering_3)
# Panel 3: all three sub-metering series on one plot with a legend.
plot(x4, y431, type = "l", ylab = "Energy sub metering", xlab = "")
lines(x4, y432, col="red")
lines(x4, y433, col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = c(1,1,1), col = c("black", "red", "blue"), box.lwd = 0, box.col = "white")
y44 <- as.numeric(levels(electricDataFeb$Global_reactive_power))[electricDataFeb$Global_reactive_power]
plot(x4, y44, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
dev.off() |
61a998345dea1f1036b7cc248e5f214897a06998 | 89875c4898905aadb7a93ec9def70832b4eb75b2 | /install.R | 534b70385d0ddfddeae22c1416d827a4c3387c36 | [] | no_license | mabarbour/Lanphere_Experiments | 930ef2287bef9538e3e3e376dab93f49441e2e83 | 9d6410706cee5100de043540da15a7f9208a177e | refs/heads/master | 2021-03-27T20:11:21.131315 | 2018-05-12T13:24:08 | 2018-05-12T13:24:08 | 15,195,239 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 223 | r | install.R | install.packages("tidyverse")
# Install the remaining project dependencies in a single call so CRAN
# dependency resolution runs once instead of once per package.
# NOTE(review): "parallel" ships with base R and is not on CRAN, so
# install.packages() will report it as unavailable; it can safely be
# removed from this list.
install.packages(c("rmarkdown", "brms", "rstan", "broom", "parallel",
                   "cowplot", "stringr"))
|
6115d13096d9389e85f0d0d54361ed761cfcbde4 | f36bc1a3afeb5d2bc03358e818599f286f09dc2b | /modules/immunefeaturesmodule.R | e75503b474a476c79ba3f67e1c62b157c596e14b | [
"Apache-2.0"
] | permissive | bioinfo-dirty-jobs/shiny-iatlas | 1c5577f3b021689163b75834c08087378b0dcd26 | 2f81a3d899a1e6cb53daa462a0eaafe3cb9435d6 | refs/heads/master | 2020-05-29T10:52:13.246209 | 2018-09-30T19:19:23 | 2018-09-30T19:19:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,609 | r | immunefeaturesmodule.R | immunefeatures_UI <- function(id) {
  # Namespace function: all input/output IDs below are scoped to this
  # module instance.
  ns <- NS(id)
  tagList(
    titleBox("iAtlas Explorer — Immune Feature Trends"),
    textBox(
      width = 12,
      p("This module allows you to see how immune readouts vary across your groups, and how they relate to one another.")
    ),
    # ---- Distributions section: violin plots of a selected readout ----
    sectionBox(
      title = "Distributions",
      messageBox(
        width = 12,
        p("This displays the value of immune readouts by sample group. Select a variable class to see the distribution of variables within that class displayed as as violin plot."),
        p("Manuscript context: This allows you to display distributions such as those shown in Figures 1C and 1D.")
      ),
      fluidRow(
        optionsBox(
          width = 6,
          selectInput(
            ns("violin_y"),
            "Select Violin Plot Y Variable",
            choices = get_feature_df_nested_list()
          )
        )
      ),
      fluidRow(
        plotBox(
          width = 12,
          plotlyOutput(ns("violinPlot")) %>%
            shinycssloaders::withSpinner(),
          p(),
          textOutput(ns("violin_group_text"))
        )
      )
    ),
    # ---- Correlations section: heatmap + click-through scatterplot ----
    sectionBox(
      title = "Correlations",
      messageBox(
        width = 12,
        p("Here, you can look at correlation of a response variable with other variables, within each sample group. Select the response variable on the right. Select a variable class on the left to specify which other variable you would like to correlate the response variable with. The result will be a heatmap, with positive correlation shown with a red scale, absence of correlation in white, and negative correlation in blue. Click on any cell in the heatmap to see the underlying data as a scatterplot. In the scatterplot, each point represents a tumor sample, the response variable is shown on the Y-axis and the row variable is shown on the X-axis.
"),
        p("Manuscript context: Select “Leukocyte Fraction” as the response variable “DNA Alteration” as the variable class. This will correspond to Figure 4A if you are looking at immune subtypes as your sample grouping.")
      ),
      fluidRow(
        optionsBox(
          width = 12,
          column(
            width = 8,
            selectInput(
              ns("heatmap_y"),
              "Select Variable Class",
              c(
                "Core Expression Signature",
                "DNA Alteration",
                "Adaptive Receptor - B cell",
                "Adaptive Receptor - T cell",
                "T Helper Cell Score",
                "Immune Cell Proportion - Original",
                "Immune Cell Proportion - Multipotent Progenitor Cell Derivative Class",
                "Immune Cell Proportion - Common Lymphoid and Myeloid Cell Derivative Class",
                "Immune Cell Proportion - Differentiated Lymphoid and Myeloid Cell Derivative Class"
              ),
              selected = "Immune Cell Proportion - Original"
            )
          ),
          column(
            width = 4,
            selectInput(
              ns("heatmap_values"),
              "Select Response Variable",
              choices = get_feature_df_nested_list(),
              selected = "Leukocyte Fraction"
            )
          )
        )
      ),
      fluidRow(
        plotBox(
          width = 12,
          fluidRow(
            plotlyOutput(ns("corrPlot")) %>%
              shinycssloaders::withSpinner(),
            p(),
            textOutput(ns("heatmap_group_text"))
          )
        )
      ),
      fluidRow(
        plotBox(
          width = 12,
          plotlyOutput(ns("scatterPlot")) %>%
            shinycssloaders::withSpinner()
        )
      )
    )
  )
}
# Server ----
# Shiny module server for the immune-features views: a violin plot of a
# selected feature, a correlation heatmap, and a drill-down scatterplot
# driven by clicks on the heatmap.
#
# Arguments after the standard input/output/session triple are reactives
# supplied by the calling module (each is invoked with () below):
#   group_display_choice  - display label of the selected sample grouping
#   group_internal_choice - internal column name of that grouping
#   subset_df             - data frame of the samples currently in scope
#   plot_colors           - fill palette for the sample groups
immunefeatures <- function(
  input, output, session, group_display_choice, group_internal_choice,
  subset_df, plot_colors) {
  ns <- session$ns
  # reactives ----
  # Character vector of the variables belonging to the currently selected
  # heatmap variable class (factor levels coerced to character).
  hm_variables <- reactive({
    get_factored_variables_from_feature_df(input$heatmap_y) %>%
      as.character
  })
  # Correlation input table: the response variable plus the class variables,
  # restricted to the current sample subset and grouping.
  intermediate_corr_df <- reactive({
    sample_groups <- get_unique_column_values(
      group_internal_choice(),
      subset_df())
    build_intermediate_corr_df(
      subset_df(),
      group_column = group_internal_choice(),
      value1_column = input$heatmap_values,
      value2_columns = hm_variables(),
      group_options = sample_groups)
  })
  # plots ----
  # Violin plot of the selected Y variable across sample groups; registered
  # under plotly source "violin" so click events can be read back below.
  output$violinPlot <- renderPlotly({
    display_x <- group_display_choice()
    internal_x <- group_internal_choice()
    internal_y <- input$violin_y
    display_y <- get_variable_display_name(internal_y)
    plot_df <- build_violinplot_df(subset_df(), internal_x, internal_y)
    create_violinplot(
      plot_df,
      xlab = display_x,
      ylab = display_y,
      fill_colors = plot_colors(),
      source_name = "violin"
    )
  })
  # Caption describing the group last clicked in the violin plot.
  output$violin_group_text <- renderText(
    create_group_text_from_plotly("violin"))
  # Correlation heatmap (plotly source "heatplot"): class variables on the
  # y axis, sample groups on the x axis (see the click handling below).
  output$corrPlot <- renderPlotly({
    heatmap_corr_mat <- build_heatmap_corr_mat(
      intermediate_corr_df(),
      group_column = group_internal_choice(),
      value1_column = input$heatmap_values,
      value2_columns = hm_variables())
    create_heatmap(heatmap_corr_mat, "heatplot")
  })
  # Caption for the heatmap; key_column = "x" because the clicked sample
  # group is carried on the heatmap's x axis.
  output$heatmap_group_text <- renderText(
    create_group_text_from_plotly("heatplot", key_column = "x"))
  # Scatterplot of the heatmap cell last clicked: the clicked row variable
  # (x axis) against the response variable (y axis), within the clicked
  # sample group.
  output$scatterPlot <- renderPlotly({
    eventdata <- event_data("plotly_click", source = "heatplot")
    # Show a prompt instead of a plot until a valid heatmap cell is clicked.
    validate(need(
      check_immunefeatures_scatterplot_click_data(
        eventdata,
        subset_df(),
        group_internal_choice(),
        intermediate_corr_df()),
      "Click above heatmap"))
    # Map the clicked display name back to its internal column name, keeping
    # only names actually present in the correlation data frame.
    internal_variable_name <- eventdata$y[[1]] %>%
      get_variable_internal_name() %>%
      .[. %in% colnames(intermediate_corr_df())]
    plot_df <- build_scatterplot_df(
      intermediate_corr_df(),
      group_column = group_internal_choice(),
      group_filter_value = eventdata$x[[1]],
      x_column = internal_variable_name,
      y_column = input$heatmap_values)
    create_scatterplot(
      plot_df,
      xlab = eventdata$y[[1]],
      ylab = get_variable_display_name(input$heatmap_values),
      title = eventdata$x[[1]],
      label_col = "label")
  })
}
|
7c654ecddaeedfc7961e8087edb296e1949ab546 | 13f914e94c0667a3ba32e728fac204a3901fd15c | /src/visualization/plot_mocks.R | 10be775b73a569f216d17d3520786f934251612b | [] | no_license | manutamminen/epicpcr_4 | fcaff79962d2e1df09cd4559b29c7fcf8aef2098 | 53e938f827e7ab356b756bc45d3bfa671c4bc6fa | refs/heads/main | 2023-05-31T09:54:40.421995 | 2021-06-17T10:56:55 | 2021-06-17T10:56:55 | 369,548,748 | 0 | 1 | null | 2021-06-17T10:56:55 | 2021-05-21T13:44:09 | Python | UTF-8 | R | false | false | 1,827 | r | plot_mocks.R |
library(tidyverse)

# Read the bacterial and eukaryote barcode tables supplied by Snakemake.
bact_bcs <-
  read_tsv(snakemake@input[[1]])
euk_bcs <-
  read_tsv(snakemake@input[[2]])

# Shared plotting routine for both barcode tables: per-sample counts of the
# mock-community taxa, faceted into "Fusion" (mock index >= 4) and
# "No_fusion" (index < 4) panels, with a log-link Gaussian GLM trend line.
plot_mock_counts <- function(bc_df) {
  bc_df %>%
    count(Sample, Taxonomy) %>%
    filter(str_detect(Taxonomy, "Mock")) %>%
    arrange(Sample, desc(n)) %>%
    # Pull the numeric mock index out of taxonomy labels (split after
    # "ock", then before "_") and strip the suffix from the sample name.
    separate(Taxonomy, into = c(NA, "Ix"), sep = "ock", remove = FALSE) %>%
    separate(Ix, into = c("Ix", NA), sep = "_") %>%
    separate(Sample, into = c("Sample", NA), sep = "_") %>%
    mutate(Ix = as.numeric(Ix)) %>%
    mutate(Fus = ifelse(Ix < 4, "No_fusion", "Fusion")) %>%
    select(-Taxonomy) %>%
    group_by(Sample, Ix, Fus) %>%
    summarise(n = sum(n)) %>%
    ggplot(aes(x = Ix, y = n)) +
    geom_point() +
    # BUG FIX: geom_smooth() has no `family` argument, so the original
    # `family = gaussian(link = "log")` was silently ignored and the fit
    # fell back to the default identity link.  Arguments for glm() must be
    # passed through `method.args`.
    geom_smooth(method = "glm",
                method.args = list(family = gaussian(link = "log"))) +
    facet_grid(Sample ~ Fus, scales = "free") +
    theme(strip.text.y = element_text(angle = 0),
          axis.text.x = element_text(angle = 90, hjust = 1),
          axis.text.y = element_text(size = 5))
}

# Render one PNG per barcode table.  print() is required: the ggplot object
# is returned from a function call, so it is not auto-printed when this
# script is source()d by Snakemake.
png(snakemake@output[[1]], units = "in", width = 5, height = 5, res = 300)
print(plot_mock_counts(bact_bcs))
dev.off()

png(snakemake@output[[2]], units = "in", width = 5, height = 5, res = 300)
print(plot_mock_counts(euk_bcs))
dev.off()
|
095aa71bf48bdebc06eb73ca2631b9428f676a26 | e9841bea06fe2f9372a4352209533c95af1cf41c | /cachematrix.R | a5375e599b1039d530f5d21af42cb9488c304e5e | [] | no_license | harridw/ProgrammingAssignment2 | 9df875b01a354ba148565453ff324b18fb3c6cd1 | e2adfd4b71ed72fda99a449c1b3f17004a7a548d | refs/heads/master | 2021-01-17T07:28:36.951317 | 2017-03-02T19:18:16 | 2017-03-02T19:18:16 | 83,455,503 | 0 | 0 | null | 2017-03-01T12:54:34 | 2017-02-28T16:36:20 | R | UTF-8 | R | false | false | 3,735 | r | cachematrix.R | ## Create matrix object to cache inverse matrix
## If matrix provided is not square (i.e. nrow == ncol) or otherwise non-invertible, cacheSolve will fail
## Use test matrices recommended by Alan E Berger for assignment
## m1 <- matrix(c(1/2,-1/4,-1,3/4), nrow=2, ncol=2) --> or other non-singular square matrix would work
## mtrx <- makeMatrix(m1) --> use matrix 'm1' for makeMatrix() function
## cacheSolve(mtrx) --> use makeMatrix() as input arguments for cacheSolve(mtrx) to compute inverse of matrix
## Wrap a matrix together with a cache slot for its inverse.  Returns a list
## of four accessor functions (set/get/setinverse/getinverse); the cached
## inverse lives in this function's environment and is invalidated whenever
## set() replaces the stored matrix.
makeMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  ## Replace the stored matrix and drop any stale cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  ## Return the stored matrix.
  get <- function() x
  ## Store / retrieve the cached inverse (managed by cacheSolve()).
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Computes the inverse matrix for the matrix created above. If inverse matrix already stored ('cached') and the matrix has not changed,
## then cacheSolve should retrieve the inverse matrix from cache
## Retrieve (or compute and cache) the inverse of the matrix wrapped by
## makeMatrix().  `x` must be the accessor list returned by makeMatrix();
## any extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  ## Short-circuit: a previously computed inverse is still valid.
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: fetch the matrix, invert it, store the result for
  ## subsequent calls, and return it.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setinverse(inv)
  inv
}
|
9c275a17feff25b8e837d477729a02c8641fcf49 | 5ae29ef9ebca69c844f5927be475db7c5411ac97 | /g_2.R | f91be3066076a72d40588f1939dc6f772492220f | [] | no_license | abrarShariar/cartesian-R | 9cb2441b711ca82cca1900f8d65efa2c089148dc | aac56fb2cac863add9d87646fd6649b8f7e14b17 | refs/heads/master | 2020-07-22T15:51:09.893724 | 2019-09-12T06:22:08 | 2019-09-12T06:22:08 | 207,251,404 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 204 | r | g_2.R | # generating the graph for y=x^2-500
# Draw the parabola y = x^2 - 500 on a fixed viewing window, with the
# coordinate axes drawn through the origin.
xs <- seq(-50, 50, by = 1)
ys <- xs^2 - 500
plot(xs, ys, type = "l", col = "red",
     xlim = c(-100, 100), ylim = c(-1000, 1000))
abline(h = 0, v = 0)  # coordinate axes
title(main = "y=x^2-500", col.main = "blue", font.main = 2)
|
fd0a38e84be863de78e11027733d0737a03bf82c | 2062f045bfe5eac45003e92817a7b1c5c374fb0f | /plot4.R | 291ef86d35fdd697d13679552a950de4db5bea7b | [] | no_license | nandoboy/ExData_Plotting1 | 986e399344ea5ab1a1ad80bd748067cc319d1fd8 | 2652a6e7a7b3aeb03309aa367216ed26d059d376 | refs/heads/master | 2020-06-11T00:11:54.223216 | 2017-01-04T10:54:10 | 2017-01-04T10:54:10 | 75,835,455 | 0 | 0 | null | 2016-12-07T12:49:36 | 2016-12-07T12:49:36 | null | UTF-8 | R | false | false | 2,782 | r | plot4.R | #Steps for week-1 project:
# Plot 4: a 2x2 panel of Global_active_power, Voltage, energy sub-metering,
# and Global_reactive_power over a two-day window.
# Read 2880 rows (two days of one-minute readings) starting after line
# 66636 -- per the commented-out date filter in the original script, this
# offset is assumed to cover 2007-02-01 and 2007-02-02 (verify if the
# source file changes).
hh_pow_cons <- read.table('household_power_consumption.txt', header = TRUE,
                          sep = ';', stringsAsFactors = FALSE,
                          skip = 66636, nrows = 2880)
# skip= discards the header row, so recover the column names separately.
name_frame <- read.table('household_power_consumption.txt', header = TRUE,
                         sep = ';', stringsAsFactors = FALSE, nrows = 1)
names(hh_pow_cons) <- names(name_frame)
hh_pow_cons$Date <- strptime(hh_pow_cons$Date, '%d/%m/%Y')
# Convert all measurement columns to numeric in one pass instead of seven
# near-identical statements.
measure_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
                  "Global_intensity", "Sub_metering_1", "Sub_metering_2",
                  "Sub_metering_3")
hh_pow_cons[measure_cols] <- lapply(hh_pow_cons[measure_cols], as.numeric)
hh_pow_cons$Day <- weekdays(hh_pow_cons$Date)
# Full timestamp used as the x axis of every panel.
hh_pow_cons$t_stamp <- strptime(paste(as.character(hh_pow_cons$Date),
                                      hh_pow_cons$Time, sep = ' '),
                                format = '%Y-%m-%d %H:%M:%S')
# Draw the Thu/Fri/Sat x axis at the start / middle / end of the window;
# factored out of the six duplicated axis() calls in the original script.
add_day_axis <- function(t) {
  axis(side = 1,
       at = c(as.numeric(min(t)), as.numeric(mean(t)), as.numeric(max(t))),
       labels = c('Thu', 'Fri', 'Sat'))
}
# plot no. 4
png(filename = 'plot4.png', width = 480, height = 480)
par(mfrow = c(2, 2), mar = c(4, 4, 2, 1))
plot(Global_active_power ~ as.numeric(t_stamp), data = hh_pow_cons,
     type = "l", xlab = "", ylab = 'Global_active_power', xaxt = "n")
add_day_axis(hh_pow_cons$t_stamp)
plot(Voltage ~ as.numeric(t_stamp), data = hh_pow_cons,
     type = "l", xlab = "datetime", ylab = 'Voltage', xaxt = "n")
add_day_axis(hh_pow_cons$t_stamp)
# Sub-metering panel: channel 1 via the formula interface, channels 2 and 3
# overlaid with lines().
plot(Sub_metering_1 ~ as.numeric(t_stamp), data = hh_pow_cons,
     type = "l", xlab = '', ylab = 'Energy sub metering', xaxt = "n")
add_day_axis(hh_pow_cons$t_stamp)
lines(as.numeric(hh_pow_cons$t_stamp), hh_pow_cons$Sub_metering_2,
      type = 'l', col = 'red')
lines(as.numeric(hh_pow_cons$t_stamp), hh_pow_cons$Sub_metering_3,
      type = 'l', col = 'blue')
legend('topright', col = c('black', 'red', 'blue'),
       legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       lwd = 1)
plot(Global_reactive_power ~ as.numeric(t_stamp), data = hh_pow_cons,
     type = "l", xlab = "datetime", ylab = 'Global_reactive_power',
     xaxt = "n")
add_day_axis(hh_pow_cons$t_stamp)
dev.off()
d4493bf247d7c540b692e551451d6d956b4cc209 | af87b7ab2c850155bfeeed32b90532dc9a94e755 | /man/StableEstim-package.Rd | 8eefaf4fe5eb33cec0eb187ea35171593e1dfd78 | [] | no_license | GeoBosh/StableEstim | 0c2b1a0a589700b6e9261c97e85a7ace51edc649 | 1e5818687cbe3b766e8fbd5a5240d369b82b14bb | refs/heads/master | 2022-08-25T11:54:09.434816 | 2022-08-07T08:59:44 | 2022-08-07T08:59:44 | 192,062,694 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,502 | rd | StableEstim-package.Rd | \name{StableEstim-package}
\alias{StableEstim-package}
\docType{package}
\title{
Stable law estimation functions
}
\description{
A collection of methods to estimate the four parameters of stable
laws. The package also provides functions to compute the
characteristic function and tools to run Monte Carlo simulations.
}
\details{
The main functions of the package are briefly described below:
\describe{
\item{main function:}{\code{\link{Estim}} is the most useful
function of the package. It estimates of the parameters
and the asymptotic properties of the estimators.}
\item{estimation function:}{
the methods provided so far are the maximum-likelihood
(\code{\link{MLParametersEstim}}), the generalised method of
moment with finite (\code{\link{GMMParametersEstim}}) or
continuum (\code{\link{CgmmParametersEstim}}) moment conditions,
the iterative Koutrouvelis regression method
(\code{\link{KoutParametersEstim}}) and the fast Kogon-McCulloch
method used for first guess estimation
(\code{\link{IGParametersEstim}}).
}
\item{characteristic function:}{the characteristic function
(\code{\link{ComplexCF}})
and its Jacobian (\code{\link{jacobianComplexCF}})
can be computed and will return a vector (respectively a matrix)
of complex numbers.}
\item{Monte Carlo simulation}{
\code{\link{Estim_Simulation}} is a tool to run Monte Carlo
simulations with flexible options to select the estimation method,
the Monte Carlo control parameters, compute statistical summaries
or save results to a file.
}
}
}
\author{
Tarak Kharrat, Georgi N. Boshnakov
}
\note{
Version 1 of this package had a somewhat restricted license since
it needed package \pkg{akima} in some computations.
In version 2 of the package we implemented a 2D interpolation routine
and removed the dependency on \pkg{akima}. Therefore,
\pkg{StableEstim} is now under GPL license. The package is related to
upcoming work by the authors where the different methods are compared
using MC simulations.
}
\seealso{
\code{fBasics:::.mleStableFit},
\code{fBasics:::.qStableFit}
package \pkg{stabledist}
}
\references{% bibentry:all
Carrasco M and Florens J (2000).
``Generalization of GMM to a continuum of moment conditions.''
\emph{Econometric Theory}, \bold{16}(06), pp. 797--834.
Carrasco M and Florens J (2002).
``Efficient GMM estimation using the empirical characteristic function.''
\emph{IDEI Working Paper}, \bold{140}.
Carrasco M and Florens J (2003).
``On the asymptotic efficiency of GMM.''
\emph{IDEI Working Paper}, \bold{173}.
Carrasco M, Chernov M, Florens J and Ghysels E (2007).
``Efficient estimation of general dynamic models with a continuum of moment conditions.''
\emph{Journal of Econometrics}, \bold{140}(2), pp. 529--573.
Carrasco M, Florens J and Renault E (2007).
``Linear inverse problems in structural econometrics estimation based on spectral decomposition and regularization.''
\emph{Handbook of econometrics}, \bold{6}, pp. 5633--5751.
Carrasco M and Kotchoni R (2010).
``Efficient estimation using the characteristic function.''
Mimeo. University of Montreal.
Nolan J (2001).
``Maximum likelihood estimation and diagnostics for stable distributions.''
\emph{Lévy processes: theory and applications}, pp. 379--400.
\insertRef{nolan:2012}{StableEstim}
Hansen LP (1982).
``Large sample properties of generalized method of moments estimators.''
\emph{Econometrica: Journal of the Econometric Society}, pp. 1029--1054.
Hansen LP, Heaton J and Yaron A (1996).
``Finite-sample properties of some alternative GMM estimators.''
\emph{Journal of Business & Economic Statistics}, \bold{14}(3), pp. 262--280.
Feuerverger A and McDunnough P (1981).
``On efficient inference in symmetric stable laws and processes.''
\emph{Statistics and Related Topics}, \bold{99}, pp. 109--112.
Feuerverger A and McDunnough P (1981).
``On some Fourier methods for inference.''
\emph{Journal of the American Statistical Association}, \bold{76}(374), pp. 379--387.
Schmidt P (1982).
``An improved version of the Quandt-Ramsey MGF estimator for mixtures of normal distributions and switching regressions.''
\emph{Econometrica: Journal of the Econometric Society}, pp. 501--516.
Besbeas P and Morgan B (2008).
``Improved estimation of the stable laws.''
\emph{Statistics and Computing}, \bold{18}(2), pp. 219--231.
% end:bibentry:all
}
\keyword{package}
|
774095c9b4a28c2bbb7df335c03cd34b48976094 | ed42197170d1361f8c5f76de43d2e3c04c8ce79c | /R-package/R/pulsespec.R | 6173dce7e0cb99036d24c8cf5c2596cb3b467331 | [] | no_license | BayesPulse/libpulsatile | 2ab991bbffa111d70ec6a9e36c74bbe34141a227 | 42ef4acef1f0b663faf2a7446aaa67df2fed94d9 | refs/heads/master | 2022-05-01T21:52:54.961243 | 2019-09-06T00:41:47 | 2019-09-06T00:41:47 | 115,029,164 | 2 | 2 | null | 2019-12-19T02:11:16 | 2017-12-21T17:10:48 | Objective-C | UTF-8 | R | false | false | 9,753 | r | pulsespec.R |
#-------------------------------------------------------------------------------
# Functions for creating a pulse model specification
#-------------------------------------------------------------------------------
#' pulse_spec
#'
#' Generates a pulse_spec object -- the specification object required for
#' fitting a fit_pulse model.  A pulse_spec bundles the pulse-location prior
#' type with three named lists: \code{priors} (hyperparameters),
#' \code{proposal_variances} (MCMC proposal variances), and
#' \code{starting_values} (chain starting values).
#'
#' @param location_prior_type Takes on two values: "order-statistic" and
#' "strauss" (the default when the argument is left unspecified).
#' "order-statistic" uses every third order statistic of a Uniform
#' distribution for the pulse location prior and requires specification of the
#' prior parameter for mean pulse count ("prior_mean_pulse_count").
#' "strauss" uses the Strauss interacting point-process as a prior and requires
#' specification of "prior_mean_pulse_count", "prior_location_gamma", and
#' "prior_location_range".
#' @param prior_mass_mean mass mean hyperparm
#' @param prior_mass_var mass variance hyperparm
#' @param prior_width_mean width mean hyperparm (on variance scale)
#' @param prior_width_var width variance hyperparm (on variance scale)
#' @param prior_baseline_mean mean of prior on baseline
#' @param prior_baseline_var variance of prior on baseline
#' @param prior_halflife_mean mean of prior on half-life
#' @param prior_halflife_var variance of prior on half-life
#' @param prior_error_alpha Gamma shape parameter
#' @param prior_error_beta Gamma rate parameter
#' @param prior_location_gamma Strauss repulsion parameter; must lie in
#'   [0, 1].  Used only when location_prior_type == "strauss".
#' @param prior_location_range Strauss repulsion range; must be >= 0.  Used
#'   only when location_prior_type == "strauss".
#' @param prior_sd_mass prior parameter for the SD of the pulse masses
#' @param prior_sd_width prior parameter for the SD of the pulse widths
#' @param prior_mean_pulse_count prior mean number of pulses; must be > 0
#' @param sv_mass_mean starting value for the mean pulse mass
#' @param sv_width_mean starting value for the mean pulse width
#' @param sv_baseline_mean starting value for the baseline
#' @param sv_halflife_mean starting value for the half-life
#' @param sv_error_var starting value for the model error variance
#' @param sv_mass_sd starting value for the SD of the pulse masses
#' @param sv_width_sd starting value for the SD of the pulse widths
#' @param pv_baseline proposal variance for the baseline
#' @param pv_halflife proposal variance for the half-life
#' @param pv_mean_pulse_mass proposal variance for the mean pulse mass
#' @param pv_mean_pulse_width proposal variance for the mean pulse width
#' @param pv_indiv_pulse_mass proposal variance for individual pulse masses
#' @param pv_indiv_pulse_width proposal variance for individual pulse widths
#' @param pv_sd_pulse_mass proposal variance for the SD of the pulse masses
#' @param pv_sd_pulse_width Proposal variance of the SD of the pulse widths (pulse widths are on variance scale)
#' @param pv_sdscale_pulse_mass proposal variance for the pulse-mass SD
#'   scale parameter
#' @param pv_sdscale_pulse_width proposal variance for the pulse-width SD
#'   scale parameter
#' @param pv_pulse_location proposal variance for the pulse locations
#' @return A list of class "pulse_spec" with elements \code{location_prior},
#'   \code{priors}, \code{proposal_variances}, and \code{starting_values}.
#' @export
#' @keywords pulse simulation
pulse_spec <-
  function(location_prior_type = c("strauss", "order-statistic"),
           prior_mass_mean         = 3.5,
           prior_mass_var          = 100,
           prior_width_mean        = 42,
           prior_width_var         = 1000,
           prior_baseline_mean     = 2.6,
           prior_baseline_var      = 100,
           prior_halflife_mean     = 45,
           prior_halflife_var      = 100,
           prior_error_alpha       = 0.0001,
           prior_error_beta        = 0.0001,
           prior_location_gamma    = 0,
           prior_location_range    = 40,
           prior_sd_mass           = 5,
           prior_sd_width          = 5,
           prior_mean_pulse_count  = 12,
           sv_mass_mean            = 3.5,
           sv_width_mean           = 42,
           sv_baseline_mean        = 2.6,
           sv_halflife_mean        = 45,
           sv_error_var            = 0.005,
           sv_mass_sd              = 1.6,
           sv_width_sd             = 35,
           pv_baseline             = 0.02,
           pv_halflife             = 1.5,
           pv_mean_pulse_mass      = 6,
           pv_mean_pulse_width     = 3700,
           pv_indiv_pulse_mass     = 1,
           pv_indiv_pulse_width    = 15000,
           pv_sd_pulse_mass        = 4.5,
           pv_sd_pulse_width       = 4000,
           pv_sdscale_pulse_mass   = 4,
           pv_sdscale_pulse_width  = 4,
           pv_pulse_location       = 65)
{

    # match.arg() validates the choice (erroring on anything unrecognised)
    # and selects the first entry, "strauss", when the argument is left
    # unspecified.  The original `length(location_prior_type) > 1L` stop
    # after this call was unreachable: match.arg() always returns a
    # length-1 value.
    location_prior_type <- match.arg(location_prior_type)

    if (prior_mean_pulse_count <= 0)
      stop(paste("prior_mean_pulse_count must be > 0."))

    if (location_prior_type == "strauss") {
      # Guard against an explicit NULL, then range-check the Strauss prior
      # parameters.
      if (is.null(prior_location_gamma) || is.null(prior_location_range))
        stop(paste("prior_location_gamma and prior_location_range are required",
                   "arguments when location_prior_type == 'strauss'"))
      if (prior_location_gamma < 0 || prior_location_gamma > 1)
        stop(paste("Invalid value for argument 'prior_location_gamma'; should",
                   "be in [0,1]"))
      if (prior_location_range < 0)
        stop(paste("Invalid value for argument 'prior_location_range'; should",
                   "be >= 0"))
    } else {
      # BUG FIX: both arguments have non-NULL defaults, so the original
      # !is.null() test made this message fire on every "order-statistic"
      # call.  missing() correctly detects only user-supplied values.
      if (!missing(prior_location_gamma) || !missing(prior_location_range))
        message(paste("When location_prior_type is set to 'order-statistic'",
                      "prior_location_gamma and prior_location_range are not used."))
    }

    # Structure for the single-subject model.
    # NOTE: per the original specification, starting values use std dev
    # while priors use variances.
    ps_obj <-
      structure(
        list(location_prior = location_prior_type,
             priors = list(baseline_mean           = prior_baseline_mean,
                           baseline_variance       = prior_baseline_var,
                           halflife_mean           = prior_halflife_mean,
                           halflife_variance       = prior_halflife_var,
                           mass_mean               = prior_mass_mean,
                           mass_variance           = prior_mass_var,
                           width_mean              = prior_width_mean,
                           width_variance          = prior_width_var,
                           mass_sd_param           = prior_sd_mass,
                           width_sd_param          = prior_sd_width,
                           error_alpha             = prior_error_alpha,
                           error_beta              = prior_error_beta,
                           pulse_count             = prior_mean_pulse_count,
                           strauss_repulsion       = prior_location_gamma,
                           strauss_repulsion_range = prior_location_range),
             proposal_variances = list(mass_mean           = pv_mean_pulse_mass,
                                       width_mean          = pv_mean_pulse_width,
                                       mass_sd             = pv_sd_pulse_mass,
                                       width_sd            = pv_sd_pulse_width,
                                       baseline            = pv_baseline,
                                       halflife            = pv_halflife,
                                       location            = pv_pulse_location,
                                       pulse_mass          = pv_indiv_pulse_mass,
                                       pulse_width         = pv_indiv_pulse_width,
                                       sdscale_pulse_mass  = pv_sdscale_pulse_mass,
                                       sdscale_pulse_width = pv_sdscale_pulse_width),
             starting_values = list(baseline   = sv_baseline_mean,
                                    halflife   = sv_halflife_mean,
                                    errorsq    = sv_error_var,
                                    mass_mean  = sv_mass_mean,
                                    width_mean = sv_width_mean,
                                    mass_sd    = sv_mass_sd,
                                    width_sd   = sv_width_sd)),
        class = "pulse_spec")

    return(ps_obj)

  }
#' @export
print.pulse_spec <- function(x, ...) {

  cat("\nBayesian time-series analysis of pulsatile hormone data:
      Model Specification Object\n\n")
  # BUG FIX: pulse_spec objects carry no $model element (see pulse_spec()),
  # so the original "Model type" / "Number of iterations" lines always
  # printed blanks.  Report fields the object actually contains instead.
  cat("Location prior type:", paste0(x$location_prior, "\n"))
  cat("Prior mean pulse count:",
      formatC(x$priors$pulse_count, format = "d", big.mark = ","), "\n")
  cat("\n")
  cat("Pulse mass:\n")
  cat("  Fixed effect (mean)\n")
  cat("    prior mean        =", x$priors$mass_mean, "\n")
  cat("    prior variance    =", x$priors$mass_variance, "\n")
  cat("    starting value    =", x$starting_values$mass_mean, "\n")
  cat("    proposal variance =", x$proposal_variances$mass_mean, "\n")
  cat("  Fixed effect (SD)\n")
  cat("    prior parameter   =", x$priors$mass_sd_param, "\n")
  cat("    starting value    =", x$starting_values$mass_sd, "\n")
  cat("    proposal variance =", x$proposal_variances$mass_sd, "\n")
  cat("  Random effects (individual pulses)\n")
  cat("    proposal variance =", x$proposal_variances$pulse_mass, "\n")
  cat("\n")
  cat("Pulse width:\n")
  cat("  Fixed effect (mean)\n")
  cat("    prior mean        =", x$priors$width_mean, "\n")
  cat("    prior variance    =", x$priors$width_variance, "\n")
  cat("    starting value    =", x$starting_values$width_mean, "\n")
  cat("    proposal variance =", x$proposal_variances$width_mean, "\n")
  cat("  Fixed effect (SD)\n")
  cat("    prior parameter   =", x$priors$width_sd_param, "\n")
  cat("    starting value    =", x$starting_values$width_sd, "\n")
  cat("    proposal variance =", x$proposal_variances$width_sd, "\n")
  cat("  Random effects (individual pulses)\n")
  cat("    proposal variance =", x$proposal_variances$pulse_width, "\n")
  # print methods conventionally return their argument invisibly.
  invisible(x)
}
#------------------------------------------------------------------------------#
# End of file # End of file # End of file # End of file # End of file #
#------------------------------------------------------------------------------#
|
fab8a322591d7d565830d0d19e5018c7f16f725a | 7354030e192afadefd6fb89fb04473be493799bb | /run_analysis.R | b54521b47a6451b241302c03856160915833276c | [] | no_license | richbridgwater/tidy_data | 1c2508602ba8b7282957aab13eb966e41254856b | a5968db69243dfe3a9c9b8d201489b36acf826a3 | refs/heads/master | 2020-06-01T14:04:23.702808 | 2015-05-10T19:36:20 | 2015-05-10T19:36:20 | 29,826,552 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,598 | r | run_analysis.R | # This R script was created and tested on a MAC computer.
# Tidies the UCI HAR accelerometer data set into per-subject, per-activity
# averages of the mean/std measurements.
# The data folder needs to be located in your working directory and
# have the name "UCI HAR Dataset" for the script to work properly.

# BUG FIX: the original script used select(), tbl_df(), %>% and friends
# without ever attaching dplyr, so it could only run if dplyr happened to
# be loaded already.
library(dplyr)

# Feature (measurement) names, one per column of the X_* files.
features <- read.table("UCI HAR Dataset/features.txt")

# We want only the mean and standard deviation measurements: keep feature
# rows whose names contain the literal strings "mean()" or "std()".
subset_mean <- features[grep("mean()", features$V2, fixed = TRUE), ]
subset_std <- features[grep("std()", features$V2, fixed = TRUE), ]
subset_cmb <- rbind(subset_mean, subset_std)

# Testing data --------------------------------------------------------------
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")  # subject IDs
x_test <- read.table("UCI HAR Dataset/test/X_test.txt")              # 561 measurements
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")              # activity codes

# Training data -------------------------------------------------------------
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")

# Keep only the mean/std columns (base subsetting by column position; this
# also avoids dplyr's deprecation warning about external vectors in
# select()).
x_test <- x_test[, subset_cmb$V1]
x_train <- x_train[, subset_cmb$V1]

# Assemble | subject | activity | measurements... | and stack test on train.
test_bind <- cbind(subject_test, cbind(y_test, x_test))
train_bind <- cbind(subject_train, cbind(y_train, x_train))
final_table <- rbind(test_bind, train_bind)

# Clean the measurement names: drop "()" and "-" from the feature labels.
new_var_names <- gsub("()", "", subset_cmb$V2, fixed = TRUE)
new_var_names <- gsub("-", "", new_var_names, fixed = TRUE)
colnames(final_table) <- c("subject", "activity", new_var_names)

# Recode activity codes 1..6 to descriptive labels via a lookup vector
# (replaces six repeated %in% replacements; also avoids coercion errors by
# indexing with the codes as character).
activity_labels <- c("1" = "walking", "2" = "walking_up", "3" = "walking_dn",
                     "4" = "sitting", "5" = "standing", "6" = "laying")
final_table$activity <- unname(activity_labels[as.character(final_table$activity)])

# Convert to a tibble (tbl_df() is deprecated in current dplyr).
final_table <- as_tibble(final_table)

# Group by subject and activity and average every measurement column.
# summarise_all() replaces the deprecated summarise_each(funs(mean)).
tidy_data <- final_table %>%
  group_by(subject, activity) %>%
  summarise_all(mean)

# Let's take a look at the final output
print(tidy_data)
f1954ce484953b87d790684e7c93790630a1d85e | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/pkr/examples/AUC.Rd.R | 14cc52c6d3e22b9990f71222e2f7cad07ed5a279 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 362 | r | AUC.Rd.R | library(pkr)
### Name: AUC
### Title: Calculate Area Under the Curve (AUC) and Area Under the first
###   Moment Curve (AUMC) in a table format
### Aliases: AUC
### Keywords: AUC

### ** Examples

# Restrict the Theoph data to the first subject, then compute exposure with
# the default (linear) and the log-down trapezoidal interpolation.
subj1 <- Theoph[Theoph$Subject == 1, ]
AUC(subj1$Time, subj1$conc)
AUC(subj1$Time, subj1$conc, down = "Log")
|
0085bd732cbce3c640513004e41c8effd8edbdb2 | 725cce758902d6d9db049e87dc211c40ff10921e | /R/test.effect.rep.R | e80d1abc10e0dd9e6791ff78c17256c4b8a6a6b7 | [] | no_license | yaomin/dptmethods | a589e21dbff91075cea72fbbed40626fa643acee | 846a42d01c05e1a27fec290498e011b0f05d6882 | refs/heads/master | 2021-01-17T10:33:38.755292 | 2013-07-11T23:54:16 | 2013-07-11T23:54:16 | 7,569,298 | 1 | 0 | null | 2014-10-13T15:59:10 | 2013-01-11T23:58:27 | R | UTF-8 | R | false | false | 4,841 | r | test.effect.rep.R | test.effect.rep <-
function(cl,wins, psig, vars, group.label, rep.label,
         n.core,
         contr= cbind(c(0,1,1),c(0,1,-1)),
         src.file="")
{
    ## Test the fixed group effect for each window group with a weighted
    ## linear mixed-effects model (nlme::lme), distributing the window groups
    ## over the workers of the parallel cluster 'cl'.
    ##
    ## cl          - a 'parallel' cluster object (e.g. from makeCluster()).
    ## wins        - data.frame with columns ID (window-group id) and Win
    ##               (window name matching the row names of psig/vars).
    ## psig        - signal means; rows indexed by window name, one column per sample.
    ## vars        - per-window variances, same layout as psig.
    ## group.label - experimental group label for each sample column.
    ## rep.label   - replicate/sample id for each sample column.
    ## n.core      - number of chunks the window groups are split into.
    ## contr       - contrast matrix handed to make.RContrast() for the group factor.
    ## src.file    - unused (kept for backward compatibility; see commented
    ##               snowfall-era code below).
    ##
    ## Returns a data.frame with one row per successfully fitted window group:
    ## an ID column plus the flattened lme tTable, with names of the form
    ## "<term>.<statistic>" (Value, Std.Error, DF, t-value, p-value).
    ## NOTE(review): format.data.lme() and make.RContrast() are package-local
    ## helpers defined elsewhere in this package.
    unique.wins <- unique(wins$ID)
    n.wins <- length(unique.wins)
    ## Worker: fit a single window group (i indexes into unique.wins).
    test.effect.worker <- function(i) {
        ## 31775 is one with NULL output
        ### Debug code
        ## if(i%%10 ==0) cat("**")
        ## cat(i)
        ## if(i%%10==0) cat("\n")
        ## Names of the windows belonging to this window group.
        wins.by.id <- as.character(unlist(subset(wins,
                                                 subset=ID==unique.wins[i],
                                                 select="Win")))
        pmean.sub <- psig[wins.by.id,]
        ## Reshape to long format for lme; responses are log-transformed.
        pmean.sub.lme <- format.data.lme(pmean.sub,
                                         group=as.numeric(factor(group.label)),
                                         smpid=rep.label,
                                         log.t=TRUE)
        pmean.var.sub <- vars[wins.by.id,]
        ## Attach the per-observation variances as a weight column 'w'.
        pmean.sub.lme.wt <- data.frame(pmean.sub.lme,
                                       w = as.vector(unlist(pmean.var.sub)))
        contrasts(pmean.sub.lme.wt$grp) <- make.RContrast(contr)
        ## Allow a separate residual variance for each group level.
        varfun <- varIdent(form = ~ 1 | grp)
        ## Random intercepts for sample nested within window; the fit is
        ## wrapped in try() so one non-converging group does not abort the
        ## whole chunk.
        lme.pmean.sub.wt <- try(lme(y ~ grp,
                                    data=pmean.sub.lme.wt,
                                    random= ~ 1 |smpid/win,
                                    control=list(
                                        msMaxIter=500,
                                        maxIter=500,
                                        opt="optim",
                                        tolerance=1e-3),
                                    weights=varfun),
                                TRUE)
        if(!is(lme.pmean.sub.wt, 'try-error')) {
            ## Per-group residual variances implied by the fitted varIdent
            ## structure (sigma scaled by the estimated variance ratios).
            sigmaSq <- diag((lme.pmean.sub.wt$sigma*coef(lme.pmean.sub.wt$modelStruct$varStruct,
                                                         uncons=F,
                                                         allCoef=TRUE))^2)
            ##R00R00 <- lme.pmean.sub.wt$varFix%*%ginv(sigmaSq)
            R00R00 <- lme.pmean.sub.wt$varFix%*%diag(1/diag(sigmaSq))
            ## Inflate the residual variances by the mean supplied variance
            ## within each group, rebuild the fixed-effect covariance, and
            ## recompute the Wald statistics with the updated standard errors.
            sigmaSq.d <- diag(as.vector(by(pmean.sub.lme.wt$w, pmean.sub.lme.wt$grp, mean)))
            sigmaSq.updated <- sigmaSq + sigmaSq.d
            varFix.updated <- R00R00%*%sigmaSq.updated
            stdErr.updated <- sqrt(diag(varFix.updated))
            this.tTable <- summary(lme.pmean.sub.wt)$tTable
            this.tTable[,"Std.Error"] <- stdErr.updated
            this.tTable[,"t-value"] <- this.tTable[,"Value"]/stdErr.updated
            ## Two-sided p-values from the t distribution with the lme DF.
            this.tTable[,"p-value"] <- pt(-abs(this.tTable[,"t-value"]), this.tTable[,"DF"])*2
            res.i <- this.tTable
            ## Flatten the tTable into a named vector "<term>.<statistic>".
            retn <- as.vector(res.i)
            nm.1 <- dimnames(res.i)[[1]]
            nm.2 <- dimnames(res.i)[[2]]
            retn.names <- as.vector(t(sapply(nm.1, function(x) paste(x, nm.2, sep="."))))
            names(retn) <- retn.names
        } else {
            ## Fit failed: return NULL so the caller can drop this group.
            retn <- NULL
        }
        retn
    }
    ## Split the window-group indices into n.core roughly equal chunks.
    if(n.core>1) n.wins.cut <- as.numeric(cut(seq(n.wins), n.core)) else n.wins.cut <- rep(1, n.wins)
    n.wins.seq <- seq(n.wins)
    ##sfExportAll(except=c( "globalNoExport" ))
    ## Export everything in this function's environment to the workers so the
    ## closures above can resolve their free variables remotely.
    envir.this <- environment()
    varlist <- ls()
    clusterExport(cl, varlist=varlist, envir=envir.this)
    ##sfSource(src.file)
    ## sfExport("wins", "psig", "vars", "contr", "n.core", "n.wins.cut","n.wins.seq",
    ##          "unique.wins","n.wins","test.effect.worker","group.label","rep.label")
    ##sfSource()
    ## Chunk worker: run test.effect.worker over every window group in chunk i.
    test.effect.worker.group <- function(i) {
        cat(i,"\n")
        res.i <- sapply(n.wins.seq[n.wins.cut==i],test.effect.worker, simplify=F)
        ## debug:
        ## res.i <- sapply(n.wins.seq[n.wins.cut==i],
        ##                 function(x) {trythis <- try(test.effect.worker(x))
        ##                              if(is(trythis, "try-error")) cat("*",x, "\n")
        ##                              trythis
        ##                 },
        ##                 simplify=F)
        ##t(res.i)
        res.i
    }
    ##res <- unlist(sfLapply(seq(n.core), test.effect.worker.group), recursive=F)
    res.list <- parLapply(cl, seq(n.core), test.effect.worker.group)
    ##res.list <- sapply(seq(n.core), test.effect.worker.group)
    ## take the 1st non-null result to learn the column names and count
    res.names <- NULL
    res.ncols <- NULL
    for(i in seq(along=res.list)){
        for(j in seq(along=res.list[[i]])) {
            a.res <- res.list[[i]][[j]]
            if(!is.null(a.res)) {
                res.names <- names(a.res)
                res.ncols <- length(a.res)
                break
            }
        }
        if(!is.null(res.ncols)) break
    }
    ## Assemble the per-group result vectors into a data.frame; unlist()
    ## silently skips the NULL (failed) groups, and res.notNull keeps the
    ## row names aligned with the groups that actually succeeded.
    res <- as.data.frame(matrix(unlist(res.list), ncol=res.ncols, byrow=T))
    names(res) <- res.names
    ##res.notNull <- !as.vector(unlist(lapply(res.list, is.null)))
    res.notNull <- !as.vector(unlist(lapply(res.list, function(x) sapply(x, is.null))))
    ##res.list <- res.list[res.notNull]
    row.names(res) <- unique.wins[res.notNull]
    res <- data.frame(ID=as.character(unique.wins[res.notNull]), res, stringsAsFactors=F)
    res
}
|
036cadafdb965e8064e4c509f1bb957105ca13cf | c43d3ffc6604582707b73e500717df25409e7c97 | /Fusion/fun/diversityIdx.R | 2fadf8e5c87b6c14cbf9ccf53dab99512db187cd | [] | no_license | JAGOW/msc-phygeo-class-of-2017-JAGOW | b7f33c521256fa6a34cc7fd3978ff5906e127d18 | b7da535575d45117ba6192af45aa9dcc296b6433 | refs/heads/master | 2021-09-01T15:33:04.351840 | 2017-12-27T18:31:16 | 2017-12-27T18:31:16 | 115,546,644 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,637 | r | diversityIdx.R | # foliage height density
# Foliage height density (FHD) diversity index.
# Originally MacArthur & MacArthur (1961)
# Implemented after:
# Hashimoto, H., Imanishi, J., Hagiwara, A., Morimoto, Y., & Kitada, K. (2004). Estimating forest structure indices for evaluation of forest bird habitats by an airborne laser scanner. In M. Thies, B. Koch, H. Spiecker, & H. Weinacker (Eds.), Laser scanners for forest and landscape assessment: Proceedings of the ISPRS Working Group VIII/2, Freiburg, 3-6 October 2004, 254-258.
# http://www.isprs.org/proceedings/XXXVI/8-W2/HASHIMOTO.pdf
#
# a: multi-layer raster object whose LAST layer is used as the per-cell
#    total against which the remaining layers are normalised.
# Returns abs() of the Shannon-style sum over layers 1..(nlayers - 1).
#
# NOTE(review): algebraically a / (a / a[[l]]) simplifies to a[[l]], so the
# log() term here is log of the last layer, not log of the proportion
# p = a / a[[l]]; the textbook index is -sum(p * log(p)). Confirm this is
# intended before relying on the result (compare fun_fhd_fu below, which
# takes log of the layer values directly).
fun_fhd <- function(a) {
  l <- raster::nlayers(a)
  r <- -1 * ((a/a[[l]]) * log(a / (a/a[[l]])))
  abs(sum(r[[1:(l-1)]]))
}
# Variant of fun_fhd for FUSION GridMetrics output, where the first six
# layers already contain the proportions p_i, so no normalisation by a
# total layer is required before forming the Shannon terms.
fun_fhd_fu <- function(b) {
  props <- subset(b, 1:6)
  n_layers <- nlayers(props)
  shannon_terms <- -1 * (props[[n_layers]] * log(props[[n_layers]]))
  abs(sum(shannon_terms[[1:(n_layers - 1)]]))
}
# Vertical distribution ratio (VDR)
# VDR is a ratio of the distance between the canopy height return and the median height
# return: VDR = (CH - HOME) / CH.
# Forested regions characterized by a dense canopy and sparse understory exhibit
# lower VDR values; areas with biomass distributed evenly through the vertical
# profile exhibit larger VDRs (closer to 1).
# Goetz, S. J., D. Steinberg, R. Dubayah, and B. Blair. 2007. Laser remote sensing of canopy habitat heterogeneity as a predictor of bird species richness in an eastern temperate forest, USA. Remote Sensing of Environment 108:254-263.
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.111.2979&rep=rep1&type=pdf
#
# max: object whose first element/layer holds the canopy (maximum) height, CH.
#      (Note: the argument name shadows base::max inside this function body.)
# med: object whose first element/layer holds the median height return, HOME.
# Returns the VDR with the same shape as the inputs' first element.
fun_vdr <- function(max,med) {
  # Return the ratio directly: the original assigned it to an unused local
  # variable, which made the function's return value invisible to callers.
  (max[[1]] - med[[1]]) / max[[1]]
}
|
fcd6ed982d20040182c5ea3269c3890963b262fa | 7f880efddddc2e5414bdcfa3998591a0161daf0a | /Loan_Default_EDA_main.R | 36ef9cca22f24066eb50a2b3430466de45c72580 | [] | no_license | shubhrakarmahe/Loan-Deafult-Customer-Analysis | f5af35a3ed813c253d06f4cdc23af009bf174b8b | 47c9a0507e0d413838c07b257dedea690596416c | refs/heads/master | 2020-03-28T17:43:03.343658 | 2019-08-09T10:27:23 | 2019-08-09T10:27:23 | 148,815,827 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,706 | r | Loan_Default_EDA_main.R | # -------------------------- EDA Gramaner Case Study ---------------------------------------
# Group Member 1 - Harkirat Dhillon
# Group Member 2 - Shubhra Karmahe
# Group Member 3 - Yogesh BS Raju
# Group Member 4 - Vartika Tewari
# -------------------------------------------------------------------------------------------
# -------------------------- Problem Statement ----------------------------------------------
#
# The data given below contains the information about past loan applicants and whether they
# 'defaulted' or not. The aim is to identify patterns which indicate if a person is likely to
# default, which may be used for taking actions such as denying the loan, reducing the amount
# of loan, lending (to risky applicants) at a higher interest rate, etc.
# -------------------------------------------------------------------------------------------
# -------------------------- Load the required libraries ------------------------------------
# Packages used throughout the analysis (duplicate 'ggcorrplot' entry removed).
load.libraries <- c('data.table', 'ggplot2', 'lubridate', 'dplyr', 'ggcorrplot',
                    'stringr', 'treemap', 'xray', 'devtools')
# Install anything missing; compare against installed package NAMES
# (rownames) rather than the entire installed.packages() matrix.
install.lib <- load.libraries[!load.libraries %in% rownames(installed.packages())]
for(libs in install.lib) install.packages(libs)
# Attach every package; spell out 'character.only' instead of relying on
# partial argument matching ('character' previously matched 'character.only').
sapply(load.libraries, require, character.only = TRUE)
# -------------------------- Raw File import and Preliminary analysis -------------------------
# fread() will interpret "NA", "n/a" and the empty string in the raw file as NA;
# data.table = F returns a plain data.frame.
dataset <- fread(input = "loan.csv", stringsAsFactors = TRUE, na.strings = c("NA","","n/a"), data.table = F)
# Quick structural overview: 39717 rows and 111 columns
nrow(dataset)
ncol(dataset)
head(dataset)
tail(dataset)
str(dataset)
summary(dataset)
# -------------------------- Data Preparation ------------------------------------------------
# 0 complete rows in dataset, i.e. every row has at least one missing value.
sum(complete.cases(dataset))
# 1. Validate duplicated rows: 39717 unique rows, i.e. no duplicates.
uniqueN(dataset)
# 2. Check the NA count per column.
data.frame(colSums(is.na(dataset)))
# Out of 111 columns, 54 columns have ALL of their values as NA,
# so we can exclude them from our analysis dataset.
sum(sapply(dataset, function(x) all(is.na(x))))
# Create a new dataset 'loan' after excluding those 54 all-NA columns
# (colMeans(is.na(.)) == 1 flags columns that are entirely NA).
loan <- dataset[,-(which(colMeans(is.na(dataset)) == 1))]
# The new dataset 'loan' has 57 columns and 39717 rows.
dim(loan)
str(loan)
# -------------------------- Data Cleaning & Univariate Analysis for loan ---------------------
# 39703 incomplete rows remain even after dropping the all-NA columns.
sum(!complete.cases(loan))
# -------------------------- Validate Case Mismatch & Other Data Issues -----------------------
# loan_status column - 3 unique values : Fully Paid, Charged Off and Active - no case mismatch
# Most of the loans are in Fully Paid status.
# The pie chart indicates that a significant share of borrowers in our dataset paid off
# their loan: 83% of borrowers fully paid the amount borrowed, 2.9% are paying currently,
# and 14.2% unfortunately defaulted.
table(loan$loan_status)
# Pie chart of loan_status proportions (stacked bar in polar coordinates).
ggplot(loan, aes(x = "", fill = loan_status)) +
  geom_bar(width = 1, col="black", aes(y=(..count..)/sum(..count..))) +
  geom_text(aes(y=((..count..)/sum(..count..)),
                label = scales::percent((..count..)/sum(..count..))),
            stat = "count", size = 4,
            position = position_stack(vjust = 0.5) ) +
  coord_polar(theta = "y", start = 90) + theme_void()
# grade column - 7 unique values : A,B,C,D,E,F,G - no case mismatch
# Grade B was issued the most loans and grade G the fewest.
table(loan$grade)
ggplot(data = loan, aes(grade)) + geom_bar(color = 'black', fill = 'dodgerblue') +
xlab('Grade') + ylab('No. of borrowers')
# sub_grade column - 35 unique values : example - A1,A2,A3,B1,C1 etc - no case mismatch
# Sub-grade B3 was issued the most loans and sub-grade G5 the fewest.
table(loan$sub_grade)
ggplot(data = loan, aes(sub_grade)) + geom_bar(color = 'black', fill = 'dodgerblue') +
xlab ("Sub Grade") + ylab('No. of borrowers')
# verification_status column - 3 unique values : Verified, Source Verified and Not Verified - no mismatch
# Applicants with verification status "Not Verified" were issued the most loans.
table(loan$verification_status)
ggplot(data = loan, aes(verification_status)) + geom_bar(color = 'black', fill = 'dodgerblue') +
xlab("Verification Status") + ylab('No. of borrowers')
# home_ownership column - 5 unique values : MORTGAGE,RENT,OWN,OTHER & NONE - no case mismatch
# As per the data dictionary there is no NONE category, so we treat it as NA.
# Applicants with home ownership RENT or MORTGAGE were issued the most loans.
table(loan$home_ownership)
ggplot(data = loan, aes(home_ownership)) + geom_bar(color = 'black', fill = 'dodgerblue') +
xlab('House Ownership') + ylab('No. of borrowers')
# purpose column - 14 unique values : car, debt_consolidation etc. - no case mismatch
# Most loans were issued for the debt_consolidation purpose.
table(loan$purpose)
ggplot(data = loan, aes(purpose)) + geom_bar(color = 'black', fill = 'dodgerblue') +
ylab('No. of borrowers') + xlab("Purpose") + theme(axis.text.x = element_text(angle = 90))
# term column - 2 values : 36 months & 60 months - no case mismatch
# Data issue - strip the literal 'months' and store the result in a new
# column term_in_mths. str_trim() removes the whitespace that
# str_replace_all() would otherwise leave behind ("36 months" -> "36 ").
# Most of the loans were issued for the 36-month term.
table(loan$term)
loan$term_in_mths <- str_trim(str_replace_all(string = loan$term, pattern = 'months', replacement = ''))
ggplot(data = loan, aes(term_in_mths)) + geom_bar(color = 'black', fill = 'dodgerblue', width = 0.5) +
xlab('Term (in months)') + ylab('No. of borrowers')
# int_rate column - 371 unique values
# Data issue :- remove the % symbol and convert int_rate to numeric.
# Most of the loans were issued at a 10-15% interest rate.
table(loan$int_rate)
loan$int_rate <- str_replace_all(string = loan$int_rate, pattern = "%",
replacement = "") %>% as.double()
ggplot(data = loan, aes(int_rate)) + geom_histogram(color = 'black', fill = 'dodgerblue', binwidth = 2) +
xlab('Interest Rate') + ylab('No. of borrowers')
# loan_amnt column - 885 unique values - e.g. 29900, 30500 etc.
# The majority of people have taken loans in the 5000-10000 range & very few >= 30000.
table(loan$loan_amnt)
ggplot(loan,aes(loan_amnt)) + geom_histogram(binwidth = 5000 ,color = "black" , fill = "dodgerblue") +
xlab('Loan amount') + ylab('No. of borrowers') + scale_x_continuous(breaks = seq(0, 36000, by = 5000))
# dti column - 2868 unique values - e.g. 10, 9.77 etc.
# The majority of people have dti between 10 and 20 & few loans were granted for dti > 25.
table(loan$dti)
ggplot(data = loan, aes(dti)) + geom_histogram(color = 'black', fill = 'dodgerblue' ,binwidth = 3) +
xlab('Debt to Income Ratio') + ylab('No. of borrowers')
# issue_d column - date in Mon-YY format - 55 unique values
# Convert the date to yyyy-mm-dd format (lubridate::myd, day truncated).
# Most of the loans were issued in the 2011-2012 range.
table(loan$issue_d)
loan$issue_d <- myd(loan$issue_d, truncated = 1)
ggplot(data = loan, aes(issue_d)) + geom_histogram(color = 'black', fill = 'dodgerblue', binwidth = 100) +
xlab("Issue Date") + ylab('No. of borrowers')
# addr_state column - 50 unique values - no mismatch
# Most of the loans were issued for the CA state.
table(loan$addr_state)
ggplot(data = loan, aes(addr_state)) + geom_bar(color = 'black', fill = 'dodgerblue') +
ylab('No. of borrowers') + xlab('State')
# earliest_cr_line column - date format Mon-YY - 526 unique values
# Convert the date to yyyy-mm-dd format.
table(loan$earliest_cr_line)
loan$earliest_cr_line <- myd(loan$earliest_cr_line, truncated = 1)
# total_pymnt column : round off to 2 decimal digits to maintain data consistency.
# data issue :- round off to 2 decimal digits
table(loan$total_pymnt)
loan$total_pymnt <- round(x = loan$total_pymnt, digits = 2)
# total_rec_late_fee column :- round off to 2 decimal digits for consistency.
table(loan$total_rec_late_fee)
loan$total_rec_late_fee <- round(x = loan$total_rec_late_fee, digits = 2)
# collection_recovery_fee column : round off to 2 decimal digits for consistency.
table(loan$collection_recovery_fee)
loan$collection_recovery_fee <- round(x = loan$collection_recovery_fee, digits = 2)
# last_pymnt_d column - format Mon-YY
# Convert the date to YYYY-MM-DD format.
table(loan$last_pymnt_d)
loan$last_pymnt_d <- myd(loan$last_pymnt_d, truncated = 1)
#---------------------------------Missing value (NA) analysis----------------------------
# Looking for NA & NaN values per column.
data.frame(colSums(is.na(loan)))
sapply(loan, function(x) length(which(is.nan(x))))
# emp_length column - 12 unique values : example 10+ years, 1 year, <1 year, n/a
# Data issue - remove the year/years, < and + strings; per the data dictionary
# "10+ years" is treated as 10 and "< 1 year" as 0.
# 1075 NA values (no imputation is done for NA values).
# Most loans were issued to applicants with 10+ years of employment,
# and the fewest to applicants with 9 years.
# NOTE(review): the result stays a character column (with residual spaces,
# e.g. "10 "); wrap downstream numeric uses in as.numeric().
table(loan$emp_length)
loan$emp_length_in_yrs <- str_replace_all(string = loan$emp_length, pattern = "years",
replacement = "") %>%
str_replace_all(pattern = "year", replacement = "") %>%
str_replace_all(pattern = "< 1", replacement = "0") %>%
str_replace_all(pattern = "10\\+", replacement = "10")
ggplot(data = loan, aes(emp_length_in_yrs)) + geom_bar(color = 'black', fill = 'dodgerblue') +
xlab('Employment Length(in years)') + ylab('No. of borrowers')
# title column - 19617 unique values including NA: Computer, JAL loan, personal etc.
# Data issue - 10 NA values (no imputation is done for NA values).
# revol_util column : 1089 unique values including NA
# Data issue - 50 NA values (no imputation); remove the % sign and convert to double.
# Most of the revol_util values are at 0%.
table(loan$revol_util)
loan$revol_util <- str_replace_all(string = loan$revol_util, pattern = "%",
replacement = "") %>% as.double()
# next_pymnt_d column - date format Mon-YY : 3 unique values including NA - Jul-16, Jun-16
# 38577 NA values (no imputation is done for NA values).
# Convert the date to YYYY-MM-DD format.
table(loan$next_pymnt_d)
loan$next_pymnt_d <- myd(loan$next_pymnt_d, truncated = 1)
# mths_since_last_delinq column - 96 unique values including NA - example 35,103,107,85 etc.
# 25682 NA values (no imputation is done for NA values).
table(loan$mths_since_last_delinq)
# mths_since_last_record column - 112 unique values including NA - example 113,71,0,19 etc.
# 36931 NA values (no imputation is done for NA values).
table(loan$mths_since_last_record)
# last_credit_pull_d column - date format Mon-YY
# 2 NA values (no imputation is done for NA values).
# Convert the date to YYYY-MM-DD format.
table(loan$last_credit_pull_d)
loan$last_credit_pull_d <- myd(loan$last_credit_pull_d, truncated = 1)
# chargeoff_within_12_mths column : 2 unique values including NA - 0
# 56 NA values (no imputation is done for NA values).
table(loan$chargeoff_within_12_mths)
# pub_rec_bankruptcies column :- 4 unique values including NA - 0, 1, 2
# 697 NA values (no imputation is done for NA values).
# Most applicants have 0 bankruptcy records.
# NOTE(review): once this is a factor, as.numeric() on it returns the level
# codes (1, 2, 3) rather than 0/1/2 - go through as.character() first
# wherever a numeric value is needed downstream.
table(loan$pub_rec_bankruptcies)
loan$pub_rec_bankruptcies <- as.factor(loan$pub_rec_bankruptcies)
ggplot(data = loan, aes(pub_rec_bankruptcies)) + geom_bar(color = 'black', fill = 'dodgerblue') +
xlab('Public Bankruptcies') + ylab('No. of borrowers')
# tax_liens column :- 2 unique values including NA - 0
# 39 NA values (no imputation is done for NA values).
table(loan$tax_liens)
# collections_12_mths_ex_med column : 2 unique values including NA - 0
# 56 NA values (no imputation is done for NA values).
table(loan$collections_12_mths_ex_med)
# emp_title column - 28823 unique values : example - US Army, Bank of America, IBM, AT&T etc.
# 2453 NA values (no imputation is done for NA values).
table(loan$emp_title)
# ---------------------------Remove columns --------------------------------------------------
# Removing columns that will not be used in further analysis.
# Count distinct values per column to spot constant (uninformative) columns.
data.frame(sapply(loan,function(y) length(unique(y))))
# term - redundant: the new column term_in_mths was created during data cleaning
# emp_length - redundant: the new column emp_length_in_yrs was created during data cleaning
# url & member_id - we keep id as the unique identifier
# desc - free-text descriptive column
# zip_code - masked data, can't be used for analysis
# pymnt_plan column - 1 unique value : n
# initial_list_status - 1 unique value : f
# policy_code column - 1 unique value : 1
# application_type column : 1 unique value - INDIVIDUAL
# acc_now_delinq column : 1 unique value - 0
# delinq_amnt column : 1 unique value - 0
drop_cols <- c('term','url','member_id','desc','initial_list_status','zip_code','pymnt_plan',
'emp_length','policy_code','application_type','acc_now_delinq','delinq_amnt')
# Setting the selected columns to NULL removes them from the data.frame.
loan[,drop_cols] <- NULL
# ---------------------- Univariate analysis ends ---------------------------------------
# ---------------------- Segmented Univariate Analysis starts ---------------------------
# Term-wise loan status: proportion of each loan_status within each term.
# Observation: the charged-off rate differs between the 36- and 60-month terms.
ggplot(loan,aes(x="",fill=loan_status))+ geom_bar(col="black",position = "fill") +
labs(x = "Loan Term (in months)", y = "Proportion", fill = "Loan status") + facet_grid(facets=. ~ term_in_mths)
# Effect of loan purpose on defaults (charged-off loans only).
## It makes sense that if the borrower's small business is not doing well then it is difficult to repay the loan.
ggplot(data = loan %>% filter(loan_status == 'Charged Off'), aes(purpose)) +
geom_bar(color = "black" ,fill = 'dodgerblue') + xlab("Loan Purpose") + ylab('No. of borrowers') +
theme(axis.text.x = element_text(angle = 90), axis.text.y = element_blank(),
axis.ticks.y = element_blank())
# Effect of grade on interest rate: interest-rate histograms faceted by grade.
ggplot(data = loan , aes(int_rate)) + geom_histogram(color = "black" ,fill = 'coral', binwidth = 5) +
facet_grid(.~grade) + xlab("Interest Rate") + ylab("Count")
# Bar plot of verification_status by loan status (proportions).
ggplot(loan,aes(x=verification_status,fill=loan_status))+
geom_bar(col="black",position = "fill") +
labs(x="Verification Status", y="Proportion",
title="Proportion of borrowers by income verification status")
# Conclusion: results show the opposite of what we would expect.
# Loan amount segmented by loan_status (proportions per 5000-wide bin).
ggplot(loan,aes(x=loan_amnt,fill=loan_status))+
geom_histogram(col="black",position = "fill",
breaks=seq(0,35000,by=5000)) +
labs(x="Loan Amount($)", y="Proportion",
title="Proportion of borrowers by loan amount")
# Conclusion: with increasing loan amount the proportion of
# defaulted loans increases.
# ---------------------- Segmented Univariate Analysis ends ---------------------------
#----------------------------------------------------------------------------------------
# Identify potential columns for further analysis
# 1. Grade & sub_grade - each loan is rated with a grade that tries to capture the risk of default:
#    the lower the grade, the higher the default risk is and, consequently,
#    the higher the interest rate will be.
# 2. loan purpose - small business and debt consolidation are the riskiest ones.
# 3. dti - the lower the ratio, the better the chances of not defaulting. Factors related to DTI:
# 3.1 annual income
# 3.2 emp_length
# 3.3 home ownership
#
# 4. Interest_rate - the higher the interest rate, the higher the probability of default.
# 5. loan term - default rate is lower for the longer-term loans.
# 6. inq_last_6mths - if there was a credit inquiry in the last 6 months, the chances of default are higher.
# 7. delinq_2yrs - if there is a delinquency record in the last 2 years, the chances of default are higher.
# 8. pub_rec - if there is a public record, the chances of default are higher.
# 9. pub_rec_bankruptcies - if there are public bankruptcy records, the chances of default are higher.
# ---------------------------------------------------------------------------------------
# --------------------------Bi variate Analysis -----------------------------------------
# -------------Bi variate analysis of loan amount with major categorical variables-------
# We are not treating outliers in this analysis.
# Boxplot of loan amount by grade (to look for outliers):
# loans given to A-grade applicants had the most outlier values,
# and outliers decrease as we move from grade A to G.
ggplot(data = loan, aes(y = loan_amnt, x = grade)) + geom_boxplot(fill = 'orange') + ylab('Loan Amount') +
xlab('Grade')
# Boxplot of loan_amount vs term_in_mths (to look for outliers):
# there are no outliers for the 60-month term; outliers are present for the 36-month term.
# The number of loans is higher for the 60-month term.
ggplot(data = loan, aes(y = loan_amnt, x = term_in_mths)) + geom_boxplot(fill = 'orange') +
ylab('Loan Amount') + xlab('Term(in months)')
# Boxplot of loan_amount vs home_ownership (to look for outliers):
# lower loan amounts are approved for applicants with home ownership NONE;
# most of the outliers are present for RENT.
ggplot(data = loan, aes(y = loan_amnt, x = home_ownership)) + geom_boxplot(fill = 'orange') +
ylab('Loan Amount') + xlab('House Ownership') + theme(axis.text.x = element_text(angle = 90))
# Boxplot of loan_amount vs loan purpose (to look for outliers):
# many outlier values;
# high loan amounts are approved for the small_business purpose.
ggplot(data = loan, aes(y = loan_amnt, x = purpose)) + geom_boxplot(fill = 'orange') +
ylab('Loan Amount') + xlab('Loan Purpose') + theme(axis.text.x = element_text(angle = 90))
# Boxplot of loan_amount vs employment length (to look for outliers):
# many outlier values;
# most of the loan amount is approved for employment length of 10+ years.
ggplot(data = loan, aes(y = loan_amnt, x = emp_length_in_yrs)) + geom_boxplot(fill = 'orange') +
ylab('Loan Amount') + xlab('Employment Length (in years)')
#-------------Bi variate analysis of interest rate with major categorical variables--------
# Boxplot of int_rate by grade (to look for outliers):
# as the grade moves from A to G, the interest rate also increases.
ggplot(data = loan, aes(y = int_rate, x = grade)) + geom_boxplot(fill = 'pink') +
ylab('Interest Rate') + xlab('Grade')
# Boxplot of int_rate vs term (to look for outliers):
# interest rate is a bit lower for the 36-month term, with a few outliers there.
ggplot(data = loan, aes(y = int_rate, x = term_in_mths)) + geom_boxplot(fill = 'pink') +
ylab('Interest Rate') + xlab('Term')
# Boxplot of int_rate vs home_ownership (to look for outliers).
ggplot(data = loan, aes(y = int_rate, x = home_ownership)) + geom_boxplot(fill = 'pink') +
ylab('Interest Rate') + xlab('House Ownership') + theme(axis.text.x = element_text(angle = 90))
# Boxplot of int_rate vs loan purpose (to look for outliers):
# interest rate is usually higher for the small_business & debt_consolidation purposes.
ggplot(data = loan, aes(y = int_rate, x = purpose)) + geom_boxplot(fill = 'pink') +
ylab('Interest Rate') + xlab('Purpose') + theme(axis.text.x = element_text(angle = 90))
# Boxplot of int_rate vs employment length (to look for outliers):
# interest rate is almost the same for all employment lengths except NA.
ggplot(data = loan, aes(y = int_rate, x = emp_length_in_yrs)) + geom_boxplot(fill = 'pink') +
ylab('Interest Rate') + xlab('Employment Length(in years)')
#-------------Bi variate analysis of dti with major categorical variables--------
# Boxplot of dti by grade (to look for outliers):
# lower dti - better grade (no outliers).
ggplot(data = loan, aes(y = dti, x = grade)) + geom_boxplot(fill = 'blue') +
xlab('Grade') + ylab('DTI')
# Boxplot of dti vs term (to look for outliers):
# dti is lower for applicants with the 36-month term (no outliers).
ggplot(data = loan, aes(y = dti, x = term_in_mths)) + geom_boxplot(fill = 'blue') +
xlab('Term( in months)') + ylab('DTI')
# Boxplot of dti vs loan purpose (to look for outliers):
# dti is very high for the credit_card and debt_consolidation purposes.
ggplot(data = loan, aes(y = dti, x = purpose)) + geom_boxplot(fill = 'blue') +
xlab('Purpose') + ylab('DTI') + theme(axis.text.x = element_text(angle = 90))
# --------------------------Tree Map ------------------------------------------
# Tree map for analysing loan purpose against loan volume and average amount.
# Bug fix: sum() has no 'rm.na' argument, so 'rm.na = TRUE' was silently
# summed as an extra element (TRUE == 1), inflating every group total by 1.
# The correct argument is 'na.rm' (rows with NA are already dropped by
# na.omit() anyway).
lpurp_df <- loan %>% select(purpose, loan_amnt) %>%
  na.omit() %>% group_by(purpose) %>%
  dplyr::summarise(volume = n(),
                   average_amnt =
                     sum(as.numeric(loan_amnt), na.rm = TRUE)/n())
# Drop the empty-string purpose level, if present.
lpurp_df <- lpurp_df[!lpurp_df$purpose == "", ]
# Tile size = loan volume per purpose; tile colour = average loan amount.
treemap(lpurp_df, index = "purpose", vSize = "volume",
        vColor = "average_amnt",
        range = c(5000, 14000),
        type = "manual",
        palette = c("yellow", "green", "orange", "orange2", "firebrick"),
        algorithm = "pivotSize",
        sortID = "-size",
        title = "Loan Purpose",
        title.legend = "Avg Amount",
        fontfamily.labels = "serif",
        fontsize.labels = 16,
        fontsize.legend = 10,
        fontface.labels = 1,
        position.legend = "bottom",
        force.print.labels = T,
        align.labels = list(c("left", "top")),
        border.col = "white")
# Debt consolidation is the most common reason for borrowing; the greatest
# advantage of peer-to-peer lending is the low cost, so most consumers choose
# to consolidate debt to enjoy lower borrowing costs. The colour scale shows
# that loans for debt consolidation, credit card, house, and small business
# usually have higher average amounts than other purposes.
#-------------------------------------------------------------------------------
# --------------------------- Derived Metrics -----------------------------------
# 1. ROI metric - return on the funds invested by investors, as a percentage.
# NOTE(review): the numerator uses funded_amnt_inv while the denominator uses
# funded_amnt; for a pure investor ROI the denominator would presumably also
# be funded_amnt_inv - confirm which base was intended.
loan$returns_from_inv <- round(((as.numeric(loan$total_pymnt - loan$funded_amnt_inv))/
as.numeric(loan$funded_amnt))*100,2)
ggplot(loan,aes(x = returns_from_inv, fill = loan_status)) + geom_density() +
labs(x = 'Returns from Investments', y = 'Proportion', fill = 'Loan status')
# Investors have a negative ROI for charged-off loans (a large loss of money);
# on the other hand, the ROI from fully paid loans is the highest.
# 2. is_bad metric - applied to the observations with loan_status 'Charged Off'
# to check whether they showed prior risk signals (delinquencies, public
# records, bankruptcies, or less than one year of employment).
loan_default <- loan %>% filter(loan_status == 'Charged Off')
# Bug fixes relative to the original expression:
# - pub_rec_bankruptcies was converted to a factor earlier in this script, so
#   as.numeric() on it returns the level codes (1, 2, 3) and the "0" level
#   incorrectly counted as bankrupt; go through as.character() to recover the
#   true values before comparing.
# - emp_length was dropped from 'loan' in the column-removal step, so the
#   original only resolved via fragile '$' partial matching; reference the
#   cleaned emp_length_in_yrs column explicitly (it is character, hence the
#   as.numeric()).
loan_default$is_bad <- ifelse(((loan_default$delinq_2yrs > 0) |
                                 (loan_default$pub_rec > 0) |
                                 (as.numeric(as.character(loan_default$pub_rec_bankruptcies)) > 0) |
                                 (as.numeric(loan_default$emp_length_in_yrs) < 1)), 1, 0)
# Count of charged-off applicants that trip at least one risk flag.
table(loan_default$is_bad)
# 3. Ratio of loan amount to annual income (higher = more leveraged borrower).
loan$loan_amnt_by_annual_inc <- round(loan$loan_amnt/loan$annual_inc, digits = 2)
# Histogram of loan_amnt_by_annual_inc, as loan_status proportions per 0.2-wide bin.
ggplot(loan,aes(x = loan_amnt_by_annual_inc,fill = loan_status)) +
geom_histogram(col = "black", position = "fill", breaks = seq(0,1,by = 0.2)) +
labs(x = "Ratio of loan amount to annual income", y = "Proportion", fill = "Loan status")
# Conclusion: the higher the ratio, the higher the proportion of defaulters.
# ---------------------------Correlation Matrix -------------------------------
# Scan the whole dataset for anomalies (xray reports NA/zero/blank problems).
xray::anomalies(loan)
# Keep only the numeric columns for the correlation analysis.
num_vars <- loan %>% sapply(is.numeric) %>% which() %>% names()
loan_corr <- loan[, num_vars]
# Drop the columns xray flags as problematic before computing correlations.
loan_corr_drop <- c(xray::anomalies(loan_corr)$problem_variables$Variable)
loan_corr[ ,loan_corr_drop] <- NULL
# Pairwise correlations using only the complete observations.
corr <- cor(loan_corr, use = "complete.obs")
ggcorrplot(corr, hc.order = TRUE, type = "lower", lab = TRUE, lab_size = 3, method="circle",
colors = c("tomato2", "white", "springgreen3"), title="Correlogram", ggtheme=theme_bw)
# ------------------------------------------------------------------------------
72f48675fadc646bee6d19aa02cfe40972262707 | 3f19e4dc806ffd52e5ca0a660659d2700dad1623 | /R/add_milliseconds.R | 6c1d794b0ca7270fa79b8663902d524dad90155b | [
"MIT"
] | permissive | benjcunningham/subtitler | 09b2a67c976c963df4e60a59d48d915284acc985 | 4e119a633af13533e69eea5a0ceb2d6dfd215290 | refs/heads/master | 2021-01-20T11:44:10.863069 | 2017-12-28T04:24:28 | 2017-12-28T04:24:28 | 82,632,633 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 423 | r | add_milliseconds.R | #' Add Milliseconds to a SubRip (SRT) Timestamp
#'
#' Convenience wrapper that shifts SubRip timestamps by a number of
#' milliseconds: timestamps are converted to milliseconds, offset by
#' \code{ms}, and converted back. Equivalent to
#' \code{as_timestamp(as_milliseconds(x) + ms)}.
#'
#' @param x Vector of timestamps.
#' @param ms Numeric vector of milliseconds to add (may be negative).
#'
#' @return A character vector corresponding to x.
#'
#' @export
add_milliseconds <- function(x, ms) {
  shifted <- as_milliseconds(x) + ms
  as_timestamp(shifted)
}
|
411a32debc1b7a94973b4484143576799b9f6ea7 | 0489e5eb7ebc7288bf7b64805bf2a253ffc2e84d | /nibrs_extras.R | 550ae8a3545e63624d13ba98d7c144b8505841af | [
"MIT"
] | permissive | mseewaters/capstone | 223396b27fc98b4176468b2a68263cd0e30f0f73 | e43bf2b2e4b1f9cbf335c2e9bded04c2b59eb335 | refs/heads/master | 2021-01-10T21:37:47.775295 | 2015-05-02T22:57:52 | 2015-05-02T22:57:52 | 27,968,610 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,591 | r | nibrs_extras.R | ##############
# ---- Approach 1: incident-based dataset (data.m1) -----------------------
# Fraction of rows kept for training in the splits below.
trPerc = .8
# Replicate 1: SMOTE-balance the classes, split train/test, fit a random
# forest, draw its ROC curve and compute the AUC.
data.smote.f <- SMOTE(target.harm ~ ., data.m1, perc.over = 100)
idx2 <- sample(1:nrow(data.smote.f),as.integer(trPerc*nrow(data.smote.f)))
train <- data.smote.f[idx2,]
test <- data.smote.f[-idx2,]
model.rf <- randomForest(target.harm ~ ., data=train, ntree=500, nodesize=2)
# Predicted probability of the positive class on the test rows; the last
# column holds the target and is dropped before prediction.
pr.rf <- predict(model.rf,type="prob",test[,-ncol(test)])[,2]
rf.pred <- prediction(pr.rf, test$target.harm)
rf.perf <- performance(rf.pred,"tpr","fpr")
plot(rf.perf,main="ROC Curve for randomForest",col=2,lwd=2)
auc <- performance(rf.pred,"auc")
auc <- unlist(slot(auc, "y.values"))
auc
# Replicate 2 of the same pipeline (its ROC overlay is commented out).
data.smote.f <- SMOTE(target.harm ~ ., data.m1, perc.over = 100)
idx2 <- sample(1:nrow(data.smote.f),as.integer(trPerc*nrow(data.smote.f)))
train <- data.smote.f[idx2,]
test <- data.smote.f[-idx2,]
model.rf <- randomForest(target.harm ~ ., data=train, ntree=500, nodesize=2)
pr.rf <- predict(model.rf,type="prob",test[,-ncol(test)])[,2]
rf.pred <- prediction(pr.rf, test$target.harm)
rf.perf <- performance(rf.pred,"tpr","fpr")
#plot(rf.perf,col=2,lwd=2, add=TRUE)
auc <- performance(rf.pred,"auc")
auc <- unlist(slot(auc, "y.values"))
auc
# Replicate 3 of the same pipeline.
data.smote.f <- SMOTE(target.harm ~ ., data.m1, perc.over = 100)
idx2 <- sample(1:nrow(data.smote.f),as.integer(trPerc*nrow(data.smote.f)))
train <- data.smote.f[idx2,]
test <- data.smote.f[-idx2,]
model.rf <- randomForest(target.harm ~ ., data=train, ntree=500, nodesize=2)
pr.rf <- predict(model.rf,type="prob",test[,-ncol(test)])[,2]
rf.pred <- prediction(pr.rf, test$target.harm)
rf.perf <- performance(rf.pred,"tpr","fpr")
#plot(rf.perf,col=2,lwd=2, add=TRUE)
auc <- performance(rf.pred,"auc")
auc <- unlist(slot(auc, "y.values"))
auc
# ---- Approach 3: victim/offender-based dataset (data.m3) ----------------
# Same pipeline on data.m3; the ROC curve is overlaid in a second colour.
data.smote.f3 <- SMOTE(target.harm ~ ., data.m3, perc.over = 100)
idx2 <- sample(1:nrow(data.smote.f3),as.integer(trPerc*nrow(data.smote.f3)))
train <- data.smote.f3[idx2,]
test <- data.smote.f3[-idx2,]
model.rf <- randomForest(target.harm ~ ., data=train, ntree=500, nodesize=2)
pr.rf <- predict(model.rf,type="prob",test[,-ncol(test)])[,2]
rf.pred <- prediction(pr.rf, test$target.harm)
rf.perf <- performance(rf.pred,"tpr","fpr")
plot(rf.perf,col=3,lwd=2, add=TRUE)
auc <- performance(rf.pred,"auc")
auc <- unlist(slot(auc, "y.values"))
auc
# Another SMOTE split of data.m3; only train/test are prepared here (the
# model fit below uses a stratified sample with an explicit hold-out set).
data.smote.f3 <- SMOTE(target.harm ~ ., data.m3, perc.over = 100)
idx2 <- sample(1:nrow(data.smote.f3),as.integer(trPerc*nrow(data.smote.f3)))
train <- data.smote.f3[idx2,]
test <- data.smote.f3[-idx2,]
# ---- Stratified random forest evaluated on a held-out set ---------------
trPerc = .80
idx <- sample(1:nrow(data.m3),as.integer(trPerc*nrow(data.m3)))
holdout <- data.m3[-idx,]
data.smote <- SMOTE(target.harm ~ ., data.m3[idx,], perc.over = 100)
table(data.smote$target.harm)
train <- data.smote
#trPerc = .9
#idx2 <- sample(1:nrow(data.smote),as.integer(trPerc*nrow(data.smote)))
#train <- data.smote[idx2,]
#test <- data.smote[-idx2,]
# Per-stratum sample sizes are expressed as fixed proportions of test.num.
test.num <- 3500
model.rf <- randomForest(target.harm ~ ., data=train, ntree=500, nodesize=2, strata=train$type, sampsize=c(SingleVO=.725*test.num, MultVic=.15*test.num, MultOff=.1*test.num, MultVO=.025*test.num))
pr.rf <- predict(model.rf,type="prob",holdout[,-ncol(holdout)])[,2]
rf.pred <- prediction(pr.rf, holdout$target.harm)
rf.perf <- performance(rf.pred,"tpr","fpr")
plot(rf.perf,main="ROC Curve for randomForest (3 reps)",col=2,lwd=2)
plot(rf.perf,col=3,lwd=2, add=TRUE)
#compute area under curve
auc <- performance(rf.pred,"auc")
auc <- unlist(slot(auc, "y.values"))
importance(model.rf)
# ---- Comparison plot of the two data-preparation approaches -------------
data.smote.f3 <- SMOTE(target.harm ~ ., data.m3, perc.over = 100)
idx2 <- sample(1:nrow(data.smote.f3),as.integer(trPerc*nrow(data.smote.f3)))
train <- data.smote.f3[idx2,]
test <- data.smote.f3[-idx2,]
model.rf <- randomForest(target.harm ~ ., data=train, ntree=500, nodesize=2)
pr.rf <- predict(model.rf,type="prob",test[,-ncol(test)])[,2]
rf.pred <- prediction(pr.rf, test$target.harm)
rf.perf <- performance(rf.pred,"tpr","fpr")
plot(rf.perf,col=3,lwd=2, add=TRUE)
# Diagonal reference line (a random classifier) plus a legend.
abline(a=0,b=1,lwd=2,lty=2,col="gray")
legend(title="Approach to data preparation", 0.55, 0.3, c('1: Incident based (n=4757)','3: victim/offender based (n=6426)'), 2:3)
# ---- Cross-validated comparison of several learners ---------------------
# 5-fold CV repeated 3 times over both datasets; recall, precision and F
# for the positive class ('1') are estimated for each workflow variant.
res1 <- performanceEstimation(
c(PredTask(target.harm ~ ., data.m1), PredTask(target.harm ~ ., data.m3)),
c(workflowVariants("standardWF", learner = "svm",
learner.pars=list(cost=c(1,100), gamma=c(0.1,0.001)),
evaluator.pars=list(stats=c("rec","prec", "F"), posClass='1')),
workflowVariants("standardWF", learner = "randomForest",
learner.pars=list(ntree = c(25,500)),
evaluator.pars=list(stats=c("rec","prec", "F"), posClass='1')),
workflowVariants("standardWF", learner = "naiveBayes", evaluator.pars=list(stats=c("rec","prec","F"), posClass='1')),
workflowVariants("standardWF", learner = "bagging", evaluator.pars=list(stats=c("rec","prec","F"), posClass='1')),
workflowVariants("standardWF", learner = "ada", evaluator.pars=list(stats=c("rec","prec","F"), posClass='1'))
),
CvSettings(nFolds=5, nReps=3))
plot(res1)
### test individual basic models before running performance estimation
# Baseline without SMOTE: 90/10 outer split for a hold-out set, then a
# further 90/10 split of the remainder into train/test.
trPerc = .90
idx <- sample(1:nrow(data.m3),as.integer(trPerc*nrow(data.m3)))
holdout <- data.m3[-idx,]
data.smote <- data.m3[idx,]
trPerc = .9
idx2 <- sample(1:nrow(data.smote),as.integer(trPerc*nrow(data.smote)))
train <- data.smote[idx2,]
test <- data.smote[-idx2,]
# RF model evaluation and holdout analysis (non-sampled data)
model.rf <- randomForest(target.harm ~ ., data=train)
pred.rf <- predict(model.rf, test[,-ncol(test)])
classificationMetrics(test$target.harm,pred.rf,stats=c("rec","prec","F"),posClass='1')
table(test[,ncol(test)], pred.rf)
pred.rf <- predict(model.rf, holdout[,-ncol(holdout)])
classificationMetrics(holdout$target.harm,pred.rf,stats=c("rec","prec","F"), posClass='1')
table(holdout[,ncol(holdout)], pred.rf)
# Same evaluation but with SMOTE applied to the training portion only; the
# hold-out rows remain untouched original data.
trPerc = .90
idx <- sample(1:nrow(data.m3),as.integer(trPerc*nrow(data.m3)))
holdout <- data.m3[-idx,]
data.smote <- SMOTE(target.harm ~ ., data.m3[idx,], perc.over = 100)
trPerc = .9
idx2 <- sample(1:nrow(data.smote),as.integer(trPerc*nrow(data.smote)))
train <- data.smote[idx2,]
test <- data.smote[-idx2,]
# RF model evaluation and holdout analysis (non-sampled data)
model.rf <- randomForest(target.harm ~ ., data=train)
pred.rf <- predict(model.rf, test[,-ncol(test)])
classificationMetrics(test$target.harm,pred.rf,stats=c("rec","prec","F"),posClass='1')
table(test[,ncol(test)], pred.rf)
pred.rf <- predict(model.rf, holdout[,-ncol(holdout)])
classificationMetrics(holdout$target.harm,pred.rf,stats=c("rec","prec","F"), posClass='1')
table(holdout[,ncol(holdout)], pred.rf) |
b595dafe3e29f278b981807a94b770ba57cc9420 | 7a8ae73cfd590fd83273e3ec47d37011ab4d8089 | /tsne/man/tsne.Rd | 227010b6bd7d8dd83078ccc67976f658af2c04f9 | [] | no_license | jdonaldson/rtsne | 3dc0bc8f8d82cae1508fb06c40f03dda97195817 | a33cc0087dea7dfa7671d4d6f0049dbc7b2f77c9 | refs/heads/master | 2021-01-17T08:37:47.280295 | 2019-08-27T17:50:58 | 2019-08-27T17:50:58 | 4,193,788 | 56 | 25 | null | 2019-08-21T23:40:49 | 2012-05-01T16:02:53 | R | UTF-8 | R | false | false | 3,085 | rd | tsne.Rd | \name{tsne}
\Rdversion{1.1}
\alias{tsne}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
The t-SNE method for dimensionality reduction
}
\description{
Provides a simple function interface for specifying t-SNE dimensionality reduction on R matrices or "dist" objects.
}
\usage{
tsne(X, initial_config = NULL, k = 2, initial_dims = 30, perplexity = 30,
max_iter = 1000, min_cost = 0, epoch_callback = NULL, whiten = TRUE,
epoch=100)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{
The R matrix or "dist" object
}
\item{initial_config}{
an argument providing a matrix specifying the initial embedding for X. See Details.
}
\item{k}{
the dimension of the resulting embedding.
}
\item{initial_dims}{
The number of dimensions to use in the reduction method.
}
\item{perplexity}{
Perplexity parameter. (optimal number of neighbors)
}
\item{max_iter}{
Maximum number of iterations to perform.
}
\item{min_cost}{
The minimum cost value (error) to halt iteration.
}
\item{epoch_callback}{
A callback function used after each epoch (an epoch here means a set number of iterations)
}
\item{whiten}{
A boolean value indicating whether the matrix data should be whitened.
}
\item{epoch}{
The number of iterations in between update messages.
}
}
%%\details{
%% ~~ If necessary, more details than the description above ~~
%%}
\value{
An R object containing a \emph{ydata} embedding matrix, as well as the matrix of probabilities \emph{P}.
}
\details{
When the initial_config argument is specified, the algorithm will automatically enter the \emph{final momentum} stage. This stage has less large scale adjustment to the embedding, and is intended for small scale tweaking of positioning. This can greatly speed up the generation of embeddings for various similar X datasets, while also preserving overall embedding orientation.
}
\references{
L.J.P. van der Maaten and G.E. Hinton. Visualizing High-Dimensional Data Using t-SNE. \emph{Journal of Machine Learning Research} 9 (Nov) : 2579-2605, 2008.
L.J.P. van der Maaten. Learning a Parametric Embedding by Preserving Local Structure. In \emph{Proceedings of the Twelfth International Conference on Artificial Intelligence and Statistics} (AISTATS), JMLR W&CP 5:384-391, 2009.
}
\author{
Justin Donaldson (jdonaldson@gmail.com)
}
%%\note{
%% ~~further notes~~
%%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\link{dist}
}
\examples{\dontrun{
colors = rainbow(length(unique(iris$Species)))
names(colors) = unique(iris$Species)
ecb = function(x,y){ plot(x,t='n'); text(x,labels=iris$Species, col=colors[iris$Species]) }
tsne_iris = tsne(iris[,1:4], epoch_callback = ecb, perplexity=50)
# compare to PCA
dev.new()
pca_iris = princomp(iris[,1:4])$scores[,1:2]
plot(pca_iris, t='n')
text(pca_iris, labels=iris$Species,col=colors[iris$Species])
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
dda4013c33a575e932c1638db3e4231e560c93a3 | d434ec91242aad694c4e2d78580b60a9da3ce29a | /tests/testthat/test_numbers2words.R | 9b913f54134c756437a5460160d701d4fd9103d8 | [
"MIT"
] | permissive | rmsharp/rmsutilityr | 01abcdbc77cb82eb4f07f6f5d8a340809625a1c5 | d5a95e44663e2e51e6d8b0b62a984c269629f76c | refs/heads/master | 2021-11-20T08:45:23.483242 | 2021-09-07T17:28:22 | 2021-09-07T17:28:22 | 97,284,042 | 0 | 2 | MIT | 2021-09-07T17:28:22 | 2017-07-15T01:17:14 | R | UTF-8 | R | false | false | 2,042 | r | test_numbers2words.R | context("numbers2words")
test_that("numbers2words forms the correct expression", {
  # Input values paired with the verbal expression numbers2words() must
  # produce; checked in the same order as the original expectations.
  cases <- list(
    list(input = 1,      words = "one"),
    list(input = 12,     words = "twelve"),
    list(input = 123,    words = "one hundred twenty three"),
    list(input = 0,      words = "zero"),
    list(input = 0.1,    words = "zero"),
    list(input = 0.499,  words = "zero"),
    list(input = 0.5,    words = "zero"),
    list(input = -1,     words = "negative one"),
    list(input = 1.2,    words = "one"),
    list(input = 1.499,  words = "one"),
    list(input = 1.5,    words = "two"),
    list(input = 1.55,   words = "two"),
    list(input = -0.6,   words = "negative one"),
    list(input = 211,    words = "two hundred eleven"),
    list(input = 210,    words = "two hundred ten"),
    list(input = 201,    words = "two hundred one"),
    list(input = -211,   words = "negative two hundred eleven"),
    list(input = -210,   words = "negative two hundred ten"),
    list(input = 543210,
         words = "five hundred forty three thousand, two hundred ten"),
    list(input = -543210,
         words = "negative five hundred forty three thousand, two hundred ten"),
    list(input = 9876543210,
         words = paste0("nine billion, eight hundred seventy six million, ",
                        "five hundred forty three thousand, two hundred ten"))
  )
  for (case in cases) {
    expect_equal(numbers2words(case$input), case$words)
  }
  # A value beyond exact double precision is rejected; the error message
  # reflects the rounded floating-point representation of the input.
  expect_error(numbers2words(98765432109876543210),
               "98765432109876543488 is too large!")
})
|
0541f012f55640d9135f7d165d0ec57b7eae94f7 | 8be06d2d32cf1e0b8a78ee58af10a8c68a224cad | /man/get_top10_recommendations.Rd | 3867aa66beed5679b2a37d72f8ece331f81238e0 | [
"MIT"
] | permissive | naveen-chalasani/R.vengers | ae07f0cf073b9fc06ebfa0ccef814ef221131941 | 1b8d01858304742501647783877b17aef8abfbc7 | refs/heads/main | 2023-03-18T02:16:03.513801 | 2021-02-26T04:13:54 | 2021-02-26T04:13:54 | 336,475,803 | 0 | 1 | NOASSERTION | 2021-02-26T04:13:54 | 2021-02-06T06:54:09 | R | UTF-8 | R | false | true | 883 | rd | get_top10_recommendations.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_top10_recommendations.R
\name{get_top10_recommendations}
\alias{get_top10_recommendations}
\title{This function returns the attributes of Top 10 most popular movies and TV shows by Genre.}
\usage{
get_top10_recommendations(genre = "adventure")
}
\arguments{
\item{genre}{Genre of recommendations can be - action, adventure, comedy, romance, drama, scifi, horror, animation
Default genre is adventure when there is no input from user.}
}
\value{
A dataframe that contains the list of search results.
Dataframe contains Title, Type, Year, Genre, Runtime (in minutes), Actors, Director, Awards, and IMDB Rating.
}
\description{
This function returns the attributes of Top 10 most popular movies and TV shows by Genre.
}
\examples{
get_top10_recommendations()
get_top10_recommendations(genre = 'drama')
}
|
fa48d16e3a7d17bdd26e2ab2277d50735d442403 | 91e198371a4a706c4ba7cebd4eecc487bdde32e2 | /plot2.R | fc171062285691c31fc448ffa488652995fb7a6e | [] | no_license | arnielarson/ExData_Plotting1 | f5d382cecc47df404101a03c60da245dc3ec673b | b9615ba4fbb06ce95af45a632cabeec708578481 | refs/heads/master | 2021-01-15T09:00:00.983464 | 2015-10-11T18:10:04 | 2015-10-11T18:10:04 | 43,989,383 | 0 | 0 | null | 2015-10-10T02:07:30 | 2015-10-10T02:07:30 | null | UTF-8 | R | false | false | 681 | r | plot2.R | #
# plot2.R - Arnie Larson
#
# Script for creating plots
# for Coursera Exploratory Analysis Course:
# https://class.coursera.org/exdata-033
#
# To create plot2, source('plot2.R'), then call run()
#
#
# Run script to generate plot2
# Depends on plot1.R - in cur directory
#
run <- function() {
  # Generate plot2.png for the course assignment.
  # load() and selectSubset() are expected to be defined by plot1.R
  # (sourced below) -- they are not the base-R functions of those names.
  source('plot1.R')
  png("plot2.png")
  # Close the PNG device even if a later step fails; the original called
  # dev.off() at the end, which leaked an open device on any error.
  on.exit(dev.off(), add = TRUE)
  d <- load()
  ss <- selectSubset(d)
  plot2(ss)
  invisible(NULL)
}
plot2 <- function(ss, ylab="Global Active Power (kilowatts)") {
  # Combine the Date and Time columns into POSIX timestamps, then draw
  # Global_active_power against them as a line chart (no x-axis label).
  timestamps <- strptime(paste(ss$Date, ss$Time), "%d/%m/%Y %H:%M:%S")
  plot(timestamps, ss$Global_active_power, type = "l", xlab = "", ylab = ylab)
}
|
cf31a7030631423f21bec332c5197414e949afa7 | cc27046a2efda8b361b99a2cc82238ec6b09bc78 | /hello_aliens.R | c42b00fd7b7f474d31ef7733931df692caddd6ef | [] | no_license | ybing97/36-350 | 54e4de4d5671525fc949ea626a7e0261a2352729 | 09e786dc937033100f747c82c0f9e8257ab9e24b | refs/heads/master | 2021-08-28T01:09:02.264506 | 2017-12-11T01:24:50 | 2017-12-11T01:24:50 | 109,200,702 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 435 | r | hello_aliens.R | cat("Hi Aliens! I am from the planet Earth, and have been selected as a spokesperson for all humans.
I am currently taking a course called Statistical Computing, where I get to learn R.
But you must be wondering, What is life on Earth like? Life on Earth is wonderful! How is life as an Alien?
What do you look like? Do you have plans to visit Earth someday?
If so, I hope we can meet one day and become friends!
Have a great day!")
|
ab179cff0a885204df24845f7f53495e44d6d176 | 7e5e5139f817c4f4729c019b9270eb95978feb39 | /Introduction to Statistics in R/distributions and central limit theorem/3.R | 1b460f05f4465e68b2ec36d2fb4cac83bfab2cc5 | [] | no_license | Pranav-Polavarapu/Datacamp-Data-Scientist-with-R-Track- | a45594a8a9078076fe90076f675ec509ae694761 | a50740cb3545c3d03f19fc79930cb895b33af7c4 | refs/heads/main | 2023-05-08T19:45:46.830676 | 2021-05-31T03:30:08 | 2021-05-31T03:30:08 | 366,929,815 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 319 | r | 3.R | #3
# New average amount: 20% above the previous mean of 5000
new_mean <- 1.2 * 5000
# New standard deviation: 30% above the previous sd of 2000
new_sd <- 1.3 * 2000
# Draw 36 simulated sale amounts from the updated normal distribution
new_sales <- mutate(new_sales, amount = rnorm(36, mean = new_mean, sd = new_sd))
# Histogram of the simulated amounts, using 10 bins
ggplot(new_sales, aes(x = amount)) +
  geom_histogram(bins = 10)
|
32c1ad98b56a4c793a0a7193802d17dc33956813 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/hipread/tests/test-list-full.R | 28013b0e463bf41d7cd1253ccb4b761c9ffafbb1 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,715 | r | test-list-full.R | library(hipread)
context("Read list")
test_that("basic example matches long format", {
  # The list-format and long-format readers must be driven by identical
  # column specifications; the original duplicated this spec verbatim in
  # both calls, so the two copies could silently drift apart.  Define the
  # spec (and the record-type spec) once and share it.
  col_spec <- list(
    H = hip_fwf_widths(
      c(1, 3, 3, 3, 2),
      c("rt", "hhnum", "hh_char", "hh_dbl", "hh_impdbl"),
      c("character", "character", "character", "double", "double"),
      trim_ws = c(TRUE, FALSE, TRUE, NA, NA),
      imp_dec = c(NA, NA, NA, 0, 1)
    ),
    P = hip_fwf_widths(
      c(1, 3, 1, 3, 1),
      c("rt", "hhnum", "pernum", "per_dbl", "per_mix"),
      c("character", "character", "integer", "double", "character"),
      trim_ws = c(TRUE, FALSE, NA, NA, TRUE),
      imp_dec = c(NA, NA, NA, 0, NA)
    )
  )
  rt_spec <- hip_rt(1, 1)
  path <- hipread_example("test-basic.dat")

  data_list <- hipread_list(path, col_spec, rt_spec)
  data_long <- hipread_long(path, col_spec, rt_spec)

  # Each record type read in list form must equal the corresponding rows
  # (and columns) of the long-form result.
  expect_equal(
    data_long[data_long$rt == "H", c("rt", "hhnum", "hh_char", "hh_dbl", "hh_impdbl")],
    data_list$H
  )
  expect_equal(
    data_long[data_long$rt == "P", c("rt", "hhnum", "pernum", "per_dbl", "per_mix")],
    data_list$P
  )
})
|
569bf6d3299c089fafaffecb0ef1f349743842f7 | d69867150a555a1b61dc730e54d00c2add63543b | /man/ISA_DIR.Rd | 93a25bbcec898386ba810bc7c4ff6effc78da621 | [] | no_license | cran/Metadata | 7eed851ee1e787ac17cf343f7a7213fa50cb1262 | 90103717fec6174b6110bfb17524395ab2d59653 | refs/heads/master | 2021-01-25T10:00:29.306675 | 2011-09-04T00:00:00 | 2011-09-04T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 263 | rd | ISA_DIR.Rd | \name{ISA_DIR}
\alias{ISA_DIR}
\docType{data}
\title{ Directory for ISA data
}
\description{Default directory for ISA data
}
\usage{ ISA_DIR }
\format{
The format is:
chr "ISA"
}
\examples{
print(ISA_DIR)
}
\keyword{directories}
|
f50f517964cdb6429cca9f1dce18fdec694f3112 | 681a3c635318c89a27c8df56cba0b7596d809fff | /MIGSA/R/MIGSAmGSZ.R | d1b8db421d634823ca5803b737c3c05b3440d5ab | [] | no_license | jcrodriguez1989/MIGSA_paper_SupplementarySoftware | 5cfc244a80563c233cdd65d8ec48e742e0173404 | eb38f9bdbba1d605dee010b83914f9e74447f372 | refs/heads/master | 2020-03-24T01:03:01.734016 | 2018-12-20T20:52:04 | 2018-12-20T20:52:04 | 142,320,308 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,270 | r | MIGSAmGSZ.R | #'MIGSAmGSZ
#'
#'\code{MIGSAmGSZ} is an optimized mGSZ version. It runs much faster than the
#'original mGSZ version, moreover it can run in multicore technology.
#'It allows to analyze RNAseq data by using \code{\link[limma]{voom}} function.
#'mGSZ: Gene set analysis based on Gene Set Z scoring function and asymptotic
#'p-value.
#'
#'@param x gene expression data matrix (rows as genes and columns as samples).
#'@param y gene set data (list).
#'@param l vector of response values (example:c("Cond1","Cond1","Cond2",
#'"Cond2","Cond2")).
#'@param use.voom logical indicating whether to use voom or not (for RNAseq
#'data we recommend using use.voom=TRUE).
#'@param rankFunction internal use.
#'@param min.sz minimum size of gene sets (number of genes in a gene set) to
#'be included in the analysis.
#'@param pv estimate of the variance associated with each observation.
#'@param w1 weight 1, parameter used to calculate the prior variance obtained
#'with class size var.constant. This penalizes especially small classes and
#'small subsets. Values around 0.1 - 0.5 are expected to be reasonable.
#'@param w2 weight 2, parameter used to calculate the prior variance obtained
#'with the same class size as that of the analyzed class. This penalizes small
#'subsets from the gene list. Values around 0.3 and 0.5 are expected to be
#'reasonable.
#'@param vc size of the reference class used with wgt1.
#'@param p number of permutations for p-value calculation.
#'@param ... not in use.
#'
#'@return A data.frame with gene sets p-values and additional information.
#'
#'@docType methods
#'@name MIGSAmGSZ
#'@rdname MIGSAmGSZ
#'
#'@exportMethod MIGSAmGSZ
#'
# Declare the MIGSAmGSZ generic; the concrete behavior is supplied by the
# (matrix, list, vector) method defined in this file.
setGeneric(
    name = "MIGSAmGSZ",
    def = function(x, y, l, ...) standardGeneric("MIGSAmGSZ")
)
#'@inheritParams MIGSAmGSZ
#'@rdname MIGSAmGSZ
#'@aliases MIGSAmGSZ,matrix,list,vector-method
#'
#'@importFrom BiocParallel bpparam
#'@importClassesFrom edgeR DGEList
#'@importFrom edgeR DGEList
#'@importClassesFrom limma MAList
#'@include FitOptions-class.R
#'@include GSEAparams.R
#'@include MGSZ.R
#'
#'@examples
#'nGenes <- 1000; # 1000 genes
#'nSamples <- 30; # 30 subjects
#'geneNames <- paste("g", 1:nGenes, sep=""); # with names g1 ... g1000
#'## Create random gene expression data matrix.
#'set.seed(8818);
#'exprData <- matrix(rnorm(nGenes*nSamples),ncol=nSamples);
#'rownames(exprData) <- geneNames;
#'
#'## There will be 40 differentialy expressed genes.
#'nDeGenes <- nGenes/25;
#'## Lets generate the offsets to sum to the differentialy expressed genes.
#'deOffsets <- matrix(2*abs(rnorm(nDeGenes*nSamples/2)), ncol=nSamples/2);
#'
#'## Randomly select which are the DE genes.
#'deIndexes <- sample(1:nGenes, nDeGenes, replace=FALSE);
#'exprData[deIndexes, 1:(nSamples/2)] <-
#'exprData[deIndexes, 1:(nSamples/2)] + deOffsets;
#'
#'## 15 subjects with condition C1 and 15 with C2.
#'conditions <- rep(c("C1", "C2"),c(nSamples/2,nSamples/2));
#'
#'nGSets <- 200; # 200 gene sets
#'## Lets create randomly 200 gene sets, of 10 genes each
#'gSets <- lapply(1:nGSets, function(i) sample(geneNames, size=10));
#'names(gSets) <- paste("set", as.character(1:nGSets), sep="");
#'
#'\dontrun{
#'mGSZres <- MIGSAmGSZ(exprData, gSets, conditions);
#'}
#'
setMethod(
    f = "MIGSAmGSZ",
    signature = c("matrix", "list", "vector"),
    definition = function(x, y, l, use.voom = FALSE, rankFunction = NA,
        min.sz = 5, pv = 0, w1 = 0.2, w2 = 0.5, vc = 10, p = 200) {
        # Accepts (almost) the same inputs as mGSZ and delegates the actual
        # computation to MIGSA's internal mGSZ implementation.
        #
        # Wrap the expression matrix in the container matching the chosen
        # pipeline, and fall back to that pipeline's default gene-ranking
        # function when the caller did not pass one.
        if (use.voom) {
            exprObj <- DGEList(counts = x)
            if (!is(rankFunction, "function")) {
                rankFunction <- voomLimaRank
            }
        } else {
            exprObj <- new("MAList", list(M = x))
            if (!is(rankFunction, "function")) {
                rankFunction <- mGszEbayes
            }
        }

        # Fit/contrast configuration derived from the response labels.
        fitOpts <- FitOptions.default(l)

        # Bundle the mGSZ tuning parameters into a single object.
        gseaParams <- GSEAparams(
            perm_number = p,
            min_sz = min.sz,
            pv = pv,
            w1 = w1,
            w2 = w2,
            vc = vc
        )

        # y holds the gene sets; run the analysis and return its result.
        MIGSA_mGSZ(exprObj, fitOpts, y, rankFunction, gseaParams)
    }
)
|
c7187a7b533e36d44e15940d2ceb749b78e23368 | 13b7644d364c93056f43f7befb6689ed95ce09b8 | /server.R | a3ced1078938b8a17437be5d9040e007c4d03ebb | [] | no_license | heshuaidavid/shiny-app | 2aa6da7a8f2bc77bd60ba00bc7034fbb9866fb2e | f391f45f8cc78cb1ac938dc783b8f65cfef606e0 | refs/heads/master | 2021-01-22T02:17:22.266642 | 2017-06-02T19:43:55 | 2017-06-02T19:43:55 | 92,345,921 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,020 | r | server.R | source('helpers.R')
#pre-treatment
shinyServer(
function(input, output) {
    # React whenever the "formation" checkbox changes: rebind the two output
    # plots (production-decline curve and Texas county map) to renderPlot
    # expressions matching the current mode.  The plotting helpers
    # (formation_curve, county_curve, county_curve_log, plot_county) and the
    # data objects come from helpers.R, sourced at the top of this file.
    observeEvent(input$check_formation, {
      if (input$check_formation) {
        # Formation mode: pick the argument list for the selected formation
        # and draw its aggregate decline curve over the chosen year range.
        output$pdcurve <- renderPlot({
          args <- switch(input$formation_var,
                         "Barnett" = list(production_data, Barnett, input$range[1], input$range[2]),
                         "Eagle Ford" = list(production_data, Eagle_Ford, input$range[1], input$range[2]),
                         "Cotton Valley" = list(production_data, Cotton_Valley, input$range[1], input$range[2]),
                         "Permian" = list(production_data, Permian, input$range[1], input$range[2]))
          do.call(formation_curve, args)
        })
        # Map highlighting the counties of the selected formation.
        output$texas_county <- renderPlot({
          args1 <- switch(input$formation_var,
                          "Barnett" = list(texas, Barnett),
                          "Eagle Ford" = list(texas, Eagle_Ford),
                          "Cotton Valley" = list(texas, Cotton_Valley),
                          "Permian" = list(texas, Permian))
          do.call(plot_county, args1)
        })
      } else {
        # County mode: additionally react to the log-scale toggle.
        # NOTE(review): this registers a fresh observer every time
        # check_formation becomes FALSE, so observers accumulate over the
        # session -- worth confirming/refactoring, behavior kept as-is here.
        observeEvent(input$log, {
          if (input$log) {
            # Log-scale decline curve for the selected county.
            output$pdcurve <- renderPlot({
              args <- list(production_data, input$county_var, input$range[1], input$range[2])
              do.call(county_curve_log,args)
            })
            output$texas_county <- renderPlot({
              args <- list(texas, input$county_var)
              do.call(plot_county, args)
            })
          } else {
            # Linear-scale decline curve for the selected county.
            output$pdcurve <- renderPlot({
              args <- list(production_data, input$county_var, input$range[1], input$range[2])
              do.call(county_curve,args)
            })
            output$texas_county <- renderPlot({
              args <- list(texas, input$county_var)
              do.call(plot_county, args)
            })
          }
        })
      }
    })
}
) |
d3d0248d613cfe4dc4664556f2ead6f9db61f7fb | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/logmult/examples/color.Rd.R | c23a5d1738357afc10de0666eb74274d4f00cb23 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 175 | r | color.Rd.R | library(logmult)
### Name: color
### Title: Two Cross-Classifications of Eye Color by Hair Color
### Aliases: color
### Keywords: datasets
### ** Examples
## see ?rc
|
272b6905f378e383f6be8fd2ee4b3278e904d0b9 | 291aaf7c7b48912c4ef929f25f6e9c7d2b09e933 | /R/qtleffects_base.R | 142c3ae91495841294fde3949fdb7290b7c806fd | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ellisztamas/qtltools | d3cce1cbbf15463cf551d32b3f3435a8f869646c | a9e018741519b29beb6e97bb4d34cf3814e349eb | refs/heads/master | 2021-01-11T02:04:48.412978 | 2018-07-11T06:34:47 | 2018-07-11T06:34:47 | 70,811,342 | 0 | 0 | null | 2016-10-13T14:09:58 | 2016-10-13T13:50:24 | R | UTF-8 | R | false | false | 1,554 | r | qtleffects_base.R | #' Base plot for qtleffects
#'
#' Creates an empty plot for plotting QTL effect sizes from a qtleffects object.
#'
#' @param qtleffects A qtleffects object from qtleffects() denoting x-value positions.
#' @param y A vector of two elements denoting minimum and maximum y-vales. Equivalent to ylim argument.
#' @param ylabel Axis label for the y-axis. If NULL, this deaults to 'Effect size'.
#' @param tick.length Length of ticks for marker locations. If NULL, defaults to 3% of the total.
#' y-axis length. Negative values will anchor ticks above the minimum y value.
#' @param tick.lwd Line widths for marker locations.
#' @param box If TRUE, a border will be drawn around the plot.
#'
#' @export
qtleffects_base <-
  function(qtleffects, y, ylabel=NULL, tick.length=NULL, tick.lwd=0.5, box=TRUE){
    # Validate early and raise a real error.  The original print()ed a
    # message (with a "minumum" typo and min/max swapped) and silently
    # returned NULL, which let downstream plotting calls run on no canvas.
    if (length(y) != 2) {
      stop("y must be a vector with two elements denoting minimum and maximum y-values.",
           call. = FALSE)
    }
    if (is.null(ylabel)) ylabel <- "Effect size"
    # Default tick length: 3% of the total y-axis span.  Negative values
    # anchor the marker ticks above min(y) (see the roxygen docs above).
    if (is.null(tick.length)) tick.length <- abs(y[1] - y[2]) * 0.03
    # Each chromosome occupies 100 x-units; the +50 shifts the window so
    # the chromosome segments sit centred in the plotting region.
    n_chr <- length(qtleffects$chr)
    # Create an empty plot with no axes (axes = FALSE instead of the
    # reassignable shorthand F).
    plot(c(1, n_chr * 100), y, type = 'n', xlab = 'Chromosome', ylab = ylabel,
         axes = FALSE, xlim = c(0, n_chr * 100) + 50)
    # Add the y-axis back, plus an x-axis labelled with chromosome names.
    axis(2)
    axis(1, at = 100 * seq_len(n_chr), labels = qtleffects$chr)
    # Draw a tick at each marker's plot position along the bottom edge.
    segments(qtleffects$marker_positions$plot_position, min(y),
             qtleffects$marker_positions$plot_position, min(y) - tick.length,
             lwd = tick.lwd)
    if (isTRUE(box)) box()  # add the plot border
  }
|
28d4b3c1fa18acb3fc8a931601964c9835d2bb29 | f9db3b703033871888a6bda79ae1cc8ba0e38367 | /unit2/ass1.R | f830480b255a6d6c26ae6686a54fafa87ef56583 | [] | no_license | goncer/intelligentSystems | 59d62a10b35243501a2f51bee1c5c63b76f8ddd3 | dc0b2d948c5e6a3e7ff860e00e8a979ba23ea9bd | refs/heads/master | 2020-05-29T08:42:07.127326 | 2016-10-16T15:28:31 | 2016-10-16T15:28:31 | 70,087,998 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,180 | r | ass1.R | setwd("/home/gonzo/workspace/R/intelligent/intelligentSystems/unit2")
library(tm)
library(ggplot2)
library(wordcloud)
library(RWeka)
library(reshape2)
library(SnowballC)
options(mc.cores=1)
source.pos = DirSource("/home/gonzo/workspace/R/intelligent/intelligentSystems/unit2/txt_sentoken/pos", encoding = "UTF-8")
corpus = Corpus(source.pos)
length(corpus)
summary(corpus[1:3])
corpus[1]
corpus[[1]]
meta(corpus[[1]])
meta(corpus[[1]])$id
tdm = TermDocumentMatrix(corpus)
tdm
inspect(tdm[2000:2003,100:103])
length(dimnames(tdm)$Terms)
head(dimnames(tdm)$Terms,10)
tail(dimnames(tdm)$Terms,10)
freq=rowSums(as.matrix(tdm))
head(freq,10)
tail(freq,10)
plot(sort(freq, decreasing = T),col="blue",main="Word frequencies", xlab="Frequency-based rank", ylab = "Frequency")
tail(sort(freq),n=10)
sum(freq == 1)
getTransformations()
doc=corpus[1]
doc[[1]]$content[1]
stopwords()
doc = tm_map(doc,removeWords,stopwords())
doc[[1]]$content[1]
doc = tm_map(corpus[1],removePunctuation)
doc[[1]]$content[1]
doc = tm_map(doc,removeNumbers)
doc[[1]]$content[1]
doc = tm_map(doc,stripWhitespace)
doc[[1]]$content[1]
doc = tm_map(doc,stemDocument)
doc[[1]]$content[1]
# Rebuild the term-document matrix with preprocessing applied inside
# TermDocumentMatrix itself: stopword removal, punctuation and number
# stripping, and stemming.
tdm = TermDocumentMatrix(corpus,control=list(stopwords = T,removePunctuation = T,removeNumbers = T,stemming = T))
tdm
inspect(tdm[2000:2003,100:103])
length(dimnames(tdm)$Terms)   # vocabulary size after preprocessing
head(dimnames(tdm)$Terms,10)
tail(dimnames(tdm)$Terms,10)
# Recompute term frequencies on the cleaned matrix.
freq=rowSums(as.matrix(tdm))
head(freq,10)
tail(freq,10)
plot(sort(freq, decreasing = T),col="blue",main="Word frequencies", xlab="Frequency-based rank", ylab = "Frequency")
tail(sort(freq),n=10)   # ten most frequent terms after cleaning
sum(freq == 1)          # terms occurring exactly once
# ---- TDM with transformations and a custom stopword list ----
# Domain-specific stopwords: besides the default English list, drop the
# review-domain words "film(s)"/"movie(s)", which carry no sentiment signal.
doc = corpus[1]
doc[[1]]$content[1]
myStopwords = c(stopwords(),"film","films","movie","movies")
# Preview the effect of the extended stopword list on the first document.
doc = tm_map(corpus[1],removeWords,myStopwords)
doc[[1]]$content[1]
tdm = TermDocumentMatrix(corpus,
control=list(stopwords = myStopwords,
removePunctuation = T,
removeNumbers = T,
stemming = T))
tdm
inspect(tdm[2000:2003,100:103])
length(dimnames(tdm)$Terms)
head(dimnames(tdm)$Terms,10)
tail(dimnames(tdm)$Terms,10)
# Term frequencies with the custom stopwords removed.
freq=rowSums(as.matrix(tdm))
head(freq,10)
tail(freq,10)
plot(sort(freq, decreasing = T),col="blue",main="Word frequencies", xlab="Frequency-based rank", ylab = "Frequency")
# Ten most frequent terms
tail(sort(freq),n=10)
sum(freq == 1)   # terms occurring exactly once
# ---- Horizontal bar chart of the ten most frequent terms ----
high.freq=tail(sort(freq),n=10)
hfp.df=as.data.frame(sort(high.freq))
hfp.df$names <- rownames(hfp.df)   # keep the term names as a plotting column
# reorder() sorts the bars by frequency; coord_flip() makes them horizontal.
ggplot(hfp.df, aes(reorder(names,high.freq), high.freq)) +
geom_bar(stat="identity") + coord_flip() +
xlab("Terms") + ylab("Frequency") +
ggtitle("Term frequencies")
# ---- TDM weighted by TF-IDF instead of raw counts ----
# TF-IDF down-weights terms that appear in many documents, so the top
# terms here differ from the raw-frequency ranking above.
tdm.tfidf = TermDocumentMatrix(corpus,
control = list(weighting = weightTfIdf,
stopwords = myStopwords,
removePunctuation = T,
removeNumbers = T,
stemming = T))
tdm.tfidf
# Aggregate TF-IDF weight per term across all documents.
freq=rowSums(as.matrix(tdm.tfidf))
plot(sort(freq, decreasing = T),col="blue",main="Word TF-IDF frequencies", xlab="TF-IDF-based rank", ylab = "TF-IDF")
tail(sort(freq),n=10)   # ten terms with the highest aggregate TF-IDF
# ---- Association analysis: terms correlated with a chosen term ----
# findAssocs returns terms whose document co-occurrence correlation with
# "star" is at least 0.5.
asoc.star = as.data.frame(findAssocs(tdm,"star", 0.5))
asoc.star$names <- rownames(asoc.star)
asoc.star
ggplot(asoc.star, aes(reorder(names,star), star)) +
geom_bar(stat="identity") + coord_flip() +
xlab("Terms") + ylab("Correlation") +
ggtitle("\"star\" associations")
# Same analysis for "indiana" (e.g. Indiana Jones reviews).
asoc.indi = as.data.frame(findAssocs(tdm,"indiana", 0.5))
asoc.indi$names <- rownames(asoc.indi)
asoc.indi
ggplot(asoc.indi, aes(reorder(names,indiana), indiana)) +
geom_bar(stat="identity") + coord_flip() +
xlab("Terms") + ylab("Correlation") +
ggtitle("\"indiana\" associations")
# ---- Term-by-document frequency heatmap ----
# Keep only terms present in at least half the documents (sparsity <= 0.5)
# so the heatmap stays readable.
tdm.small = removeSparseTerms(tdm,0.5)
dim(tdm.small)
tdm.small
inspect(tdm.small[1:4,1:4])
# melt() reshapes the matrix to long form: one (Term, Doc, count) row each.
matrix.tdm = melt(as.matrix(tdm.small), value.name = "count")
head(matrix.tdm)
# Tile heatmap; log10 of the count compresses the dynamic range of the fill.
ggplot(matrix.tdm, aes(x = Docs, y = Terms, fill = log10(count))) +
geom_tile(colour = "white") +
scale_fill_gradient(high="#FF0000" , low="#FFFFFF")+
ylab("Terms") +
theme(panel.background = element_blank()) +
theme(axis.text.x = element_blank(), axis.ticks.x = element_blank())
# ---- Word cloud of the most frequent terms ----
# Take the "Blues" brewer palette and drop its three lightest shades so
# every word stays legible on a white background.
pal=brewer.pal(8,"Blues")
pal=pal[-(1:3)]
freq = sort(rowSums(as.matrix(tdm)), decreasing = T)
# Fix the RNG seed so the (random) word layout is reproducible.
set.seed(1234)
word.cloud=wordcloud(words=names(freq), freq=freq,
min.freq=400, random.order=F, colors=pal)
# ---- Corpus preparation for n-gram analysis ----
# Remove stopwords plus the contraction fragments "s" and "ve" (leftovers
# of "it's"/"we've" after tokenization), then strip punctuation and digits.
corpus.ng = tm_map(corpus,removeWords,c(stopwords(),"s","ve"))
corpus.ng = tm_map(corpus.ng,removePunctuation)
corpus.ng = tm_map(corpus.ng,removeNumbers)
BigramTokenizer <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
# Bigram term-document matrix built with the custom tokenizer above.
tdm.bigram = TermDocumentMatrix(corpus.ng,
control = list(tokenize = BigramTokenizer))
# Bigram frequencies across the corpus, most frequent first.
freq = sort(rowSums(as.matrix(tdm.bigram)),decreasing = TRUE)
freq.df = data.frame(word=names(freq), freq=freq)
head(freq.df, 20)
# Word cloud of the top 100 bigrams (pal defined in the word-cloud section).
wordcloud(freq.df$word,freq.df$freq,max.words=100,random.order = F, colors=pal)
# Horizontal bar chart of the 15 most frequent bigrams.
ggplot(head(freq.df,15), aes(reorder(word,freq), freq)) +
geom_bar(stat="identity") + coord_flip() +
xlab("Bigrams") + ylab("Frequency") +
ggtitle("Most frequent bigrams")
# ---- Trigram word cloud ----
# Tokenizer for TermDocumentMatrix: split text into three-word sequences
# using RWeka's Weka-backed n-gram tokenizer.
TrigramTokenizer <- function(x) {
  NGramTokenizer(x, Weka_control(min = 3, max = 3))
}
# Trigram term-document matrix built with the custom tokenizer above.
tdm.trigram = TermDocumentMatrix(corpus.ng,
control = list(tokenize = TrigramTokenizer))
# Trigram frequencies across the corpus, most frequent first.
freq = sort(rowSums(as.matrix(tdm.trigram)),decreasing = TRUE)
freq.df = data.frame(word=names(freq), freq=freq)
head(freq.df, 20)
# Word cloud of the top 100 trigrams.
wordcloud(freq.df$word,freq.df$freq,max.words=100,random.order = F, colors=pal)
# Horizontal bar chart of the 15 most frequent trigrams.
ggplot(head(freq.df,15), aes(reorder(word,freq), freq)) +
geom_bar(stat="identity") + coord_flip() +
xlab("Trigrams") + ylab("Frequency") +
ggtitle("Most frequent trigrams")
|
fe01d1a4d4d30e6f034da2979026d729235749a7 | 2d8acfab5bcf43e8f77f17e2b5943e3a741b1321 | /man-roxygen/sr.R | 1021dbb427b6d8ad4d2db6b14bd7a72c613357f2 | [] | no_license | shabbychef/SharpeR | 347b2e181ac3bafe83723df4f01b110cfd85b116 | df46933d5e0b7576ffb5a46b9fb70607f89ed0e0 | refs/heads/master | 2022-06-04T00:27:23.216085 | 2021-08-18T17:50:04 | 2021-08-18T17:50:04 | 7,334,041 | 20 | 5 | null | 2013-09-13T22:04:43 | 2012-12-27T00:52:58 | R | UTF-8 | R | false | false | 186 | r | sr.R | #' @family sr
#' @references
#'
#' Sharpe, William F. "Mutual fund performance." Journal of business (1966): 119-138.
#' \url{https://ideas.repec.org/a/ucp/jnlbus/v39y1965p119.html}
#'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.