blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8132f59c486c159faefb88ce83c2dccff747e3dc
|
ff9bae38626a26dde54a3db17143ce096046d2c3
|
/ui.R
|
d4da0e53b9a4cd8704c5b49f5434d36e7b990a86
|
[] |
no_license
|
briantcs/dataproduct
|
b32fd5ff9114cb537b38c7aa02ea6819d6c94c1e
|
f5d658e11c38cb73f347f3868fc3705f0d43c8a3
|
refs/heads/master
| 2021-01-10T14:19:15.234222
| 2015-05-24T21:49:54
| 2015-05-24T21:49:54
| 36,194,154
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 464
|
r
|
ui.R
|
# ui.R -- Shiny UI: collects a glucose value and shows a diabetes prediction.
library(shiny)

# Input panel: glucose level plus an explicit submit button.
input_panel <- sidebarPanel(
  numericInput('glucose', 'Glucose mg/dl', 90, min = 20, max = 250, step = 1),
  submitButton('Enter')
)

# Output panel: echoes the submitted value and the server's prediction.
output_panel <- mainPanel(
  h3('Results of prediction'),
  h4('Your submission'),
  verbatimTextOutput("inputValue"),
  h4('Which resulted in a prediction of '),
  verbatimTextOutput("prediction")
)

shinyUI(
  pageWithSidebar(
    # Application title
    headerPanel("Prediction on diabetes"),
    input_panel,
    output_panel
  )
)
|
059c1908c70bd6d07ffbdfc15047920aee5b84c3
|
22d4f7e013b930e3fbcde0dbcb6765621e92a5bb
|
/code/1-get-labels-from-form.R
|
0cdb19b038d5a2bd25285181650bb59749eb10f7
|
[] |
no_license
|
unhcr-iraq/pmt-refugees
|
b8cce8cfbe02d966ab7ec7307cdbd34fcea43d31
|
d99164c9c5ab1c2f1d8044c3bbb01e1a517987d0
|
refs/heads/master
| 2021-01-14T08:41:23.655456
| 2016-09-21T19:28:22
| 2016-09-21T19:28:22
| 68,604,928
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,295
|
r
|
1-get-labels-from-form.R
|
########################################################################
################# Trying to get the form nicely with labels
## https://gist.github.com/mrdwab/28c13a0537044aeb5dc0
##
## Reads a KoBo XLSForm (survey + choices sheets), reconstructs full
## question labels for select_one / select_multiple questions, and maps
## the variable names of the loaded dataset `data` back to those labels.
## NOTE(review): assumes upstream scripts attached read_excel (readxl),
## join (plyr), str_* (stringr), trim() -- presumably gdata::trim -- and
## loaded a data frame named `data`. TODO confirm against the callers.
########################################################################
#source("code/1-kobo_form_downloader.R")
#kobo_form_downloader (formid, user = usernamepasswordunhcr , api = "https://kobocat.unhcr.org/api/v1/")
## Load survey structure in XLS form
form_tmp <- "data/R_PMT_v1.xls"
survey <- read_excel(form_tmp, sheet = "survey")
## Avoid columns without names -- keep only type/name/label
survey <- survey[ ,c("type", "name" , "label"# "label::English",
#"label::Arabic" ,"hint::Arabic",
# "hint::English", "relevant", "required", "constraint", "constraint_message::Arabic",
# "constraint_message::English", "default", "appearance", "calculation", "read_only" ,
# "repeat_count"
)]
## need to delete empty rows from the form
survey <- as.data.frame(survey[!is.na(survey$type), ])
#str(survey)
#levels(as.factor(survey$type))
### We can now extract the id of the list name to reconstruct the full label for the question
survey$listname <- ""
## Extract for select_one: listname is the text after "select_one " (+10 chars)
survey$listname <- with(survey, ifelse(grepl("select_one", ignore.case = TRUE, fixed = FALSE, useBytes = FALSE, survey$type) ,
paste0( substr(survey$type , (regexpr("select_one", survey$type , ignore.case=FALSE, fixed=TRUE))+10,250)),survey$listname))
survey$type <- with(survey, ifelse(grepl("select_one", ignore.case = TRUE, fixed = FALSE, useBytes = FALSE, survey$type), paste0("select_one"),survey$type))
## Extract for select multiple & clean type field (+16 chars past "select_multiple")
survey$listname <- with(survey, ifelse(grepl("select_multiple", ignore.case = TRUE, fixed = FALSE, useBytes = FALSE, survey$type),
paste0( substr(survey$type , (regexpr("select_multiple", survey$type , ignore.case=FALSE, fixed=TRUE))+16,250)),survey$listname ))
survey$type <- with(survey, ifelse(grepl("select_multiple", ignore.case = TRUE, fixed = FALSE, useBytes = FALSE, survey$type), paste0("select_multiple_d"),survey$type))
## Remove space
## NOTE(review): trim() is not base R -- presumably gdata::trim; trimws()
## would be the base equivalent. Confirm which package supplies it upstream.
survey$listname <- trim(survey$listname)
#str(survey)
write.csv (survey, "data/survey.csv")
survey_temp <- survey[ ,c("type", "name" , "label")]
### Bind choices: join each choice row to its question via the list name
choices <- read_excel(form_tmp, sheet = "choices")
names(choices)[4] <- "listname"
choices <- choices[,c("listname", "name" , "label")]
choices1 <- join(x=choices, y=survey, by="listname", type="left")
choices1$type <- with(choices1, ifelse(grepl("select_one", ignore.case = TRUE, fixed = FALSE, useBytes = FALSE, choices1$type), paste0("select_one_d"),choices1$type))
choices1$type <- with(choices1, ifelse(grepl("select_multiple_d", ignore.case = TRUE, fixed = FALSE, useBytes = FALSE, choices1$type), paste0("select_mutiple"),choices1$type))
names(choices1)[5] <- "nameq"
names(choices1)[6] <- "labelq"
## Full label = "<question label>: <choice label>"
choices1$labelfull <- paste0(choices1$labelq, sep = ": ", choices1$label)
names(choices1)
choices2 <- choices1[,c("type", "name" , "labelfull")]
names(choices2)[3] <- "label"
# Remove duplicates based on the name column
choices3 <- choices2[!duplicated(choices2$name), ]
survey_all <- rbind(survey_temp,choices3)
write.csv (survey_all, "data/surveyall.csv")
## get variable names from the loaded dataset `data`
rm(datalabel)
datalabel <- as.data.frame( names(data))
names(datalabel)[1] <- "nameor"
datalabel$nameor <- as.character(datalabel$nameor)
## new variable names without /
datalabel$namenew <- str_replace_all(datalabel$nameor, "/", ".")
## let's recode the variables of the dataset using short labels - column 3 of my reviewed labels
#names(data) <- datalabel[, 2]
## Extract the variable name as defined in the form
datalabel$length <- str_length(datalabel$namenew)
#str(datalabel)
## Find the next dot to parse the label (strips one group prefix per step,
## up to five levels of nesting: nameor2 .. nameor6)
datalabel$find <- regexpr(".", datalabel$namenew, fixed = TRUE, useBytes = TRUE)
#summary(datalabel$find)
datalabel$nameor2 <- substr(datalabel$namenew,datalabel$find+1, 200)
datalabel$find2 <- regexpr(".",datalabel$nameor2, fixed = TRUE, useBytes = TRUE)
datalabel$nameor3 <- substr(datalabel$nameor2,datalabel$find2 +1, 200)
datalabel$find3 <- regexpr(".",datalabel$nameor3, fixed = TRUE, useBytes = TRUE)
datalabel$nameor4 <- substr(datalabel$nameor3,datalabel$find3 +1, 200)
datalabel$find4 <- regexpr(".",datalabel$nameor4, fixed = TRUE, useBytes = TRUE)
datalabel$nameor5 <- substr(datalabel$nameor4,datalabel$find4 +1, 200)
datalabel$find5 <- regexpr(".",datalabel$nameor5, fixed = TRUE, useBytes = TRUE)
datalabel$nameor6 <- substr(datalabel$nameor5,datalabel$find5 +1, 200)
## backup
datalabel1 <- datalabel
## merging now with labels
datalabel <- datalabel1
names(datalabel)
## Column 13 is nameor6 (fully stripped name); rename so join() can match
names(datalabel)[13] <- "name"
datalabel <- join(x=datalabel, y=survey_all, by="name", type="left")
names(datalabel)
names(datalabel)[13]<- "nameor6"
## Check for duplicates
datalabel$dup <- duplicated(datalabel$nameor)
write.csv(datalabel, "data/datalabel.csv")
rm(choices, choices1, choices2, choices3, datalabel1, survey, survey_all, survey_temp, form_tmp)
|
67f0e56de4add1ed137b0c1d93b2a467b3b1e54a
|
e2708c67f6f355ad37ff42116fe9861fd6051313
|
/server.R
|
997a60ac420f93c08fc3690eaef81369b55a35e1
|
[] |
no_license
|
abrarali21/ShinyAppProject
|
154a6761370803a42cbeb4f2ff033473ef8ea342
|
29420bb236df809991bfd7866fe1d9666a03cc40
|
refs/heads/master
| 2016-08-12T08:20:46.848120
| 2015-12-27T23:01:36
| 2015-12-27T23:01:36
| 48,663,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 377
|
r
|
server.R
|
# server.R -- fits a linear model of stopping distance on speed (base
# `cars` data) once at startup, then serves predictions for the speed
# submitted from the UI.
library(UsingR)

# Fit dist ~ speed and extract the fitted coefficients as plain numbers.
model <- lm(dist ~ speed, cars)
slope <- as.numeric(model$coeff["speed"])
y_intercept <- as.numeric(model$coeff["(Intercept)"])

# Predicted stopping distance for a given speed (linear predictor).
stoppingDistance <- function(speed) {
  y_intercept + slope * speed
}

shinyServer(
  function(input, output) {
    # Echo the submitted speed back to the UI.
    output$inputValue <- renderPrint({ input$speed })
    # Show the model's predicted stopping distance for that speed.
    output$prediction <- renderPrint({ stoppingDistance(input$speed) })
  }
)
|
5fa88a9ee9a0c4b9f73d48e33cf0707d5142d99c
|
22327732e8e257e5181b0ab6e61e714309a71cad
|
/exdata/plot6.r
|
5fb634006bbf212128baeab42a8bbd78665d7308
|
[] |
no_license
|
li-xin-yi/TuQiang-Street-301
|
a1f2e183f5ab50a22931737f4a8b0b8986f668ce
|
cc4a19175f4cd1630a4084496c2403ff3f2e660c
|
refs/heads/master
| 2022-04-03T00:34:02.396763
| 2019-01-06T12:34:08
| 2019-01-06T12:34:08
| 62,716,093
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,281
|
r
|
plot6.r
|
# plot6.r -- Compare motor-vehicle emissions (1999-2008) for Baltimore City
# vs Los Angeles County, both as absolute totals and as rates relative to
# the 1999 value. NOTE(review): assumes NEI and SCC data frames are already
# loaded by an earlier step -- confirm upstream loader.
library('ggplot2')
library('reshape2')

# Subset observations for each county by FIPS code.
Baltimore <- NEI[NEI$fips == '24510', ]
LA <- NEI[NEI$fips == '06037', ]

# Source classification codes whose short name mentions "Vehicle".
s_vehicle <- SCC[grep('Vehicle', SCC$Short.Name), ]

# Keep only vehicle-related records, then total emissions per year.
Bal_em <- Baltimore[Baltimore$SCC %in% s_vehicle$SCC, c('year', 'Emissions')]
LA_em <- LA[LA$SCC %in% s_vehicle$SCC, c('year', 'Emissions')]
Bal_total <- aggregate(Emissions ~ year, data = Bal_em, FUN = sum)
LA_total <- aggregate(Emissions ~ year, data = LA_em, FUN = sum)

# emi1: absolute totals; emi2: totals scaled so the 1999 value equals 1.
emi1 <- data.frame(matrix(nrow = 4, ncol = 0))
emi2 <- data.frame(matrix(nrow = 4, ncol = 0))
emi1['year'] <- Bal_total$year
emi2['year'] <- Bal_total$year
emi1['Baltimore'] <- Bal_total$Emissions
emi1['LA'] <- LA_total$Emissions
emi2['Baltimore'] <- Bal_total$Emissions / Bal_total$Emissions[1]
emi2['LA'] <- LA_total$Emissions / LA_total$Emissions[1]

# Long format for faceting: one panel for absolute, one for relative.
d1 <- melt(emi1, id = 1)
d2 <- melt(emi2, id = 1)
d1['type'] <- list(rep('Absolute (T)', 4))
d2['type'] <- list(rep('Rate (with Emissions in 1999 as 1)', 4))
d <- rbind(d1, d2)

g <- ggplot(data = d, aes(x = as.factor(year), y = value, col = variable)) +
  geom_point() +
  geom_line(aes(group = variable)) +
  facet_wrap(~type, scales = 'free', nrow = 2) +
  xlab('year') +
  ylab('Emissions') +
  scale_colour_discrete(name = "Location",
                        breaks = c("Baltimore", "LA"),
                        labels = c("Baltimore City", "Los Angeles County")) +
  ggtitle('Changes in Total Emissions from Motor Vehicle (1999-2008)')

# BUG FIX: pass the plot explicitly. `g` is never printed, so relying on
# ggsave()'s default last_plot() could save the wrong plot or fail.
ggsave('plot6.png', plot = g, width = 6, height = 5)
|
a9dfb2905301667a6a975326e36063d8f7549391
|
364dcb95aac6dff3f8548768dc99bba945ec81b6
|
/man/guide_legend.Rd
|
21dcbe783390d93b38491d56be52e9f5c93551cb
|
[
"MIT"
] |
permissive
|
tidyverse/ggplot2
|
3ef62b72861c246b13ffc2d95678079984fe65c0
|
c76b9aeda648e9b6022b7169021e854c3d3890cb
|
refs/heads/main
| 2023-08-31T07:08:20.846510
| 2023-08-17T16:19:44
| 2023-08-17T16:19:44
| 19,438
| 4,632
| 1,971
|
NOASSERTION
| 2023-09-14T13:25:40
| 2008-05-25T01:21:32
|
R
|
UTF-8
|
R
| false
| true
| 5,985
|
rd
|
guide_legend.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guide-legend.R
\name{guide_legend}
\alias{guide_legend}
\title{Legend guide}
\usage{
guide_legend(
title = waiver(),
title.position = NULL,
title.theme = NULL,
title.hjust = NULL,
title.vjust = NULL,
label = TRUE,
label.position = NULL,
label.theme = NULL,
label.hjust = NULL,
label.vjust = NULL,
keywidth = NULL,
keyheight = NULL,
direction = NULL,
default.unit = "line",
override.aes = list(),
nrow = NULL,
ncol = NULL,
byrow = FALSE,
reverse = FALSE,
order = 0,
...
)
}
\arguments{
\item{title}{A character string or expression indicating a title of guide.
If \code{NULL}, the title is not shown. By default
(\code{\link[=waiver]{waiver()}}), the name of the scale object or the name
specified in \code{\link[=labs]{labs()}} is used for the title.}
\item{title.position}{A character string indicating the position of a
title. One of "top" (default for a vertical guide), "bottom", "left"
(default for a horizontal guide), or "right."}
\item{title.theme}{A theme object for rendering the title text. Usually the
object of \code{\link[=element_text]{element_text()}} is expected. By default, the theme is
specified by \code{legend.title} in \code{\link[=theme]{theme()}} or theme.}
\item{title.hjust}{A number specifying horizontal justification of the
title text.}
\item{title.vjust}{A number specifying vertical justification of the title
text.}
\item{label}{logical. If \code{TRUE} then the labels are drawn. If
\code{FALSE} then the labels are invisible.}
\item{label.position}{A character string indicating the position of a
label. One of "top", "bottom" (default for horizontal guide), "left", or
"right" (default for vertical guide).}
\item{label.theme}{A theme object for rendering the label text. Usually the
object of \code{\link[=element_text]{element_text()}} is expected. By default, the theme is
specified by \code{legend.text} in \code{\link[=theme]{theme()}}.}
\item{label.hjust}{A numeric specifying horizontal justification of the
label text. The default for standard text is 0 (left-aligned) and 1
(right-aligned) for expressions.}
\item{label.vjust}{A numeric specifying vertical justification of the label
text.}
\item{keywidth}{A numeric or a \code{\link[grid:unit]{grid::unit()}} object specifying
the width of the legend key. Default value is \code{legend.key.width} or
\code{legend.key.size} in \code{\link[=theme]{theme()}}.}
\item{keyheight}{A numeric or a \code{\link[grid:unit]{grid::unit()}} object specifying
the height of the legend key. Default value is \code{legend.key.height} or
\code{legend.key.size} in \code{\link[=theme]{theme()}}.}
\item{direction}{A character string indicating the direction of the guide.
One of "horizontal" or "vertical."}
\item{default.unit}{A character string indicating \code{\link[grid:unit]{grid::unit()}}
for \code{keywidth} and \code{keyheight}.}
\item{override.aes}{A list specifying aesthetic parameters of legend key.
See details and examples.}
\item{nrow}{The desired number of rows of legends.}
\item{ncol}{The desired number of column of legends.}
\item{byrow}{logical. If \code{FALSE} (the default) the legend-matrix is
filled by columns, otherwise the legend-matrix is filled by rows.}
\item{reverse}{logical. If \code{TRUE} the order of legends is reversed.}
\item{order}{positive integer less than 99 that specifies the order of
this guide among multiple guides. This controls the order in which
multiple guides are displayed, not the contents of the guide itself.
If 0 (default), the order is determined by a secret algorithm.}
\item{...}{ignored.}
}
\description{
Legend type guide shows key (i.e., geoms) mapped onto values.
Legend guides for various scales are integrated if possible.
}
\details{
Guides can be specified in each \verb{scale_*} or in \code{\link[=guides]{guides()}}.
\code{guide = "legend"} in \verb{scale_*} is syntactic sugar for
\code{guide = guide_legend()} (e.g. \code{scale_color_manual(guide = "legend")}).
As for how to specify the guide for each scale in more detail,
see \code{\link[=guides]{guides()}}.
}
\examples{
\donttest{
df <- expand.grid(X1 = 1:10, X2 = 1:10)
df$value <- df$X1 * df$X2
p1 <- ggplot(df, aes(X1, X2)) + geom_tile(aes(fill = value))
p2 <- p1 + geom_point(aes(size = value))
# Basic form
p1 + scale_fill_continuous(guide = guide_legend())
# Control styles
# title position
p1 + guides(fill = guide_legend(title = "LEFT", title.position = "left"))
# title text styles via element_text
p1 + guides(fill =
guide_legend(
title.theme = element_text(
size = 15,
face = "italic",
colour = "red",
angle = 0
)
)
)
# label position
p1 + guides(fill = guide_legend(label.position = "left", label.hjust = 1))
# label styles
p1 +
scale_fill_continuous(
breaks = c(5, 10, 15),
labels = paste("long", c(5, 10, 15)),
guide = guide_legend(
direction = "horizontal",
title.position = "top",
label.position = "bottom",
label.hjust = 0.5,
label.vjust = 1,
label.theme = element_text(angle = 90)
)
)
# Set aesthetic of legend key
# very low alpha value make it difficult to see legend key
p3 <- ggplot(mtcars, aes(vs, am, colour = factor(cyl))) +
geom_jitter(alpha = 1/5, width = 0.01, height = 0.01)
p3
# override.aes overwrites the alpha
p3 + guides(colour = guide_legend(override.aes = list(alpha = 1)))
# multiple row/col legends
df <- data.frame(x = 1:20, y = 1:20, color = letters[1:20])
p <- ggplot(df, aes(x, y)) +
geom_point(aes(colour = color))
p + guides(col = guide_legend(nrow = 8))
p + guides(col = guide_legend(ncol = 8))
p + guides(col = guide_legend(nrow = 8, byrow = TRUE))
# reversed order legend
p + guides(col = guide_legend(reverse = TRUE))
}
}
\seealso{
Other guides:
\code{\link{guide_bins}()},
\code{\link{guide_colourbar}()},
\code{\link{guide_coloursteps}()},
\code{\link{guides}()}
}
\concept{guides}
|
64878dbe8f437468926943e43c04cd5749373205
|
94cd03120f87b8c2fd3d691da0eb9ab3de691c31
|
/make_plots.R
|
ca9215ba40f2184b4b301ceb867dbb11edd725c2
|
[
"MIT"
] |
permissive
|
schaugf/ImageVAE
|
6df13e858b9508b5416c68284ae374f548c5d8cf
|
1228d50ac9161e1ebe4e49da8b61fca86959e442
|
refs/heads/master
| 2022-08-14T00:19:35.103604
| 2022-07-21T14:44:02
| 2022-07-21T14:44:02
| 134,479,822
| 8
| 9
| null | 2018-08-21T00:49:38
| 2018-05-22T21:51:17
|
Python
|
UTF-8
|
R
| false
| false
| 3,459
|
r
|
make_plots.R
|
# VAE analysis: render diagnostic plots (latent-correlation heatmap,
# training loss, UMAP/t-SNE projections, encoding densities) from a VAE
# results directory.
library(optparse)
library(ggplot2)
library(tidyr)
library(RColorBrewer)
library(Rtsne)
library(gplots)  # BUG FIX: provides heatmap.2(), which was called but never loaded

# Command-line options: results directory (required), plot output
# subdirectory, and figure dimensions in inches.
option_list <- list(
  make_option(c('-d', '--results_dir'), type='character', default=NA,
              help='VAE results dir'),
  make_option(c('-s', '--save_dir'), type='character', default='plots',
              help='save directory'),
  make_option(c('-x', '--img_width'), type='numeric', default=5,
              help='image width (in)'),
  make_option(c('-y', '--img_height'), type='numeric', default=4,
              help='image height (in)')
)
opt_parser <- OptionParser(option_list=option_list)
opt <- parse_args(opt_parser)
if (is.na(opt$results_dir)) { stop('encoding file missing') }

# Plots are written under <results_dir>/<save_dir>.
opt$save_dir <- file.path(opt$results_dir, opt$save_dir)
dir.create(opt$save_dir, showWarnings=FALSE)

# Load data files produced by the VAE pipeline (headerless CSVs).
cat('loading data files...\n')
encodings <- data.frame(read.csv(file.path(opt$results_dir, 'encodings.csv'), header=F))
names(encodings) <- paste0('vae', seq_len(ncol(encodings)))  # idiom fix: seq(1:n) -> seq_len(n)
umap_embedding <- data.frame(read.csv(file.path(opt$results_dir, 'embedding_umap.csv'), header=F))
names(umap_embedding) <- c('umap1', 'umap2')
tsne_embedding <- data.frame(read.csv(file.path(opt$results_dir, 'embedding_tsne.csv'), header=F))
names(tsne_embedding) <- c('tsne1', 'tsne2')
# NOTE(review): zvar/zmean are named by ncol(encodings) -- presumably they
# share the latent dimensionality; confirm the files cannot differ in width.
zvar <- data.frame(read.csv(file.path(opt$results_dir, 'z_log_var.csv'), header=F))
names(zvar) <- paste0('zvar', seq_len(ncol(encodings)))
zmean <- data.frame(read.csv(file.path(opt$results_dir, 'z_mean.csv'), header=F))
names(zmean) <- paste0('zmean', seq_len(ncol(encodings)))
master_df <- cbind(encodings, umap_embedding, tsne_embedding)

# Load training log (tab-separated, with header).
training_log <- data.frame(read.csv(file.path(opt$results_dir, 'training.log'), header=T, sep='\t'))

# Color palette: reversed Spectral, interpolated to a continuous ramp.
myPalette <- colorRampPalette(rev(brewer.pal(11, "Spectral")))
sc <- scale_colour_gradientn(colours = myPalette(100), limits=c(-3, 3))
sf <- scale_fill_gradientn(colours = myPalette(100), limits=c(-3, 3))

# Heatmap of correlations between latent dimensions.
cormat <- cor(encodings)
pdf(file = file.path(opt$save_dir,
                     'vae_heatmap.pdf'),
    height = 6,
    width = 6)
heatmap.2(cormat,
          dendrogram='both',
          trace='none',
          col=myPalette,
          key=F,
          margins=c(6,6),
          lhei=c(.2, 4),
          lwid=c(.5, 4))
dev.off()

# Plot loss trends (first epoch dropped to avoid the initial spike).
# BUG FIX throughout: ggsave() was chained onto plots with `+`, which is an
# error in current ggplot2; build each plot, then save it explicitly.
cat('generating plots...\n')
p_loss <- ggplot(training_log[-1,]) +
  geom_point(aes(x=epoch, y=log(loss))) +
  geom_line(aes(x=epoch, y=log(loss))) +
  theme_minimal() +
  xlab('training epoch') +
  ylab('log loss')
ggsave(file.path(opt$save_dir, 'training.pdf'), plot = p_loss,
       height=opt$img_height, width=opt$img_width, units='in', device='pdf')

# Plot umap projection.
p_umap <- ggplot(umap_embedding) +
  geom_point(aes(x=umap1, y=umap2)) +
  theme_minimal()
ggsave(file.path(opt$save_dir, 'projection_umap.pdf'), plot = p_umap,
       height=opt$img_height, width=opt$img_width, units='in', device='pdf')

# Plot tsne projection.
p_tsne <- ggplot(tsne_embedding) +
  geom_point(aes(x=tsne1, y=tsne2)) +
  theme_minimal()
ggsave(file.path(opt$save_dir, 'projection_tsne.pdf'), plot = p_tsne,
       height=opt$img_height, width=opt$img_width, units='in', device='pdf')

# Plot per-dimension density of the latent encodings.
pd <- encodings %>%
  gather()
p_dens <- ggplot(pd) +
  geom_density(aes(x=value, fill=key), alpha=0.2) +
  theme_minimal()
ggsave(file.path(opt$save_dir, 'latent_distribution.pdf'), plot = p_dens,
       height=opt$img_height, width=opt$img_width, units='in', device='pdf')
|
f1a0f96e67ef580de2b46e4226a3d53459fcc319
|
94a40d5a2a8813031b710279bf378e6533b78dbe
|
/R/RM2quat_svg.R
|
e87ed9346837dcaa348f426e8be1592f61bf1f38
|
[] |
no_license
|
aaronolsen/svgViewR
|
d583340a3a2af42df94b1f4a7fdb6f3db61f4539
|
714847fd601f6c4015b632055cb5dbe7edafe957
|
refs/heads/master
| 2021-01-17T17:25:37.368215
| 2020-10-02T13:40:31
| 2020-10-02T13:40:31
| 70,349,213
| 11
| 3
| null | 2019-06-13T14:58:18
| 2016-10-08T18:11:52
|
R
|
UTF-8
|
R
| false
| false
| 1,092
|
r
|
RM2quat_svg.R
|
# Convert a 3x3 rotation matrix into a quaternion.
#
# Returns the quaternion as c(-qx, -qy, -qz, qw), i.e. with the vector part
# negated (matching the handedness convention used elsewhere in this
# package -- presumably for the SVG viewer; TODO confirm).
#
# Algorithm: branch on the trace / largest diagonal entry so the square
# root argument is positive and the divisor stays well away from zero:
# http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/
RM2Quat_svg <- function(RM){

	trace <- RM[1,1] + RM[2,2] + RM[3,3]

	if (trace > 0) {

		# w dominates: s equals 4*qw
		s <- 2 * sqrt(trace + 1.0)
		qw <- s / 4
		qx <- (RM[3,2] - RM[2,3]) / s
		qy <- (RM[1,3] - RM[3,1]) / s
		qz <- (RM[2,1] - RM[1,2]) / s

	} else if ((RM[1,1] > RM[2,2]) & (RM[1,1] > RM[3,3])) {

		# x dominates: s equals 4*qx
		s <- 2 * sqrt(1.0 + RM[1,1] - RM[2,2] - RM[3,3])
		qw <- (RM[3,2] - RM[2,3]) / s
		qx <- s / 4
		qy <- (RM[1,2] + RM[2,1]) / s
		qz <- (RM[1,3] + RM[3,1]) / s

	} else if (RM[2,2] > RM[3,3]) {

		# y dominates: s equals 4*qy
		s <- 2 * sqrt(1.0 + RM[2,2] - RM[1,1] - RM[3,3])
		qw <- (RM[1,3] - RM[3,1]) / s
		qx <- (RM[1,2] + RM[2,1]) / s
		qy <- s / 4
		qz <- (RM[2,3] + RM[3,2]) / s

	} else {

		# z dominates: s equals 4*qz
		s <- 2 * sqrt(1.0 + RM[3,3] - RM[1,1] - RM[2,2])
		qw <- (RM[2,1] - RM[1,2]) / s
		qx <- (RM[1,3] + RM[3,1]) / s
		qy <- (RM[2,3] + RM[3,2]) / s
		qz <- s / 4
	}

	# Negate the vector part before returning
	c(-qx, -qy, -qz, qw)
}
|
823da550750f5be073212e856f4e91ed13de0b6b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/refund/examples/ffpcplot.Rd.R
|
22c04862ccaf42f46f329d89ed88d2513c0f14de
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
ffpcplot.Rd.R
|
# Auto-extracted example for refund::ffpcplot (no runnable example body;
# the upstream example is wrapped in a "Not run" block).
library(refund)
### Name: ffpcplot
### Title: Plot PC-based function-on-function regression terms
### Aliases: ffpcplot
### ** Examples
## Not run:
##D #see ?ffpc
## End(Not run)
|
b33dc53097ac39bb6225f301208d1684dcb6216d
|
d48261d78cc71adfec615d96308f71d057f5b596
|
/R/practice_Part09.r
|
3546928ea0fa759bd4f230047674eecc5935f336
|
[
"MIT"
] |
permissive
|
dayoungMM/TIL
|
97cc33977d90bcac131460f498a228a50d300cf0
|
b844ef5621657908d4c256cdfe233462dd075e8b
|
refs/heads/master
| 2020-08-16T11:13:08.243190
| 2020-08-10T08:29:40
| 2020-08-10T08:29:40
| 215,495,171
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,834
|
r
|
practice_Part09.r
|
## Chapter 9. Data analysis project
"한국인의 삶을 파악해보자."
#### 09-1 The Korea Welfare Panel Study data ####
# - Published by the Korea Institute for Health and Social Affairs
# - Purpose: study household economic activity to inform policy support
# - ~7,000 households nationwide, tracked annually from 2006 to 2015
# - Thousands of variables on economic activity, living conditions,
#   welfare needs, etc.
rm(list=ls())
## 09-1. Preparing the Korea Welfare Panel data for analysis ---------------------------- ##
#install.packages("foreign") # install the foreign package
library(foreign) # read SPSS files
library(dplyr) # preprocessing
library(ggplot2) # visualization
library(readxl) # read Excel files
# download : http://bit.ly/Koweps_hpc10_2015_v2
# Load the data (SPSS .sav file as a data frame)
raw_welfare <- read.spss(file = "Data/Koweps_hpc10_2015_beta1.sav",
to.data.frame = T)
View(raw_welfare[1:3,])
# Work on a copy so the raw data stays untouched
welfare <- raw_welfare
head(welfare)
tail(welfare)
View(welfare[1:3,])
dim(welfare) # 16664 957
str(welfare)
summary(welfare)
# p211. See the job-code book: Data/Koweps_Codebook.xlsx
welfare <- rename(welfare,
sex = h10_g3, # sex
birth = h10_g4, # birth year
marriage = h10_g10, # marital status
religion = h10_g11, # religion
income = p1002_8aq1, # monthly income
code_job = h10_eco9, # job code
code_region = h10_reg7) # region code
welfare <- welfare %>% select(sex, birth, marriage,
religion, income, code_job,
code_region)
# rm(raw_welfare)
save(welfare, file="welfare.rData")
# See p212 for the data-analysis workflow
#### 09-2 Income differences by sex ####
## -- "Does monthly income differ by sex?" -------------------- ##
class(welfare$sex)
table(welfare$sex)
# Check for outliers
table(welfare$sex)
# Recode the outlier code (9 = unknown) as missing
welfare$sex <- ifelse(welfare$sex == 9, NA, welfare$sex)
# Check for missing values
table(is.na(welfare$sex))
# Label the sex codes (see the codebook)
welfare$sex <- ifelse(welfare$sex == 1, "male", "female")
table(welfare$sex)
qplot(welfare$sex)
# - Quiz: convert sex to a factor, coding male as 1 and female as 2.
welfare$sex <- as.factor(welfare$sex)
welfare$sex <- factor(welfare$sex, levels = c('male','female'))
table(welfare$sex)
str(welfare$sex)
## ----- Inspect and preprocess the income variable ----------------------------- ##
class(welfare$income)
summary(welfare$income)
qplot(welfare$income)
#install.packages("fBasics")
#library(fBasics)
#skewness(welfare$income, na.rm=T) # skewness within about 3 is usually fine to analyze without adjustment
qplot(welfare$income) + xlim(0, 1000)
# Check for outliers
summary(welfare$income)
prop.table(table(is.na(welfare$income)))
prop.table(table(welfare$income == 0))
# Recode outliers as missing: exclude people with no income from the mean.
welfare$income <- ifelse(welfare$income %in% c(0, 9999), NA, welfare$income)
sum(welfare$income == 9999, na.rm=T) # confirm the outliers were handled
# Check for missing values
table(is.na(welfare$income))
names(welfare)
welfare$income
# Compute the sex ratio among people with missing income.
wf <- welfare %>% filter(is.na(income)) %>%
group_by(sex) %>%
summarise(n = n())
wf <- as.data.frame(wf)
wf$n <- wf$n / sum(wf$n)
wf
# And the sex ratio over all the data?
prop.table(table(welfare$sex))
## --- Mean and median monthly income by sex ------------------------------- ##
sex_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(sex) %>%
summarise(mean_income = mean(income))
sex_income_med <- welfare %>%
filter(!is.na(income)) %>%
group_by(sex) %>%
summarise(mean_income = median(income))
sex_income_med
sex_income
ggplot(data = sex_income, aes(x = sex, y = mean_income)) + geom_col()
#### 09-3 Income by age ####
## ------- "At what age is monthly income highest?" ------- ##
head(welfare$birth)
class(welfare$birth)
summary(welfare$birth)
qplot(welfare$birth)
# Check for outliers
summary(welfare$birth)
# Check for missing values
table(is.na(welfare$birth))
# Recode the outlier code (9999) as missing
welfare$birth <- ifelse(welfare$birth == 9999, NA, welfare$birth)
table(is.na(welfare$birth))
## birth has no missing values.
## ----------- Derived variable: age ----------------- ##
welfare$age <- 2015 - welfare$birth + 1
summary(welfare$age)
qplot(welfare$age)
## --- Mean monthly income by age --------------- ##
age_income <- welfare %>% select(age, income) %>%
filter(!is.na(income)) %>%
group_by(age) %>%
summarise(mean_income = mean(income))
head(age_income)
ggplot(data = age_income, aes(x = age, y = mean_income)) + geom_line()
# Quiz: compute the non-working rate of the working-age population.
# Ages [20, 60); responses of 0 or 9999 count as not working.
# - In raw_welfare$p1002_8aq1, missing means "none"
# - 0 means "no"
# - everything else is "yes";
# tabulate these for the 20-59 age range.
# Working-age non-employment rate
unique(welfare$age)
# BUG FIX: work_pop must exist before it is counted; the original computed
# n_datas from work_pop two statements before creating it.
work_pop <- welfare %>% filter(age >=20 & age< 60)
n_datas <- as.numeric(count(work_pop))  # number of working-age respondents
n_datas
# Share of working-age respondents with no income (NA or 0)
work_pop %>% mutate(notworking = ifelse(is.na(income)|income ==0, 1,0)) %>%
group_by(notworking) %>% filter(notworking ==1)%>%
summarise(notwrk=n()/n_datas)
notwrk <-work_pop %>% mutate(notworking = ifelse(is.na(income)|income ==0, 1,0)) %>%
group_by(notworking) %>%
summarise(notwrk=n())
notwrk
ggplot(data = sex_income, aes(x = sex, y = mean_income)) + geom_col()
ggplot(data = notwrk, aes(x = notworking, y=notwrk )) + geom_col()
# Frequency by sex / age band
work_pop <- welfare %>% filter(age >=20 & age< 60)
head(work_pop)
ggplot(data = work_pop, aes(x = age)) + geom_histogram(bins=5)
ggplot(data = work_pop, aes(x = age)) + geom_bar()
work_pop <- welfare %>% filter(age >=20 & age< 60)
work_pop %>%
mutate(notworking = ifelse(is.na(income)|income ==0, 1,0), age10 = age %/% 10 * 10) %>%
select(sex, age10,notworking ) %>%
group_by(sex, age10,notworking) %>%
summarise(n= n())
# Quiz: treating missing income as 0, compare mean income by sex.
work_pop$income<-ifelse(is.na(work_pop$income ), 0, work_pop$income)
work_pop %>% group_by(sex) %>% summarise(mean = mean(income))
#### 09-4 연령대별 월급 차이 ####
## --------- "어떤 연령대의 월급이 가장 많을까?" ------- ##
welfare <- welfare %>%
mutate(ageg = ifelse(age < 30, "young",
ifelse(age <= 59, "middle", "old")))
table(welfare$ageg)
qplot(welfare$ageg)
## ------- 1. 연령대별 월급 평균표 만들기 --------- ##
ageg_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(ageg) %>%
summarise(mean_income = mean(income))
ageg_income
ggplot(data = ageg_income, aes(x = ageg, y = mean_income)) + geom_col()
## -------- 막대 정렬 : 초년, 중년, 노년 나이 순 ---------- ##
ggplot(data = ageg_income, aes(x = ageg, y = mean_income)) +
geom_col() +
scale_x_discrete(limits = c("young", "middle", "old"))
# Quiz : 20대 30대 40대 50대 60대 이상으로 분류해보시오. 10대는 제거.
# 10대는 제거하고 수입 평균을 구하시오
age_income<-welfare %>% mutate(age10 = age %/%10 * 10,
age10 = ifelse (age10 > 60, 60,age10)) %>%
filter(age10 != 10 & !is.na(income)) %>%
group_by(age10) %>%
summarise(income_avg = mean(income))
age_income
ggplot(data = age_income, aes(x=age10, y=income_avg)) + geom_col()
#### 09-5 연령대별 성별 평균 월급 표 ####
# ------- "성별 월급 차이는 연령대별로 다를까?" -----
# 먼저 위의 코드를 활용하여 직접 작성해봅시다.
age_income<-welfare %>% mutate(age10 = age %/%10 * 10)
age_sex_income <- age_income %>% filter(!is.na(income)) %>%
group_by(age10, sex) %>%
summarise(mean_income = mean(income))
ggplot(age_sex_income, aes(age10, mean_income,
fill=sex)) +
geom_col()
## ----- 연령별 등급으로 다시 작성하면 ----------- ##
sex_income <- welfare %>%
filter(!is.na(income)) %>%
group_by(ageg, sex) %>%
summarise(mean_income = mean(income))
sex_income
ggplot(data = sex_income, aes(x = ageg, y = mean_income, fill = sex)) +
geom_col() +
scale_x_discrete(limits = c("young", "middle", "old"))
ggplot(data = sex_income, aes(x = ageg, y = mean_income, fill = sex)) +
geom_col(position = "dodge") +
scale_x_discrete(limits = c("young", "middle", "old"))
## Quiz: 나이 및 성별 월급 차이 분석하기 --------------- ##
# 나이 및 성별 월급 평균표를 작성하여 그래프로 표현해보자.
# - income의 비 결측치를 필터링
# - age, sex로 그룹
# - income에 대해 평균을 구하여 sex_age를 생성한다.
# - sex_age로 그래프를 그린다.
sex_age<-welfare %>%
filter(!is.na(income))%>%
group_by(age,sex) %>%
summarise(avg_income= mean(income))
sex_age
ggplot(data = sex_age, aes(x = age, y = avg_income, group=sex,colour=sex)) +
geom_line()
#### 09-6 직업별 월급 차이 ####
## -------- "어떤 직업이 월급을 가장 많이 받을까?" ---------- ##
class(welfare$code_job)
table(welfare$code_job, useNA="ifany")
### - 직업분류코드 목록 불러오기.
library(readxl)
list_job <- read_excel("Data/Koweps_Codebook.xlsx",
col_names = T, sheet = 2)
head(list_job)
dim(list_job)
## 결합
v1 = c(1, 2, 3); v2 = c(5, 4, 3)
cbind(v1, v2)
rbind(v1, v2)
## - welfare에 직업명 결합
welfare <- left_join(welfare, list_job, id = "code_job")
table(welfare$code_job, useNA="ifany")
table(is.na(welfare$code_job), useNA="ifany")
head(welfare)
welfare %>%
filter(!is.na(code_job)) %>%
select(code_job, job) %>%
head(10)
## ---------- job별 평균 임금을 계산하기 ------------- ##
job_income <- welfare %>%
filter(!is.na(job) & !is.na(income)) %>%
group_by(job) %>%
summarise(mean_income = mean(income))
head(job_income)
top10 <- job_income %>%
arrange(desc(mean_income)) %>%
head(10)
top10
ggplot(data = top10, aes(x = reorder(job, mean_income), y = mean_income)) +
geom_col() +
coord_flip()
# 하위 10위 추출
bottom10 <- job_income %>%
arrange(mean_income) %>%
head(10)
bottom10
# 그래프 만들기
ggplot(data = bottom10, aes(x = reorder(job, -mean_income),
y = mean_income)) +
geom_col() +
coord_flip() +
ylim(0, 850)
#### 09-7 성별 직업 빈도 ####
## -------- 성별로 어떤 직업이 가장 많을까? ------- ##
# 남성 직업 빈도 상위 10개 추출
job_male <- welfare %>%
filter(!is.na(job) & sex == "male") %>%
group_by(job) %>%
summarise(n = n()) %>%
arrange(desc(n)) %>%
head(10)
job_male
# 여성 직업 빈도 상위 10개 추출
job_female <- welfare %>%
filter(!is.na(job) & sex == "female") %>%
group_by(job) %>%
summarise(n = n()) %>%
arrange(desc(n)) %>%
head(10)
job_female
str(data)
# 남성 직업 빈도 상위 10개 직업
ggplot(data = job_male, aes(x = reorder(job, n), y = n)) +
geom_col() +
coord_flip()
# 여성 직업 빈도 상위 10개 직업
ggplot(data = job_female, aes(x = reorder(job, n), y = n)) +
geom_col() +
coord_flip()
#---- Quiz: 연령 등급에서 노년층(ageg = old)을 제외하고 분석하면:
# 1. 노년층 제외 남성 직업 빈도 상위 10개 추출
# 2. 노년층 제외 여성 직업 빈도 상위 10개 추출
# 3. 노년층 제외 남성 직업 빈도 상위 10개 직업 그래프
# 4. 노년층 제외 여성 직업 빈도 상위 10개 직업 그래프
# Quiz: 직업 빈도 상위 10개에 대한 남녀 비율
# Quiz : 월급 상위 30개 직업에 대한 남녀 비율\
#### 09-8 종교 유무에 따른 이혼율 ####
## -------- 종교가 있는 사람들이 이혼을 덜 할까? ------- ##
class(welfare$religion)
table(welfare$religion, useNA = 'ifany')
# 종교 유무 이름 부여
welfare$religion <- ifelse(welfare$religion == 1, "yes", "no")
table(welfare$religion)
qplot(welfare$religion, geom = 'bar')
ggplot(welfare, aes(religion, fill=factor(religion))) +
geom_bar()
## ----- 혼인 상태 변수 검토 및 전처리 -------- ##
class(welfare$marriage)
table(welfare$marriage, useNA = "ifany")
# 이혼 여부 변수 만들기
welfare$group_marriage <- ifelse(welfare$marriage == 1, "marriage",
ifelse(welfare$marriage == 3, "divorce", NA))
table(welfare$group_marriage)
table(is.na(welfare$group_marriage))
qplot(welfare$group_marriage)
## ------- 종교 유무에 따른 이혼율 분석 -------- ##
religion_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
group_by(religion, group_marriage) %>%
summarise(n = n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 1))
religion_marriage
religion_marriage <- welfare %>%
filter(!is.na(group_marriage)) %>%
count(religion, group_marriage) %>%
group_by(religion) %>%
mutate(pct = round(n/sum(n)*100, 1))
religion_marriage
# 이혼율 표 만들기
divorce <- religion_marriage %>%
filter(group_marriage == "divorce") %>%
select(religion, pct)
divorce
ggplot(data = divorce, aes(x = religion, y = pct)) + geom_col()
## ---- Divorce rate by age decade (and, later, religion) ---- ##
# BUG FIX: `age10` was only ever added to the `age_income` data frame
# (never to `welfare` itself), so grouping welfare by age10 failed;
# derive it explicitly inside each pipeline.
ageg_marriage <- welfare %>%
  filter(!is.na(group_marriage)) %>%
  mutate(age10 = age %/% 10 * 10) %>%
  group_by(age10, group_marriage) %>%
  summarise(n = n()) %>%
  mutate(tot_group = sum(n)) %>%
  mutate(pct = round(n/tot_group*100, 1))
ageg_marriage
# Equivalent, more compact form using count().
ageg_marriage <- welfare %>%
  filter(!is.na(group_marriage)) %>%
  mutate(age10 = age %/% 10 * 10) %>%
  count(age10, group_marriage) %>%
  group_by(age10) %>%
  mutate(pct = round(n/sum(n)*100, 1))
# Drop the 20s/30s decades and keep only the divorce rows.
ageg_divorce <- ageg_marriage %>%
  filter(!(age10 %in% c(20, 30)) & group_marriage == "divorce") %>%
  select(age10, pct)
ageg_divorce
# Bar chart of divorce rate by age decade.
ggplot(data = ageg_divorce, aes(x = age10, y = pct)) + geom_col()
## ------ 연령 대 및 종교 유무에 따른 이혼율 표 --------- ##
# 연령대, 종교유무, 결혼상태별 비율표 만들기
ageg_religion_marriage <- welfare %>%
filter(!is.na(group_marriage) & ageg != "young") %>%
group_by(ageg, religion, group_marriage) %>%
summarise(n = n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 1))
ageg_religion_marriage
ageg_religion_marriage <- welfare %>%
filter(!is.na(group_marriage) & ageg != "young") %>%
count(ageg, religion, group_marriage) %>%
group_by(ageg, religion) %>%
mutate(pct = round(n/sum(n)*100, 1))
# 연령대 및 종교 유무별 이혼율 표 만들기
df_divorce <- ageg_religion_marriage %>%
filter(group_marriage == "divorce") %>%
select(ageg, religion, pct)
df_divorce
# BUG FIX: `religion` was already recoded to "yes"/"no" earlier in this
# script, so `religion == 1` was always FALSE and every bar showed "no".
# Use the recoded values directly.
relig_mark <- factor(df_divorce$religion)
ggplot(data = df_divorce, aes(x = ageg, y = pct,
                              fill = relig_mark)) +
  geom_col(position = "dodge")
#### 09-9 지역별 연령 비율 ####
## ------- 노년층이 많은 지역은 어디일까? -------- ##
class(welfare$code_region)
table(welfare$code_region)
# 지역 코드 목록 만들기
list_region <- data.frame(code_region = c(1:7),
region = c("서울",
"수도권(인천/경기)",
"부산/경남/울산",
"대구/경북",
"대전/충남",
"강원/충북",
"광주/전남/전북/제주도"))
list_region
# 지역명 변수 추가
welfare <- left_join(welfare, list_region, id = "code_region")
welfare %>%
select(code_region, region) %>%
head
## ------ 지역 별 연령대 비율 분석 ---------- ##
region_ageg <- welfare %>%
group_by(region, ageg) %>%
summarise(n = n()) %>%
mutate(tot_group = sum(n)) %>%
mutate(pct = round(n/tot_group*100, 2))
head(region_ageg)
region_ageg <- welfare %>%
count(region, ageg) %>%
group_by(region) %>%
mutate(pct = round(n/sum(n)*100, 2))
ggplot(data = region_ageg, aes(x = region, y = pct, fill = ageg)) +
geom_col() +
coord_flip()
## -------------- 막대 정렬하기 : 노년층 비율 높은 순 ------------------ ##
# 노년층 비율 내림차순 정렬
list_order_old <- region_ageg %>%
filter(ageg == "old") %>%
arrange(pct)
list_order_old
# 지역명 순서 변수 만들기
order <- list_order_old$region
order
ggplot(data = region_ageg, aes(x = region, y = pct, fill = ageg)) +
geom_col() +
coord_flip() +
scale_x_discrete(limits = order)
# 다르게 나타내면:
ggplot(data = list_order_old, aes(x = region, y = pct, fill = ageg)) +
geom_col() +
coord_flip() +
scale_x_discrete(limits = order)
# ------- 연령대 순으로 막대 색깔 나열하기 -----------#
class(region_ageg$ageg)
levels(region_ageg$ageg)
region_ageg$ageg <- factor(region_ageg$ageg)
region_ageg$ageg <- factor(region_ageg$ageg,
level = c("old", "middle", "young"))
str(region_ageg$ageg)
levels(region_ageg$ageg)
table(region_ageg$ageg)
ggplot(data = list_order_old, aes(x = region, y = pct, fill = ageg)) +
geom_col() +
coord_flip() +
scale_x_discrete(limits = order)
# Quiz: 직업종별 지역분포를 작성해보자.
# - 각 직업코드의 첫 digit을 직업종으로 하자.
# - 1 ~ 10까지의 직업종에 대해 지역별 비율을 구한다.
# - 7대 지역을 x축으로 직업종의 비율을 y축으로 bar 챠트를 작성한다.
|
76da3f430707abaf96ef4fc95d3bc8efaa4356b8
|
13d4d0ccddb34e1142c3cdcd11bb18704c9354ab
|
/coding sample.R
|
5fdc8173f6933dd44a7f43975ae99ecdf7fbdb84
|
[] |
no_license
|
marck404/tsne-example
|
0f1144a1f504ce8a6d88486258b71917300db245
|
f62577174ba960ff9348c81d11d72dde0ca9e189
|
refs/heads/master
| 2022-11-24T12:40:31.576256
| 2020-08-01T23:33:54
| 2020-08-01T23:33:54
| 284,356,004
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,265
|
r
|
coding sample.R
|
#### Project application of dimensionality reduction and cluster analysis
#### Marcos Torres Vivanco
## Pipeline: load Fashion-MNIST test images, embed the "shoe" class (label 7)
## in 2D with t-SNE, k-means the embedding into 3 clusters, then visually
## inspect random samples from each cluster.
library(readr) ## read data
library(scatterplot3d) ## 3D graph -- NOTE(review): not used below, confirm needed
library(Rtsne) ## t-sne algorithm
## We load the MNIST-like dataset of 28x28 labeled fashion images
fashion_mnist_test <- read_csv("fashion-mnist_test.csv")
lab <- as.numeric(t(fashion_mnist_test[,1])) ## label of data (first column)
data <- fashion_mnist_test[,-1] ## images: one 784-pixel row per image
## example of one image: reverse + transpose so image() renders it upright
v <- rev(as.numeric(data[10,]))
v1 <- t(matrix(v,ncol=28,byrow=T))
par(mar=c(0,0,0,0))
image(v1,col=grey(seq(0,1,length=256)))
## filtering the shoes from the data (label == 7; treated as shoes here)
shoes <- subset(fashion_mnist_test,label==7)
sh <- shoes[,-1]
## we use T-SNE to find a 2D representation of the shoes images
set.seed(20) # fixed seed: both Rtsne and kmeans are stochastic
m7 <- as.matrix(sh)
mitsne <- Rtsne(m7, pca=TRUE, perplexity=40,theta=0.0,check_duplicates = FALSE)
plot(mitsne$Y,main="T-sne fashion MNIST shoes")
## we find clusters (k = 3) in the cloud of embedded points
m <- as.data.frame(mitsne$Y)
clus <- kmeans(m,3)
plot(m,col=clus$cluster)
nube <- data.frame(clus$cluster,m)      # "nube" = cloud; embedding + labels
shclust <- data.frame(clus$cluster,sh)  # raw pixels + cluster labels
## We obtain 6 random sample images from each cluster
m1 <- which(shclust$clus.cluster==1)
s1 <- sample(m1,6)
m2 <- which(shclust$clus.cluster==2)
s2 <- sample(m2,6)
m3 <- which(shclust$clus.cluster==3)
s3 <- sample(m3,6)
## the data we chose in each cluster, highlighted on the embedding
plot(m,col=clus$cluster)
points(m[s1,],pch=20,lwd=20,col=1)
points(m[s2,],pch=20,lwd=20,col=2)
points(m[s3,],pch=20,lwd=20,col=3)
## images of the samples from cluster 1 (2x3 grid)
par(mfrow=c(2,3))
for (i in s1) {
  v <- rev(as.numeric(sh[i,]))
  v1 <- matrix(v,ncol=28)
  par(mar=c(0,0,0,0))
  image(v1,col=grey(seq(0,1,length=256)))
}
## images of the samples from cluster 2
par(mfrow=c(2,3))
for (i in s2) {
  v <- rev(as.numeric(sh[i,]))
  v1 <- matrix(v,ncol=28)
  par(mar=c(0,0,0,0))
  image(v1,col=grey(seq(0,1,length=256)))
}
## images of the samples from cluster 3
par(mfrow=c(2,3))
for (i in s3) {
  v <- rev(as.numeric(sh[i,]))
  v1 <- matrix(v,ncol=28)
  par(mar=c(0,0,0,0))
  image(v1,col=grey(seq(0,1,length=256)))
}
## we can notice that TSNE identifies particularities of each cluster;
## for example the second cluster has boot-like shoes
|
3f5d27c8cda45fb8c974a282efd26b727c2ddb4d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/TDA/examples/clusterTree.Rd.R
|
d4f9ab6d940200574cd52581bf3730e36f0c0853
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,121
|
r
|
clusterTree.Rd.R
|
library(TDA)
### Name: clusterTree
### Title: Density clustering: the cluster tree
### Aliases: clusterTree print.clusterTree
### Keywords: nonparametric

### ** Examples

## Generate synthetic 2D data from 3 Gaussian clusters
n <- 1200 # NOTE(review): comment said "sample size", but only 3 * floor(n/4)
          # = 900 points are actually generated below -- confirm intent
Neach <- floor(n / 4)
X1 <- cbind(rnorm(Neach, 1, .8), rnorm(Neach, 5, 0.8))
X2 <- cbind(rnorm(Neach, 3.5, .8), rnorm(Neach, 5, 0.8))
X3 <- cbind(rnorm(Neach, 6, 1), rnorm(Neach, 1, 1))
X <- rbind(X1, X2, X3)

k <- 100 # number of nearest neighbours for the knn density estimate

## Density clustering using knn and kde density estimators
Tree <- clusterTree(X, k, density = "knn")
TreeKDE <- clusterTree(X, k, h = 0.3, density = "kde")

par(mfrow = c(2, 3))
plot(X, pch = 19, cex = 0.6)
# plot lambda trees (cluster tree indexed by density level)
plot(Tree, type = "lambda", main = "lambda Tree (knn)")
plot(TreeKDE, type = "lambda", main = "lambda Tree (kde)")
# plot clusters: colour each point by its branch of the knn tree
plot(X, pch = 19, cex = 0.6, main = "cluster labels")
for (i in Tree[["id"]]){
points(matrix(X[Tree[["DataPoints"]][[i]],],ncol = 2), col = i, pch = 19,
cex = 0.6)
}
# plot kappa trees (cluster tree indexed by probability mass)
plot(Tree, type = "kappa", main = "kappa Tree (knn)")
plot(TreeKDE, type = "kappa", main = "kappa Tree (kde)")
|
253cd3161e4f0599d657d09627fffd887e367a2d
|
8e98acb706449542a9e4e40038da103f8c2704b6
|
/elicitation/lib/partition.points.R
|
8952ac65b945da4e11ef8b02ab7ebe441629ff3b
|
[] |
no_license
|
tommite/pubs-code
|
f4e21dcafd999f8d7b995cefdffb3786c49b247e
|
b2b33e97adbef68787122519384000c55f956811
|
refs/heads/master
| 2016-09-05T09:01:44.793555
| 2014-05-09T12:08:03
| 2014-05-09T12:08:03
| 5,935,181
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 466
|
r
|
partition.points.R
|
# Load the compiled helper once per session.
if (!is.loaded("partitionPointsForR")) dyn.load("lib/elicitation.so")

# Partition points by a hyperplane.
#
# Classifies each row of `points` by which side of the hyperplane through
# `x0` with normal vector `normal` it lies on, via the compiled C routine
# partitionPointsForR.
#
# Args:
#   points : numeric matrix, one point per row.
#   x0     : numeric vector, a point on the hyperplane (length = ncol(points)).
#   normal : numeric vector, hyperplane normal (length = ncol(points)).
# Returns: logical vector of length nrow(points); TRUE for points on the
#   positive side of the hyperplane.
partition.points <- function(points, x0, normal) {
  stopifnot(is.matrix(points))
  N <- nrow(points)
  n <- ncol(points)
  stopifnot(length(x0) == n)
  stopifnot(length(normal) == n)
  # FIX: dropped `DUP = FALSE` -- it was deprecated in R 3.1.0 and is an
  # error in current R; the call is otherwise unchanged.
  .C("partitionPointsForR",
     as.integer(N),
     as.integer(n),
     points,
     x0,
     normal,
     partition = array(0.0, dim = N),
     NAOK = FALSE, PACKAGE = "elicitation"
  )$partition > 0
}
|
073ef3c0155d6d7b34af26fafe78e592fb4d7b2d
|
6527ae2a45fd24d0f76757cb93ffef178dd78d06
|
/R/adaptiveHM.main.swHM.R
|
99cff4a7fd57cd0a008dfcb47615c965122cddaf
|
[] |
no_license
|
benliemory/adaptiveHM
|
9ef1413265aaa5fb36a734b35a61a81778d2e899
|
06cf79557d6897633daa42e4d8874cff107346e4
|
refs/heads/master
| 2020-02-26T17:42:34.461583
| 2017-02-05T03:20:54
| 2017-02-05T03:20:54
| 56,416,803
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,485
|
r
|
adaptiveHM.main.swHM.R
|
# Sliding-window hierarchical-model (swHM) differential-expression test.
#
# For each gene, computes a moderated two-sample t-statistic comparing
# Treatment vs Control, where the per-gene variance is estimated by
# bayesHierVar.swHM (defined elsewhere in this package) over a sliding
# window of genes ordered by historical variance.
#
# Args:
#   Control, Treatment : numeric matrices, genes in rows, samples in columns
#     (sample size n is taken from Control; equal group sizes assumed --
#     TODO confirm).
#   IPBT.prior : if TRUE, historical variances come from the packaged
#     IPBT3digits table (column IPBT.id); otherwise from `history`.
#   winSize    : half-width of the sliding window, in genes.
#   history    : matrix of historical data; rows presumably aligned with
#     Control rows -- verify against callers.
#   IPBT.id    : column index into IPBT3digits when IPBT.prior = TRUE.
#
# Returns: data.frame (Probe_id, Adjust_t, fold_change, P_value_appro,
#   False_Discovery_Rate) sorted by decreasing |Adjust_t|.
adaptiveHM.main.swHM <-
  function(Control,Treatment, IPBT.prior=FALSE,winSize = 50,
           history=NA,IPBT.id=NA)
  {
    # Exactly one source of historical information must be supplied.
    if ( (IPBT.prior==FALSE & all(is.na(history)==1) ) |
         (IPBT.prior==TRUE & all(is.na(IPBT.id)==1) ) )
      stop("Historical information is missing!\nPlease provide historical data or use IPBT prior!")
    # Historical per-gene variances are used only to ORDER genes for the
    # sliding window.
    if(IPBT.prior==FALSE)
    {
      hist_var = apply(history,1,var)
    }
    if(IPBT.prior==TRUE)
    {
      data(IPBT3digits)  # packaged reference data
      data(SampleSize)
      hist_var = IPBT3digits[,IPBT.id]^2
    }
    # Effect estimate: difference of group means, per gene.
    mean_treat=rowMeans(Treatment)
    mean_control=rowMeans(Control)
    mu=mean_treat-mean_control
    n = dim(Control)[2]  # samples per group (from Control)
    var=numeric(length = nrow(Control))  # shrunken variances, filled below
    row.names(Control)=1:nrow(Control)
    #geneNames = row.names(Control)
    geneNames = names(hist_var)  # output ids come from the historical data
    s2est=apply(Control,1,var)   # raw per-gene sample variances (Control only)
    gene_num=length(hist_var)
    order_var=order(hist_var)    # gene order for the sliding window
    # Left edge: the winSize lowest-variance genes share the first window.
    ids=order_var[1:winSize]
    ix = order_var[1:(2*winSize+1)]
    var[ids]=bayesHierVar.swHM(Control[ix,],s2est[ix])[1:winSize]
    # Interior: each gene sits at the centre of a (2*winSize+1)-gene window.
    var[order_var[(winSize+1):(gene_num-winSize)]] = sapply((winSize+1):(gene_num-winSize),
                                                            function(x){ ix = order_var[(x-winSize):(x+winSize)]
                                                            bayesHierVar.swHM(Control[ix,],s2est[ix])[(winSize+1)]})
    # Right edge: the last winSize genes share the final window.
    ids=order_var[(gene_num-winSize+1):gene_num]
    ix = order_var[(gene_num-2*winSize):gene_num]
    var[ids]= bayesHierVar.swHM(Control[ix,],s2est[ix])[(winSize+2):(2*winSize+1)]
    # Moderated t with two-group scaling sqrt(2/n); df = 2n - 2.
    Adjust_t = mu/sqrt(var)*sqrt(2/n)
    Pvalue_appro = 2*pt(abs(Adjust_t) ,df = 2*n -2, lower.tail = FALSE)
    FDR = p.adjust(Pvalue_appro, method = "BH" )
    # NOTE(review): fold_change is Control minus Treatment, the opposite sign
    # of mu above -- confirm this convention is intended.
    Output = data.frame(Probe_id = geneNames,
                        Adjust_t = Adjust_t,
                        fold_change = rowMeans(Control) - rowMeans(Treatment),
                        P_value_appro = Pvalue_appro,
                        False_Discovery_Rate = FDR
    )
    # Sort by effect strength; the value of this assignment is the return
    # value of the function (returned invisibly).
    Output = Output[order(abs(Output$Adjust_t),decreasing = T),]
  }
|
0af82e07b0bad273b8c556c22906384aaa9cd904
|
c26390b0aaf512666dfae5003ebac48a2bad0e0a
|
/tests/testthat.R
|
a217cf4239cc096d0b110ec64406fd917b182566
|
[] |
no_license
|
cran/OrthoPanels
|
25edd572167c9803cbc4e7ffbc48a56aeb5c09da
|
e97143db293bdcc810a5d1daf5ad1c309e1d3ddf
|
refs/heads/master
| 2022-06-30T19:49:52.617343
| 2022-06-09T04:20:01
| 2022-06-09T04:20:01
| 48,085,305
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 70
|
r
|
testthat.R
|
# Standard testthat entry point: discovers and runs all tests under
# tests/testthat/ for the OrthoPanels package (invoked by R CMD check).
library(testthat)
library(OrthoPanels)

test_check("OrthoPanels")
|
1551bd7f36c419e6ad0b313e8ae66210f3825e0f
|
5c1d82472a00e69eee7f730fc16af5868814bb0a
|
/Twitter.code.R
|
c2b046795dce1364ed493467a4b4870c5f9c5841
|
[] |
no_license
|
rahulxc/TwitterDataExtraction
|
157bb087fee540269a39ad00f8640e4583b9add1
|
4f763b01c417a430f6b23290e5eb0e32ef095334
|
refs/heads/master
| 2021-01-19T16:11:27.899341
| 2016-02-28T20:40:17
| 2016-02-28T20:40:17
| 52,742,714
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,136
|
r
|
Twitter.code.R
|
# Twitter data-extraction script: authenticates via OAuth, streams tweets
# matching a keyword with streamR, then runs a REST search with twitteR.
#
# SECURITY(review): OAuth consumer keys, secrets, and access tokens are
# hard-coded below. They must be treated as leaked -- revoke them and load
# credentials from environment variables or a gitignored config file.
# NOTE(review): install.packages() calls in a script re-install on every
# run; normally done once, interactively.
install.packages('streamR')
install.packages('ROAuth')
install.packages('RCurl')
install.packages("twitteR")

library(ROAuth)
library(streamR)
library(RCurl)
library(twitteR)

# create your OAuth credential
credential <- OAuthFactory$new(consumerKey='2eqBXiNahfEsBNK9Hgyppdy4O',
                               consumerSecret='XA3xnJUkg4gFQA6Uiq9iTJwRMFddrKCa3fxuqVF5ONJNCFWdqf',
                               requestURL='https://api.twitter.com/oauth/request_token',
                               accessURL='https://api.twitter.com/oauth/access_token',
                               authURL='https://api.twitter.com/oauth/authorize')

# authentication process: download the CA bundle and complete the handshake
options(RCurlOptions = list(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))
download.file(url="http://curl.haxx.se/ca/cacert.pem", destfile="cacert.pem")
credential$handshake(cainfo="cacert.pem")

# stream up to 1000 English tweets matching "lol" for 10 seconds into a
# JSON file, then parse it into a data frame
filterStream( file.name="tweets_test.json",
              track="lol", tweets=1000, oauth=credential, timeout=10, lang='en' )
tweet_df <- parseTweets(tweets='tweets_test.json')
summary(tweet_df)

# using the Twitter dataframe
tweet_df$created_at
tweet_df$text
plot(tweet_df$friends_count, tweet_df$followers_count) #plots scatterplot
cor(tweet_df$friends_count, tweet_df$followers_count) #returns the correlation coefficient

# NOTE(review): the api_secret and token strings below begin with a space --
# likely a copy/paste error; confirm the credentials before use.
api_key <- "2eqBXiNahfEsBNK9Hgyppdy4O" # From dev.twitter.com
api_secret <- " XA3xnJUkg4gFQA6Uiq9iTJwRMFddrKCa3fxuqVF5ONJNCFWdqf" # From dev.twitter.com
token <- " 26043650-gKRGypzAJbx5mq4O5bxEzC9VDDxnBOzaAvqLF5FQJ" # From dev.twitter.com
token_secret <- "FoSGi6Ctf6TIIWfemRPIQpcMzGHxuJxL6BSNDBsuZuFFY" # From dev.twitter.com

# Create Twitter Connection
setup_twitter_oauth(api_key, api_secret, token, token_secret)
install.packages("base64enc")
check_twitter_oauth()

# Run Twitter Search. Format is searchTwitter("Search Terms", n=100, lang="en", geocode="lat,lng", also accepts since and until).
tweets <- searchTwitter("Obamacare OR ACA OR 'Affordable Care Act' OR #ACA", n=100, lang="en", since="2014-08-20")
|
9cd952119b910974d4d187827a1636aefdb1f2cf
|
4177b2bf0ec555c67d8da4dfa6f42cacd508fe92
|
/man/plot_trips_per_person.Rd
|
236c76d0d2ba19621d6bb1970b3805e883dcbef9
|
[] |
no_license
|
transportfoundry/tfplotr
|
8827cb9a6e84b5d8c44f3ed9d6651f0a2b9e6722
|
1f0dfc4119899a097d50918635af5bde797fec38
|
refs/heads/master
| 2021-03-24T12:36:58.926686
| 2017-05-31T16:53:11
| 2017-05-31T16:53:11
| 66,585,106
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 377
|
rd
|
plot_trips_per_person.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/histogram.R
\name{plot_trips_per_person}
\alias{plot_trips_per_person}
\title{Plot histogram of trips per person.}
\usage{
plot_trips_per_person(tbl)
}
\arguments{
\item{tbl}{A tbl of data containing at least the columns required to compute trips per person.}
}
\value{
A plot.
}
\description{
Plot histogram of trips per person.
}
|
3b98fd1a23e34ac46221a1cf77aec0816a075916
|
12a9bea8cfff9e5dcd44651102a5e0adf477164a
|
/tests/vadim.R
|
9b9ee481e5def1dc29f8de6da2499e77db5ca066
|
[] |
no_license
|
duncantl/RLLVMCompile
|
2b98a04f1f7e71f973a281b40457f5730e38f284
|
7fad5bd394a6f74ace0f6053a5d08e4f15cf3a1f
|
refs/heads/master
| 2021-01-19T01:42:02.316459
| 2017-03-07T00:49:31
| 2017-03-07T00:49:31
| 3,894,344
| 32
| 3
| null | 2015-03-03T13:27:54
| 2012-04-01T18:04:28
|
R
|
UTF-8
|
R
| false
| false
| 589
|
r
|
vadim.R
|
# Benchmark sketch: compile a simple propagate-forward loop with
# RLLVMCompile and run it through the LLVM execution engine.
library(RLLVMCompile)

# NOTE(review): this one-line f1 is immediately shadowed by the multi-line
# definition below; presumably kept for illustration.
f1 <- function(x, n) { for (i in 2:n) x[i] = x[i-1] } # 1L

n = 1e6; iA = seq(2,n); x = double(n);

# Copies each element forward from its predecessor, so every slot ends up
# equal to x[1].
f1 <- function(x, n) {
  for (i in 2:n)
     x[i] = x[i-1] # 1L
}

# Compile to native code: void f1(double*, int32), then execute via LLVM.
f1c = compileFunction(f1, VoidType, list(DoublePtrType, Int32Type))
ee = ExecutionEngine(f1c)
.llvm(f1c, x, n, .ee = ee)

# Disabled benchmark block: compares LLVM-compiled, byte-compiled
# (compiler::cmpfun) and plain-R timings. Enable by hand when needed.
if(FALSE) {
tm.ll = system.time(replicate(10, .llvm(f1c, x, n, .ee = ee)))

library(compiler)
f1a <- function(x, iA) for (i in iA) x[i] = x[i-1]
f1cmp = cmpfun(f1a)
tm.bc = system.time(replicate(10, f1cmp(x, iA)) )
tm.r = system.time(f1a(x, iA))
}
|
aa7600b0afc06a13e9a9568ad00b5f2035b7e3cc
|
a588dd1a34555dd71c898c82fbc7016dcc9cbdb3
|
/LearningCurve/R/fetchAllDataFromServer.R
|
a1bc4d731a8f441bf1e8a00e9a98913ac8369b70
|
[] |
no_license
|
NEONKID/StudyProtocolSandbox
|
5e9b0d66d88a610a3c5cacb6809c900a36bc35c3
|
c26bd337da32c6eca3e5179c78ac5c8f91675c0f
|
refs/heads/master
| 2020-03-23T14:02:11.887983
| 2018-10-19T05:33:13
| 2018-10-19T05:33:13
| 141,651,747
| 0
| 1
| null | 2018-07-20T02:10:06
| 2018-07-20T02:10:06
| null |
UTF-8
|
R
| false
| false
| 7,749
|
r
|
fetchAllDataFromServer.R
|
# @file fetchAllDataFromServer.R
#
# Copyright 2016 Observational Health Data Sciences and Informatics
#
# This file is part of LargeScalePrediction package
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Creates the cohorts needed for the abalysis and the gets the patient level prediction data from the server
#' @description
#' This function creates the 'at risk' and 'outcome' cohorts using files saved in the package to the
#' workDatabaseSchema database in the studyCohortTable table and then extracts the plpData
#' using these cohorts.
#'
#' @details
#' This function creates the 'at risk' and 'outcome' cohorts using files saved in the package to the
#' workDatabaseSchema database in the studyCohortTable table and then extracts the plpData
#' using these cohorts.
#'
#' @param connectionDetails An R object of type\cr\code{connectionDetails} created using the
#' function \code{createConnectionDetails} in the
#' \code{DatabaseConnector} package.
#' @param cdmDatabaseSchema The name of the database schema that contains the OMOP CDM
#' instance. Requires read permissions to this database. On SQL
#' Server, this should specifiy both the database and the schema,
#' so for example 'cdm_instance.dbo'.
#' @param oracleTempSchema For Oracle only: the name of the database schema where you want
#' all temporary tables to be managed. Requires create/insert
#' permissions to this database.
#' @param workDatabaseSchema The name of the database schema that is the location where the
#' cohort data used to define the study cohorts is available
#' @param studyCohortTable The tablename that contains the study cohorts.
#' @param workFolder The directory where the results will be saved to
#'
#' @return
#' Returns TRUE when finished
#' @export
fetchAllDataFromServer <- function(connectionDetails = connectionDetails,
                                   cdmDatabaseSchema = cdmDatabaseSchema,
                                   oracleTempSchema = oracleTempSchema,
                                   workDatabaseSchema = workDatabaseSchema,
                                   studyCohortTable = studyCohortTable,
                                   workFolder = workFolder,
                                   verbosity=INFO){
  # NOTE(review): the self-referential defaults (connectionDetails =
  # connectionDetails, ...) error if an argument is omitted, and `verbosity`
  # is never used in this body -- confirm both are intentional.
  #checking inputs:
  #TODO

  # NOTE(review): flog.seperator() is not a futile.logger export; presumably
  # a helper defined elsewhere in this package -- verify.
  flog.seperator()
  flog.info('Starting data extraction')
  flog.seperator()

  # Open the database connection; stop() on failure.
  flog.info('Connecting to database')
  conn <- ftry(DatabaseConnector::connect(connectionDetails),
               error = stop, finally = flog.info('Connected')
  )

  # Create the work cohort table if it does not already exist.
  flog.info('Checking work cohort table exists')
  exists <- studyCohortTable%in%DatabaseConnector::getTableNames(conn, workDatabaseSchema)
  if(!exists){
    flog.info('Creating work cohort table')
    sql <- "create table @target_database_schema.@target_cohort_table(cohort_definition_id bigint, subject_id bigint, cohort_start_date datetime, cohort_end_date datetime)"
    sql <- SqlRender::translateSql(sql, targetDialect = connectionDetails$dbms)$sql
    sql <- SqlRender::renderSql(sql,
                                target_database_schema=workDatabaseSchema,
                                target_cohort_table=studyCohortTable)$sql
    ftry(DatabaseConnector::executeSql(conn,sql),
         error = stop, finally = flog.info('Cohort table created'))
  }

  # Insert the 'at risk' cohort (fixed id 109) from the packaged SQL file.
  flog.info('Inserting risk cohort into cohort table')
  target_cohort_id <- 109
  sql <- SqlRender::loadRenderTranslateSql('t2dm_narrow.sql',
                                           "LearningCurve",
                                           dbms = connectionDetails$dbms,
                                           oracleTempSchema = oracleTempSchema,
                                           cdm_database_schema = cdmDatabaseSchema,
                                           target_database_schema = workDatabaseSchema,
                                           target_cohort_table = studyCohortTable,
                                           target_cohort_id = target_cohort_id)
  DatabaseConnector::executeSql(conn, sql, progressBar = TRUE, reportOverallTime = FALSE)

  # Insert every outcome cohort listed in the packaged CSV (one SQL file
  # per outcome, named by the second CSV column).
  outcomes <- system.file("settings", "OutcomesOfInterest.csv", package = "LearningCurve")
  outcomes <- read.csv(outcomes)
  nrOutcomes <- nrow(outcomes)
  flog.info(paste0('Inserting ',nrOutcomes,' outcomes into cohort table'))
  for(i in 1:nrOutcomes){
    flog.info(paste0('Inserting ', outcomes$name[i],' (',i,'/',nrOutcomes,')'))
    sql <- SqlRender::loadRenderTranslateSql(paste0(outcomes[i,2],'.sql'),
                                             "LearningCurve",
                                             dbms = connectionDetails$dbms,
                                             oracleTempSchema = oracleTempSchema,
                                             cdm_database_schema = cdmDatabaseSchema,
                                             target_database_schema = workDatabaseSchema,
                                             target_cohort_table = studyCohortTable,
                                             target_cohort_id = outcomes[i,1])
    DatabaseConnector::executeSql(conn, sql, progressBar = TRUE, reportOverallTime = FALSE)
  }

  # Source the covariate settings shipped with the package; this is expected
  # to define `covSettings` used below.
  pathToSettings <- system.file("settings", "covariateSettings.R", package = "LearningCurve")
  source(pathToSettings)

  # Extract the patient-level prediction data using the cohorts created above.
  flog.info('Extracting plpData')
  plpData <- PatientLevelPrediction::getPlpData(connectionDetails = connectionDetails,
                                                cdmDatabaseSchema = cdmDatabaseSchema,
                                                oracleTempSchema = oracleTempSchema,
                                                cohortId = target_cohort_id, #need to create this
                                                outcomeIds = outcomes[,1],
                                                cohortDatabaseSchema = workDatabaseSchema,
                                                cohortTable = studyCohortTable,
                                                outcomeDatabaseSchema = workDatabaseSchema,
                                                outcomeTable = studyCohortTable,
                                                cdmVersion = "5",
                                                washoutPeriod = 365,
                                                covariateSettings = covSettings)

  # Persist the extracted data under workFolder/data.
  flog.info('Saving plpData')
  if(!dir.exists(file.path(workFolder,'data'))){dir.create(file.path(workFolder,'data'))}
  PatientLevelPrediction::savePlpData(plpData, file=file.path(workFolder,'data'))
  flog.info('Done.')
  return(TRUE)
}
|
fdfb8c179a210c8a9fca8046ec571e9cb4f4666b
|
981cbaf799599f6d23bf79cdeb4ef72a8f3eb8a5
|
/script/1_read.R
|
e73d74af3241fa58a5a8eea94818e64b01ab1839
|
[] |
no_license
|
noahhhhhh/Santander_Customer_Satisfaction
|
51249cdc53ef6fcf545cd0e069e3b5e3458857af
|
2cce8e82ab12659445818f42316cdd8e7ae9d8b6
|
refs/heads/master
| 2021-01-17T17:14:28.761063
| 2016-05-10T02:38:32
| 2016-05-10T02:38:32
| 54,017,892
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,981
|
r
|
1_read.R
|
# 1_read.R -- read Santander train/test data, stack them, and save as RData.
# NOTE(review): setwd()/absolute paths are fragile in scripts; kept here to
# preserve the project's existing workflow.
setwd("/Volumes/Data Science/Google Drive/data_science_competition/kaggle/Santander_Customer_Satisfaction/")
# BUG FIX: original read `m(list = ls())` -- `m` is undefined; `rm` intended.
rm(list = ls()); gc();
library(data.table)  # library() errors loudly if the package is missing
library(bit64)       # (require() would only warn)
#######################################################################################
## 1.0 read ###########################################################################
#######################################################################################
dt.train.raw <- fread("../data/Santander_Customer_Satisfaction/train.csv", integer64 = "numeric")
dt.test.raw <- fread("../data/Santander_Customer_Satisfaction/test.csv", integer64 = "numeric")
dim(dt.train.raw); dim(dt.test.raw)
# [1] 76020   371
# [1] 75818   370  (test lacks the TARGET column)
## check the class balance of TARGET in the training data
table(dt.train.raw$TARGET)
# 0: 73012, 1: 3008 -- more than 20:1 imbalance
#######################################################################################
## 2.0 combine ########################################################################
#######################################################################################
## add a sentinel TARGET = -1 to the test rows so both tables can be stacked
dt.test.raw[, TARGET := -1]
dim(dt.train.raw); dim(dt.test.raw)
## align the test columns with the training column order
dt.test.raw <- dt.test.raw[, names(dt.train.raw), with = FALSE]
## sanity check: column names must now be identical
identical(names(dt.train.raw), names(dt.test.raw))
# [1] TRUE
## combine train and test into one table
dt.all <- rbind(dt.train.raw, dt.test.raw)
dim(dt.all)
# [1] 151838    371
## sanity check: all test rows survived the bind
dim(dt.all[dt.all$TARGET == -1])
dim(dt.test.raw)
#######################################################################################
## save ###############################################################################
#######################################################################################
save(dt.all, file = "../data/Santander_Customer_Satisfaction/RData/dt_all.RData")
|
e3a6d77157b11c454696b9fa6072f36dbd5a7110
|
cefe97a2c584fd093cae95e9e34d3e2950155360
|
/k_medians.R
|
272c48c46b29bd06a00040124580e1930b4a8e57
|
[] |
no_license
|
Vaspann/R-Guides-Clustering-
|
d8942fcdc009d6ae4741afbc09c1b7425d34cf2f
|
c6d8815cf4bc297aab124c15c0aa652b75707307
|
refs/heads/main
| 2023-08-16T06:42:55.094484
| 2021-10-17T11:35:06
| 2021-10-17T11:35:06
| 373,459,308
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,326
|
r
|
k_medians.R
|
# k-medoids computes cluster centres as medoids: actual observations that
# minimise total dissimilarity within a cluster (unlike the k-means mean).
# In practice, if there are no extreme outliers in the dataset then k-means
# and k-medoids will produce similar results.

# Load the packages
library(kohonen)
library(factoextra)
library(tidyverse)
library(cluster)

# For this example we will use the wine dataset built under the kohonen package
# The wine dataset contains the results of a chemical analysis of wines grown
# in a specific area of Italy: 177 rows, thirteen columns; the object
# `vintages` contains the class labels.

# Load the data
data("wines")
wines <- as.data.frame(wines)
set.seed(123)

# Make sure variables are continuous
str(wines)
summary(wines)

# plot for visuals and relationships
plot(wines)

# scale to give every variable a mean of 0 and sd of 1
scaled_wine <- scale(wines)
summary(scaled_wine)
head(scaled_wine)

# to perform k-medoids clustering in R we can use the pam() function,
# which stands for "Partitioning Around Medoids"

# how to choose k (the number of clusters)

# 1. Number of Clusters vs. the Total Within Sum of Squares
# first, we'll use the fviz_nbclust() function to create a plot of the
# number of clusters vs. the total within sum of squares:
fviz_nbclust(scaled_wine, pam, method = "wss", linecolor = "red")

# the total within sum of squares will typically always decrease
# as we increase the number of clusters, so when we create this
# type of plot we look for an "elbow" where the sum of squares begins
# to flatten.
# for this plot the elbow occurs at k = 3;
# choosing k beyond that will likely result in overfitting

# 2. Number of Clusters vs. Gap Statistic
# another way to determine the optimal number of clusters is to use a metric
# known as the gap statistic, which compares the total intra-cluster variation
# for different values of k with their expected values for a distribution with
# no clustering.
# we can calculate the gap statistic for each number of clusters using clusGap
Gap_stat <- clusGap(scaled_wine, FUN = pam, K.max = 10, B = 50)

# plot of clusters vs. gap statistic using fviz_gap_stat
fviz_gap_stat(Gap_stat)

# again this plot confirms that k = 3 is the optimal number of clusters,
# with the highest gap statistic

# now we can perform k-medoids clustering with the optimal k
set.seed(123)
model_k_medians <- pam(scaled_wine, k = 3)
model_k_medians

# note that the 3 cluster medoids are actual observations in the dataset

# we can visualize the clusters on a scatterplot that displays the first two
# principal components on the axes using fviz_cluster

# plot results of the k-medoids model
fviz_cluster(model_k_medians, data = scaled_wine)

# since we already know there are 3 vintages, k = 3 was expected.
# However, if we didn't know k then the best options would be to plot:
## 1. Number of Clusters vs. the Total Within Sum of Squares
## 2. Number of Clusters vs. Gap Statistic
# and then decide on the k value

# compare our clustering against the true vintage labels
table(model_k_medians$clustering, vintages)

# there is significant overlap between Barolo and Grignolino vintages due to
# similar measurements.
# in addition it is always better to have more data to feed your algorithm,
# as more data points will show distinct clusters if there are any and make
# overlapping less significant
|
9e70a7e9c7fa6f018adb4a5485a243b3a02cd969
|
e19b396f0dc7c8e8b489a02e650f4fbee6606d5e
|
/static/functionality/old_ashtree/ashtree_stage02.R
|
df5d297b3f8eacdf6299c35b3d5fce62391fdf05
|
[] |
no_license
|
djnavarro/robust-tools
|
0886db364f7048906d0513091e406b5d369ea3a1
|
e3c9f445bd426f687022f928879adfcda24af958
|
refs/heads/master
| 2020-12-21T00:01:27.017012
| 2020-05-30T08:03:12
| 2020-05-30T08:03:12
| 236,247,979
| 24
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 573
|
r
|
ashtree_stage02.R
|
library(tidyverse)
# Convert an angle from degrees to radians: a full circle of 360 degrees
# corresponds to 2 * pi radians.
radians <- function(degree) {
  degree / 360 * (2 * pi)
}
# STAGE 2: talk about the structure of a function, the idea of internal
# variables and the return values for a function
# Create the initial "sapling": a single unit-length vertical segment rooted
# at the origin, returned as a one-row tibble of segment coordinates.
grow_sapling <- function() {
  trunk <- tibble(
    old_x = 0,  # sapling grows from the origin
    old_y = 0,  # sapling grows from the origin
    new_x = 0,  # sapling doesn't grow horizontally
    new_y = 1,  # sapling does grow vertically
    angle = 90, # angle from horizontal is 90 degrees
    scale = 1,  # length of the sapling is 1
  )
  trunk
}
|
67f1c98a4c468a56bd2be210346919660bab8eeb
|
67803b1f0b20b8b63d0117a3b85f6c7832c9b7fb
|
/R/Main.R
|
0c7ceca63fc3c2e6e1d4ec1a8d6eed4963ec454c
|
[
"Apache-2.0"
] |
permissive
|
vperaEMC/RanitidineStudy
|
48fb9e0941b95e2f5413534e3f68ec7480708d86
|
f55fd2620bbdadfe169edac12a3e7b64ec04c5b3
|
refs/heads/master
| 2023-02-16T18:08:59.683403
| 2021-01-16T09:58:25
| 2021-01-16T09:58:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,837
|
r
|
Main.R
|
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of DrugUtilization
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Execute the Study
#'
#' @details
#' This function executes the DrugUtilization Study. The \code{createCohorts}, \code{runAnalyses},
#' arguments are intended to be used to run parts of the full study at a time, but none of the parts
#' are considered to be optional.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in
#' the DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to
#' have write priviliges in this schema. Note that for SQL Server, this
#' should include both the database and schema name, for example
#' 'cdm_data.dbo'.
#' @param cohortTable The name of the table that will be created in the work database
#' schema. This table will hold the exposure and outcome cohorts used in
#' this study.
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#' priviliges for storing temporary tables.
#' @param outputFolder Name of local folder to place results; make sure to use forward
#' slashes (/). Do not use a folder on a network drive since this greatly
#' impacts performance.
#' @param databaseId A short string for identifying the database (e.g. 'Synpuf').
#' @param databaseName The full name of the database (e.g. 'Medicare Claims Synthetic Public
#' Use Files (SynPUFs)').
#' @param databaseDescription A short description (several sentences) of the database.
#' @param createCohorts Create the cohortTable table with the exposure and outcome cohorts?
#' @param runAnalyses Perform the cohort method analyses?
#' @param createTables Generate all the result tables?
#' @param maxCores How many parallel cores should be used? If more cores are made
#' available this can speed up the analyses.
#' @param minCellCount The minimum number of subjects contributing to a count before it can
#' be included in packaged results.
#'
#' @examples
#' \dontrun{
#' connectionDetails <- createConnectionDetails(dbms = "postgresql",
#' user = "joe",
#' password = "secret",
#' server = "myserver")
#'
#' execute(connectionDetails,
#' cdmDatabaseSchema = "cdm_data",
#' cohortDatabaseSchema = "study_results",
#' cohortTable = "cohort",
#' oracleTempSchema = NULL,
#' outputFolder = "c:/temp/study_results",
#' maxCores = 4)
#' }
#'
#' @export
execute <- function(connection = NULL,
                    connectionDetails,
                    cdmDatabaseSchema,
                    cohortDatabaseSchema = cdmDatabaseSchema,
                    cohortTable = "cohort",
                    oracleTempSchema = cohortDatabaseSchema,
                    outputFolder,
                    databaseId = "Unknown",
                    databaseName = "Unknown",
                    createCohorts = TRUE,
                    runAnalyses = TRUE,
                    createTables = TRUE,
                    exportResults = TRUE,
                    addIndex = FALSE,
                    selfManageTempTables = TRUE,
                    vocabularyDatabaseSchema = cdmDatabaseSchema,
                    cdmDrugExposureSchema = cdmDatabaseSchema,
                    drugExposureTable = "drug_exposure",
                    cdmObservationPeriodSchema = cdmDatabaseSchema,
                    observationPeriodTable = "observation_period",
                    cdmPersonSchema = cdmDatabaseSchema,
                    personTable = "person",
                    minCellCount = 5,
                    debug = FALSE,
                    debugSqlFile = "") {
  # Make sure the results folder and the ff temp directory exist up front.
  if (!file.exists(outputFolder))
    dir.create(outputFolder, recursive = TRUE)
  if (!is.null(getOption("fftempdir")) && !file.exists(getOption("fftempdir"))) {
    warning("fftempdir '", getOption("fftempdir"), "' not found. Attempting to create folder")
    dir.create(getOption("fftempdir"), recursive = TRUE)
  }
  # Open a connection only when the caller did not supply one; in that case we
  # own it and must close it when this function exits.
  if (is.null(connection)) {
    connection <- DatabaseConnector::connect(connectionDetails)
    on.exit(DatabaseConnector::disconnect(connection), add = TRUE)
  }
  packageName <- "DrugUtilization"
  # Load the cohort definitions shipped with the package.
  pathToCsv <- system.file("settings", "CohortsToCreate.csv", package = packageName)
  cohorts <- readr::read_csv(pathToCsv, col_types = readr::cols())
  cohorts$atlasId <- NULL
  # Cohort ids below 10 are the exposure cohorts of interest.
  cohortsOfInterest <- cohorts[cohorts$cohortId < 10, ]
  if ("atlasName" %in% colnames(cohorts)) {
    cohorts <- dplyr::rename(cohorts, cohortName = "name", cohortFullName = "atlasName")
  } else {
    cohorts <- dplyr::rename(cohorts, cohortName = "name", cohortFullName = "fullName")
  }
  writeToCsv(cohorts, file.path(outputFolder, "cohort.csv"))
  # Attach each cohort's SQL and JSON definition for provenance.
  getSql <- function(name) {
    pathToSql <- system.file("sql", "sql_server", paste0(name, ".sql"), package = packageName)
    sql <- readChar(pathToSql, file.info(pathToSql)$size)
    return(sql)
  }
  cohorts$sql <- vapply(cohorts$cohortName, getSql, character(1))
  getJson <- function(name) {
    pathToJson <- system.file("cohorts", paste0(name, ".json"), package = packageName)
    json <- readChar(pathToJson, file.info(pathToJson)$size)
    return(json)
  }
  cohorts$json <- vapply(cohorts$cohortName, getJson, character(1))
  ParallelLogger::addDefaultFileLogger(file.path(outputFolder, "log.txt"))
  # add = TRUE is essential: without it this call replaces the on.exit()
  # handler registered above and the database connection is never closed.
  on.exit(ParallelLogger::unregisterLogger("DEFAULT"), add = TRUE)
  if (createCohorts) {
    ParallelLogger::logInfo("Creating cohorts")
    createCohorts(connection = connection,
                  connectionDetails = connectionDetails,
                  cdmDatabaseSchema = cdmDatabaseSchema,
                  cohortDatabaseSchema = cohortDatabaseSchema,
                  cohortTable = cohortTable,
                  addIndex = addIndex,
                  oracleTempSchema = oracleTempSchema,
                  outputFolder = outputFolder)
  }
  if (runAnalyses) {
    ParallelLogger::logInfo("Running Drug Utilization analyses")
    dusAnalysis(connection = connection,
                connectionDetails = connectionDetails,
                cdmDatabaseSchema = cdmDatabaseSchema,
                cohortDatabaseSchema = cohortDatabaseSchema,
                cohortTable = cohortTable,
                oracleTempSchema = oracleTempSchema,
                debug = debug,
                outputFolder = outputFolder,
                debugSqlFile = debugSqlFile,
                databaseId = databaseId,
                databaseName = databaseName,
                addIndex = addIndex,
                selfManageTempTables = selfManageTempTables,
                vocabularyDatabaseSchema,
                cdmDrugExposureSchema,
                drugExposureTable,
                cdmObservationPeriodSchema,
                observationPeriodTable,
                cdmPersonSchema,
                personTable)
  }
  if (createTables) {
    ParallelLogger::logInfo("Creating All Tables")
    createAllTables(connection = connection,
                    connectionDetails = connectionDetails,
                    cdmDatabaseSchema = cdmDatabaseSchema,
                    cohortDatabaseSchema = cohortDatabaseSchema,
                    oracleTempSchema = oracleTempSchema,
                    debug = debug,
                    outputFolder = outputFolder,
                    debugSqlFile = debugSqlFile,
                    minCellCount = minCellCount,
                    databaseId,
                    databaseName)
    ParallelLogger::logInfo("Gathering prevalence proportion")
    # Helper: fetch the prevalence/incidence proportion for one cohort row and
    # tag the result with the cohort id (when any rows came back).
    getProportion <- function(row, proportionType) {
      data <- getProportionByType(connection = connection,
                                  connectionDetails = connectionDetails,
                                  cdmDatabaseSchema = cdmDatabaseSchema,
                                  cohortDatabaseSchema = cohortDatabaseSchema,
                                  proportionType = proportionType,
                                  ingredient = row$cohortId)
      if (nrow(data) > 0) {
        data$cohortId <- row$cohortId
      }
      return(data)
    }
    prevalenceData <- lapply(split(cohortsOfInterest, cohortsOfInterest$cohortId), getProportion, proportionType = "prevalence")
    prevalenceData <- do.call(rbind, prevalenceData)
    if (nrow(prevalenceData) > 0) {
      prevalenceData$databaseId <- databaseId
      # Censor counts below the minimum cell size before packaging results.
      prevalenceData <- enforceMinCellValue(prevalenceData, "cohortCount", minCellCount)
      prevalenceData <- enforceMinCellValue(prevalenceData, "proportion", minCellCount/prevalenceData$numPersons)
    }
    writeToCsv(prevalenceData, file.path(outputFolder, "prevalence_proportion.csv"))
    # Incidence
    ParallelLogger::logInfo("Gathering incidence proportion")
    incidenceData <- lapply(split(cohortsOfInterest, cohortsOfInterest$cohortId), getProportion, proportionType = "incidence")
    incidenceData <- do.call(rbind, incidenceData)
    if (nrow(incidenceData) > 0) {
      incidenceData$databaseId <- databaseId
      incidenceData <- enforceMinCellValue(incidenceData, "cohortCount", minCellCount)
      incidenceData <- enforceMinCellValue(incidenceData, "proportion", minCellCount/incidenceData$numPersons)
    }
    writeToCsv(incidenceData, file.path(outputFolder, "incidence_proportion.csv"))
  }
  if (exportResults) {
    # NOTE(review): the logical flag `exportResults` shadows the package
    # function of the same name; R resolves the call below to the function.
    exportResults(outputFolder, databaseId)
  }
  invisible(NULL)
}
|
e02f65a3796642690d195f2fd0840609d7b572e1
|
75ad9a7a0e9ab4f831151cb1902ad4614616995a
|
/source/DataCreators/KeeferDataExtender.R
|
3478e249a56d299283d104e58030adfe8146fe72
|
[] |
no_license
|
christophergandrud/CrisisDataIssues
|
e1eb49f187c0478c5cc5304085b751c06f2b1155
|
98d8b9e15d4987b4fb85ef0195e40ad088a48efd
|
refs/heads/master
| 2021-01-25T07:28:13.323633
| 2014-06-26T13:29:16
| 2014-06-26T13:29:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,396
|
r
|
KeeferDataExtender.R
|
################
# Keefer (2007) data extender
# Christopher Gandrud
# 14 March 2014
###############
# Load packages
library(DataCombine)
library(countrycode)
library(psData)
library(forecast)
library(plyr)
library(WDI)
library(foreign)
library(repmis)
# Functions for Keefer-style rolling 3-year averages
# Backward-looking 3-period rolling mean: shift the series back two steps
# (DataCombine::shift), then take an uncentred order-3 moving average
# (forecast::ma).
rollmean3r <- function(x) {
  ma(shift(x, -2, reminder = FALSE), 3, centre = FALSE)
}
# Forward-looking 3-period rolling mean: shift the series forward two steps
# (DataCombine::shift), then take an uncentred order-3 moving average
# (forecast::ma).
rollmean3f <- function(x) {
  ma(shift(x, 2, reminder = FALSE), 3, centre = FALSE)
}
# Two-sided Keefer smoother: the average of the backward- and forward-looking
# 3-period rolling means of x.
rollmean33 <- function(x){
  xR <- rollmean3r(x)
  xF <- rollmean3f(x)
  # Return the value directly. The original ended with `Comb <- (xR + xF)/2`,
  # which returns the result invisibly and allocates a needless binding.
  (xR + xF) / 2
}
#### Fiscal transfers data (both Laeven and Valencia (2012) and Keefer (2007))
# NOTE(review): absolute local path -- this script only runs on the author's
# machine as written.
Fiscal <- read.csv('/git_repositories/CrisisDataIssues/data/KefferFiscal.csv',
                   stringsAsFactors = FALSE)
Fiscal <- VarDrop(Fiscal, c('country', 'Notes'))
# Treat missing Honohan "ongoing" flags as not-ongoing.
Fiscal$HonohanCrisisOngoing[is.na(Fiscal$HonohanCrisisOngoing)] <- 0
# Include ongoing crisis variable (from Laeven & Valencia 2012)
Ongoing <- read.csv('/git_repositories/amcData/BaseFiles/LaeVal2012/LVCrisisResponseFull.csv',
                    stringsAsFactors = FALSE )
Ongoing <- CountryID(Ongoing, countryVar = 'Country')
# Dummy: 1 when LV code the crisis end year as 'ongoing'.
Ongoing$ongoing <- 0
Ongoing$ongoing[Ongoing$End == 'ongoing'] <- 1
Ongoing <- Ongoing[, c('iso2c', 'Start', 'ongoing')]
# Rename Start -> year so it can key the merge below.
names(Ongoing) <- c('iso2c', 'year', 'ongoingLV')
Fiscal <- dMerge(Fiscal, Ongoing, Var = c('iso2c', 'year'), all = TRUE)
Fiscal <- subset(Fiscal, iso2c != '') # Drop Czechoslovakia
# Polity IV data (democracy score polity2), keyed on iso2c/year
PolityData <- PolityGet(vars = c('polity2'), OutCountryID = 'iso2c', duplicates = 'drop')
PolityData <- DropNA(PolityData, c('polity2'))
PolityData <- VarDrop(PolityData, 'country')
#### Database of Political Institutions data
dpiVars <- c('eiec', 'checks', 'stabns', 'allhouse')
DpiData <- DpiGet(vars = dpiVars, OutCountryID = 'iso2c', duplicates = 'drop')
# DPI codes missing values as -999
DpiData[, dpiVars][DpiData[, dpiVars] == -999] <- NA
# DpiData <- DropNA(DpiData, c('eiec', 'checks'))
DpiData <- DpiData[order(DpiData$country, DpiData$year), ]
# Dichotomize electoral competitiveness (EIEC >= 6 counts as competitive)
DpiData$DiEiec <- 0
DpiData$DiEiec[DpiData$eiec >= 6] <- 1
# Create Keefer forward-and-backward (two-sided) 3-year averages per country
DpiData <- ddply(DpiData, .(country), transform, DiEiec33 = rollmean33(DiEiec))
DpiData <- ddply(DpiData, .(country), transform, Checks33 = rollmean33(checks))
# Create Keefer political stability backwards lag
DpiData <- ddply(DpiData, .(country), transform, stabnsLag3 = rollmean3r(stabns))
# Find residuals for lagged checks (modified from Keefer)
SubKeefer <- DropNA(DpiData, c('DiEiec33', 'Checks33'))
ResidKeefer <- lm(DiEiec33 ~ Checks33, data = SubKeefer)
SubKeefer$ChecksResiduals33 <- ResidKeefer$residuals
SubKeefer <- SubKeefer[, c('iso2c', 'year', 'ChecksResiduals33')]
# Create straight 3 year moving average lags and leads
## Lags
DpiData <- ddply(DpiData, .(country), transform, DiEiecLag3 = rollmean3r(DiEiec))
DpiData <- ddply(DpiData, .(country), transform, ChecksLag3 = rollmean3r(checks))
DpiData <- ddply(DpiData, .(country), transform, allhouseLag3 = rollmean3r(allhouse))
### Allhouse residuals 3 year lag
# (header corrected: this regression produces allhouseResidualsLag3)
SubLag <- DropNA(DpiData, c('DiEiecLag3', 'allhouseLag3'))
ResidLag <- lm(DiEiecLag3 ~ allhouseLag3, data = SubLag)
SubLag$allhouseResidualsLag3 <- ResidLag$residuals
SubLag <- SubLag[, c('iso2c', 'year', 'allhouseResidualsLag3')]
### Checks residuals 3 year lag
# (header corrected: this regression produces ChecksResidualsLag3;
# note ResidLag is deliberately reused/overwritten here)
SubLagAll <- DropNA(DpiData, c('DiEiecLag3', 'ChecksLag3'))
ResidLag <- lm(DiEiecLag3 ~ ChecksLag3, data = SubLagAll)
SubLagAll$ChecksResidualsLag3 <- ResidLag$residuals
SubLagAll <- SubLagAll[, c('iso2c', 'year', 'ChecksResidualsLag3')]
## Leads
DpiData <- ddply(DpiData, .(country), transform, DiEiecLead3 = rollmean3f(DiEiec))
DpiData <- ddply(DpiData, .(country), transform, ChecksLead3 = rollmean3f(checks))
DpiData <- ddply(DpiData, .(country), transform, stabnsLead3 = rollmean3f(stabns))
DpiData <- ddply(DpiData, .(country), transform, allhouseLead3 = rollmean3f(allhouse))
### Checks residuals 3 year lead
SubLead <- DropNA(DpiData, c('DiEiecLead3', 'ChecksLead3'))
ResidLead <- lm(DiEiecLead3 ~ ChecksLead3, data = SubLead)
SubLead$ChecksResidualsLead3 <- ResidLead$residuals
SubLead <- SubLead[, c('iso2c', 'year', 'ChecksResidualsLead3')]
### Allhouse residuals 3 year lead
SubLeadAll <- DropNA(DpiData, c('DiEiecLead3', 'allhouseLead3'))
ResidLeadAll <- lm(DiEiecLead3 ~ allhouseLead3, data = SubLeadAll)
SubLeadAll$allhouseResidualsLead3 <- ResidLeadAll$residuals
SubLeadAll <- SubLeadAll[, c('iso2c', 'year', 'allhouseResidualsLead3')]
### Create 3 year leads for time periods beginning 2 years in the future
DpiData <- slideMA(DpiData, Var = 'DiEiec', GroupVar = 'country', periodBound = 5, offset = 2)
DpiData <- slideMA(DpiData, Var = 'checks', GroupVar = 'country', periodBound = 5, offset = 2)
DpiData <- slideMA(DpiData, Var = 'stabns', GroupVar = 'country', periodBound = 5, offset = 2)
### Checks residuals leads for time periods beginning 2 years in the future
SubLead3 <- DropNA(DpiData, c('DiEiecMA5_2', 'checksMA5_2'))
ResidLead3 <- lm(DiEiecMA5_2 ~ checksMA5_2, data = SubLead3)
SubLead3$ChecksResidualsLead5_2 <- ResidLead3$residuals
SubLead3 <- SubLead3[, c('iso2c', 'year', 'ChecksResidualsLead5_2')]
##### Winset and selectorate data ####
Win <- WinsetCreator()
Win <- VarDrop(Win, 'country')
#### IMF program
IMF <- IMF_WBGet(sheets = c('IMF SBA 5', 'IMF EFF 5', 'IMF SAF 5'))
IMF <- DropNA(IMF, 'IMF.SBA.5')
# Dummy: 1 when the country-year is under ANY of the three IMF programs
IMF$IMFProgramAny <- 0
IMF$IMFProgramAny[IMF$IMF.SBA.5 == 1 | IMF$IMF.EFF.5 == 1 | IMF$IMF.SAF.5 == 1] <- 1
IMF <- IMF[order(IMF$country, IMF$year), ]
# Spread the dummy 3 years backwards and forwards within each country
IMF <- SpreadDummy(IMF, Var = 'IMFProgramAny', GroupVar = 'country', spreadBy = -3, NewVar = 'IMFProgramLag3')
IMF <- SpreadDummy(IMF, Var = 'IMFProgramAny', GroupVar = 'country', spreadBy = 3, NewVar = 'IMFProgramLead3')
IMF <- VarDrop(IMF, 'country')
#### AMC (asset management company creation) ####
AMC <- read.csv(file = '/git_repositories/amcData/MainData/CleanedPartial/AMCFull.csv')
AMC <- AMC[, c('country', 'year', 'AMCAnyCreated')]
AMC <- CountryID(AMC, timeVar = 'year')
AMC <- AMC[order(AMC$iso2c, AMC$year), ]
AMC$AMCAnyCreated <- as.numeric(AMC$AMCAnyCreated)
# Spread the creation dummy 3 years backwards and forwards within country
AMC <- SpreadDummy(data = AMC, Var = 'AMCAnyCreated', GroupVar = 'iso2c', spreadBy = -3, NewVar = 'AMCAnyLag3')
AMC <- SpreadDummy(data = AMC, Var = 'AMCAnyCreated', GroupVar = 'iso2c', spreadBy = 3, NewVar = 'AMCAnyLead3')
AMC <- AMC[, c('iso2c', 'year', 'AMCAnyCreated', 'AMCAnyLag3', 'AMCAnyLead3')]
#### Kuncic institutional quality indicators ####
# Pulled live from a published Google spreadsheet via repmis::source_data
URL <- "https://docs.google.com/spreadsheet/pub?key=0AtSgiY60tn0_dEtYUVo4TWlFOU01dnRjTE1WZmFTUWc&single=true&gid=0&output=csv"
KunInstQual <- source_data(URL, sep = ",", header = TRUE)
# Convert World Bank codes to iso2c so the data can key the merges below
KunInstQual$iso2c <- countrycode(KunInstQual$wbcode,
                                 origin = 'wb', destination = 'iso2c')
KunInstQual <- KunInstQual[, c('iso2c', 'country', 'year', 'economic_abs',
                               'legal_abs', 'political_abs')]
KunInstQual <- VarDrop(KunInstQual, 'country')
KunInstQual <- DropNA(KunInstQual, 'iso2c')
#### Economic Data from the World Bank Development Indicators
Countries <- unique(DpiData$iso2c)
Wdi <- WDI(country = Countries,
           indicator = c('NY.GDP.PCAP.PP.KD', 'NY.GDP.PCAP.KD.ZG', 'BN.CAB.XOKA.GD.ZS', 'BM.GSR.GNFS.CD',
                         'BX.GSR.GNFS.CD', 'FI.RES.TOTL.DT.ZS', 'FI.RES.TOTL.CD', 'NY.GDP.MKTP.CD', 'BX.KLT.DINV.CD.WD'),
           start = 1970, end = 2012)
# Rename the WDI indicator columns to readable names (order must match the
# indicator vector above)
names(Wdi) <- c('iso2c', 'country', 'year', 'GDPperCapita', 'GDPChange', 'CurrentAccount',
                'Imports', 'Exports', 'Reserves', 'TotalReserves', 'TotalGDP', 'FDI')
Wdi <- Wdi[order(Wdi$country, Wdi$year), ]
## Create transformed variables
# Income
Wdi <- ddply(Wdi, .(country), transform, Income33 = rollmean33(GDPperCapita))
Wdi <- slideMA(Wdi, Var = 'GDPperCapita', GroupVar = 'country', periodBound = -3, NewVar = 'IncomeLag3')
Wdi <- slideMA(Wdi, Var = 'GDPperCapita', GroupVar = 'country', periodBound = 3, NewVar = 'IncomeLead3')
# Growth
Wdi <- ddply(Wdi, .(country), transform, Growth33 = rollmean33(GDPChange))
Wdi <- slideMA(Wdi, Var = 'GDPChange', GroupVar = 'country', periodBound = 3, NewVar = 'GrowthLead3')
# Total GDP
Wdi <- slideMA(Wdi, Var = 'TotalGDP', GroupVar = 'country', periodBound = 3, NewVar = 'GDPLead3')
# Current account, lagged one year
Wdi <- slide(Wdi, Var = 'CurrentAccount', GroupVar = 'country', NewVar = 'CurrentAccountLag1')
# Year-on-year change in the current account
Wdi$CurrentAccountMinus <- Wdi$CurrentAccount - Wdi$CurrentAccountLag1
# Change in terms of trade (exports/imports ratio, proportional change)
Wdi$Terms <- Wdi$Exports/Wdi$Imports
Wdi <- PercChange(Wdi, Var = 'Terms', GroupVar = 'country', NewVar = 'TermsChange', type = 'proportion')
# Total reserves
Wdi <- slideMA(Wdi, Var = 'TotalReserves', GroupVar = 'country', periodBound = 3, NewVar = 'TotalReservesLead3')
# FDI
Wdi <- slideMA(Wdi, Var = 'FDI', GroupVar = 'country', periodBound = 3, NewVar = 'FDILead3')
# Keep only the identifiers and the transformed series for merging
WdiSlim <- Wdi[, c('iso2c', 'year', 'GDPperCapita', 'Income33', 'IncomeLag3', 'IncomeLead3', 'Growth33', 'GrowthLead3',
                   'CurrentAccountLag1', 'CurrentAccountMinus', 'TotalGDP', 'GDPLead3',
                   'TermsChange', 'Reserves', 'TotalReserves', 'TotalReservesLead3', 'FDI', 'FDILead3')]
##### Combine data sets
# Left-join every covariate set onto the DPI panel, keyed on iso2c/year;
# the Fiscal merge uses all.y so the sample is the crisis observations.
Comb <- dMerge(DpiData, SubKeefer, Var = c('iso2c', 'year'), all.x = TRUE)
Comb <- dMerge(Comb, SubLag, Var = c('iso2c', 'year'), all.x = TRUE)
Comb <- dMerge(Comb, SubLagAll, Var = c('iso2c', 'year'), all.x = TRUE)
Comb <- dMerge(Comb, SubLead, Var = c('iso2c', 'year'), all.x = TRUE)
Comb <- dMerge(Comb, SubLead3, Var = c('iso2c', 'year'), all.x = TRUE)
Comb <- dMerge(Comb, SubLeadAll, Var = c('iso2c', 'year'), all.x = TRUE)
Comb <- dMerge(Comb, PolityData, Var = c('iso2c', 'year'), all.x = TRUE)
Comb <- dMerge(Comb, IMF, Var = c('iso2c', 'year'), all.x = TRUE)
Comb <- dMerge(Comb, AMC, Var = c('iso2c', 'year'), all.x = TRUE)
Comb <- dMerge(Comb, KunInstQual, Var = c('iso2c', 'year'), all.x = TRUE)
Comb <- dMerge(Comb, Win, Var = c('iso2c', 'year'), all.x = TRUE)
Comb <- dMerge(Comb, Fiscal, Var = c('iso2c', 'year'), all.y = TRUE)
Comb <- dMerge(Comb, WdiSlim, Var = c('iso2c', 'year'), all.x = TRUE)
Comb$country <- countrycode(Comb$iso2c, origin = 'iso2c', destination = 'country.name')
#### Revision: create fiscal-cost difference variables between the three sources
CombRevis = Comb
CombRevis$Diff_LVH <- (CombRevis$LV2012.Fiscal - CombRevis$Honohan2003.Fiscal)
CombRevis$Diff_LVC <- (CombRevis$LV2012.Fiscal - CombRevis$Caprio1996.Fiscal)
CombRevis$Diff_HC <- (CombRevis$Honohan2003.Fiscal - CombRevis$Caprio1996.Fiscal)
#### Create indicator for whether or not data was revised by Laeven and Valencia (2012)
CombRevis$Revision <- 0
CombRevis$Revision[(abs(CombRevis$Diff_LVH) > 0 & CombRevis$LV2012.Fiscal != CombRevis$Caprio1996.Fiscal)] <- 1
CombRevis$Revision[abs(CombRevis$Diff_LVC) > 0] <- 1
# NaVar adds Miss_* missingness indicators for the listed variables
CombRevis <- NaVar(CombRevis, c('LV2012.Fiscal', 'Honohan2003.Fiscal', 'Caprio1996.Fiscal'))
# Also count as revised: LV reports a cost that earlier sources missed
CombRevis$Revision[(CombRevis$Miss_LV2012.Fiscal == 0 & CombRevis$Miss_Honohan2003.Fiscal == 1 &
                      CombRevis$LV2012.Fiscal != CombRevis$Caprio1996.Fiscal)] <- 1
CombRevis$Revision[(CombRevis$Miss_LV2012.Fiscal == 0 & is.na(CombRevis$Caprio1996.Fiscal) & CombRevis$year <= 1996)] <- 1
CombRevis$Revision[(CombRevis$Miss_LV2012.Fiscal %in% 0 & CombRevis$Miss_Caprio1996.Fiscal %in% 1 &
                      CombRevis$year <= 1996)] <- 1
# Recode Philippines as no change, as the change was caused by a coding error
# in Honohan & Klingebiel (2003)
CombRevis$Revision[CombRevis$iso2c %in% 'PH' & CombRevis$year %in% 1983] <- 0
# Merge years assuming that LV (2012) has the correct start year
source('/git_repositories/CrisisDataIssues/source/DataCreators/RevisedRevision.R')
# Save to Stata format
write.dta(CombRevis, file = '/git_repositories/CrisisDataIssues/data/KeeferExtended.dta')
# -------------------------------------------------------------- #
##### Create Reinhart and Rogoff (2010) combination (Not Used in How They Spend It Version) #####
## Download RR crisis data
RR <- RRCrisisGet()
rr <- RR
write.csv(rr, file = '/git_repositories/CrisisDataIssues/data/ReinhartRogoffCrisis.csv', row.names = FALSE)
# Keep if independent
rr <- subset(rr, RR_Independence == 1)
# Keep if first year of crisis
# NOTE(review): slide(..., slideBy = 1) only CREATES a one-year lead of the
# banking-crisis dummy; no rows are filtered here -- confirm intent.
rr <- rr[order(rr$country, rr$year),]
rr <- slide(rr, Var = 'RR_BankingCrisis', GroupVar = 'country', slideBy = 1)
# Merge RR crises with the fiscal and Polity data
CombRR <- dMerge(rr, Fiscal, Var = c('iso2c', 'year'), all.x = TRUE)
CombRR <- dMerge(CombRR, PolityData, Var = c('iso2c', 'year'), all.x = TRUE)
write.csv(CombRR, file = '/git_repositories/CrisisDataIssues/data/ReinhartRogoffFiscalPolity.csv',
          row.names = FALSE)
#### Reinhart and Rogoff/Laeven and Valencia Start and Stop
# Load LV data
LVFull <- read.csv('/git_repositories/amcData/BaseFiles/LaeVal2012/LVCrisisResponseFull.csv',
                   stringsAsFactors = FALSE )
# Keep country, start, and stop years
LVSub <- LVFull[, 1:3]
# Replace 'ongoing' with 2011 so crisis spells can be expanded to yearly rows
LVSub$End[LVSub$End == 'ongoing'] <- '2011'
# Expand each Start/End spell into one row per crisis year
LVNew <- TimeFill(LVSub, GroupVar = 'Country', StartVar = 'Start', EndVar = 'End',
                  NewVar = 'LV_SystemCrisis', NewTimeVar = 'year')
LVNew$Country <- as.character(LVNew$Country)
LVNew <- CountryID(LVNew, countryVar = 'Country')
LVNew <- LVNew[, c('iso2c', 'year', 'LV_SystemCrisis')]
CombRRLV <- dMerge(rr, LVNew, Var = c('iso2c', 'year'), all = TRUE)
write.csv(CombRRLV, file = '/git_repositories/CrisisDataIssues/data/ReinhartRogoffLVCount.csv',
          row.names = FALSE)
|
0e8f994082eeb4ab1c38598786cd0a4a06617109
|
5072c3989bbe9c8cabb9e8ba0fc0bf7506406a31
|
/inst/minimal-osgi/main.R
|
4221c51a1038a3288f6316b2e6418aeec242e154
|
[] |
no_license
|
lawremi/rigb
|
5acb5cef299fe1ad672af0ee1a75d9037712057b
|
56dd814328a2add72e5b952daee85442d1390842
|
refs/heads/master
| 2020-12-25T17:56:22.025641
| 2011-10-17T17:16:20
| 2011-10-17T17:16:20
| 2,149,977
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 252
|
r
|
main.R
|
#!/usr/local/bin/R --vanilla --slave -f
# Minimal OSGi experiment: start an embedded Felix OSGi framework from R
# via rJava and try to reach a bundle's Activator singleton.
library(debug)
library(rJava)
# Start the JVM with the working directory, the Felix launcher jar and the
# REngine bindings on the classpath.
.jinit(classpath=c('.',
                   'felix.jar',
                   'REngine.jar'))
# Register this R session with the Java-side REngine so Java can call back.
.jengine(TRUE)
## This is NULL, even after starting the bundle in Felix.
# NOTE(review): a NULL here suggests the bundle's Activator class is loaded
# by Felix's bundle classloader, not the classloader rJava resolves J()
# against -- confirm before relying on this handle.
print(J("Activator")$getInstance())
|
d05bc87ef14c8f5a3ec8aa4d6db21c8874043729
|
7ac133f9871f201f7a956f3b239d8f0030907c06
|
/man/groundwater_district.Rd
|
95234efa22af780bfaeeaa3a36615b6d7524638c
|
[
"MIT"
] |
permissive
|
gopalpenny/anem
|
1029318ca01a6172b365ddb7d2181135d909d92c
|
f2ba63622e0e67a08423b20c5f09a34b6433f6d0
|
refs/heads/master
| 2021-07-09T21:43:07.502292
| 2020-12-16T15:25:36
| 2020-12-16T15:25:36
| 219,404,991
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 401
|
rd
|
groundwater_district.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anem_data.R
\docType{data}
\name{groundwater_district}
\alias{groundwater_district}
\title{Groundwater district anem-app scenario}
\format{A list object containing named items associated with an anem-app scenario}
\usage{
groundwater_district
}
\description{
An anem-app scenario with four pumping wells near a river.
}
\keyword{internal}
|
b392e05cf457c76950bf24ec9072ce0fae101795
|
2c4dbf42a157b8691ad66da48a34c98e92407d18
|
/R/08-create-radii-data.R
|
e70d8ac2c51bd5dca1f989f8e8936c943b81c835
|
[] |
no_license
|
timkiely/spatially-conscious-ml-model
|
05f829b8efb181fe4f0f1454427589a3443f0d1a
|
3a81a9ce61a48dd8d34aca427370968f9580c2bd
|
refs/heads/master
| 2021-10-10T12:19:08.269686
| 2019-01-10T16:39:12
| 2019-01-10T16:39:12
| 95,896,422
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,437
|
r
|
08-create-radii-data.R
|
# This function creates the RADII modeling data
# Create the RADII modeling data.
#
# Indexes every PLUTO lot's neighbours within 500 m, derives radii-based
# features from that index, and writes the resulting modeling data to
# `outfile`. Both expensive steps cache their results under data/aux data/.
#
# Args:
#   base_model_data: path to the BASE model data rds (input).
#   outfile:         path the RADII modeling data rds is written to.
#   run_radii:       if TRUE, (re)compute the radii index and features;
#                    if FALSE, load the cached results from disk.
create_radii_data <- function(base_model_data = "data/processing steps/p06_base_model_data.rds"
                              , outfile = "data/processing steps/p08_radii_model_data.rds"
                              , run_radii = FALSE) {
  message("## Creating RADII Modeling Data")
  # create radii index set --------------------------------------------------
  # we create an index of all PLUTO observations within 500 meters of every
  # other PLUTO observation. We later use this to create radii metrics
  message("Loading BASE model data...")
  pluto_model <- read_rds(base_model_data)
  if (isTRUE(run_radii)) {
    message("Creating radii comps...")
    source("R/helper/get-spatial-neighbor-points.R")
    radii_time <- Sys.time()
    # Use only the latest year, one row per lot; project to UTM 18N (32618)
    # so max_distance is in meters.
    radii_index <-
      pluto_model %>%
      filter(Year == max(unique(pluto_model$Year), na.rm = TRUE)) %>%
      distinct(bbl, .keep_all = TRUE) %>%
      st_as_sf(coords = c("lon","lat"), na.fail=FALSE, crs = 4326) %>%
      st_transform(crs = 32618) %>%
      get_spatial_neighbor_points(id_col = "bbl"
                                  , max_distance = 500
                                  , n_cuts = max(c(2, floor(sqrt( parallel::detectCores() ))-1))
                                  , allow_parralell = TRUE
                                  , num_clusters = parallel::detectCores()-2)
    radii_time_end <- Sys.time()
    message(" ...done. Total indexing time: ", round(radii_time_end-radii_time), " ",units(radii_time_end-radii_time))
    message("Writing radii index to disk...")
    write_rds(radii_index, "data/aux data/radii-index.rds")
    message(" ...done")
  } else {
    message("Bypassing radii index calculation, loading from disk...")
    # Error message fixed: it previously named the wrong file (radii-comps.rds).
    if(!file.exists("data/aux data/radii-index.rds")) stop("file data/aux data/radii-index.rds missing. Run create_radii_data(run_radii = TRUE) at least once to completion first")
    radii_index <- read_rds("data/aux data/radii-index.rds")
  }
  # create radii features from the index ------------------------------------
  message("Creating radii features...")
  if (isTRUE(run_radii)) {
    message("Running RADII feature creation")
    source("R/helper/create-radii-features.R")
    pluto_radii <- create_radii_features(pluto_model, radii_index) # takes 27 minutes for Manhattan-only
    message(" ...done")
    message("Writing radii features to disk...")
    write_time <- Sys.time()
    write_rds(pluto_radii, "data/aux data/radii-features.rds", compress = "gz")
    write_time_end <- Sys.time()
    tot_write_time <- write_time_end-write_time
    message(" ...done. Writing took ", round(tot_write_time,2),units(tot_write_time))
  } else {
    message("Bypassing radii features calculation, loading from disk...")
    # Error message fixed: wrong file name and a nonexistent user-facing
    # entry point (create_radii_features()) were referenced before.
    if(!file.exists("data/aux data/radii-features.rds")) stop("file data/aux data/radii-features.rds missing. Run create_radii_data(run_radii = TRUE) at least once to completion first")
    pluto_radii <- read_rds("data/aux data/radii-features.rds")
  }
  message(" ...Engineering done. Input ", length(pluto_model)," variables and output ", length(pluto_radii), " variables")
  message("Writing RADII modeling data to disk...")
  write_rds(pluto_radii, outfile, compress = "gz")
  message(" ...done. RADII modeling data written to ", outfile)
}
|
feaf5ab0a19ec6f5deade59d076d9910e50e54a6
|
902f6080da801730f4e1d2b7094054e6ab4178fc
|
/best.R
|
1fdd11745c95c137d442b99fb9a82229ff165cff
|
[] |
no_license
|
famibelle/rprog_data_ProgAssignment3-data
|
feda7f62ac747577ff4d67415f0ffdb90adf74ac
|
299328b247e33a4c1bef037eefffe7388969906f
|
refs/heads/master
| 2021-01-01T05:41:09.454611
| 2014-08-24T20:06:55
| 2014-08-24T20:06:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,264
|
r
|
best.R
|
#Finding the best hospital in a state
# Write a function called best that take two arguments: the 2-character abbreviated name of a state and an
# outcome name. The function reads the outcome-of-care-measures.csv le and returns a character vector
# with the name of the hospital that has the best (i.e. lowest) 30-day mortality for the specied outcome
# in that state. The hospital name is the name provided in the Hospital.Name variable. The outcomes can
# be one of \heart attack", \heart failure", or \pneumonia". Hospitals that do not have data on a particular
# outcome should be excluded from the set of hospitals when deciding the rankings.
# Handling ties. If there is a tie for the best hospital for a given outcome, then the hospital names should
# be sorted in alphabetical order and the rst hospital in that set should be chosen (i.e. if hospitals \b", \c",
# and \f" are tied for best, then hospital \b" should be returned).
# Return the hospital with the best (lowest) 30-day mortality for the given
# outcome in the given state.
#
# Reads "outcome-of-care-measures.csv" from the working directory.
# state   : 2-character state abbreviation (case-insensitive).
# outcome : one of "heart attack", "heart failure", "pneumonia" (case-insensitive).
#
# Fixes vs. original:
#  * ties are now broken alphabetically and a single name is returned, as the
#    assignment spec requires (the original returned the whole tied vector);
#  * a lowercase state (e.g. "tx") previously passed the toupper() validity
#    check but then matched no rows -- the state is now normalised up front.
best <- function(state = "TX", outcome = "heart attack") {
  ## Read outcome data; check.names = FALSE keeps the spaces in column names
  outcome_of_care_measures <- read.csv("outcome-of-care-measures.csv",
                                       colClasses = "character",
                                       check.names = FALSE)
  ## Normalise so lowercase input behaves the same as uppercase
  state <- toupper(state)
  ## Validate arguments (exact messages mandated by the assignment)
  if (!(state %in% outcome_of_care_measures$State)) stop("invalid state")
  if (!(toupper(outcome) %in% toupper(c("Heart Attack", "Heart Failure", "Pneumonia")))) stop("invalid outcome")
  ## First column whose name mentions the outcome holds its mortality rate
  elt <- grep(pattern = outcome, x = names(outcome_of_care_measures), ignore.case = TRUE)
  ## Coerce the rate column to numeric; "Not Available" etc. become NA
  outcome_of_care_measures[, elt[1]] <- suppressWarnings(as.numeric(outcome_of_care_measures[, elt[1]]))
  ## Lowest rate among hospitals of the requested state
  lowest <- min(outcome_of_care_measures[outcome_of_care_measures$State == state, elt[1]], na.rm = TRUE)
  ## Rows (any state) matching that rate, then restrict to the state
  line_lowest <- which(outcome_of_care_measures[, elt[1]] == lowest)
  col_lowest <- which(outcome_of_care_measures[line_lowest, "State"] == state)
  Hospital <- outcome_of_care_measures[line_lowest[col_lowest], "Hospital Name"]
  ## Tie-break: alphabetical order, return only the first hospital
  return(sort(Hospital)[1])
}
# The function should check the validity of its arguments. If an invalid state value is passed to best, the
# function should throw an error via the stop function with the exact message \invalid state". If an invalid
# outcome value is passed to best, the function should throw an error via the stop function with the exact
# message \invalid outcome".
# Here is some sample output from the function.
# best("TX", "heart attack")
# best("TX", "heart failure")
# best("MD", "heart attack")
# best("MD", "pneumonia")
# > source("best.R")
# > best("TX", "heart attack")
# [1] "CYPRESS FAIRBANKS MEDICAL CENTER"
# > best("TX", "heart failure")
# [1] "FORT DUNCAN MEDICAL CENTER"
# > best("MD", "heart attack")
# [1] "JOHNS HOPKINS HOSPITAL, THE"
# > best("MD", "pneumonia")
# [1] "GREATER BALTIMORE MEDICAL CENTER"
# > best("BB", "heart attack")
# Error in best("BB", "heart attack") : invalid state
# >best("BB", "heart attack")
# Error in best("NY", "hert attack") : invalid outcome
#
|
0626505218c748175633644ed7d9781a18d32b08
|
2b2aee3352f8a10c121fe74036eddec01b3ee595
|
/man/killIndividuals.Rd
|
b659832cc92cddd911239b5a29c46e7fe9909b5d
|
[
"MIT"
] |
permissive
|
rdinnager/slimr
|
56f1fef0a83198bce292dd92dc1014df87c2d686
|
e2fbb7115c7cca82dabd26dc6560e71a8cd0958b
|
refs/heads/master
| 2023-08-21T14:00:36.089104
| 2023-07-31T03:11:09
| 2023-07-31T03:11:09
| 226,999,099
| 8
| 1
|
NOASSERTION
| 2023-08-03T05:44:32
| 2019-12-10T01:04:16
|
R
|
UTF-8
|
R
| false
| true
| 3,926
|
rd
|
killIndividuals.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/slim_lang.R
\name{killIndividuals}
\alias{killIndividuals}
\alias{Species$killIndividuals}
\alias{.Sp$killIndividuals}
\title{SLiM method killIndividuals}
\usage{
killIndividuals(individuals)
}
\arguments{
\item{individuals}{An object of type Individual object. See details for
description.}
}
\value{
An object of type void.
}
\description{
Documentation for SLiM function \code{killIndividuals}, which is a method of the
SLiM class \code{\link{Species}}.
Note that the R function is a stub, it does not do anything in R (except bring
up this documentation). It will only do
anything useful when used inside a \code{\link{slim_block}} function further
nested in a \code{\link{slim_script}}
function call, where it will be translated into valid SLiM code as part of a
full SLiM script.
}
\details{
Documentation for this function can be found in the official
\href{http://benhaller.com/slim/SLiM_Manual.pdf#page=679}{SLiM manual: page
679}.
Immediately kills the individuals in individuals. This removes them
from their subpopulation and gives them an index value of -1. The Individual
objects are not freed immediately, since references to them could still exist
in local Eidos variables; instead, the individuals are kept in a temporary
"graveyard" until they can be freed safely. It therefore continues to be safe to
use them and their genomes, except that accessing their subpopulation property
will raise an error since they no longer have a subpopulation. Note that the
indices and order of individuals and genomes in all source subpopulations will
change unpredictably as a side effect of this method. All evaluated interactions
are invalidated as a side effect of calling this method. Note that this method
is only for use in nonWF models, in which mortality is managed manually by
the model script. In WF models, mortality is managed automatically by the SLiM
core when the new offspring generation becomes the parental generation and the
previous parental generation dies; mortality does not otherwise occur in WF
models. In nonWF models, mortality normally occurs during the survival stage
of the tick cycle (see section 23.4), based upon the fitness values calculated
by SLiM, and survival() callbacks can influence the outcome of that survival
stage. Calls to killIndividuals(), on the other hand, can be made at any time
during first(), early(), or late() events, and the result cannot be modified by
survival() callbacks; the given individuals are simply immediately killed. This
method therefore provides an alternative, and relatively rarely used, mortality
mechanism that is disconnected from fitness.
}
\section{Copyright}{
This is documentation for a function in the SLiM software, and has been
reproduced from the official manual,
which can be found here: \url{http://benhaller.com/slim/SLiM_Manual.pdf}. This
documentation is
Copyright © 2016-2020 Philipp Messer. All rights reserved. More information
about SLiM can be found
on the official website: \url{https://messerlab.org/slim/}
}
\seealso{
Other Species:
\code{\link{Sp}},
\code{\link{addSubpop}()},
\code{\link{countOfMutationsOfType}()},
\code{\link{individualsWithPedigreeIDs}()},
\code{\link{mutationCounts}()},
\code{\link{mutationFrequencies}()},
\code{\link{mutationsOfType}()},
\code{\link{outputFixedMutations}()},
\code{\link{outputFull}()},
\code{\link{outputMutations}()},
\code{\link{readFromPopulationFile}()},
\code{\link{recalculateFitness}()},
\code{\link{registerFitnessEffectCallback}()},
\code{\link{registerMateChoiceCallback}()},
\code{\link{registerModifyChildCallback}()},
\code{\link{registerMutationCallback}()},
\code{\link{registerMutationEffectCallback}()},
\code{\link{registerRecombinationCallback}()}
}
\author{
Benjamin C Haller (\email{bhaller@benhaller.com}) and Philipp W Messer
(\email{messer@cornell.edu})
}
\concept{Species}
|
d8c92f87a16d36a2b5659e6a69f371d9740bbcb1
|
6bee4f202b4a54b6b9d8c563208b5c29e7a13c84
|
/plot1.R
|
8ad90dbf05ed6cb84d2701bcb1108bd12917303b
|
[] |
no_license
|
fergusmeade/EDA_Project2
|
942d7e6303673682b7ceed703dd259dbc4827d7f
|
0617040316bb92f40535a67cb1607c132147d069
|
refs/heads/master
| 2020-03-26T07:15:18.184012
| 2018-08-13T23:37:59
| 2018-08-13T23:37:59
| 144,644,583
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 878
|
r
|
plot1.R
|
# Plot 1: have total PM2.5 emissions in the US decreased from 1999 to 2008?
# Downloads the NEI data, sums emissions per year and draws a barplot
# with the base plotting system.
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
# mode = "wb" is required for zip files: the default text mode corrupts
# binary downloads on Windows
download.file(url, destfile = "project2_data.zip", mode = "wb")
unzip("project2_data.zip")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Quick inspection of the emissions table
head(NEI)
colnames(NEI)
str(NEI)
summary(NEI)
unique(NEI$year) # 4 years of data only: 1999, 2002, 2005, 2008
library(dplyr)
# Total emissions per year. Named `totals` rather than `summary` so the
# base::summary() generic is not shadowed.
totals <- NEI %>%
  group_by(year) %>%
  summarise(total = sum(Emissions, na.rm = TRUE))
totals
png(filename = 'plot1.png')
barplot(totals$total,
        names = totals$year,
        xlab = "Year",
        ylab = "PM2.5 Emissions",
        main = "Total Emissions from PM2.5 from 1999 to 2008")
dev.off()
|
50fa6272651184de93e994b9a61329a65adc32f7
|
a8ef03b7ec4559db296409f8017379aba77babf0
|
/eda2.R
|
67bb22d03b5934b39e04f7cc80822d4715965c1f
|
[] |
no_license
|
abalaji-blr/GrupoBimbo
|
60eed1788244947e42f54ef83bce1a671ff582e7
|
271204b083f0d280f388cbeb44e6f60c2d8fae75
|
refs/heads/master
| 2021-01-20T20:10:40.658056
| 2016-07-27T12:27:23
| 2016-07-27T12:27:23
| 63,602,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,545
|
r
|
eda2.R
|
# Perform exploratory data analysis on the Grupo Bimbo data set
library("data.table")
library("dplyr")   # needed below for %>% and sample_frac()
# training data is 3GB; generate a smaller train sample for early analysis:
# dt <- fread("./train.csv", header = TRUE)
# new_train <- sample_frac(dt, 0.15)
# write.csv(new_train, file = "new_train.csv", row.names = FALSE, quote = FALSE)
# Read the subsample directly. The original wrapped this in system(), which
# errors at runtime because system() expects a shell command string.
dt <- fread("./new_train.csv", header = TRUE)
# load ggplot2
library("ggplot2")
# submissions per week (Semana)
ggplot(dt, aes(x = Semana)) + geom_histogram()
# returns per week
returns <- dt[, .( total_returns = sum(Dev_uni_proxima)), by = Semana]
ggplot(returns, aes(x=Semana, y = total_returns)) + geom_line()
# product wise sale (units) & return info.
prodReturn <- dt[, .(sales = sum(Venta_uni_hoy), returns = sum(Dev_uni_proxima)), by = Producto_ID]
# product wise returns
ggplot(prodReturn, aes(x = Producto_ID, y = returns, color = returns)) + geom_point() +
  scale_color_gradient(low="blue", high="red")
# product-wise sales (units)
ggplot(prodReturn, aes(x = Producto_ID, y = sales)) + geom_point()
# demand vs sales: Demanda_uni_equil vs Venta_hoy
ggplot(dt %>% sample_frac(0.05), aes(x = Venta_uni_hoy , y = Demanda_uni_equil)) +
  geom_point() +
  geom_smooth(method = "lm") +
  scale_x_continuous(name = "Sales") +
  scale_y_continuous(name = "Demand") +
  ggtitle("Demand Vs Sales (in terms of units)")
# correlation info
cor(dt$Demanda_uni_equil, dt$Venta_hoy)
cor(dt$Demanda_uni_equil, dt$Venta_uni_hoy)
cor(dt$Demanda_uni_equil, dt$Dev_uni_proxima)
cor(dt$Demanda_uni_equil, dt$Dev_proxima)
|
2dc8c65888f8c2ae03de5f7c14d0acc608f727a3
|
f2c26779b27e038121bb7d4e9a693cd898c4000b
|
/Min-Var portfolio.R
|
2e046e50c0817eeafa6027de0f0a50b2f8cadb85
|
[] |
no_license
|
Rigo5/Portfolio-optimization-with-ROI
|
0cf2b46eebf0f2c7e6f46c3a9105829551263bd8
|
a5c1231fafe62094859840aca7af46e1ed64a8ef
|
refs/heads/master
| 2022-11-24T21:43:19.169054
| 2020-08-04T09:51:42
| 2020-08-04T09:51:42
| 284,742,522
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,858
|
r
|
Min-Var portfolio.R
|
library(quantmod)
library(stringr)
library(ROI)
library(ROI.plugin.quadprog)
#function for download data
#I gonna use the getSymbols function
# Download price data for one ticker via quantmod::getSymbols.
# On any download error a warning names the ticker and NA is returned so
# callers can skip it.
# Fixed: paste0() has no `sep` formal argument -- the stray `sep = ''` in
# the original was silently pasted on as an extra empty string, so it is
# removed here (the resulting message is unchanged).
get_symbol = function(ticker, from = '2017-01-04'){
  error_handler = function(e){
    warning(paste0(ticker, ' is not present'))
    # return NA value so get_prices()/merge_prices() can skip this ticker
    return(NA)
  }
  price = tryCatch(getSymbols(ticker, from = from, auto.assign = FALSE),
                   error = error_handler)
  return(price)
}
#return a data frame with the different prices from a list
merge_prices = function(list_prices){
prices = list_prices[[1]]
#stop here if there is just one ticker
if(length(list_prices) == 1){return(prices)}
for(i in 2:length(list_prices)){
#join only if the element is not na
if(!is.na(list_prices[[i]])){
prices = inner_join(prices, list_prices[[i]], by = 'Index')
}
}
return(prices)
}
#get prices of ticker vector
get_prices = function(ticker_vec, from = '2017-01-04'){
list_prices = list()
c = 1
for(i in ticker_vec){
price = get_symbol(ticker = i, from = from)
price = fortify.zoo(price)
#select adj prices ans the Index
price = price[,c(1,7)]
#change col names
colnames(price) = c('Index', i)
list_prices[[c]] = price
c = c + 1
}
return(merge_prices(list_prices))
}
test = get_prices(c('AAPL', 'GE', 'T'))
#return dor just one vector data
# Simple returns of a numeric series: element i is (x[i] - x[i-1]) / x[i-1].
# The first element is always NA (there is no prior observation).
ret = function(vect){
  values = as.numeric(unlist(vect))
  out = c()
  out[1] = NA
  for(k in seq_len(length(values))[-1]){
    out[k] = (values[k] - values[k - 1]) / values[k - 1]
  }
  return(out)
}
#complete function wich use the previus one
get_returns = function(prices){
N = ncol(prices)
for(i in 2:N){
prices[, i] = ret_single(prices[, i])
}
return(prices[-1, ])
}
#function for cumulated returns
get_cumulated = function(mat, price = TRUE ){
N = ncol(mat)
if(price == TRUE){
ret_mat = get_returns(mat)
#Index base = 100
ret_mat[, 2:N] = ret_mat[, 2:N] +1
ret_mat[1, 2:N] = 100
#cumulated returns now
if(ncol(ret_mat) > 2){
ret_mat[, 2:N] = apply(ret_mat[, 2:N], 2, cumprod)
}else{
ret_mat[, 2] = cumprod(ret_mat[, 2])
}
return(ret_mat)
}else{
#Index base = 100
mat[, 2:N] = mat[, 2:N] +1
mat[1, 2:N] = 100
#cumulated returns now
if(ncol(mat) > 2){
mat[, 2:N] = apply(mat[, 2:N], 2, cumprod)
}else{
mat[, 2] = cumprod(mat[, 2])
}
return(mat)
}
}
####Porfolio Classic Markowitz#####
# Classic Markowitz mean-variance portfolio solved through the ROI framework.
#
# r_mat : data.frame of asset returns; the first column is the date Index
#         and is dropped before optimisation.
# beta  : trade-off in the objective beta * w'(2*Cov)w - (1-beta) * mu'w;
#         beta = 1 is pure minimum variance, beta = 0 pure return maximisation.
# short : if TRUE, per-weight lower bounds of -100 allow short positions.
#
# Returns the ROI_solve() result object from the quadprog solver.
min_var_portfolio = function(r_mat, beta = 0.5, short = FALSE){
  r_mat = na.omit(r_mat)
  # erase the Index (date) column so only asset returns remain
  r_mat = r_mat[, -1]
  N = ncol(r_mat)
  asset_names = colnames(r_mat)  # NOTE(review): computed but never used
  mu = colMeans(r_mat)
  # Quadratic objective: Q = 2*beta*Cov, linear part L = -(1-beta)*mu
  obj = Q_objective(
    Q = beta * 2 * cov(r_mat),
    L = -(1-beta)*mu
  )
  # The short is such that by selling I finance other positions
  # Is basic model without any type of cost of borrowing
  if(short == FALSE){
    # Long-only case: only the budget constraint sum(w) == 1 is added here
    # (presumably ROI's default variable bounds keep weights >= 0 -- confirm)
    Amat = rep(1,N)
    constr = L_constraint(
      Amat,
      dir = c('=='),
      rhs = c(1)
    )
  }else{
    Amat = rep(1,N)
    # Lower bounds of -100 per weight: effectively unbounded shorting
    bound = V_bound(li = 1:N,
                    lb = rep(-100, N))
    constr = L_constraint(
      Amat,
      dir = c('=='),
      rhs = c(1)
    )
    # constructing the optimization problem with the explicit bounds
    portfolio = OP(objective = obj,
                   constraints = constr,
                   bounds = bound)
    # return the problem solution object (early exit for the short case)
    return(ROI_solve(portfolio, solver = 'quadprog'))
  }
  # Long-only problem: no explicit bounds object
  portfolio = OP(objective = obj,
                 constraints = constr)
  return(ROI_solve(portfolio, solver = 'quadprog'))
}
#I gonna scrape the tickers of the SP500
# Scrape the S&P 500 constituents table from Wikipedia.
# Returns a named list: values are ticker symbols (column 1 of the wiki
# table), names are the corresponding security names (column 2).
# NOTE(review): depends on the Wikipedia table layout (columns 1, 2, 4)
# staying stable, and loads rvest via require() as a side effect.
scrape_sp500 = function(){
  require(rvest)
  # First HTML table on the page; keep ticker, name and sector columns
  tabl = read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies') %>% html_table(header = TRUE, fill = TRUE ) %>%
    .[[1]] %>% as.data.frame() %>% .[, c(1,2,4)]
  tick = as.list(tabl[, 1])
  names(tick) = tabl[, 2]
  return(tick)
}
#function for backtesting
# Rolling-window backtest of the min-variance portfolio.
#
# sample  : window length (number of return observations used per re-fit)
# ret_mat : data.frame of returns; first column is the date Index
# short, beta : forwarded to min_var_portfolio().
#
# Re-optimises the weights on each trailing window and applies them to the
# asset returns, returning a data.frame with Index and Port_ret columns.
backtest = function(sample, ret_mat, short = FALSE, beta = 0.5){
  # beginning and end of sample period
  start_p = sample +1
  end_p = start_p - sample  # always 1: first window starts at the first row
  N = nrow(ret_mat)
  Index = ret_mat[, 1]
  solution = list()
  c = 1
  # Slide the window forward one observation at a time
  while(start_p <= N){
    data_p = ret_mat[end_p:start_p, -1]
    solution_list = min_var_portfolio(data_p, beta = beta, short = short)
    # keep only the weight vector, rounded to 3 decimals
    solution[[c]] = round(solution_list$solution, 3)
    start_p = start_p + 1
    end_p = end_p + 1
    c = c +1
  }
  # one row of weights per backtest day
  weight_data = do.call(rbind, solution)
  # now I derive the portfolio returns
  # NOTE(review): weights fitted on a window that *includes* day t are
  # applied to day t's own returns here -- confirm this look-ahead is intended.
  port_ret = round(weight_data * ret_mat[(sample +1):N, -1], 3)
  port_ret = apply(port_ret, 1, sum)
  # arrange the returns with the Date index
  return(data.frame(Index = Index[(sample +1):N], Port_ret = port_ret))
}
|
aa03975e65a065047d23d1811efa8a6aa0c4693f
|
e47b613de6fde64e52c08f4fe752058664d83533
|
/titanic.R
|
f9392b01845d49a6102cc50ca102fa8f7a05214c
|
[] |
no_license
|
anu24/DataWrangling
|
e2111c521c0e8fec61e0ca78f8f76e86c9bb4d83
|
f29a74f350aca8615fe8ce0bf0c8747b5e85377f
|
refs/heads/master
| 2016-09-14T00:19:20.632859
| 2016-05-01T23:37:48
| 2016-05-01T23:37:48
| 57,585,327
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 799
|
r
|
titanic.R
|
library(dplyr)
# Clean the Titanic passenger data and write "titanic_clean.csv".
# load "titanic_original.csv" data
titan <- read.csv("titanic_original.csv", header = TRUE)
# 1. The embarked column has one missing value: find it and replace with "S".
#    The `name != ""` guard skips blank filler rows that also have no name.
titan[titan$embarked == "", ]
titan <- titan %>% mutate(embarked = replace(embarked, embarked=="" & name != "", "S"))
# 2. "age" column: impute missing values with the mean of the observed ages
table(is.na(titan$age))
mean_age <- titan$age %>% mean(na.rm = TRUE)
titan <- titan %>% mutate(age = replace(age, is.na(age), mean_age))
# 3. "lifeboat": an empty string means no boat -> recode as NA
nrow(titan[titan$boat == "", ])
titan <- titan %>% mutate(boat = replace(boat, boat == "", NA))
table(is.na(titan$boat))
# 4. "cabin number": flag whether a cabin number was recorded
titan <- titan %>% mutate(has_cabin_number = ifelse(cabin == "", 0, 1))
names(titan)
# 5. Write cleaned-up data to "titanic_clean.csv"; row.names = FALSE avoids
#    the spurious row-number column write.csv adds by default
write.csv(titan, "titanic_clean.csv", row.names = FALSE)
|
f6c8492a33eb9103eaf1f2a58bec47aeafc022fb
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/biwavelet/inst/testfiles/rcpp_row_quantile/libFuzzer_rcpp_row_quantile/rcpp_row_quantile_valgrind_files/1610555112-test.R
|
2c18b6de7f366d37a7e5b4b27f3b0c9f460d2074
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,181
|
r
|
1610555112-test.R
|
# Auto-generated libFuzzer/valgrind regression input for
# biwavelet:::rcpp_row_quantile: a fixed 9x5 matrix of extreme doubles
# (denormals, huge magnitudes) plus a near-zero quantile q, replayed to
# check the compiled routine handles them without crashing.
testlist <- list(data = structure(c(7.27247987802838e+199, 1.4072470568e+248, 1.42963235646472e+248, 6.59297085909318e+38, 3.52062311262302e-305, 1.51255630705442e-312, 6.97572831925218e-310, 6.21470184187001e+228, 9.48968865377627e+170, 6.48706195472858e+174, 7.71825832018303e+188, 3.78576699573368e-270, 4.94065645841247e-324, 4.94065645841247e-324, 7.3099528636333e-304, 2.81700905511843e+209, 2.81700905511843e+209, 2.81700905511843e+209, 2.81700905511843e+209, 2.81700953463913e+209, 6.06877894485403e-307, 3.70252290058914e-305, 4.25004196166878, 1.33483554400379e-309, 3.52953811760204e+30, 2.86130632606109e-307, 1.38366263171814e-309, 6.95335580946656e-310, 4.86146275325191e-299, 4.94065645841247e-324, 2.45619941924076e+35, 2.7367946739059e-312, 9.55240318163186e+139, 2.41765891227415e+35, 3.91978391036689e-312, 4.24400786238171e-314, 6.80564733831973e+38, 2.8801395002149e+304, 1.18659437101509e-303, 2.41764731722575e+35, 2.68429802296263e-24, 1.46716115284115e-314, 1.390671161567e-309, 2.27541883785622e-317, 1.32548925352458e-309), .Dim = c(9L, 5L)), q = 2.77612803857452e-309)
result <- do.call(biwavelet:::rcpp_row_quantile,testlist)
# Print the structure of the result for the harness log
str(result)
|
b0c6a3ba90a4dc0768a5bd61c7de1ba009182316
|
e6a95c4b8744ad329e6bd6755f3c5f7d185fe1ce
|
/classifier.StartingPoint.R
|
7d6352132b2cfec7060e77d3db6e4c5be2593696
|
[] |
no_license
|
FuCherng/CS411Project
|
bcacf1683db70dfc0be74dc356f061214bb13c0a
|
b2c7354aa30644440594af90787579eaf836163a
|
refs/heads/master
| 2020-06-28T20:37:36.087083
| 2016-11-29T15:02:52
| 2016-11-29T15:02:52
| 74,474,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,893
|
r
|
classifier.StartingPoint.R
|
# Predict whether a student's grade improves between consecutive submissions
# from video/forum activity features, using a caret-trained classifier.
library(dplyr)
library(plyr) # ddply
library(caret)
# library(lattice)
# library(ggplot2)
#------ read features
db = read.csv('/Users/fuyincherng/Documents/EPFLCourse/DigitalEducation/R/Dataset and Scripts-20161108/OutputTable.csv', stringsAsFactors = FALSE)
#------ sort submissions by user, problem, then attempt number
db = db[order(db$UserID, db$ProblemID, db$SubmissionNumber), ]
#--- replace NA values with 0
db[is.na(db)] = 0
#----- remove first submissions (no previous grade to compare against)
db = filter(db, db$SubmissionNumber > 0)
#---- remove cases when there is no video or forum activity between two submissions
db$NVideoAndForum <- db$NVideoEvents + db$NForumEvents
db = filter(db, db$NVideoAndForum > 0)
#----- make a categorical variable indicating whether the grade improved
db$improved = factor(ifelse(db$GradeDiff > 0, 'Yes', 'No'))
table(db$improved)
#----- visualize each feature split by the improved/not-improved category
boxplot(db$TimeSinceLast ~ db$improved, main = "improve by TimeSinceLast", horizontal = TRUE, outline = FALSE)
boxplot(db$NForumEvents ~ db$improved, main = "improve by NForumEvents", horizontal = TRUE, outline = FALSE)
boxplot(db$NVideoEvents ~ db$improved, main = "improve by NVideoEvents", horizontal = TRUE, outline = FALSE)
boxplot(db$NumberOfThreadViews ~ db$improved, main = "improve by NumberOfThreadViews", horizontal = TRUE, outline = FALSE)
#============ train a classifier to predict 'improved' status =============
# ----- 1. split data into train (60%) and test (40%) sets
set.seed(1234)
tr.index = sample(nrow(db), nrow(db) * 0.6)
db.train = db[tr.index, ]
db.test = db[-tr.index, ]
#-----
# Train a classifier to identify which features are most predictive
# of an increase versus decrease of the grade. Try different methods,
# model parameters, features set and find the best classifier (highest
# 'kappa' value on the test set); aim for kappa > 0.5.
#----- Train and tune the model
# (more info: https://topepo.github.io/caret/model-training-and-tuning.html)
# trainControl configures the resampling/validation method
fitControl <- trainControl(
  method = "repeatedcv",
  number = 1,
  repeats = 1,
  # NOTE(review): number = 1 / repeats = 1 is a single resample, not the
  # repeated 10-fold CV the original comments claimed -- raise for real runs
  savePredictions = TRUE,
  classProbs = TRUE,
  summaryFunction = twoClassSummary
)
gbmfit1 <- train(improved ~ ., data = db.train,
                 method = "svmRadial",
                 tuneLength = 9,
                 preProc = c("center", "scale"),
                 # metric = "ROC",
                 # Fixed: the original misspelled this as `trControal`, so the
                 # control object was silently swallowed by `...` and ignored
                 trControl = fitControl
)
gbmfit1
#----- Test the model
# predict(gbmfit1, newdata = db.test)
|
aceb0a5e8c849ae6434cb3180e062e541b1d49ff
|
de31f54f678b7fe3b0619425621345f440f8944a
|
/R/introduction.R
|
b95a1d2f95e63cefa5a25e967f7fd01f010fe9bb
|
[] |
no_license
|
PaulMelloy/GettingStartedInR_210506
|
0fc47211a1522ad960d009651402cf32e7383371
|
9cc9bffcb5cdc13e8ad97058b5db00c6f6e9a5fd
|
refs/heads/main
| 2023-04-26T09:28:07.172902
| 2021-05-06T05:58:12
| 2021-05-06T05:58:12
| 364,461,388
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,004
|
r
|
introduction.R
|
# Introductory session: assignment, navigating the filesystem from R,
# and downloading a data file into a project folder.
x = 100 + 1
# It is better practice when assigning an object
# to use the <- assign operator
x <- 100
y <- 5
x * y
log(x)
# can't create object names starting with a number, e.g.:
# 2x
# The filesystem in R
# finding where you are
# What is your working directory
getwd()
# you can change your working directory
# but don't
# setwd()
# commented lines are not executed
# 1 + 1
# list files in the working directory
list.files()
# finding help on functions using '?'
?list.files
# list folders but not recursively
list.dirs(recursive = FALSE)
# create a data directory -- but only if it does not already exist,
# so re-running the script does not emit a warning
if (!dir.exists("data")) {
  dir.create("data")
}
# or we can be explicit in the filename
# dir.create("C:/Users/U8011054/OneDrive - USQ/Cloudstor/R/GettingStartedInR_210506/data")
# downloading data from the internet
# open help file
?download.file
# download the data using the 'download.file()' function
download.file(url = "https://raw.githubusercontent.com/swcarpentry/files/master/inflammation-01.csv",
              destfile = "data/inflammation-01.csv")
|
5cfa8651dcb2bec0de9b9ec5426b2a41b92ca426
|
2d47450c41c23f6d008bfca5bf08d3161bb13491
|
/man/ba_germplasm_markerprofiles.Rd
|
429bba9f9283ea750720c141a9390ca9e71134b3
|
[] |
no_license
|
khaled-alshamaa/brapi
|
2c14727d65fc82a77d243bdc40c10b67955a04d5
|
5f2a5caa48d72e2412ead128b9143cc1882a060c
|
refs/heads/master
| 2022-03-21T20:19:07.470329
| 2019-10-16T15:51:00
| 2019-10-16T15:51:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,652
|
rd
|
ba_germplasm_markerprofiles.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ba_germplasm_markerprofiles.R
\name{ba_germplasm_markerprofiles}
\alias{ba_germplasm_markerprofiles}
\title{ba_germplasm_markerprofiles}
\usage{
ba_germplasm_markerprofiles(con = NULL, germplasmDbId = "",
rclass = c("tibble", "data.frame", "list", "json"))
}
\arguments{
\item{con}{list, brapi connection object}
\item{germplasmDbId}{character, the internal database identifier for a
germplasm of which the internal markerprofile database
identifiers are to be retrieved e.g. "9932";
\strong{REQUIRED ARGUMENT} with default: ""}
\item{rclass}{character, class of the object to be returned; default: "tibble"
, possible other values: "data.frame"/"list"/"json"}
}
\value{
An object of class as defined by rclass containing the internal
markerprofile database identifiers.
}
\description{
Retrieve the internal markerprofile database identifiers for a given internal
germplasm database identifier
}
\note{
Tested against: test-server
BrAPI Version: 1.0, 1.1, 1.2
BrAPI Status: active
}
\examples{
if (interactive()) {
library(brapi)
con <- ba_db()$testserver
ba_germplasm_markerprofiles(con, germplasmDbId = "1")
}
}
\references{
\href{https://github.com/plantbreeding/API/blob/V1.2/Specification/Germplasm/Germplasm_Markerprofiles_GET.md}{github}
}
\seealso{
Other germplasm: \code{\link{ba_germplasm_breedingmethods}},
\code{\link{ba_germplasm_details_study}},
\code{\link{ba_germplasm_details}},
\code{\link{ba_germplasm_pedigree}},
\code{\link{ba_germplasm_progeny}},
\code{\link{ba_germplasm_search_post}},
\code{\link{ba_germplasm_search}},
\code{\link{ba_studies_germplasm_details}}
Other genotyping: \code{\link{ba_genomemaps_data_range}},
\code{\link{ba_genomemaps_data}},
\code{\link{ba_genomemaps_details}},
\code{\link{ba_genomemaps}},
\code{\link{ba_germplasm_attributes}},
\code{\link{ba_germplasm_search_post}},
\code{\link{ba_germplasm_search}},
\code{\link{ba_germplasmattributes_attributes}},
\code{\link{ba_germplasmattributes_categories}},
\code{\link{ba_germplasmattributes_details}},
\code{\link{ba_markerprofiles_allelematrices_details}},
\code{\link{ba_markerprofiles_allelematrices_search_post}},
\code{\link{ba_markerprofiles_allelematrices_search}},
\code{\link{ba_markerprofiles_allelematrix_search}},
\code{\link{ba_markerprofiles_details}},
\code{\link{ba_markerprofiles_search}},
\code{\link{ba_markers_details}},
\code{\link{ba_markers_search_post}},
\code{\link{ba_markers_search}}
}
\author{
Reinhard Simon, Maikel Verouden
}
\concept{genotyping}
\concept{germplasm}
|
c40636c507e84673a4214304c32694623f71f39d
|
5665b1a9e21db54af9c7862a5258e1f452b09721
|
/functions/realtime_predictions.R
|
1a042a41987d94e0c1ee845c34bc52eb4d02da23
|
[] |
no_license
|
ahessert/alluvium
|
fa432bb6a1322bdff556d15137d560a8245edd6f
|
a6eb493b429a07c66a3c5ef65623c08a547b118a
|
refs/heads/master
| 2021-04-06T07:01:16.841194
| 2018-03-20T19:14:50
| 2018-03-20T19:14:50
| 125,110,996
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,261
|
r
|
realtime_predictions.R
|
# Build a realtime prediction from the latest prediction_records and report
# it to the turbine endpoint via an HTTP GET.
#
# prediction_records : data.table of recent records; row 1 supplies the `ts`
#                      timestamp attached to the submitted prediction.
# turbine_endpoint   : URL accepting "ts" and "value" query parameters.
# coefficients       : named numeric coefficient vector for apply_coefficients().
make_prediction <- function(prediction_records, turbine_endpoint, coefficients) {
  # clean -> featurise -> score, capturing the scalar prediction
  prediction_records %>%
    clean_records %>%
    format_predictors %>%
    apply_coefficients(coefficients) %>%
    {.} -> prediction
  # submit the prediction tagged with the newest record's timestamp
  GET(turbine_endpoint, query = list("ts"=prediction_records[1, ts], "value"=prediction))
}
# Normalise one raw record row into the typed schema used downstream:
# sensor fields are coerced to numeric, and the misspelled source column
# `wetBulbFarenheit` is renamed to the correctly spelled `wetBulbFahrenheit`.
# rownum selects which row of new_records to format.
format_new_row <- function(new_records, rownum) {
  new_row <- new_records[rownum, .(ts=ts,
                                   state=state,
                                   rpm=as.numeric(rpm),
                                   wetBulbFahrenheit=as.numeric(wetBulbFarenheit),
                                   relativeHumidity=as.numeric(relativeHumidity),
                                   windSpeed=as.numeric(windSpeed),
                                   windDirection=as.numeric(windDirection),
                                   hourlyPrecip=as.numeric(hourlyPrecip),
                                   watts=as.numeric(watts))][order(ts)]
  return(new_row)
}
# Prepend the newly formatted row to the accumulated turbine records.
# When the accumulator is still empty (length 0), the new row simply
# becomes the record table.
prepend_to_turbine_records <- function(new_row, turbine_records) {
  if (length(turbine_records) > 0) {
    rbind(new_row, turbine_records)
  } else {
    new_row
  }
}
# Load the model coefficients from a JSON file as a named numeric vector.
# NOTE(review): relies on jsonlite's read_json() being attached by the caller.
# Fixed: the original assigned to an unused local as its last expression,
# which made the function return its value invisibly.
read_coeficients <- function(coefficients_path) {
  unlist(read_json(coefficients_path))
}
# Run the shared cleaning pipeline over a records table: unix timestamps,
# null repair, cyclical (sin/cos) features, current-downtime feature and
# state dummy columns. The helper functions are defined elsewhere in the
# project; runmode = "LIVE" selects the streaming code path.
clean_records <- function(dt) {
  dt %>%
    convert_to_unix_ts %>%
    fix_nulls %>%
    create_sin_cos_features %>%
    create_current_down_time_feature(runmode = "LIVE") %>%
    create_state_dummies %>%
    {.} -> dt
  return(dt)
}
# Turn a cleaned records table into the numeric predictor vector consumed by
# apply_coefficients(): rollup and trend features are computed, restricted to
# the REGRESSION_FEATURES columns (defined elsewhere), and the newest row is
# returned as a plain vector.
format_predictors <- function(dt) {
  dt %>%
    create_rollup_features(runmode = "LIVE") %>%
    create_trend_change_feature %>%
    {.} -> formatted_predictors
  formatted_predictors <- formatted_predictors[,REGRESSION_FEATURES, with=F]
  # With under 150 seconds of history the trend features are unreliable,
  # so they are zeroed out rather than fed to the model as noise
  if (dt[,diff(range(unix_ts))] < 150) {
    formatted_predictors[, c("trend_change_three_hr", "trend_continuation_three_hr", "trend_continuation_one_hr",
                             "three_hr_watt_trend") := 0]
  }
  return(unlist(formatted_predictors[1]))
}
apply_coefficients <- function(record, coefficients) {
intercept <- 1
weighted_values <- c(intercept, record) * coefficients
prediction <- round(sum(weighted_values), digits = 3)
if (prediction < 0) { return(0.001) }
return(prediction)
}
|
5ddb95b6b8b33cdfb26177b5676623ba3a024dd0
|
48e98fed26af821438813b224d241b9e5573072f
|
/22Feb2021_1_dplyr.R
|
53cde73a5960773df982049be1f996c6fb152578
|
[] |
no_license
|
mrpetersenful/unit_3_penguins
|
060bbb574657e414e690805a9ff8418f96b19fdc
|
61b9bef9432560106554aa619337a8f0c7759ae0
|
refs/heads/main
| 2023-07-22T23:19:29.822238
| 2023-07-15T16:39:22
| 2023-07-15T16:39:22
| 341,201,942
| 0
| 0
| null | 2021-02-22T13:01:14
| 2021-02-22T13:01:13
| null |
UTF-8
|
R
| false
| false
| 16,164
|
r
|
22Feb2021_1_dplyr.R
|
## 22Feb2021
## 3.1 Intro to penguins
## The Tidyverse package makes coding much more efficient, easier to write and read,
## and creates great visualizations. Developed by RStudio's chief scientist Hadley
## Wickham, the Tidyverse provides a well-documented workflow for general data modeling,
## wrangling, and visualization tasks. The Tidyverse is a collection of R packages built
## around the basic concept that data in a table should have one observation per row,
## one variable per column, and only one value per cell.
## First, we have to install the Tidyverse package. We can do this in RStudio by going
## into the Tools tab and going to Install Packages. I can also do this from the
## command line.
## NOTE(review): install.packages() in a script re-installs on every run;
## it only needs to be run once per machine.
install.packages("tidyverse")
## You only have to install packages on your computer once. However, to use Tidyverse
## in your R script, you need to load the package library at least every R session
## where you intend to use that package.
library("tidyverse")
tidyverse_packages()
## These are the packages that are loaded when you load the Tidyverse library.
## The packages we will be using consistently throughout the rest of the course are
## dplyr (for data wrangling) and ggplot2 (for visualization). Note that the lubridate
## package that we've already used is part of the Tidyverse as well.
## The Tidyverse is built around the basic concept that data in a table should have
## one observation per row, one variable per column, and only one value per cell.
## Once data is in this 'tidy' format, it can be transformed, visualized, and modelled
## for analysis.
## When using functions in the Tidyverse ecosystem, most data is returned as a tibble
## object. Tibbles are very similar to the data.frames we have been working with, and
## it is perfectly fine to use Tidyverse functions on a data.frame object. Just be
## aware that in most cases, the Tidyverse function will transform your data into a
## tibble. If you're unobservant, you won't even notice a difference. However, there
## are a few differences between the two flat data types, most of which are just designed
## to make your life easier. The most obvious differences when you're working with
## tibbles are:
## 1) printing in the console looks different
## 2) never changes the type of the inputs (e.g., it never converts strings to factors)
## 3) never creates row names
## 4) never changes the names of variables
## 5) tibbles generate a warning if the column you're trying to access doesn't exist.
## Some older functions don't work with tibbles. If you find one of these functions, use
## as.data.frame() to turn a tibble back to a data.frame.
## Example: my_data = as.data.frame(my_data)
## The dplyr package is designed to make it easier to manipulate flat (2-D) data. dplyr
## provides simple "verbs", functions that correspond to the most common data
## manipulation tasks, to help you translate your thoughts into code. This package
## also uses efficient backends, so you spend less time waiting for the computer.
## Here are the most common functions that we will be using in dplyr:
#### filter() chooses rows based on column values.
#### arrange() changes the order of the rows.
#### select() changes whether or not a column is included.
#### rename() changes the name of columns.
#### mutate() changes the values of columns and creates new columns.
#### summarize() collapses a group into a single row.
#### group_by() group data into rows with the same values
#### ungroup() remove grouping information from data frame.
#### distinct() remove duplicate rows.
## Now let's look at some penguin data!
## I'm also going to install the palmer penguins package.
install.packages("palmerpenguins")
## Now that it's installed, I'm going to load it in.
library(palmerpenguins)
## I just want to check to see what the data looks like now.
head(penguins)
summary(penguins)
dim(penguins)
## So we look at head() all of the time to check out our rows, but the tidyverse
## version of head() is glimpse.
glimpse(penguins)
## Let's check out what the class of our dataset penguins is. We want to have tbl
## dataframes -- actually, that's what it's called in the tidyverse.
class(penguins)
## So our list of functions for the tidyverse is a bit different. They are a list
## of verbs that help visualize what the changes to our data are that we want.
## Now we're going to look at how the dplyr functions can be used to subset, transform,
## and summarize data. First, we're going to use the filter() function in dplyr to grab
## only the gentoo penguins.
gentoo = filter(penguins, species=="Gentoo")
## Here I named a new set of just the Gentoo species of penguin. I can use this parameter
## to filter further down.
## Now I want to look at just the gentoo ladies (females).
gentoo_ladies = filter(gentoo, sex=="female")
summary(gentoo_ladies)
## We can see in the summary output that the number of Adelie and Chinstrap penguins
## now equals zero and the males and NA's have been zeroed out, also.
## We could also have separated out the female gentoos in one line of code. Love this.
gentoo_ladies = filter(penguins, species=="Gentoo", sex=="female")
## However, there is a different way to do this called a pipe %>% and that helps
## us filter the data without using the filter() function.
## These two lines of code are equivalent:
gentoo_ladies = filter(penguins, species=="Gentoo", sex=="female")
gentoo_ladies = penguins %>% filter(species=="Gentoo", sex=="female")
## All of the dplyr functions take a data frame (or tibble) as the first argument.
## Rather than forcing the user to either save intermediate objects or nest functions,
## dplyr provides the pipe operater %>% from the package magrittr. The pipe operator
## allows us to combine multiple operations in R into a single sequential chain of
## actions.
## Here's a hypothetical example. I want to perform a sequence of operations on data
## frame x using hypothetical functions f(), g(), and h():
#### 1) Take x then
#### 2) Use x as an input to a function f() then
#### 3) Use the output of f(x) as an input to a function g() then
#### 4) Use the output of g(f(x)) as an input to a function h().
## One way to achieve this sequence of operations is by using nesting parentheses as
## follows: h(g(f(x))).
## That code isn't so hard to read because we're only applying three functions, and
## each of the functions is short in its name. Further, each of these functions also
## has only one argument. However, this can get progressively harder to read as the
## number of functions applied in the sequence increases and the arguments in each
## function increase as well. This is where the pipe operator %>% comes in handy.
## %>% takes the output of one function and then "pipes" it to be the input of the
## next function. Furthermore, a helpful trick is to read %>% as "then" or "and then".
## For example, you can obtain the same output as the hypothetical sequence of
## functions as follows:
x %>%
f() %>%
g() %>%
h()
## While both approaches achieve the same goal, the latter is much more readable
## because you can clearly read the sequence of operations line-by-line.
## Here's how a single transformation with filter() looks with and without a pipe:
gentoo_ladies = filter(penguins, species=="Gentoo", sex=="female")
gentoo_ladies = penguins %>% filter(species=="Gentoo", sex=="female")
## To make the lines using a pipe easier to read, you can include carriage returns
## after the pipe, as follows.
gentoo_ladies = pengins %>% # include carriage returns after pipe for readability
filter(sex=="female", # include carriage returns in list of filter rules
species=="Gentoo")
## This piping style of writing code becomes really efficient when you start to string
## lots of data manipulation tasks together.
## Now let's use summarize() to find the mean mass (g) of all of the female penguins
## in the dataset.
female_mean_mass = penguins %>%
filter(sex == "female") %>%
summarize(mean_mass_g = mean(body_mass_g))
female_mean_mass
## First, I started with the dataset (penguins), then used filter() to separate out
## females, then used summarize() to find the mean value of the column body_mass_g.
## So we linked together several actions using the %>% pipe. Note how I used a carriage
## return after every pipe to make my code run more readable. This is standard coding
## etiquette used when writing piped dplyr commands because it's easier to skim
## and see the actions being performed and in what order.
## The dplyr syntax is simpler, and the name of the original dataset (penguins)
## doesn't need to be repeated when using filter(), unlike when using which(). Base
## R also requires the creation of an interim variable, or if you want to fit it all
## into one line of code, you must use the which() function to subset across rows
## and columns simultaneously and then unlist() your data.frame or tibble so that it
## can be read into the mean() function as an array. Woof. Go dplyr!!
## Okay, so I just created a filtered mean mass for all of the female penguins.
## Now I want to be be more specific and find out the mean body mass for all
## of the lady gentoo penguins.
gentoo_ladies_body_mass = gentoo_ladies %>%
summarize(mean_mass = mean(body_mass_g)) ## Calculate mean of female gentoo body mass
gentoo_ladies_body_mass
## This is more spacing review.
filter(sex == "female") %>%
summarize(mean_mass_g = mean(body_mass_g))
## Compare this to base R code.
female_penguins = penguins[which(penguins$sex == "female"), ]
female_mean_mass = mean(female_penguins$body_mass_g)
# Or to do it all in one line of code:
female_mean_mass = mean(unlist(penguins[which(penguins$sex == "female"), "body_mass_g"]))
## Exercise 1.1:
## Build a data set containing only Chinstrap penguins. Then build another data set
## that contains only Chinstrap penguins with a flipper length > 200 mm. What is the
## sex ratio of Chinstrap penguins? How does that compare to the sex ratio of
## Chinstrap penguins with a flipper length >200 mm? Use the summary() function to
## examine sex ratios. Given this analysis, what do you think the relationship is
## between sex and flipper length?
chinstrap = penguins %>%
filter(species == "Chinstrap")
glimpse(chinstrap)
summary(chinstrap)
sex_ratio_all = 34/68
chin_long_wing = chinstrap %>%
filter(flipper_length_mm > 200)
glimpse(chin_long_wing)
summarize(chin_long_wing) ## For some reason this is not working for me :'(
## Oh, it's not working because I want summary() lolol.
summary(chin_long_wing)
sex_ratio_chin_long_wing = 1/18
## Clearly the males have longer flipper length than the female in the chinstrap
## species.
## More dplyr functions.
## Let's dig in deeper. Now we need to know the mean body mass for each sex of each
## penguin species. I also want to throw out unknown sex birds so they don't bias
## the data. We can use group_by() to indicate that I want to consider each sex and
## each species as a distinct dataset when I use the summarize() function. We'll finish
## it out by printing to the console with print(). These can be valuable summary
## statistics that we want to refer back to when we are writing up our analysis, or
## maybe we'll want to include the results in a table in a presentation at a conference.
## Either way, we'll save our new summary table of mean body mass to a .csv in our
## project folder.
## First I'm going to calculate the mass of each species. I'm telling it to look at
## the penguins dataset, then group the set by species, then summarize by looking
## at the average of the body mass, and removing the NA data.
species_mean_mass = penguins %>%
group_by(species) %>%
summarize(mean_mass_g = mean(body_mass_g, na.rm=TRUE))
## Now I want to calculate the mass of each species by sex. I'm going to use the
## dataset penguins, then filter by removing the rows where the sex is NA (! means
## "not" here, so it flips the logical value). After removing the NA from the sex
## data, I'm grouping the dataset by species and by sex. Then I'm summarizing the
## mean of the body mass for each group of data.
species_sex_mean_mass = penguins %>%
filter(!is.na(sex)) %>% # Removes rows where sex is NA. Read the ! as the word "not" here - i.e. it flips the logical value
group_by(species, sex) %>%
summarize(mean_mass_g = mean(body_mass_g)) %>%
print()
## Now, save the table.
write_csv(species_sex_mean_mass, path="data/processed/peguin_mean_body_mass_g.csv")
## You could also use write.csv(), which is the base R function.
## So, we just made a pretty in-depth analysis that takes advantage of the consistent
## syntactical format across dplyr functions and avoids unnecessary repetition. If we
## calculated the same summary statistics in base R, we'd have to repeat the analysis
## separately for each combination of species/sex, or write a goddamn stupid for loop
## that stepped through the data subsets.
## We used the mean() function in our dplyr summarize() command, but there are many
## different summary functions that can be used inside of summarize():
#### Center: mean(), median()
#### Spread: sd(), IQR(), mad()
#### Range: min(), max(), quantile()
#### Position: first(), last(), nth()
#### Count: n(), n_distinct()
#### Logical: any(), all()
## Exercise 1.2. Repeat Exercise 1.1, but this time use group_by() along with the n()
## function inside summarize() to count the number of Chinstrap penguins of each sex.
## Again compare the sex ratio of all Chinstrap observations vs. the sex ratio of
## Chinstrap penguins with a flipper length > 200 mm.
penguins %>%
filter(species == "Chinstrap") %>%
group_by(sex) %>%
summarize(n=n())
penguins %>%
filter(species == "Chinstrap",
flipper_length_mm > 200) %>%
group_by(sex) %>%
summarize(n=n())
## Here are more examples of what we can do with dplyr functions: group_by(),
## summarize(), mutate(), distinct(), select(), arrange()
## Which species has the most observations?
n_by_species = penguins %>%
group_by(species) %>%
summarize(n = n())
## Use mutate() to convert body mass units:
penguins_for_america = penguins %>%
mutate(body_mass_lb = body_mass_g * 0.0022) ## 0.0022 lb/g
## Quickly display the names of all of the islands surveyed:
penguins %>%
distinct(island)
## Grab just the species and sex columns:
penguins_brief = penguins %>%
select(species, sex)
## Remove bill data:
penguins_no_bill = penguins %>%
select(-bill_length_mm, -bill_depth_mm)
## Sort data by body mass, then species:
penguins_sorted = penguins %>%
arrange(body_mass_g, species)
## Sort data by body mass (highest to lowest), then species:
penguins_sorted = penguins %>%
arrange(rev(body_mass_g), species)
## Exercise 1.3: What is the mean bill length (in inches) of Adelie penguins found
## on either Dream Island or Biscoe Island? What is the standard deviation? Is the
## mean larger or smaller than the mean bill length of Adelie penguins found on
## Torgersen Island?
penguins %>%
## Here I'm filtering by species and the two islands.
filter(species == "Adelie", island %in% c("Biscoe", "Dream")) %>%
## Here I'm naming a new column, and changing the bill length to inches.
mutate(bill_length_in = bill_length_mm * 0.039) %>% ## Conv: 0.0393701 in/mm
## Here I'm summarizing the average and std dev of bill length in inches.
summarize(mean_bill_length_in = mean(bill_length_in),
sd_bill_length_in = sd(bill_length_in))
penguins %>%
filter(species=="Adelie",
island=="Torgersen",
## Removing any NA data in this column.
!is.na(bill_length_mm)) %>%
mutate(bill_length_in = bill_length_mm * 0.039) %>%
summarize(mean_bill_length_in = mean(bill_length_in),
sd_bill_length_in = sd(bill_length_in))
## The mean of the bill length of the Adelie penguins on Biscoe and Dream island is
## 0.01 inches smaller than the mean of the bill length of the Adelie penguins on
## Torgersen Island.
|
b2757116732455b5768c0bb375a198bc44c9ea00
|
18f4631b4b312215877e5a65fff966089b993c8d
|
/LIB_dailytimeseries2.r
|
35331060613ef6cd8ab6e49d882cc1bd9544f5cf
|
[] |
no_license
|
kkyong77/R-coded-scripts-for-RHESSys-calibration
|
42c93f00d8e6dcb7027141f3dc0527897e84be3b
|
7fc7c222c0afe36cde7a88abde8357d7bc1d68c9
|
refs/heads/master
| 2020-03-20T10:07:16.627513
| 2018-06-12T13:57:57
| 2018-06-12T13:57:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,962
|
r
|
LIB_dailytimeseries2.r
|
source("~/Dropbox/LIB_Rscript/LIB_misc.r")
fillingTimeGap = function(x){
#assume x is Date
return <- seq.Date(x[1], to=x[3],by="day")
}#function
match2DailyTimeSeries_backup=function(x,y){
#assume both x and y are date object
x.select = rep(NA,length(x))
for(i in 1:length(x)){
x.select[i] = x[i]%in%y
}#i
y.select = rep(NA,length(y))
for(i in 1:length(y)){
y.select[i] = y[i]%in%x
}#i
return <- list( xSelect=x.select, ySelect=y.select)
}#function
match2DailyTimeSeries=function(x,y){
#assume both x and y are date object
return <- list( xSelect=(!is.na(match(x,y))), ySelect=(!is.na(match(y,x))))
}#function
match3DailyTimeSeries_backup=function(x,y,z){
#assume both x and y are date object
x.select = rep(NA,length(x))
for(i in 1:length(x)){
x.select[i] = x[i]%in%y & x[i]%in%z
}#i
y.select = rep(NA,length(y))
for(i in 1:length(y)){
y.select[i] = y[i]%in%x & y[i]%in%z
}#i
z.select = rep(NA,length(z))
for(i in 1:length(z)){
z.select[i] = z[i]%in%x & z[i]%in%y
}#i
return <- list( xSelect=x.select, ySelect=y.select, zSelect=z.select)
}#function
match3DailyTimeSeries=function(x,y,z){
#assume both x and y are date object
return <- list(
xSelect=(!is.na(match(x,y)) & !is.na(match(x,z)) ),
ySelect=(!is.na(match(y,x)) & !is.na(match(y,z)) ),
zSelect=(!is.na(match(z,x)) & !is.na(match(z,y)) )
)
}#function
match4DailyTimeSeries_backup=function(x,y,z,w){
#assume both x and y are date object
x.select = rep(NA,length(x))
for(i in 1:length(x)){
x.select[i] = x[i]%in%y & x[i]%in%z & x[i]%in%w
}#i
y.select = rep(NA,length(y))
for(i in 1:length(y)){
y.select[i] = y[i]%in%x & y[i]%in%z & y[i]%in%w
}#i
z.select = rep(NA,length(z))
for(i in 1:length(z)){
z.select[i] = z[i]%in%x & z[i]%in%y & z[i]%in%w
}#i
w.select = rep(NA,length(w))
for(i in 1:length(w)){
w.select[i] = w[i]%in%x & w[i]%in%y & w[i]%in%z
}#i
return <- list( xSelect=x.select, ySelect=y.select, zSelect=z.select, wSelect=w.select)
}#function
match4DailyTimeSeries=function(x,y,z,w){
#assume both x and y are date object
return <- list(
xSelect=(!is.na(match(x,y)) & !is.na(match(x,z)) & !is.na(match(x,w)) ),
ySelect=(!is.na(match(y,x)) & !is.na(match(y,z)) & !is.na(match(y,w)) ),
zSelect=(!is.na(match(z,x)) & !is.na(match(z,y)) & !is.na(match(z,w)) ),
wSelect=(!is.na(match(w,x)) & !is.na(match(w,y)) & !is.na(match(w,z)) )
)
}#function
LIBas.Date=function(x,y,z){
return <- as.Date(paste(x,y,z,sep="-"),format="%Y-%m-%d")
}
setDay=function(x,dd){
return <- as.Date(paste(format(x,"%Y"),format(x,"%m"),dd,sep="-"))
}
setMonth=function(x,mm){
return <- as.Date(paste(format(x,"%Y"),mm,format(x,"%d"),sep="-"))
}
setYear=function(x,yyyy){
return <- as.Date(paste(yyyy,format(x,"%mm"),format(x,"%d"),sep="-"))
}
wateryearStartDate=function(x){
#assume x is Date
# i don't know how to do?
hold = LIBas.Date(as.numeric(format(x,"%Y")),10,1)
if(hold>x){
return <- LIBas.Date(as.numeric(format(x,"%Y"))-1,10,1)
}else{
return <- hold
}
}
wateryearEndDate=function(x){
#assume x is Date
# i don't know how to do?
hold = LIBas.Date(as.numeric(format(x,"%Y")),9,30)
if(hold<x){
return <- LIBas.Date(as.numeric(format(x,"%Y"))+1,9,30)
}else{
return <- hold
}
}
getYear=function(x){
return <- as.numeric(format(x,"%Y"))
}
##-------------------------------------------------------------------
seasonalPatterns=function(x,grp_month,season){
result = matrix(NA,length(season),9)
for(i in 1:length(season)){
tmp = x[grp_month==season[i]]
if( length(tmp)>0 ){
result[i,]=c(
mean(tmp, na.rm=T),
quantile(tmp,probs=c(0.025),na.rm=T),
quantile(tmp,probs=c(0.975),na.rm=T),
max(tmp, na.rm=T),
min(tmp, na.rm=T),
quantile(tmp,probs=c(0.5),na.rm=T),
sd(tmp,na.rm=T),
sd(tmp,na.rm=T)/sum(!is.na(tmp)),
length(tmp)
)
}else{
result[i,]=rep(0,9)
}
}#i
colnames(result)=c("mean","q025","q975","max","min","med","sd","stderr","count")
return <- result
}
seasonalAccumPatterns=function(x,grp_month,grp_yyyy,season){
yy = unique(grp_yyyy)
hold = matrix(NA,length(yy),length(season) )
tmp = rep(NA,length(season))
for(i in 1:nrow(hold)){
cond = grp_yyyy==yy[i]
for(j in 1:length(season)){
tmp[j] = x[cond][grp_month[cond]==season[j]]
}#j
hold[i,] = accumulate(tmp)
}#i
return <- rowPattern(hold)
}
seasonalAccum=function(x,grp_month,grp_yyyy,season){
yy = unique(grp_yyyy)
hold = matrix(NA,length(yy),length(season) )
tmp = rep(NA,length(season))
for(i in 1:nrow(hold)){
cond = grp_yyyy==yy[i]
for(j in 1:length(season)){
tmp[j] = x[cond][grp_month[cond]==season[j]]
}#j
hold[i,] = accumulate(tmp)
}#i
return <- hold
}
dailyQQBiasCorrection = function(obs_,calibrate,obsDate, correcting, correctingDate,window=15){
#obs.dailytimeSeries = dailyTimeSeries(obsDate)
periodCalibration.doy = as.POSIXlt(obsDate)$yday+1 ##<<-------- julian day or DoY
period.doy = as.POSIXlt(correctingDate)$yday+1 #<<------- full series!!
newGCM = correcting #<<------- full series!!
for(i in 1:366){
cond = abs(periodCalibration.doy-i) <= window | abs(periodCalibration.doy-i-366) <= window
dayGroup = unique(periodCalibration.doy[cond]) #<<<------------------------- selecting days on multiple year; use unique to narrow exact day
specificDay = unique(period.doy[period.doy==i]) #<<------- full series!!
obs = obs_[periodCalibration.doy%in% dayGroup]#observed
gcm = calibrate[periodCalibration.doy%in% dayGroup]#model
gcmSP = correcting[period.doy%in% specificDay] #<<------- full series!!
# qqplot(gcm,obs)
gcmECDF = ecdf(gcm)
p = gcmECDF(gcmSP) #<<------- full series!!
correctTerm = rep(NA,length(p)) #<<------- full series!!
for(j in 1:length(p)){
correctTerm[j] = quantile(obs,p[j],na.rm=T)- gcmSP[j] #<<----
}#j
newGCM[period.doy==i] = newGCM[period.doy==i] + correctTerm #<<------- full series!!
}#i
return <- newGCM
}# function
monthlyQQBiasCorrection = function(obs_,calibrate,obsDate, correcting, correctingDate){
}#
dailyMovingMean = function(ts, ts_date){
ts_new = rep(NA,length(ts))
jday = as.POSIXlt(ts_date)$yday+1
yyyy = as.POSIXlt(ts_date)$year+1900
for(i in 1:length(ts_new)){
jtoday = jday[i]
ytoday = yyyy[i]
cond = (abs(jday-jtoday) <=15 | abs(jday-jtoday-366) <=15) & yyyy==ytoday
if(sum(cond)==0){
print(paste('problem at ', ts_date[i]))
ts_new[i] = ts[i]
}else if(sum(cond)==1){
print(paste('one at ', ts_date[i]))
ts_new[i] = ts[i]
}else{
ts_new[i] = mean(ts[cond])
}
}#i
return <- ts_new
}#function
seasonalMovingMean = function(ts, ts_date){
jday = as.POSIXlt(ts_date)$yday+1
yyyy = as.POSIXlt(ts_date)$year+1900
centralMonthJday = c(15,46,74,105,135,166,196,227,258,288,319,349) # assume no leap year from J to D
seasonal = matrix(NA,12,4)
for(i in 1:12){
jtoday = centralMonthJday[i]
cond = (abs(jday-jtoday) <=21 | abs(jday-jtoday-366) <=21)
if(sum(cond)==0){
print(paste('problem at ', ts_date[i]))
seasonal[i,] = c(i,NA,NA,NA,NA)
}else if(sum(cond)<3){
print(paste('less than 3 at ', ts_date[i]))
seasonal[i,] = c(i,mean(ts[cond],na.rm=T),mean(ts[cond],na.rm=T),mean(ts[cond],na.rm=T))
}else{
seasonal[i,] = c(
i,
mean(ts[cond],na.rm=T),
quantile(ts[cond], probs=c(0.025),na.rm=T),
quantile(ts[cond], probs=c(0.975),na.rm=T)
)
}
}#i
colnames(seasonal)=c('month','mean','q025','q975')
return <- seasonal
}#function
dailyTimeSeries = function(x){
key_month_len = matrix(c(31,29,31,30,31,30,31,31,30,31,30,31,31,28,31,30,31,30,31,31,30,31,30,31,31,28,31,30,31,30,31,31,30,31,30,31,31,28,31,30,31,30,31,31,30,31,30,31),nrow=4,ncol=12, byrow=T) #non leap year
# assume x = Date
firstDay = wateryearStartDate(x[1])
lastDay = wateryearEndDate(x[length(x)])
PerfectX = seq.Date(from=firstDay, to=lastDay ,by="day")
Perfectdd = as.numeric(format(PerfectX,"%d"))
Perfectmm = as.numeric(format(PerfectX,"%m"))
Perfectyyyy = as.numeric(format(PerfectX,"%Y"))
tmp=match2DailyTimeSeries(x, PerfectX)
xMatch=tmp$xSelect
PerfectXMatch=tmp$ySelect
Perfectgrp_wateryearYYYY = getYear(firstDay):(getYear(lastDay)-1)
Perfectgrp_wateryearLen = rep(365,length(Perfectgrp_wateryearYYYY)) + LIBbinaryRev((Perfectgrp_wateryearYYYY+1)%%4)
Perfectgrp_wateryear = LIBrep(1:length(Perfectgrp_wateryearLen), each= Perfectgrp_wateryearLen)
yearMatch = grpSums(PerfectXMatch,Perfectgrp_wateryear)
Perfectgrp_dayYYYY = Perfectyyyy
Perfectgrp_dayWY = Perfectgrp_wateryearYYYY[Perfectgrp_wateryear]
#ithdayisendWY = accumulate(table(Perfectgrp_dayWY)) #ith day is the end of WY <<--------- need imperfect
ithdayisendWY = accumulate(yearMatch[yearMatch>0])
ithdayisendWYlbl = Perfectgrp_dayWY[ithdayisendWY]
ithdayisbeginWY = c(1,ithdayisendWY[1:(length(ithdayisendWY)-1)]+1)
ithdayisbeginWYlbl= Perfectgrp_dayWY[ithdayisbeginWY]
ithdayisendYYYY = accumulate(table(Perfectgrp_dayYYYY)) #ith day is the end of WY
ithdayisendYYYYlbl = Perfectgrp_dayYYYY[ithdayisendYYYY]
ithdayisbeginYYYY = c(1, ithdayisendYYYY[1:(length(ithdayisendYYYY)-1)]+1)
ithdayisbeginYYYYlbl = Perfectgrp_dayYYYY[ithdayisbeginYYYY]
Perfectweekday = as.POSIXlt(PerfectX)$wday
sunStart = which(Perfectweekday ==0)
numWeek = length(sunStart);
if(sunStart[1]>1){numWeek= numWeek+1}
Perfectgrp_week = rep(numWeek,length(PerfectX))
for(i in length(sunStart):1){
numWeek = numWeek-1
if(sunStart[i]>1){Perfectgrp_week[1:(sunStart[i]-1)] = numWeek}
}#i
#Perfectgrp_weekLen = what=grpSums(rep(1,length(PerfectX)), Perfectgrp_week)
Perfectgrp_weekMM = c(Perfectmm[sunStart[1]-1],Perfectmm[sunStart])
Perfectgrp_weekYYYY = c(Perfectyyyy[sunStart[1]-1],Perfectyyyy[sunStart])
Perfectgrp_weekWY= Perfectgrp_wateryearYYYY[c(Perfectgrp_wateryear[sunStart[1]-1], Perfectgrp_wateryear[sunStart])]
weekMatch=grpSums(PerfectXMatch, Perfectgrp_week)
Perfectgrp_weekTH= LIBfirstEachGrpX(Perfectgrp_week,grpSums(rep(1,length(PerfectX)), Perfectgrp_week) )$lbl
ithweekisendWY = accumulate(table(Perfectgrp_weekWY)) #ith week is the end of WY
ithweekisendWYlbl = Perfectgrp_weekWY[ithweekisendWY]
ithweekisbeginWY = c(1,ithweekisendWY[1:(length(ithweekisendWY)-1)]+1)
ithweekisbeginWYlbl= Perfectgrp_weekWY[ithweekisbeginWY]
ithweekisendYYYY = accumulate(table(Perfectgrp_weekYYYY)) #ith week is the end of WY
ithweekisendYYYYlbl = Perfectgrp_weekYYYY[ithweekisendYYYY]
ithweekisbeginYYYY = c(1, ithweekisendYYYY[1:(length(ithweekisendYYYY)-1)]+1)
ithweekisbeginYYYYlbl = Perfectgrp_weekYYYY[ithweekisbeginYYYY]
firstStart = which(Perfectdd == 1)
Perfectgrp_month = rep(NA,length(PerfectX))
count=0
for(i in 2:length(firstStart)){
count=count+1
Perfectgrp_month[firstStart[i-1]:(firstStart[i]-1)] = count
}#i
count=count+1
Perfectgrp_month[firstStart[i]:length(Perfectgrp_month)]=count
Perfectgrp_monthMM = Perfectmm[firstStart]
Perfectgrp_monthYYYY = Perfectyyyy[firstStart]
Perfectgrp_monthWY = Perfectgrp_wateryearYYYY[Perfectgrp_wateryear[firstStart]]
monthMatch=grpSums(PerfectXMatch, Perfectgrp_month)
Perfectgrp_monthTH= LIBfirstEachGrpX(Perfectgrp_month,grpSums(rep(1,length(PerfectX)), Perfectgrp_month) )$lbl
ithmonthisendWY = accumulate(table(Perfectgrp_monthWY)) #ith month is the end of WY
ithmonthisendWYlbl = Perfectgrp_monthWY[ithmonthisendWY]
ithmonthisbeginWY = c(1, ithmonthisendWY[1:(length(ithmonthisendWY)-1)]+1)
ithmonthisbeginWYlbl = Perfectgrp_monthWY[ithmonthisbeginWY]
ithmonthisendYYYY = accumulate(table(Perfectgrp_monthYYYY)) #ith month is the end of WY
ithmonthisendYYYYlbl = Perfectgrp_monthYYYY[ithmonthisendYYYY]
ithmonthisbeginYYYY = c(1, ithmonthisendYYYY[1:(length(ithmonthisendYYYY)-1)]+1)
ithmonthisbeginYYYYlbl = Perfectgrp_monthYYYY[ithmonthisbeginYYYY]
## returned result is discontinuous because the original series is discontinuous
##
return <- list(
grp_wateryear= Perfectgrp_wateryear[PerfectXMatch], #data length
grp_wateryearDefaultLen = Perfectgrp_wateryearLen[yearMatch>0],
grp_wateryearLen = yearMatch[yearMatch>0],
grp_wateryearYYYY = Perfectgrp_wateryearYYYY[yearMatch>0],
ithdayisendWY = ithdayisendWY,
ithdayisendWYlbl= ithdayisendWYlbl,
ithdayisbeginWY = ithdayisbeginWY,
ithdayisbeginWYlbl = ithdayisbeginWYlbl,
ithdayisendYYYY = ithdayisendYYYY,
ithdayisendYYYYlbl= ithdayisendYYYYlbl,
ithdayisbeginYYYY = ithdayisbeginYYYY,
ithdayisbeginYYYYlbl = ithdayisbeginYYYYlbl,
weekday = Perfectweekday[PerfectXMatch],
grp_week = Perfectgrp_week[PerfectXMatch],
grp_weekLen = weekMatch[weekMatch>0],
grp_weekMM = Perfectgrp_weekMM[weekMatch>0],
grp_weekYYYY = Perfectgrp_weekYYYY[weekMatch>0],
grp_weekWY = Perfectgrp_weekWY[weekMatch>0],
grp_weekTH = Perfectgrp_weekTH[weekMatch>0], #showing these discontinuous weeks in the continuous time line
ithweekisendWY = ithweekisendWY,
ithweekisendWYlbl= ithweekisendWYlbl,
ithweekisbeginWY = ithweekisbeginWY,
ithweekisbeginWYlbl = ithweekisbeginWYlbl,
ithweekisendYYYY = ithweekisendYYYY,
ithweekisendYYYYlbl= ithweekisendYYYYlbl,
ithweekisbeginYYYY = ithweekisbeginYYYY,
ithweekisbeginYYYYlbl = ithweekisbeginYYYYlbl,
grp_month = Perfectgrp_month[PerfectXMatch],
grp_monthLen = monthMatch[monthMatch>0],
grp_monthDefaultLen = grpSums(rep(1,length(PerfectX)), Perfectgrp_month)[monthMatch>0],
grp_monthMM = Perfectgrp_monthMM[monthMatch>0],
grp_monthYYYY = Perfectgrp_monthYYYY[monthMatch>0],
grp_monthWY = Perfectgrp_monthWY[monthMatch>0],
grp_monthTH = Perfectgrp_monthTH[monthMatch>0],
ithmonthisendWY = ithmonthisendWY,
ithmonthisendWYlbl= ithmonthisendWYlbl,
ithmonthisbeginWY = ithmonthisbeginWY,
ithmonthisbeginWYlbl= ithmonthisbeginWYlbl,
ithmonthisendYYYY = ithmonthisendYYYY,
ithmonthisendYYYYlbl= ithmonthisendYYYYlbl,
ithmonthisbeginYYYY = ithmonthisbeginYYYY,
ithmonthisbeginYYYYlbl= ithmonthisbeginYYYYlbl
)
}
|
95b2d8d9ae41ead10c3ef41702e06a9974198788
|
770b14ae44e4991d444f0a0b1af124396bf2960f
|
/pkg/R/cases.R
|
beb0d0741b637a508e8a9b3339065e5253878f60
|
[] |
no_license
|
melff/memisc
|
db5e2d685e44f3e2f2fa3d50e0986c1131a1448c
|
b5b03f75e6fe311911a552041ff5c573bb3515df
|
refs/heads/master
| 2023-07-24T19:44:10.092063
| 2023-07-07T23:09:11
| 2023-07-07T23:09:11
| 29,761,100
| 40
| 10
| null | 2022-08-19T19:19:13
| 2015-01-24T01:21:55
|
R
|
UTF-8
|
R
| false
| false
| 5,624
|
r
|
cases.R
|
cases <- function(...,check.xor=c("warn","stop","ignore"),
.default=NA,.complete=FALSE,
check.na=c("warn","stop","ignore"),
na.rm=TRUE){
subst <- match.call(expand.dots=FALSE)$...
if(!missing(check.xor))
if(is.logical(check.xor))
check.xor <- ifelse(check.xor,"stop","ignore")
else
check.xor <- as.character(check.xor)
if(!missing(check.na))
if(is.logical(check.na))
check.na <- ifelse(check.na,"stop","ignore")
else
check.na <- as.character(check.na)
check.xor <- match.arg(check.xor)
check.na <- match.arg(check.na)
deflabels <- sapply(subst,deparse)
if(length(subst)<2) stop("need at least two conditions")
have.arrows <- sapply(subst,length) > 1
have.arrows[have.arrows] <- have.arrows[have.arrows] & sapply(sapply(subst[have.arrows],"[[",1),paste)=="<-"
parent <- parent.frame()
if(all(have.arrows)){
cond.names <- names(subst)
conditions <- lapply(subst,"[[",3)
values <- lapply(subst,"[[",2)
conditions <- do.call(cbind,lapply(conditions,eval,envir=parent))
if(ncol(conditions)!=length(subst)) stop("at least one condition results in NULL")
if(!is.logical(conditions)) stop("all conditions have to be logical")
if(any(is.na(conditions)) && check.na!="ignore") {
msg <- switch(check.na,warn=warning,stop=stop)
msg("At least one logical condition results in missing values")
}
na.cond <- is.na(conditions)
na.count <- rowSums(na.cond)
any.na <- na.count > 0
all.na <- na.count == ncol(conditions)
done <- rowSums(conditions, na.rm=TRUE)
if((any(done > 1) || any(done >= 1 & any.na)) && check.xor!="ignore") {
msg <- switch(check.xor,warn=warning,stop=stop)
msg("Conditions are not mutually exclusive")
}
never <- colSums(conditions, na.rm=TRUE) == 0
if(any(never) && check.xor!="ignore"){
neverlab <- deflabels[never]
if(length(neverlab)==1)
warning("condition ",neverlab," is never satisfied")
else
warning("conditions ",paste(neverlab,collapse=", ")," are never satisfied")
}
values <- lapply(values,eval,envir=parent.frame(),enclos=parent.frame())
nrow <- unique(sapply(values,length))
if(length(nrow) > 1 || nrow != nrow(conditions)){
nrow <- nrow(conditions)
values <- lapply(values,function(x){
tmp <- x
length(tmp) <- nrow
tmp[] <- x
tmp
})
}
values <- do.call(cbind,values)
na_ <- as.vector(NA,mode=storage.mode(values))
res <- vector(nrow(conditions),mode=storage.mode(values))
res[] <- na_
for(i in rev(1:ncol(conditions))){
cond.i <- which(conditions[,i])
res[cond.i] <- values[cond.i,i]
}
if(any(done == 0) && !is.na(.default)){
if(length(.default) > 1) warning("only first element of '.default' used")
if(length(.default) < 1) stop("'.default' must have non-zero length")
xvalue <- .default[1]
xvalue <- as.vector(xvalue,mode=storage.mode(values))
res[done == 0 &!any.na] <- xvalue
done <- done + 1
}
if(!na.rm){
res[any.na] <- na_
}
nNA <- sum(is.na(res))
if(nNA > 0) warning(nNA," NAs created")
if(length(cond.names) && all(nzchar(cond.names))){
uq.values <- drop(unique(values))
if(length(uq.values)==length(cond.names))
labels(res) <- structure(unique(uq.values),names=cond.names)
}
res
}
else if(!any(have.arrows))
{
conditions <- cbind(...)
if(ncol(conditions)!=length(subst)) stop("at least one condition results in NULL")
if(!is.logical(conditions)) stop("all conditions have to be logical")
if(any(is.na(conditions)) && check.na!="ignore") {
msg <- switch(check.na,warn=warning,stop=stop)
msg("At least one logical condition results in missing values")
}
na.cond <- is.na(conditions)
na.count <- rowSums(na.cond)
any.na <- na.count > 0
all.na <- na.count == ncol(conditions)
codes <- 1:ncol(conditions)
labels <- colnames(conditions)
if(length(labels))
labels <- ifelse(nzchar(labels),labels,deflabels)
else labels <- deflabels
done <- rowSums(conditions,na.rm=TRUE)
if((any(done > 1) || any(done >= 1 & any.na)) && check.xor!="ignore") {
msg <- switch(check.xor,warn=warning,stop=stop)
msg("conditions are not mutually exclusive")
}
never <- colSums(conditions, na.rm=TRUE) == 0
if(any(never) && check.xor!="ignore"){
neverlab <- deflabels[never]
if(length(neverlab)==1)
warning("condition ",neverlab," is never satisfied")
else
warning("conditions ",paste(neverlab,collapse=", ")," are never satisfied")
}
res <- integer(nrow(conditions))
res[] <- NA_integer_
for(i in rev(1:ncol(conditions))){
cond.i <- which(conditions[,i])
res[cond.i] <- i
}
if(any(done == 0 & !all.na) && .complete){
nms <- names(subst)
if(length(nms)){
elevel <- paste(nms,collapse="|")
}
else {
elevel <- sapply(subst,deparse)
elevel <- paste(elevel,collapse=" | ")
}
elevel <- paste0("!(",elevel,")")
xcode <- length(codes) + 1
res[done == 0 &!any.na] <- xcode
codes <- c(codes,xcode)
labels <- c(labels,elevel)
done <- done + 1
}
if(!na.rm)
res[any.na] <- NA_integer_
nNA <- sum(is.na(res))
if(nNA > 0) warning(nNA," NAs created")
factor(res,levels=codes,labels=labels)
}
else stop("inconsistent arguments to 'cases'")
}
|
fc98f37d11bcaf0c00f8116352645e89e90b60b9
|
a82978f0e25412ac1d60a340009cbf3b5ab0c998
|
/man/dpca.var.Rd
|
c6f87486be3969cea3e8a9322e1a579748f37b05
|
[] |
no_license
|
kidzik/freqdom
|
560d8b99f46bc7f35e987d9114e7dd3aabe4fa65
|
fcbfc6958cbb3e96fb02f63acb87dac56bad4dfc
|
refs/heads/master
| 2022-05-02T22:41:58.132430
| 2022-04-18T05:48:16
| 2022-04-18T05:48:16
| 73,332,290
| 2
| 4
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,691
|
rd
|
dpca.var.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dpca.var.R
\name{dpca.var}
\alias{dpca.var}
\title{Proportion of variance explained}
\usage{
dpca.var(F)
}
\arguments{
\item{F}{\eqn{(d\times d)} spectral density matrix, provided as an object of class \code{freqdom}. To guarantee accuracy of numerical integration it is important that \code{F}\eqn{\$}\code{freq} is a dense grid of frequencies in \eqn{[-\pi,\pi]}.}
}
\value{
A \eqn{d}-dimensional vector containing the \eqn{v_\ell}.
}
\description{
Computes the proportion of variance explained by a given dynamic principal component.
}
\details{
Consider a spectral density matrix \eqn{\mathcal{F}_\omega} and let \eqn{\lambda_\ell(\omega)} by the
\eqn{\ell}-th dynamic eigenvalue. The proportion of variance described by the \eqn{\ell}-th dynamic
principal component is given as
\deqn{v_\ell:=\int_{-\pi}^\pi \lambda_\ell(\omega)d\omega/\int_{-\pi}^\pi \mathrm{tr}(\mathcal{F}_\omega)d\omega.}
This function numerically computes the vectors \eqn{(v_\ell\colon 1\leq \ell\leq d)}.
For more details we refer to Chapter 9 in Brillinger (2001), Chapter 7.8 in Shumway and Stoffer (2006)
and to Hormann et al. (2015).
}
\references{
Hormann, S., Kidzinski, L., and Hallin, M.
\emph{Dynamic functional principal components.} Journal of the Royal
Statistical Society: Series B (Statistical Methodology) 77.2 (2015): 319-348.
Brillinger, D.
\emph{Time Series} (2001), SIAM, San Francisco.
Shumway, R.H., and Stoffer, D.S.
\emph{Time Series Analysis and Its Applications} (2006), Springer, New York.
}
\seealso{
\code{\link{dpca.filters}}, \code{\link{dpca.KLexpansion}}, \code{\link{dpca.scores}}
}
\keyword{DPCA}
|
b1e923d0cfd9ab86779f7b72abe81da5f027082c
|
0c1b5c68a28405e79eb010a76725652704ba7403
|
/plot1.R
|
7aae587f9423b5b812a12003069a048dad7237f4
|
[] |
no_license
|
ChadworthMagee/Exploratory_Data_Project_2
|
d38ba436fe19803097bbdf7ad190765c4c3756a8
|
950e76f179c680683ad231cbb1c464bc59fcae92
|
refs/heads/master
| 2021-01-17T22:51:41.096103
| 2014-06-22T23:18:47
| 2014-06-22T23:18:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 289
|
r
|
plot1.R
|
if(!exists("NEI")) source("load.R")
library('plyr')
png(file="plot1.png")
# Sum data by year
emissions <- ddply(NEI, .(year), summarize, total.emissions=sum(Emissions))
plot(emissions, main="Emissions from PM[2.5] in the US", xlab="Year", ylab="Total Emissions", col="red")
dev.off()
|
4a83d7cb7a41325eb1f5552e57dc477772eae30a
|
b0594404d169215bbdf80f50cd5337872b79c426
|
/tests/testthat/test_Map.R
|
ea1e758bd1d10909770e009df85b02b26ad2a123
|
[] |
no_license
|
BioinformaticsFMRP/rols
|
6e362d171b55d552d709ea40e776b0f55864681d
|
ff0149c545b58b3abed680821cc73c0a12e44002
|
refs/heads/master
| 2021-01-18T12:53:54.530986
| 2015-12-30T18:17:29
| 2015-12-30T18:17:29
| 53,099,450
| 1
| 0
| null | 2016-03-04T02:01:54
| 2016-03-04T02:01:54
| null |
UTF-8
|
R
| false
| false
| 1,021
|
r
|
test_Map.R
|
test_that("Map class", {
    ## allIds() yields either a Map object (simplify=FALSE) or a plain
    ## named character vector (simplify=TRUE); the two views must agree.
    map <- allIds("MS", simplify=FALSE)
    char <- allIds("MS", simplify=TRUE)
    ## Coercion to character via as() and as.character() must both match
    ## the simplified form.
    char2 <- as(map, "character")
    char3 <- as.character(map)
    expect_identical(char, char2)
    expect_identical(char, char3)
    ## key()/value() accessors correspond to the names/values of the
    ## character-vector form.
    k <- key(map)
    names(k) <- NULL
    expect_equal(k, names(char))
    v <- value(map)
    names(v) <- names(char)
    expect_equal(v, char)
    ## show -----------------------
    ## show() is called for its printing side effect and returns NULL,
    ## including for empty, one- and two-element Maps.
    expect_true(is.null(show(map)))
    expect_true(is.null(show(new("Map"))))
    expect_true(is.null(show(new("Map", .Data = map[1:2]))))
    expect_true(is.null(show(new("Map", .Data = map[1]))))
    ## ----------------------------
    ## Single-element extraction: character coercion of one map item is
    ## consistent between as() and as.character().
    xomapi <- map[[1]]
    x <- as(map[[1]], "character")
    y <- as.character(map[[1]])
    expect_identical(x, y)
})
|
3f3a96ba56cbf1f552c1abbe97ac1de2944bc180
|
89c706327fbac52418ccda18e44d1c98bd7759e7
|
/dashboard/AppCode/shiny/server/components/noveltyChapterDifference.R
|
8c523a631101fad342d1fda90de5c81ed63894d2
|
[] |
no_license
|
rickdott/Montecristo
|
3d313f434a2a0bec167e271a0d27b2e058125ae5
|
fa66a9d6b6d0555d1e882ebec4a7d79340e2ddf6
|
refs/heads/master
| 2023-03-29T23:51:16.121366
| 2021-04-03T08:22:07
| 2021-04-03T08:22:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,738
|
r
|
noveltyChapterDifference.R
|
## Renders a line plot of lexical "novelty" per chapter: the fraction of a
## chapter's 50 most frequent words that did not appear in the previous
## chapter's top-50 list. Data comes from the term-document matrices stored
## in the db.tdms Mongo collection for the currently selected book.
output[[id.visual.noveltyChapterDifference.output]] <- renderPlot({
  # Fetch the term-document matrix (rows = words, columns = chapters)
  # for the selected book title.
  chapters <- db.tdms$find(
    query = paste0('{"title": "', input[[id.general.book]], '" }'),
    fields = '{"data" : true}')$data[[1]]
  # Make sure there are only chapters included with more than 50 words, this is
  # done to make sure a 'chapter' like: "VOLUME IV:" is not included
  chapters <- chapters[, colSums(chapters) > 50]
  # For every column:
  # Find 50 most frequent words
  # Get difference compared to last, divide by 50
  diffDf <- data.frame(chapter = numeric(), diff = numeric())
  frequentWords <- NA
  for (i in 1:ncol(chapters)) {
    # Top-50 words of chapter i by frequency.
    frequentWords <- rownames(head(chapters[order(chapters[,i], decreasing = TRUE), i, drop = FALSE], 50))
    # Skip chapter 1 (nothing to compare against).
    # NOTE(review): the final chapter (i == ncol) is also excluded here,
    # which drops an otherwise computable difference — confirm intended.
    if(!i %in% c(1, ncol(chapters))) {
      difference <- length(setdiff(frequentWords, prevFrequentWords)) / 50
      diffDfRow <- data.frame(chapter = i, diff = difference)
      diffDf <- rbind(diffDf, diffDfRow)
    }
    prevFrequentWords <- frequentWords
  }
  # Difference is a fraction in [0, 1]; fixed y-axis keeps plots comparable.
  ggplot(diffDf, aes(x = chapter, y = diff)) +
    geom_line(colour = "#1f8897") +
    labs(x = "Chapter", y = "Difference to previous chapter") +
    ylim(0, 1) +
    theme(
      panel.background = element_rect(fill = "transparent"), # bg of the panel
      plot.background = element_rect(fill = "#D9D9D9", color = NA), # bg of the plot
      #panel.grid.major = element_blank(), # get rid of major grid
      #panel.grid.minor = element_blank(), # get rid of minor grid
      legend.background = element_rect(fill = "transparent"), # get rid of legend bg
      legend.box.background = element_rect(fill = "transparent"), # get rid of legend panel bg
      text = element_text(size = 16)
    )
}, bg = "transparent")
|
b755e08ad6a3e203fe0209cb1b509f8f6f7b4e05
|
c5de5d072f5099e7f13b94bf2c81975582788459
|
/R Extension/RMG/Utilities/Interfaces/DivideAndConquer/man/finalize.Rd
|
b62d6b48fa0e86af643e86c12c6254f79af4fd74
|
[] |
no_license
|
uhasan1/QLExtension-backup
|
e125ad6e3f20451dfa593284507c493a6fd66bb8
|
2bea9262841b07c2fb3c3495395e66e66a092035
|
refs/heads/master
| 2020-05-31T06:08:40.523979
| 2015-03-16T03:09:28
| 2015-03-16T03:09:28
| 190,136,053
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,303
|
rd
|
finalize.Rd
|
\name{finalize}
\alias{finalize}
\title{Apply a function to a bunch of files and return the results.}
\description{
Finalize operates a function on a bunch of files and returns the
results back into memory. It can be used to aggregate many smaller
files into bigger ones, or it can be used to subset data from each
input file.
Finalize is different from conquer in that it brings the results in
memory and that it operates directly on files. Each file is supposed
to fit into memory, as well as the aggregated results. It should be
seen as the last step of a sequence of conquer commands.
}
\usage{
finalize(files, FUN, agg="rbind", ...)
}
\arguments{
\item{files}{A vector of pathnames pointing to files on the
disk.}
\item{FUN}{The function that needs to be applied to each file. It
needs a named argument file=, that will be used as the file name
from the list keys.}
\item{agg}{String with the function name that should be applied to
each function output.}
\item{...}{Other arguments passed to FUN.}
}
\value{}
\author{Adrian Dragulescu}
\note{}
\seealso{}
\examples{
cFun <- function(file=file, id){
load(file) # file contains variable res
subset(res, ID == id)
}
files = c("file_1.RData", "file_2.RData")
  res <- finalize(files, cFun, id=2)
}
|
9ad7ef3139444ba124054eb42e28b10d5e3e80cb
|
13bb1694b07014883228eaadfe29ca3774133006
|
/20-cellranger-edger-markers-for-gsea.R
|
ef991883c27b295610d2ca0dc6bdcc9032788a82
|
[] |
no_license
|
DevkotaLab/ha-et-al-2020-cell
|
0b8278ff040bf98ca8961ef73f5d9c752ba0013e
|
f5c5a724d5a89058b5d2b9cfac312222f15009c4
|
refs/heads/master
| 2022-12-08T15:52:27.632998
| 2020-09-14T19:35:08
| 2020-09-14T19:35:08
| 241,246,468
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 761
|
r
|
20-cellranger-edger-markers-for-gsea.R
|
## Calculate GSEA markers per cluster with edgeR for Cell Ranger all samples.
## Updated 2020-05-20.
## For each cluster at the chosen resolution, runs an edgeR marker test and
## saves the result as "cellranger_edger_cluster_<cluster>_markers".
source("_setup.R")
## Seurat object produced by an earlier step of the pipeline.
cellranger <- readRDS(file.path(
    "rds",
    "2020-05-18",
    "cellranger_all_samples_seurat.rds"
))
## Set the resolution.
## resolution.txt holds the clustering resolution identifier, one per line.
resolution <- import("resolution.txt", format = "lines")
Idents(cellranger) <- resolution
clusters <- levels(clusters(cellranger))
## Side effects only (assignAndSaveData writes each result to disk);
## the lapply return value is not used.
lapply(
    X = clusters,
    FUN = function(cluster) {
        message(paste("Cluster", cluster))
        out <- findMarkers(
            object = cellranger,
            clusters = cluster,
            caller = "edgeR"
        )
        assignAndSaveData(
            name = paste0("cellranger_edger_cluster_", cluster, "_markers"),
            object = out
        )
    }
)
|
2012cca0b2cd5738e5781b51578a6babcc149973
|
33e7d36bac9b7db5c6da8396e7d0b5f0033593b7
|
/figure/Plot4.R
|
429bcdce5ec04445c78c35c6053a3536eaa5060b
|
[] |
no_license
|
18912810609/ExData_Plotting1
|
19c998147ff75eb2e41954619466eaf64c5aa85a
|
f540170a322ef6b89fded4e85c696d3bb1fea746
|
refs/heads/master
| 2022-11-10T06:51:22.456583
| 2020-06-16T18:57:31
| 2020-06-16T18:57:31
| 272,779,701
| 0
| 0
| null | 2020-06-16T18:10:57
| 2020-06-16T18:10:56
| null |
UTF-8
|
R
| false
| false
| 1,141
|
r
|
Plot4.R
|
## Plot4.R: 2x2 panel of household power consumption plots for 2007-02-01/02.
## Requires "household_power_consumption.txt" (UCI dataset; '?' marks NA)
## in the working directory.
power<- data.table::fread(input = "household_power_consumption.txt",na.strings="?")
power$Date<-as.Date(power$Date,"%d/%m/%Y")
## Keep only the two days of interest.
power4<- power[(power$Date>= "2007-02-01") & (power$Date<= "2007-02-02")]
## Combine Date and Time into a single POSIXct timestamp for the x-axes.
power4$DateTime<-paste(power4$Date,power4$Time)
power4$DateTime<-as.POSIXct(power4$DateTime,"%d/%m/%Y %H:%M:%S")
png("plot4.png", width=480, height=480)
par(mfrow=c(2,2))
## Top-left: histogram of global active power.
hist(power4[, Global_active_power], main="Global Active Power", xlab="Global Active Power (kilowatts)", ylab="Frequency", col="Red")
## Top-right: global active power over time.
plot(x = power4[, DateTime], y = power4[, Global_active_power], type="l", xlab="", ylab="Global Active Power (kilowatts)")
## Bottom-left: the three sub-metering series with a legend.
plot(power4[, DateTime], power4[, Sub_metering_1], type="l", xlab="", ylab="Energy sub metering")
lines(power4[, DateTime], power4[, Sub_metering_2],col="red")
lines(power4[, DateTime], power4[, Sub_metering_3],col="blue")
legend("topright", col=c("black","red","blue"),
       c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 ")
       ,lty=c(1,1), lwd=c(1,1))
## Bottom-right: global reactive power over time.
plot(power4[, DateTime], power4[,Global_reactive_power], type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
a54c317cac29b111cbf368c4bb02956aa7b9c62a
|
5c714607243a555a69eed41aba0b53f8747cd495
|
/papillary/feature_sel/fcorr.R
|
d1124a33b4089cb7bcb1130f6ada275ffaa5bb26
|
[] |
no_license
|
xulijunji/Stage-Prediction-of-Cancer
|
5a7912d66f92e73deacedd12446490b79c78cea8
|
4924468fa35a504c991fdd0051174d0489d3ff21
|
refs/heads/master
| 2021-05-06T06:45:57.242407
| 2017-12-11T09:26:20
| 2017-12-11T09:26:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,182
|
r
|
fcorr.R
|
## fcorr.R: exploratory comparison of filter-based feature (gene) selection
## methods (fast correlation filter, forward correlation, relief, wrapper,
## CFS) for tumor stage prediction, evaluated with kNN, random forest and
## SVM. Script-style analysis; inputs are pre-computed .RData workspaces.
library(Biocomb)
library(class)
library(randomForest)
library(e1071)
library(DESeq2)
source('stability_gene_selection/helper_stable.R')
source('../function.R')
load('environment/stages_levels.RData')
load('environment/diff_genes.RData')
load('environment/stage.index.RData')
load('environment/stages.level.comb.RData')
load('environment/dds_tumor_reported_normal_stage.RData')
load('environment/sample_info_tumor_rep_normal.RData')
load('environment/vs_normal_tumor_repored.RData')
## NOTE(review): this path looks malformed ('...RData_based.RData');
## verify against the files actually present in environment/.
load('environment/filter_based_genes.RData_based.RData')
## Variance-stabilize the counts, then keep the 10000 most varying genes.
vs.normal.tumor.reported <-
  varianceStabilizingTransformation(dds_tumor_reported_normal_stage)
genes.most.varying <- rownames(vs.normal.tumor.reported)[get.imp.genes(3,
                                                        assay(vs.normal.tumor.reported), 10000)]
## Sanity check (interactive): metadata rows align with expression columns.
sum(rownames(sample.info.all.rep) == colnames(vs.normal.tumor.reported)) == length(rownames(sample.info.all.rep))
## Drop normal samples; transpose to samples x genes for the selectors.
indexes.to.remove <- which(sample.info.all.rep$type == 'N')
vs.normal.tumor.reported.varying <- t(assay(vs.normal.tumor.reported)[genes.most.varying,
                                                                      -indexes.to.remove])
sum(rownames(vs.normal.tumor.reported.varying) == sample.info.all.rep$sample.names[sample.info.all.rep$type == 'T'])
## Filter-based selectors from Biocomb on the varying-gene matrix; the last
## column of each cbind() is the class label vector.
filter.based.genes = list()
filter.based.genes[['fast_filter']] <- select.fast.filter(cbind(vs.normal.tumor.reported.varying, stages.levels),
                                                          disc.method = 'MDL', threshold = 0.05)
filter.based.genes[['forw.Corr']] <- select.forward.Corr(cbind(vs.normal.tumor.reported.varying,
                                                               stages.levels), disc.method = 'MDL')
filter.based.genes[['relief']] <- select.relief(as.data.frame(cbind(vs.normal.tumor.reported.varying,
                                                                    stages.levels)))
filter.based.genes[['forw_Wrap']] <- select.forward.wrapper(as.data.frame(cbind(vs.normal.tumor.reported.varying,
                                                                                stages.levels)))
## FPQM-based data; req.dfs and diff.genes come from the loaded workspaces.
data.fpqm.filter = req.dfs$fpqm
data.fpqm.filter.diff1 <- data.fpqm.filter[,diff.genes[[1]]]
data.fpqm.filter$class = stages.levels
typeof(as.data.frame(data.fpqm.filter))
## NOTE(review): `data.fpqm.filter.diff` (no trailing 1) is used below but
## only `data.fpqm.filter.diff1` is created here; presumably it exists in a
## saved workspace — confirm.
filter.based.genes[['cfs']] = select.cfs(data.fpqm.filter.diff)
filter.based.genes[['ffilter']] = select.fast.filter(data.fpqm.filter, 'MDL', 0.1,numeric())
## The 'ffilter' entry above is overwritten with a stricter threshold here.
filter.based.genes[['ffilter']] = select.fast.filter(data.fpqm.filter.diff, 'MDL', 0.05,numeric())
filter.based.genes[['fcor']] = select.forward.Corr(data.fpqm.filter.diff1, 'MDL', numeric())
save(filter.based.genes, file = 'environment/filter_based_genes.RData')
load('environment/filter_based_genes.RData')
intersect(filter.based.genes$cfs$Biomarker, filter.based.genes$ffilter$Biomarker)
## Evaluation: leave-one-out kNN on the selected gene sets.
pred.knn.fpqm.fastcorr <- knn.cv(data.fpqm.filter[,filter.based.genes$ffilter$Biomarker[1:76]], cl = stages.levels, k = 3)
create.mat.error(table(stages.levels, pred.knn.fpqm.fastcorr))
pred.knn.fpqm.cfs <- knn.cv(data.fpqm.filter[,filter.based.genes$cfs$Biomarker], cl = stages.levels, k = 5)
create.mat.error(table(stages.levels, pred.knn.fpqm.cfs))
## Stratified random forests (per-stage sample sizes balance the classes).
rf.fpqm.fastcorr <- randomForest(x = data.fpqm.filter[,filter.based.genes$ffilter$Biomarker],
                                 y = stages.levels, strata = stages.levels, sampsize = c(20,15,10,15))
rf.fpqm.fastcorr$confusion
rf.fpqm.cfs <- randomForest(x = data.fpqm.filter[,filter.based.genes$cfs$Biomarker],
                            y = stages.levels, strata = stages.levels, sampsize = c(20,15,10,15))
rf.fpqm.cfs$confusion
## Leave-one-out SVM, with and without class weights.
svm.fastcorr <- cv.svm.leave.one.out(data.fpqm.filter[,filter.based.genes$ffilter$Biomarker], stages.levels,
                                     cost = 1, class.weights = c('stage i' = 1, 'stage ii' = 5, 'stage iii' = 3, 'stage iv' = 7))
svm.cfs <- cv.svm.leave.one.out(data.fpqm.filter[,filter.based.genes$cfs$Biomarker], stages.levels)
svm.cfs
svm.fastcorr <- cv.svm.leave.one.out(data.fpqm.filter[,filter.based.genes$ffilter$Biomarker], stages.levels)
svm.fastcorr
|
e4dc136076fc92bc46fea53c44bfa125ad5adbc3
|
d67abff5ef203490361b42f8fdebbb6c44738964
|
/trigger-model-development/drought/skill-assessment/DMP-VCI-Analysis/dmp_vci_comparison/extract_dmp.R
|
b0f715351b56cf82092fe2171e8c41d738f54cd6
|
[] |
no_license
|
lvanbrussel/IBF-system
|
da63cfed5416f07e10effe9314046802a6bd6746
|
3cc18825765ba81c92260d72b6f8c5cf9244ff2c
|
refs/heads/master
| 2023-03-17T00:52:38.488305
| 2021-03-02T16:23:07
| 2021-03-02T16:23:07
| 282,157,570
| 0
| 0
| null | 2020-07-24T07:50:49
| 2020-07-24T07:50:48
| null |
UTF-8
|
R
| false
| false
| 1,490
|
r
|
extract_dmp.R
|
## extract_dmp.R: zonal mean of Copernicus Dry Matter Productivity (DMP)
## rasters per Kenyan admin-1 region, one CSV per dekadal raster plus a
## combined all_dmp.csv.
library(raster)
library(sf)
library(tidyverse)
library(velox)
library(stringr)
library(lubridate)
# install.packages("velox_0.2.0.tar.gz", repos = NULL, type="source")
# Because velox$extract() does not take extra arguments we have to write a custom mean no na function
mean_no_na <- function(x) {
  mean(x, na.rm=T)
}
kenya_admin1 <- st_read("admin_shapes/KEN_adm1_mapshaper_corrected.shp")
## Each subfolder of all_dmp/ holds one raster; "" (the root) is excluded.
raster_folders <- setdiff(list.dirs("all_dmp/", full.names=F), "")
dmp_dfs <- list()
for (raster_folder in raster_folders) {
  print(paste0("Calculating raster ", raster_folder, " time ", Sys.time()))
  ## Real-time ("_RT") products embed the RT tag in both folder and file
  ## name; reconstruct the file path accordingly.
  if (length(grep("_RT", raster_folder)) != 0) {
    rt_type <- str_sub(raster_folder, 5, 7)
    filename <- paste0("all_dmp/", raster_folder, "/c_gls_DMP-", rt_type,"_QL_", str_replace(raster_folder, paste0("DMP_", rt_type, "_"), ""), ".tiff")
  } else {
    filename <- paste0("all_dmp/", raster_folder, "/c_gls_DMP_QL_", str_replace(raster_folder, "DMP_", ""), ".tiff")
  }
  dmp_raster <- raster(filename)
  ## velox provides a fast zonal extract over the admin polygons.
  dmp_velox <- velox(dmp_raster)
  dmp_values <- dmp_velox$extract(kenya_admin1, fun=mean_no_na)
  ## Characters 5-12 of the folder name are taken as the YYYYMMDD date.
  ## NOTE(review): for _RT folders these characters overlap the RT tag —
  ## verify the folder naming convention.
  raster_date <- str_sub(raster_folder, 5, 12)
  dmp_df <- tibble(
    pcode = kenya_admin1$pcode_le_1,
    date = as_date(raster_date),
    dmp = dmp_values
  )
  write.csv(dmp_df, paste0("results/", raster_date, ".csv"), row.names=F)
  dmp_dfs[[raster_folder]] <- dmp_df
}
# Combining files
all_dmps <- bind_rows(dmp_dfs)
write.csv(all_dmps, "results/all_dmp.csv", row.names=F)
|
d56ac323c07c4060e5823da8dfdfbc2d27929539
|
83dcb30cb3fde868a5a7be13db3a2cd2dcc9d1f4
|
/R/SES_life.R
|
de5d8a0ef1862e80a620a70ad388f0ea60df062e
|
[] |
no_license
|
markocherrie/LifecourseEpi
|
50f72006206ba8c1881354917682230860b9d7b3
|
aa835bd8946a5000404c1e7eea7859c117c0552e
|
refs/heads/master
| 2021-01-22T03:54:07.923354
| 2017-02-09T17:10:06
| 2017-02-09T17:10:06
| 81,472,911
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,729
|
r
|
SES_life.R
|
#' Historical Area-level Socioeconomic Deprivation for Edinburgh
#'
#' Derives an estimate of area-level socioeconomic deprivation for any Edinburgh location from 1926-2015.
#' @param Lat has to be within Edinburgh boundary
#' @param Long has to be within Edinburgh boundary
#' @param Id participant identifier; returned unchanged in the output data frame
#' @param Year has to be between 1926-2015
#' @param Polygon is the deprivation index shapefile downloaded from https://markcherrie.shinyapps.io/MMPgeodata/
#' @return A data frame with the Id, Year and the deprivation value for the matching decade (NA if Year is outside 1926-2015).
#' @keywords life course, Edinburgh, historical index of deprivation
#' @export
#' @examples
#' SES_life(55.9533, -3.1883, "ME1", 1948, Polygon)
#'
# Polygon WILL BE SUPPLIED ON THE GEOPORTAL
# Function uses Latitude and Longitude of participant's address, the LBC code, and the Year they were there
SES_life <- function(Lat, Long, Id, Year, Polygon){
  # Make the point a spatial object in WGS84 longitude/latitude.
  # BUG FIX: PROJ.4 keys are case-sensitive; the original '+proj=LongLat' and
  # '+Lat_0=49' (apparently from a "lat" -> "Lat" find/replace) are rejected
  # by CRS(). They must be lower case: '+proj=longlat', '+lat_0='.
  coords <- cbind(Long, Lat)
  sp <- SpatialPoints(coords)
  proj4string(sp) <- CRS('+proj=longlat +datum=WGS84')
  # Reproject to the British National Grid used by the deprivation layers.
  Edinburgh_bng <- spTransform(sp, CRS('+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.9996012717 +x_0=400000 +y_0=-100000 +ellps=airy +datum=OSGB36 +units=m +no_defs'))
  # Get SES per decade: 1926-1975 read columns 41-45 of the supplied Polygon
  # layer; 1976-2015 read census boundary layers (Boundary1981/1991/2001/2011),
  # which are expected to exist in the calling environment.
  if (Year >= 1926 && Year <= 1935) {
    Polygon <- spTransform(Polygon, proj4string(Edinburgh_bng))
    SES <- over(Edinburgh_bng, Polygon[41], fn = NULL)
  } else if (Year >= 1936 && Year <= 1945) {
    Polygon <- spTransform(Polygon, proj4string(Edinburgh_bng))
    SES <- over(Edinburgh_bng, Polygon[42], fn = NULL)
  } else if (Year >= 1946 && Year <= 1955) {
    Polygon <- spTransform(Polygon, proj4string(Edinburgh_bng))
    SES <- over(Edinburgh_bng, Polygon[43], fn = NULL)
  } else if (Year >= 1956 && Year <= 1965) {
    Polygon <- spTransform(Polygon, proj4string(Edinburgh_bng))
    SES <- over(Edinburgh_bng, Polygon[44], fn = NULL)
  } else if (Year >= 1966 && Year <= 1975) {
    Polygon <- spTransform(Polygon, proj4string(Edinburgh_bng))
    SES <- over(Edinburgh_bng, Polygon[45], fn = NULL)
  } else if (Year >= 1976 && Year <= 1985) {
    Boundary1981 <- spTransform(Boundary1981, proj4string(Edinburgh_bng))
    SES <- over(Edinburgh_bng, Boundary1981[3], fn = NULL)
  } else if (Year >= 1986 && Year <= 1995) {
    Boundary1991 <- spTransform(Boundary1991, proj4string(Edinburgh_bng))
    SES <- over(Edinburgh_bng, Boundary1991[4], fn = NULL)
  } else if (Year >= 1996 && Year <= 2005) {
    Boundary2001 <- spTransform(Boundary2001, proj4string(Edinburgh_bng))
    SES <- over(Edinburgh_bng, Boundary2001[12], fn = NULL)
  } else if (Year >= 2006 && Year <= 2015) {
    Boundary2011 <- spTransform(Boundary2011, proj4string(Edinburgh_bng))
    SES <- over(Edinburgh_bng, Boundary2011[20], fn = NULL)
  } else {
    # Year outside the 1926-2015 coverage: no estimate available.
    SES <- NA
  }
  # Data output and index creation
  SESexposure <- data.frame(Id, Year, SES)
  return(SESexposure)
}
|
054356d49130aff40acaad2cf583d15e2f1302e7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PASWR2/examples/RAT.Rd.R
|
998db93cb257da3e9358a36159ab3de57d1ee12c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 305
|
r
|
RAT.Rd.R
|
## Extracted example code from the RAT dataset help page of the PASWR2
## package. NOTE(review): assumes ggplot2 is attached via PASWR2 — confirm.
library(PASWR2)
### Name: RAT
### Title: Rat Survival Time
### Aliases: RAT
### Keywords: datasets
### ** Examples
## Normal quantile-quantile plot of the survival times.
ggplot(data = RAT, aes(sample = survival.time)) + stat_qq()
## Kernel density estimate of the survival times.
ggplot(data = RAT, aes(x = survival.time)) + geom_density(alpha = 0.2, fill = "blue") +
  labs(x = "Survival time in weeks")
|
b5982f3ae9f64034f3a43a95f5b9a1fce088f17a
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/rstiefel/R/rbmf.vector.gibbs.R
|
74f0305398aec73ef960894fa00c885efc88b8f4
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 286
|
r
|
rbmf.vector.gibbs.R
|
rbmf.vector.gibbs <- function(A, c, x) {
  # One Gibbs update for the vector Bingham-von Mises-Fisher distribution,
  # as described in Hoff (2009). Must be called iteratively: each call maps
  # the current state x to the next draw on the unit sphere.
  # Work in the eigenbasis of A: rotate x and c, sample via ry_bmf, rotate
  # back, and renormalize to unit length.
  decomp <- eigen(A)
  basis <- decomp$vectors
  eigvals <- decomp$values
  y.rot <- crossprod(basis, x)   # == t(basis) %*% x
  d.rot <- crossprod(basis, c)   # == t(basis) %*% c
  draw <- basis %*% ry_bmf(y.rot, eigvals, d.rot)
  draw / sqrt(sum(draw^2))
}
|
b09c87170ae4c2f99ac26b489a1447da3c7a9e23
|
68ab7ff66e331acb0a0721a37c0fa9da5b9b1e57
|
/inst/Filtering_snow_height.R
|
0dd5bb5e4ba882dd156b9bc2cdd78157576c226c
|
[] |
no_license
|
EURAC-Ecohydro/SnowSeasonAnalysis
|
240bbd9c3616d4e959b51d88c5672c6ebc4717cd
|
2a08475c2d79c183bcf76a13531dcd14ce2c4152
|
refs/heads/master
| 2021-01-02T08:16:28.539577
| 2020-11-30T22:39:07
| 2020-11-30T22:39:07
| 98,980,841
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,301
|
r
|
Filtering_snow_height.R
|
#-------------------------------------------------------------------------------------------------------------------------------------------------------
# File Title: Filtering_snow_height.R
# TITLE: Analyze and filter snow height signal
# Autor: Christian Brida
#         Institute for Alpine Environment
# Data: 11/04/2017
# Version: 1.0
#
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Station timestamps are UTC+1 without daylight saving; pin the session TZ.
Sys.setenv(TZ='Etc/GMT-1')
# Load the required packages, installing them on first use.
# BUG FIX: install.packages() takes the package name as a character string;
# the original passed the bare symbols `zoo` / `signal`, which raises
# "object not found" exactly when the package is actually missing.
if(!require("zoo")){
  install.packages("zoo")
  require("zoo")
}
if(!require("signal")){
  install.packages("signal")
  require("signal")
}
# ~~~~~~ Section 1 ~~~~~~
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Define your Git folder:
#------------------------------------------------------------------------------------------------------------------------------------------------------
# ====== INPUT 1 ======
# Default: the current working directory (assumed to be the repository root).
git_folder=getwd()
#git_folder="C:/Users/CBrida/Desktop/Git/EURAC-Ecohydro/SnowSeasonAnalysis/"
# =====================
#------------------------------------------------------------------------------------------------------------------------------------------------------
# Show data available
#------------------------------------------------------------------------------------------------------------------------------------------------------
path <- paste(git_folder,"/data/Input_data/",sep = "")
files_available=dir(path)
print(paste("Example data:",files_available))
# ====== INPUT 2-3 ======
# 1. file with snow height time series
file="B3_2000m_TOTAL.csv"
# label of snow height column
SNOW_HEIGHT = "Snow_Height"
# List here also the support files
# 2. file with max and min range for snow height
# Set up parameters in table: "H:/Projekte/Criomon/06_Workspace/BrC/Cryomon/03_R_Script/05_snow_filter/function/Support files/Range_min_max.csv"
Range_min_max <- paste(git_folder,"/data/Support_files/Range_min_max.csv",sep = "")
# 3. file with Values that have high increment or high decrement are considered outliers and substitute with 'NA'
# Set up parameters in table: "H:/Projekte/Criomon/06_Workspace/BrC/Cryomon/03_R_Script/05_snow_filter/function/Support_files/Rate_min_max.csv"
Rate_min_max <- paste(git_folder,"/data/Support_files/Rate_min_max.csv",sep = "")
# 4. file with snow depth observations used for calibration of snow height sensor
# it is supposed to end with the same name of the input meteo file: Snow_Depth_Calibration_B3_2000m_TOTAL.csv
folder_surveys=paste(git_folder,"/data/Snow_Depth_Calibration/Snow_Depth_Calibration_",sep = "")
# =======================
# ====== METHOD ======
# Select one of smoothing method
SMOOTH_METHOD = "Savitzky_Golay"
# Options (copy one of this string in SMOOTH_METHOD)
# 1. "Moving_Average"
# 2. "Savitzky_Golay"
# ~~~~~~ Section 2 ~~~~~~
#- READ DATA --------------------------------
#
# 1.Read data from folder. 2.Convert values from character to numeric. 3.Return a zoo time series of numeric data
# Import functions to read data
# source("H:/Projekte/Criomon/06_Workspace/BrC/Cryomon/03_R_Script/05_snow_filter/function/fun_read_data_metadata.R")
source(paste(git_folder,"/R/fhs_read_data_metadata.R",sep = ""))
# Import data and metadata using functions loaded before
zoo_data=fun_read_data(PATH = path,FILE = file)
# Extract the snow height series (zoo object) from the full data matrix.
snow = zoo_data[,which(colnames(zoo_data)==SNOW_HEIGHT)]
#-------------------------------------------
# ~~~~~~ Section 3 ~~~~~~
#- CALIBRATION -----------------------------
# Import functions to calibrate HS
source(paste(git_folder,"/R/fhs_calibration_HS_2.R",sep = ""))
# snow_elab = data_no_outliers[,which(colnames(zoo_data)==SNOW_HEIGHT)]
# Calibration of HS using real and virtual snow surveys (we assume that at the end of season the snow height is 0 cm)
data_calibr=fun_calibration_HS_2(DATA = snow,FILE_NAME = file,PATH_SURVEYS = folder_surveys)
# Gaps are filled with constant value (the last before gap)
# data_calibr=na.locf(data_calibr,na.rm=F)
data_calibr=na.fill(object = data_calibr,fill = "extend")
# Merge an index series ("zero") so downstream functions receive a
# two-column zoo object with the snow height under the SNOW_HEIGHT label.
zero=zoo(seq(1,length(data_calibr),by=1),order.by = index(data_calibr))
zoo_calibr=merge(data_calibr,zero)
colnames(zoo_calibr)=c(SNOW_HEIGHT, "zero")
#-------------------------------------------
# ~~~~~~ Section 4 ~~~~~~
#- EXCLUDE DATA OUT OF RANGE ---------------
#
# 1.Extract selected variable from input data.
# 2.Values out of physical range are considered outliers and substitute with 'NA'
# Import function to delete outliers (Range)
source(paste(git_folder,"/R/fhs_range.R",sep = ""))
# Exclude HS data out of range min/max set. Units: m
data_in_range=fun_range(DATA = zoo_calibr,VARIABLE = SNOW_HEIGHT, RANGE = Range_min_max)
# Gaps are filled with constant value (the last before gap)
# data_in_range=na.locf(data_in_range,na.rm=F)
data_in_range=na.fill(object = data_in_range,fill = "extend")
#-------------------------------------------
#- EXCLUDE DATA WITH RAPID INCREASE/DECREASE ------
# 1.Extract selected variable from input data.
# 2.Values that have high increment or high decrement are considered outliers and substitute with 'NA'
# Import function to delete outliers (Rate)
source(paste(git_folder,"/R/fhs_rate.R",sep = ""))
# Exclude HS data with high increase and high decrease (Comai thesis). Units: m/h
data_no_outliers=fun_rate(DATA = data_in_range,VARIABLE = SNOW_HEIGHT, RATE = Rate_min_max)
# Gaps are filled with constant value (the last before gap)
# data_no_outliers=na.locf(data_no_outliers,na.rm=F)
data_no_outliers=na.fill(object = data_no_outliers,fill = "extend")
#-------------------------------------------
# ~~~~~~ Section 5 ~~~~~~
# ==== OPTION 1: MOVING AVERAGE FILTER ====
if(SMOOTH_METHOD == "Moving_Average"){
  #- MOVING AVERAGE ---------------------------
  # 1.Run a moving average on data selected with a window length set up as function argument
  # Important: time series should not contain NA values
  # Import function for a moving average
  source(paste(git_folder,"/R/fhs_moving_average.R",sep = ""))
  # Apply a moving average with a window length of 5 (Mair et.al.). Units: h
  data_ma=fun_moving_average(DATA = data_calibr, PERIOD_LENGTH = 5)
  # Gaps are filled with constant value (the last before gap)
  # data_ma=na.locf(data_ma,na.rm=F)
  data_ma=na.fill(object = data_ma,fill = "extend")
  data_smooth=data_ma # <- OPTION 1
}
#-------------------------------------------
# ==== OPTION 2: SAVITKY-GOLAY FILTER ====
if(SMOOTH_METHOD == "Savitzky_Golay" ){
  #- SAVITKY-GOLAY FILTER ---------------------------
  # Apply a savitzky golay filter (better compared with Moving average) to reduce signal noise.
  # Suggest to set FILTER_ORDER = 1 and FILTER_LENGTH = 9
  # Help: sgolay (signal) on https://cran.r-project.org/web/packages/signal/signal.pdf
  # Important: time series should not contain NA values
  # Import function for savitzky golay filter
  source(paste(git_folder,"/R/fhs_savitzky_golay_filter.R",sep = ""))
  # Apply a savitzky golay filter with FILTER_ORDER = 1 and FILTER_LENGTH = 9. Units: h
  data_filt=fun_savitzky_golay(DATA = data_calibr, FILTER_ORDER = 1,FILTER_LENGTH = 9)
  # Gaps are filled with constant value (the last before gap)
  # data_filt=na.locf(data_filt,na.rm=F)
  data_filt=na.fill(object = data_filt,fill = "extend")
  data_smooth=data_filt # <- OPTION 2
}
#-------------------------------------------
# Guard against an invalid SMOOTH_METHOD; otherwise run the final rate QC
# on the smoothed series and save all processing stages.
if(SMOOTH_METHOD != "Moving_Average" & SMOOTH_METHOD != "Savitzky_Golay"){
  stop(paste("SMOOTH_METHOD:",SMOOTH_METHOD,"incorrect! Please select one of the options! The selction must be under quotation marks" ))
}else{
  #- OUTLIERS ON FILTERED DATA -------------------------
  # Exclude smoothed HS data with high increase and high decrease (Comai thesis). Units: m/h
  data_smooth_no_outliers=fun_rate(DATA = data_smooth,VARIABLE = SNOW_HEIGHT, RATE = Rate_min_max) # <--DATA could be data_ma
  # Gaps are filled with constant value (the last before gap)
  # data_smooth_no_outliers=na.locf(data_smooth_no_outliers,na.rm=F)
  data_smooth_no_outliers=na.fill(object = data_smooth_no_outliers,fill = "extend")
  #-------------------------------------------
  # ~~~~~~ Section 6 ~~~~~~
  #- SAVE DATA -----------------------------
  # Assemble all processing stages side by side (raw, range QC, calibrated,
  # rate QC, smoothed, smoothed+rate QC).
  # NOTE(review): the names below label element 2 (data_in_range) as
  # "HS_calibrated" and element 3 (data_calibr) as "HS_range_QC" — the
  # order looks swapped relative to the pipeline; confirm intended.
  zoo_output=cbind(snow,data_in_range,data_calibr,data_no_outliers,data_smooth,data_smooth_no_outliers)
  rdata_output=list(snow,data_in_range,data_calibr,data_no_outliers,data_smooth,data_smooth_no_outliers)
  names(rdata_output)=c("HS_original","HS_calibrated","HS_range_QC", "HS_rate_QC", "HS_calibr_smoothed", "HS_calibr_smooothed_rate_QC" )
  output=as.data.frame(zoo_output)
  output=cbind(index(snow),output)
  colnames(output)=c("TIMESTAMP","HS_original","HS_calibrated","HS_range_QC", "HS_rate_QC", "HS_calibr_smoothed", "HS_calibr_smooothed_rate_QC" )
  save(rdata_output,file=paste(git_folder,"/data/Output/Snow_Filtering_RData/Snow_",substring(file,1,nchar(file)-4), ".RData",sep=""))
  write.csv(output,paste(git_folder,"/data/Output/Snow_Filtering/Snow_",file,sep = ""),quote = F,row.names = F)
}
|
e5643e35efd12b49cc689a7d718e7283a53b6ba6
|
28a3ee1f4c46944bc797be2f511c147bb35d6a98
|
/R/facets-wrapper.R
|
904979d4386595760112dd384876dde1c70103a7
|
[] |
no_license
|
rptashkin/facets2n
|
9d52da47b02ecfeabff3148b9019f2f460efafc0
|
c75d9a97bb60b854b603927c2eb73114ef94590a
|
refs/heads/master
| 2022-05-17T11:50:55.948684
| 2022-05-06T01:51:53
| 2022-05-06T01:51:53
| 200,111,725
| 11
| 3
| null | 2022-05-06T01:51:54
| 2019-08-01T19:59:44
|
R
|
UTF-8
|
R
| false
| false
| 20,993
|
r
|
facets-wrapper.R
|
#' Read in the snp-pileup generated SNP read count matrix file
#'
#' @importFrom utils read.csv
#' @param filename counts file from snp-pileup
#' @param skip (character) Skip n number of lines in the input file.
#' @param err.thresh (numeric) Error threshold to be used to filter snp-pileup data frame.
#' @param del.thresh (numeric) Deletion threshold to be used to filter snp-pileup data frame.
#' @param perl.pileup (logical) Is the pileup data generated using perl pileup tool?
#' @param MandUnormal (logical) Is CNLR analysis to be peformed using unmatched reference normals?
#' @param spanT (numeric) Default span value to be used for loess normalization in tumor sample.
#' @param unmatched (logical) is the tumor being analyzed unmatched
#' @param spanA (numeric) Default span value to be used for loess normalization across autosomal chromosomes in the normal sample.
#' @param spanX (numeric) Default span value to be used for loess normalization in Chr X in the normal sample.
#' @param gbuild (character) Genome build (Default: hg19).
#' @param ReferencePileupFile (character) Filepath to an optional snp-pileup generated pileup data of one or more reference normals.
#' @param ReferenceLoessFile (character) Filepath to an optional loess data, generated using the facets2n package, of one or more reference normals. The number of normals in this data should match that in the ReferencePileupFile, and should be in the same order.
#' @param MinOverlap (numeric) Mininum overlap fraction of loci between a tumor pileup and reference pileup data.
#' @param useMatchedX (logical) Is the matched normal to be used for ChrX normalization?
#' @param refX (logical) Use matched or reference normal for chrX normalization. excludes unmatched normals, such as pooled references, present in tumor counts matrix.
#' @param donorCounts (logical) is the counts matrix baseline donor sample(s)
#' @return A dataframe of pileup depth values for Tumor and Matched Normal if MandUnormal is FALSE. Else, a list of data frame with pileup depth values of Tumor, matched Normal, and a best unmatched normal, and the associated span values.
#' @export
readSnpMatrix <- function(filename, skip=0L, err.thresh=Inf, del.thresh=Inf,
                          perl.pileup=FALSE, MandUnormal=FALSE, spanT=0.2,
                          spanA=0.2, spanX=0.2, gbuild="hg19",
                          ReferencePileupFile=NULL, ReferenceLoessFile=NULL,
                          MinOverlap=0.90, useMatchedX=FALSE, refX=FALSE,
                          unmatched=FALSE, donorCounts=FALSE) {
  # Note: the roxygen block was moved above the function definition so
  # roxygen2 can parse it; comments inside a body are not associated.
  if (perl.pileup) {
    # Pileup produced by the original (perl) snp-pileup.pl script: a plain
    # table of matched-normal and tumor depth / ref-read counts.
    # BUG FIX: the original read from the undefined variable `pileupfilename`;
    # the user-supplied `filename` argument is the intended input.
    rcmat <- scan(filename, what=list(Chromosome="", Position=0,
                                      NOR.DP=0, NOR.RD=0, TUM.DP=0,
                                      TUM.RD=0), skip=skip)
    # Strip a "chr" prefix so chromosome labels match downstream expectations.
    if (rcmat$Chromosome[1] == "chr1") {
      rcmat$Chromosome <- gsub("chr", "", rcmat$Chromosome)
    }
    rcmat <- as.data.frame(rcmat, stringsAsFactors=FALSE)
    # Explicit return; the original fell through with an assignment as the
    # last expression, which returns the value invisibly.
    return(rcmat)
  }
  if (donorCounts) {
    # Counts matrix of baseline donor sample(s): keep the pre-processed
    # matrix, relabelling the generic File* columns as Donor*.
    tumor.pileup <- PreProcSnpPileup(filename, err.thresh,
                                     del.thresh, is.Reference=TRUE, gbuild)
    colnames(tumor.pileup) <- gsub("File", "Donor", colnames(tumor.pileup))
    return(tumor.pileup)
  }
  # Read the read count matrix generated by the snp-pileup.cpp code.
  tumor.pileup <- PreProcSnpPileup(filename, err.thresh,
                                   del.thresh, is.Reference=FALSE, gbuild)
  if (MandUnormal) {
    # Unmatched-normal mode: loess-normalize the tumor and pick the best
    # normal, either from an external reference panel or from the normals
    # present in the tumor counts matrix itself.
    tumor.loess <- MakeLoessObject(tumor.pileup, is.Reference=FALSE)
    tumor.loess.key <- tumor.loess[, 1]
    reference.loess <- NULL
    reference.pileup <- NULL
    if (!is.null(ReferencePileupFile) && !is.null(ReferenceLoessFile)) {
      # External panel: pileup plus its pre-computed loess data (same
      # samples, same order).
      reference.pileup <- PreProcSnpPileup(ReferencePileupFile, err.thresh,
                                           del.thresh, is.Reference=TRUE)
      reference.loess <- as.matrix(read.csv(ReferenceLoessFile, sep="\t",
                                            stringsAsFactors=FALSE, header=TRUE))
    } else {
      # No external panel: restrict the tumor pileup to loci retained by the
      # loess fit before searching the matrix's own candidate normals.
      tumor.pileup <- tumor.pileup[which(tumor.pileup$key %in% tumor.loess.key), ]
    }
    return(FindBestNormalParameters(
      tumor.loess,
      tumor.pileup,
      reference.loess,
      reference.pileup,
      MinOverlap,
      useMatchedX,
      refX,
      unmatched
    ))
  }
  # Matched-normal mode: keep the matched normal (File1) and tumor (File2)
  # depth / ref-read columns, renamed to the names facets expects.
  rcmat <- subset(tumor.pileup, select=c(Chromosome, Position, Ref, Alt,
                                         File1DP, File1R, File2DP, File2R))
  colnames(rcmat) <- c("Chromosome", "Position", "Ref", "Alt",
                       "NOR.DP", "NOR.RD", "TUM.DP", "TUM.RD")
  return(rcmat)
}
preProcSample <- function(rcmat, ndepth=35, het.thresh=0.25, snp.nbhd=250, cval=25, deltaCN=0, gbuild=c("hg19", "hg38", "hg18", "mm9", "mm10"), hetscale=TRUE, unmatched=FALSE, MandUnormal=FALSE, ndepthmax=5000, spanT=0.2, spanA=0.2, spanX=0.2,donorCounts=NULL) {
    #' Pre-process a sample
    #' @description Turns a SNP read-count matrix into per-locus summaries and a segmentation tree.
    #' @param rcmat data frame with 6 required columns: Chrom, Pos, NOR.DP, NOR.RD, TUM.DP and TUM.RD. Additional variables are ignored. Ref and Alt columns required for transplant cases with option donorCounts.
    #' @param ndepth minimum normal sample depth to keep a locus
    #' @param het.thresh vaf threshold to call a SNP heterozygous
    #' @param snp.nbhd window size; at most one locus per window is sampled to avoid serial correlation
    #' @param cval critical value for segmentation
    #' @param deltaCN minimum detectable difference in CN from diploid state
    #' @param gbuild genome build used for the alignment. Default hg19; hg38 and hg18 for human, mm9 and mm10 for mouse. Chromosomes used are 1-22, X for human and 1-19 for mouse.
    #' @param hetscale (logical) should logOR get more weight in the segmentation/clustering statistic? Since only ~10% of SNPs are hets, hetscale sets the logOR contribution to T-square as 0.25/proportion of hets.
    #' @param unmatched indicator of an unmatched normal. When TRUE hets are called from tumor reads only and logOR calculations differ. Use het.thresh = 0.1 or lower when TRUE.
    #' @param MandUnormal analyzing both matched and unmatched normal for log ratio normalization
    #' @param ndepthmax loci whose normal coverage exceeds this value (default 5000) are discarded as PCR duplicates. For high coverage samples increase this and ndepth commensurately.
    #' @param spanT span value for tumor
    #' @param spanA span value for autosomes
    #' @param spanX span value for chromosome X
    #' @param donorCounts snp read count matrix for donor sample(s). Required columns: Chromosome Position Ref Alt and, per donor sample i: RefDonoriR RefDonoriA RefDonoriE RefDonoriD RefDonoriDP
    #' @details Because SNPs are unevenly spaced, using every locus induces serial correlation; loci are therefore sub-sampled one per snp.nbhd interval. Use set.seed beforehand for reproducible sampling.
    #' @return \item{pmat}{Read counts and other elements of all the loci}
    #' \item{seg.tree}{a list of matrices, one per chromosome, giving the tree structure of the splits: each row is a segment with its parent row, start-1 and end index, and maximal T^2 statistic; row one is the whole chromosome (parent 0).}
    #' \item{jointseg}{The data that were segmented; only loci sampled within a snp.nbhd are present. Segment results given.}
    #' \item{hscl}{scaling factor for logOR data.}
    #' @export
    gbuild <- match.arg(gbuild)
    # chromosome X integer code differs between human (23) and mouse (20) builds
    nX <- switch(gbuild,
                 hg19 = , hg38 = , hg18 = 23,
                 mm9 = , mm10 = 20)
    # per-locus counts, het calls, and window sub-sampling
    snp.dat <- procSnps(rcmat, ndepth, het.thresh, snp.nbhd, gbuild, unmatched, ndepthmax, donorCounts)
    # log-ratio / log-odds-ratio computed only for loci with tumor reads
    lr.dat <- counts2logROR(snp.dat[snp.dat$rCountT > 0, ], gbuild, unmatched, MandUnormal, 0.2, spanT, spanA, spanX)
    seg.out <- segsnps(lr.dat, cval, hetscale, deltaCN)
    c(list(pmat = snp.dat, gbuild = gbuild, nX = nX), seg.out)
}
procSample <- function(x, cval=150, min.nhet=15, dipLogR=NULL) {
    #' Process a sample
    #' @description Processes the output from preProcSample for given cval and min.nhet
    #' @param x the output from preProcSample; should contain seg.tree and jointseg
    #' @param cval critical value for segmentation; must be at least as large as the cval used in preProcSample
    #' @param min.nhet minimum number of heterozygote snps in a segment used for bivariate t-statistic during clustering of segments
    #' @param dipLogR diploid level obtained from a fit, typically using a higher cval, can be used with lower cval to recover focal changes
    #' @details The minor copy number lcn may not be estimated with confidence when a segment has fewer than min.nhet heterozygous SNPs and hence will return NA. If there are too few heterozygous SNPs in a segment then mafR and mafR.clust can be NA.
    #' @return \item{jointseg}{The data that were segmented. Only the loci that were sampled within a snp.nbhd are present}
    #' \item{out}{data frame of segment summaries pre and post clustering of segments. The columns are: chrom the chromosome to which the segment belongs; seg the segment number; num.mark the number of SNPs in the segment; nhet the number of SNPs that are deemed heterozygous; cnlr.median the median log-ratio of the segment; mafR the log-odds-ratio summary for the segment; segclust the segment cluster to which segment belongs; cnlr.median.clust the median log-ratio of the segment cluster; mafR.clust the log-odds-ratio summary for the segment cluster; cf the cellular fraction of the segment; tcn the total copy number of the segment; lcn the minor copy number of the segment.}
    #' \item{dipLogR}{specified or obtained from data}
    #' \item{...}{other output when findDiploidLogR is used}
    #' @export
    # ensure availability of seg.tree
    if (is.null(x$seg.tree)) stop("seg.tree is not available")
    # get the numeric value of chromosome X
    nX <- x$nX
    # make sure that original cval is smaller than current one
    # (the tree can only be pruned to coarser segmentations, not refined)
    cval.fit <- attr(x$seg.tree, "cval")
    if (cval.fit > cval) stop("original fit used cval = ", cval.fit)
    # jointseg etc; keep only loci with a finite copy-number log-ratio
    jseg <- x$jointseg
    jseg <- jseg[is.finite(jseg$cnlr),]
    # chromosomes with data and their counts
    chrs <- x$chromlevels
    nchr <- length(chrs)
    # get chromlevels from chrs (map integer codes back to labels; X is 23 for human, 20 for mouse)
    if (x$gbuild %in% c("hg19", "hg38", "hg18")) chromlevels <- c(1:22,"X")[chrs]
    if (x$gbuild %in% c("mm9", "mm10")) chromlevels <- c(1:19,"X")[chrs]
    # get the segment summary for the fit in seg.tree
    nsegs <- 0
    # jointseg already has a seg variable numbered 1 thru number of segments for each chromosome;
    # renumber cumulatively so segment ids are unique genome-wide
    for (i in 1:nchr) {
        jseg$seg[jseg$chrom==chrs[i]] <- nsegs + jseg$seg[jseg$chrom==chrs[i]]
        nsegs <- max(jseg$seg[jseg$chrom==chrs[i]])
    }
    focalout <- jointsegsummary(jseg)
    # cnlr.median to the left and right (zero-padded at chromosome ends)
    cnlr.med.l <- c(0, focalout$cnlr.median[-nsegs])
    cnlr.med.r <- c(focalout$cnlr.median[-1], 0)
    # mad of cnlr noise
    cnlr.mad <- mad(jseg$cnlr - rep(focalout$cnlr.median, focalout$num.mark))
    # segments that show focal changes have big jump in cnlr.median:
    # flagged when the median exceeds both neighbors by 3 MADs (gain) or
    # falls below both by 3 MADs (loss)
    focalout$focal <- 1*(focalout$cnlr.median > pmax(cnlr.med.l, cnlr.med.r)+3*cnlr.mad) + 1*(focalout$cnlr.median < pmin(cnlr.med.l, cnlr.med.r)-3*cnlr.mad)
    # get the segments for the specified cval
    nsegs <- 0
    for (i in 1:nchr) {
        seg.widths <- diff(prune.cpt.tree(x$seg.tree[[i]], cval))
        jseg$seg[jseg$chrom==chrs[i]] <- nsegs + rep(1:length(seg.widths), seg.widths)
        nsegs <- nsegs + length(seg.widths)
    }
    # adding the focal change segments - need a jump at the beginning and end
    jseg$seg0 <- jseg$seg # detected segments
    # jump at the beginning (twice the height)
    jseg$seg <- jseg$seg + rep(cumsum(2*focalout$focal), focalout$num.mark)
    # drop back for the focal segment to get the steps right
    jseg$seg <- jseg$seg - rep(focalout$focal, focalout$num.mark)
    # focal segment could already be in; so change seg indicator
    # (re-derive consecutive ids from the positions where seg increases)
    jseg$seg <- cumsum(c(1, 1*(diff(jseg$seg) > 0)))
    # segment summaries
    out <- jointsegsummary(jseg)
    # cluster the segments
    out <- clustersegs(out, jseg, min.nhet)
    # put in the clustered values for snps
    jseg$segclust[is.finite(jseg$cnlr)] <- rep(out$segclust, out$num.mark)
    # find dipLogR and fit cncf; when dipLogR is supplied, skip estimation
    if (is.null(dipLogR)) {
        oo <- findDiploidLogR(out, jseg$cnlr)
    } else {
        oo <- list()
        oo$out0 <- "empty"
        oo$dipLogR <- dipLogR
    }
    out <- fitcncf(out, oo$dipLogR, nX)
    # oo[-1] drops out0; its remaining elements (dipLogR, ...) are appended
    c(list(jointseg=jseg, out=out, nX=nX, chromlevels=chromlevels), oo[-1])
}
plotSample <- function(x, emfit=NULL, clustered=FALSE, plot.type=c("em","naive","both","none"), sname=NULL) {
    #' Plot the data and results for a single sample
    #' @description Plots copy number log-ratio, variant allele log-odds ratio as well as the copy number and cellular fraction fits.
    #' @importFrom grDevices colorRampPalette
    #' @param x output from procSample
    #' @param emfit output of emcncf; if NULL the EM panels cannot be drawn
    #' @param clustered (logical) indicator of whether segment or cluster summary plotted
    #' @param plot.type (character) the type of plot. The default is em in which the logR and logOR data as well as the copy number and cellular fraction fits from EM are graphed. For naive the naive copy number and cellular fraction fits are used instead of EM. For none only the data are shown and for both both fits are shown
    #' @param sname (character) sample name given as a character string
    #' @export
    def.par <- par(no.readonly = TRUE) # save default, for resetting...
    # plot.type
    plot.type <- match.arg(plot.type)
    # layout of multi panel figure (panel heights in row units)
    if (plot.type=="none") layout(matrix(1:2, ncol=1))
    if (plot.type=="em") layout(matrix(rep(1:4, c(9,9,6,1)), ncol=1))
    if (plot.type=="naive") layout(matrix(rep(1:4, c(9,9,6,1)), ncol=1))
    if (plot.type=="both") layout(matrix(rep(1:6, c(9,9,6,1,6,1)), ncol=1))
    par(mar=c(0.25,3,0.25,1), mgp=c(1.75, 0.6, 0), oma=c(3,0,1.25,0))
    # raw data used for joint segmentation
    jseg <- x$jointseg
    # chromosome boundaries
    chrbdry <- which(diff(jseg$chrom) != 0)
    out <- NULL
    # BUG FIX: test is.null() instead of missing(). emfit defaults to NULL, so
    # an explicitly passed NULL used to defeat missing() and crash on emfit$cncf.
    if (is.null(emfit)) {
        out <- x$out
        out$lcn[out$tcn == 1] <- 0 #fix bad NAs
        if (plot.type=="em" | plot.type=="both") {
            warning("emfit is missing; plot.type set to naive")
            plot.type <- "naive"
        }
    } else {
        out <- emfit$cncf
        # add the naive tcn, lcn and cf to out
        out$tcn <- x$out$tcn
        out$lcn <- x$out$lcn
        out$lcn[out$tcn == 1] <- 0 #fix bad NAs
        out$lcn.em[out$tcn.em == 1] <- 0
        out$cf <- x$out$cf
    }
    # determine which of the cnlr.median & mafR to show; cluster-level mafR
    # falls back to the segment value when the cluster summary is NA
    if (clustered) {
        cnlr.median <- out$cnlr.median.clust
        mafR <- out$mafR.clust
        mafR[is.na(mafR)] <- out$mafR[is.na(mafR)]
    } else {
        cnlr.median <- out$cnlr.median
        mafR <- out$mafR
    }
    mafR <- abs(mafR)
    # chromosome colors, alternating by chromosome parity
    # (was out$chr, which only worked through data.frame partial matching of "chrom")
    chrcol <- 1+rep(out$chrom-2*floor(out$chrom/2), out$num.mark)
    nn <- cumsum(table(jseg$chrom[is.finite(jseg$cnlr)]))
    # segment start/end positions in plotted-locus coordinates
    segbdry <- cumsum(c(0,out$num.mark))
    segstart <- segbdry[-length(segbdry)]
    segend <- segbdry[-1]
    # plot the logR data and segment medians; y-range at least [-2, 2]
    ymin <- floor(min(out$cnlr.median, na.rm = TRUE))
    ymax <- ceiling(max(out$cnlr.median, na.rm = TRUE))
    if (ymin > -2) ymin <- -2
    if (ymax < 2) ymax <- 2
    plot(jseg$cnlr[is.finite(jseg$cnlr)], pch=1, cex=.5, col = c("grey","lightblue","azure4","slateblue")[chrcol], ylab="log-ratio", xaxt="n", ylim=c(ymin,ymax))
    abline(v=chrbdry, lwd=0.25)
    # green: overall median; magenta: diploid level
    abline(h=median(jseg$cnlr, na.rm=TRUE), col="green2")
    abline(h = x$dipLogR, col = "magenta4")
    segments(segstart, cnlr.median, segend, cnlr.median, lwd=1.75, col='red')
    # plot the logOR data and mafR (shown as +/- sqrt bands)
    plot(jseg$valor[is.finite(jseg$cnlr)], pch=1, cex=.5, col = c("grey","lightblue","azure4","slateblue")[chrcol], ylab="log-odds-ratio", ylim=c(-4,4), xaxt="n")
    abline(v=chrbdry, lwd=0.25)
    segments(segstart, sqrt(mafR), segend, sqrt(mafR), lwd=1.75, col='red')
    segments(segstart, -sqrt(mafR), segend, -sqrt(mafR), lwd=1.75, col='red')
    # white-to-blue ramp for cellular fraction; bisque marks cf > 1 bin (NA-like)
    cfpalette <- c(colorRampPalette(c("white", "steelblue"))(10),"bisque2")
    # EM copy number and cellular fraction pieces
    if (plot.type=="em" | plot.type=="both") {
        # plot the estimated copy numbers and cf; compress tcn > 10 (lcn > 5) onto a log scale
        out$tcn.em[out$tcn.em > 10] <- 9 + log10(out$tcn.em[out$tcn.em > 10])
        ii <- which(out$lcn.em > 5)
        if (length(ii)>0) out$lcn.em[ii] <- 5 + log10(out$lcn.em[ii])
        plot(c(0,length(jseg$cnlr)), c(0,max(out$tcn.em)), type="n", ylab="copy number (em)", xaxt="n")
        abline(v=chrbdry, lwd=0.25)
        segments(segstart, out$tcn.em, segend, out$tcn.em, lwd=2, col='black')
        segments(segstart, out$lcn.em, segend, out$lcn.em, lwd=1.5, col='red')
        # add the cf strip
        plot(c(0,length(jseg$cnlr)), 0:1, type="n", ylab="", xaxt="n", yaxt="n")
        mtext("cf-em", side=2, at=0.5, line=0.2, las=2, cex=0.75)
        cfcol <- cfpalette[round(10*out$cf.em+0.501)]
        rect(segstart, 0, segend, 1, col=cfcol, border=NA)
    }
    # naive copy number and cellular fraction pieces
    if (plot.type=="naive" | plot.type=="both") {
        # plot the estimated copy numbers and cf; same log compression as above
        out$tcn[out$tcn > 10] <- 9 + log10(out$tcn[out$tcn > 10])
        ii <- which(out$lcn > 5)
        if (length(ii)>0) out$lcn[ii] <- 5 + log10(out$lcn[ii])
        plot(c(0,length(jseg$cnlr)), c(0,max(out$tcn)), type="n", ylab="copy number (nv)", xaxt="n")
        abline(v=chrbdry, lwd=0.25)
        segments(segstart, out$tcn, segend, out$tcn, lwd=2, col='black')
        segments(segstart, out$lcn, segend, out$lcn, lwd=1.5, col='red')
        # add the cf strip
        plot(c(0,length(jseg$cnlr)), 0:1, type="n", ylab="", xaxt="n", yaxt="n")
        mtext("cf-nv", side=2, at=0.5, line=0.3, las=2, cex=0.75)
        cfcol <- cfpalette[round(10*out$cf+0.501)]
        rect(segstart, 0, segend, 1, col=cfcol, border=NA)
    }
    # now add the chromosome ticks on x-axis
    chromlevels <- x$chromlevels
    # just make sure chromlevels actually exists
    if (is.null(chromlevels)) chromlevels <- 1:length(nn)
    axis(labels=chromlevels, side=1, at=(nn+c(0,nn[-length(nn)]))/2, cex=0.65)
    mtext(side=1, line=1.75, "Chromosome", cex=0.8)
    # BUG FIX: is.null() instead of missing() (sname defaults to NULL)
    if (!is.null(sname)) mtext(sname, side=3, line=0, outer=TRUE, cex=0.75)
    par(def.par) #- reset to default
}
logRlogORspider <- function(cncf, dipLogR=0, nfrac=0.005) {
    #' logRlogORspider plot generation from cncf input
    #' @description Diagnostic plot of segment logR vs logOR summaries against the expected curves for integer copy number states.
    #' @param cncf Copy number and cellular fraction data frame either the naive one (out) from procSample or the EM fit (cncf) from emcncf.
    #' @param dipLogR the log-ratio value corresponding to the diploid state.
    #' @param nfrac a segment is shown if its proportion of loci and het SNPs (num.mark and nhet) both exceed nfrac. Default is 0.005.
    #' @details This is a diagnostic plot to check how well the copy number fits work. The estimated segment summaries are plotted as circles where the size of the circle increases with the number of loci in the segment. The expected value for various integer copy number states are drawn as curves for purity ranging from 0 to 0.95. For a good fit, the segment summaries should be close to one of the lines.
    #' @export
    # purity (tumor fraction) grid for the expected curves
    rho <- seq(0, 0.95, by=0.01)
    nrho <- length(rho)
    # 19 curves: one-copy loss plus all (tcn, lcn) states for tcn = 2..7
    logACR <- logCNR <- matrix(0, nrho, 19)
    # initialize index
    l <- 1
    # one copy loss; the trailing -1 centers logR on the diploid level
    # (log2 of the copy ratio relative to 2 copies)
    logCNR[,l] <- log2(2*(1-rho) + 1*rho) -1
    logACR[,l] <- log(1/(1-rho))
    # integer copy numbers (clonal): total copies i with minor copy j
    for(i in 2:7) {
        for(j in 0:floor(i/2)) {
            l <- l+1
            logCNR[,l] <- log2(2*(1-rho) + i*rho) -1 # base-2
            # expected allelic log-odds-ratio for (i-j) major vs j minor copies
            logACR[,l] <- log(1-rho+(i-j)*rho) - log(1-rho+j*rho)
        }
    }
    plot(c(-0.95, 1.8), c(0, 5), type="n", xlab="Expected(logR - dipLogR)", ylab=" Expected(|logOR|)")
    # draw the one-copy-loss curve first, labelled "tcn-lcn" at its endpoint
    l <- 1; i <-1; j <-0
    linecols <- c("black","cyan3","green3","blue4")
    lines(logCNR[,l], logACR[,l], lty=1, col=j+1, lwd=1.25)
    text(logCNR[nrho,l]+0.03, logACR[nrho,l], paste(i,j,sep="-"), cex=0.65)
    # remaining curves: line type encodes total copies, color encodes minor copies
    for(i in 2:7) {
        for(j in 0:floor(i/2)) {
            l <- l+1
            lines(logCNR[,l], logACR[,l], lty=i-1, col=linecols[j+1], lwd=1.25)
            text(logCNR[nrho,l]+0.03, logACR[nrho,l], paste(i,j,sep="-"), cex=0.65)
        }
    }
    # only show segments carrying more than nfrac of loci AND of het SNPs
    nsnps <- sum(cncf$num.mark)
    nhets <- sum(cncf$nhet)
    ii <- cncf$num.mark > nfrac*nsnps & cncf$nhet > nfrac*nhets
    # circle size grows with the segment's share of plotted loci
    cex <- 0.3 + 2.7*(cncf$num.mark[ii]/sum(0.1*cncf$num.mark[ii]))
    points(cncf$cnlr.median[ii] - dipLogR, sqrt(abs(cncf$mafR[ii])), cex=cex, col="magenta4", lwd=1.5)
}
|
bce90ab3643917309ecc5aadb517431c1800907a
|
6087436469aafbd7b1007cf3c02e64e928d4e13b
|
/03_good_programming_R_functions/Problem 6.R
|
ac7d858157fbbe9e25e048ade795cec5bb7ea388
|
[] |
no_license
|
Peng0618/STAT_5014
|
48ae2e18970d50a5e683b71fc97ad94fe89744cb
|
fd1b3aa44717ff896208984612a5a826864add5b
|
refs/heads/master
| 2021-07-11T14:51:50.645887
| 2017-10-11T22:27:48
| 2017-10-11T22:27:48
| 103,090,715
| 0
| 0
| null | 2017-09-11T04:44:04
| 2017-09-11T04:44:04
| null |
UTF-8
|
R
| false
| false
| 1,018
|
r
|
Problem 6.R
|
# Summarise two paired measurement vectors: per-vector mean and standard
# deviation, plus their Pearson correlation.
# Args:
#   dev1, dev2: numeric vectors of equal length (paired device readings).
# Returns: a one-row data.frame with columns Mean1, Std1, Mean2, Std2, Corr.
datasum <- function(dev1, dev2) {
  # fail fast on malformed input instead of propagating NAs/errors downstream
  stopifnot(is.numeric(dev1), is.numeric(dev2), length(dev1) == length(dev2))
  m1 <- mean(dev1)
  m2 <- mean(dev2)
  sd1 <- sd(dev1)
  sd2 <- sd(dev2)
  # cor() computes the Pearson correlation directly; the original
  # cov(dev1, dev2)/sd1/sd2 hand-rolled the same quantity.
  cor12 <- cor(dev1, dev2)
  data.frame(Mean1 = m1, Std1 = sd1, Mean2 = m2, Std2 = sd2, Corr = cor12)
}
# Read the raw measurements; expected columns: Observer, dev1, dev2,
# with observers numbered 1 through 13.
RawData <- readRDS('HW3_data.rds')

# Summarise each observer once and bind all rows at the end. This replaces
# the original pattern of growing six parallel vectors with rbind() inside
# the loop, which copies every vector on each iteration (O(n^2) appends).
FinalResult <- do.call(rbind, lapply(1:13, function(i) {
  Obs <- subset(RawData, RawData$Observer == i)
  stat <- datasum(Obs$dev1, Obs$dev2)
  # same column names and order as the original data.frame() assembly
  data.frame(Observer = i, Mean1 = stat$Mean1, Std1 = stat$Std1,
             Mean2 = stat$Mean2, Std2 = stat$Std2, Cov = stat$Corr)
}))

# Device-to-device spread of the summary statistics.
SpreadMean <- FinalResult$Mean2 - FinalResult$Mean1
SpreadSTD <- FinalResult$Std2 - FinalResult$Std1
|
c360c7e130be98bbb076c665d71cff4c97cfe9e6
|
f07605bde795cf492828a04fc8e027609ea1054c
|
/closestGeneEnrichment.R
|
76c5d47b4f9799c8227d5c3f1b5a931d7e8e2e9a
|
[] |
no_license
|
Dan-Ju/gwas
|
1f6372264dd1a2b9565739f7bfbe0bff0d876953
|
dfd3127927224c828a7a535164442c0c5bbf543b
|
refs/heads/master
| 2022-04-26T21:35:53.132867
| 2018-03-25T21:49:23
| 2018-03-25T21:49:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,789
|
r
|
closestGeneEnrichment.R
|
## Gene Enrichment analysis for closest gene to snp
# NOTE: Total number of genes is hardcoded in according to RefSeq gene list
# args[1]/args[2]: paths to two bedtools-intersect output BED files; column V7
# is expected to hold the RefSeq mRNA accession of the closest gene.
args <- commandArgs(trailingOnly = TRUE)
# arg1 path to bed tools intersect output bed file
# arg2 path to bed tools intersect output bed file
library(methods)
library(biomaRt)
df1 <- read.table(args[1], as.is = TRUE)
df2 <- read.table(args[2], as.is = TRUE)
# GRCh37 Ensembl mart used for all annotation lookups
hsapiens.grch37 <- useEnsembl(biomart = "ensembl", dataset="hsapiens_gene_ensembl",
                              GRCh=37)
# map RefSeq accessions to HGNC gene records, then drop duplicated symbols so
# each gene is counted once per list
genes.df1 <- getBM(attributes = c("chromosome_name", "start_position", "refseq_mrna",
                                  "end_position", "hgnc_symbol", "hgnc_id"),
                   values = df1$V7,
                   filters = "refseq_mrna",
                   mart = hsapiens.grch37)
genes.df1 <- genes.df1[!(duplicated(genes.df1$hgnc_symbol)), ]
genes.df2 <- getBM(attributes = c("chromosome_name", "start_position", "refseq_mrna",
                                  "end_position", "hgnc_symbol", "hgnc_id"),
                   values = df2$V7,
                   filters = "refseq_mrna",
                   mart = hsapiens.grch37)
genes.df2 <- genes.df2[!(duplicated(genes.df2$hgnc_symbol)), ]
# genes present in both lists (matched on every annotation column)
intersect.df <- merge(genes.df1, genes.df2, by=c("hgnc_id","chromosome_name",
                                                 "start_position","end_position",
                                                 "refseq_mrna","hgnc_symbol"))
q_intersect <- nrow(intersect.df)   # observed overlap
m_hits <- nrow(genes.df1)           # "white balls": genes from list 1
n_nothits <- 19132 - m_hits         # remaining genes in the RefSeq universe
k_draws <- nrow(genes.df2)          # draws: genes from list 2
# Enrichment p-value P(X >= q). BUG FIX: the original 1 - phyper(q, ...)
# computed P(X > q), which excludes the observed overlap itself (off-by-one)
# and loses precision for small p-values; evaluate the upper tail at q - 1
# with lower.tail = FALSE instead.
p <- phyper(q_intersect - 1, m_hits, n_nothits, k_draws, lower.tail = FALSE)
print(c("Hypergeometric test:",p))
print(c("Intersecting genes:", q_intersect))
print(c("Arg1 data gene hits:", m_hits))
print(c("Arg2 data gene hits:", k_draws))
|
d018beb1da9eb05160074b770859dfc24ffaa651
|
0cc863fed706b96df0c44afe7d466cff23228049
|
/man/pdf.Rd
|
a86e343fc14c71ab3e9feaa766274bdb8f4f9a84
|
[
"MIT"
] |
permissive
|
alexpghayes/distributions3
|
80a96665b4dabe2300908d569cb74de3cc75b151
|
67d27df128c86d80fe0c903b5b2c8af1fb9b0643
|
refs/heads/main
| 2023-01-27T14:49:47.588553
| 2023-01-18T18:12:22
| 2023-01-18T18:12:22
| 185,505,802
| 52
| 11
|
NOASSERTION
| 2023-01-18T18:12:24
| 2019-05-08T01:38:24
|
R
|
UTF-8
|
R
| false
| true
| 1,595
|
rd
|
pdf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{pdf}
\alias{pdf}
\alias{log_pdf}
\alias{pmf}
\title{Evaluate the probability density of a probability distribution}
\usage{
pdf(d, x, drop = TRUE, ...)
log_pdf(d, x, ...)
pmf(d, x, ...)
}
\arguments{
\item{d}{An object. The package provides methods for distribution
objects such as those from \code{\link[=Normal]{Normal()}} or \code{\link[=Binomial]{Binomial()}} etc.}
\item{x}{A vector of elements whose probabilities you would like to
determine given the distribution \code{d}.}
\item{drop}{logical. Should the result be simplified to a vector if possible?}
\item{...}{Arguments passed to methods. Unevaluated arguments will generate a warning to
catch misspellings or other possible errors.}
}
\value{
Probabilities corresponding to the vector \code{x}.
}
\description{
Generic function for computing probability density function (PDF)
contributions based on a distribution object and observed data.
}
\details{
The generic function \code{pdf()} computes the probability density,
both for continuous and discrete distributions. \code{pmf()} (for the
probability mass function) is an alias that just calls \code{pdf()} internally.
For computing log-density contributions (e.g., to a log-likelihood)
either \code{pdf(..., log = TRUE)} can be used or the generic function
\code{log_pdf()}.
}
\examples{
## distribution object
X <- Normal()
## probability density
pdf(X, c(1, 2, 3, 4, 5))
pmf(X, c(1, 2, 3, 4, 5))
## log-density
pdf(X, c(1, 2, 3, 4, 5), log = TRUE)
log_pdf(X, c(1, 2, 3, 4, 5))
}
|
6abbe51c18cab4f2428ce5478678d9ebf64edec8
|
640fe2928a7f221855704c7bef19644004bb2235
|
/Week 4 Assignment/plot4.R
|
4168d7ad096d7b499f1ee9c4dc069937a1b7ace7
|
[] |
no_license
|
shivangipokhriyal/Exploratory-Data-Analysis
|
2629e4417fa982ab3f632307311bd339f1ef349b
|
bd7584839d1d8e55d428bded41391e523793e896
|
refs/heads/master
| 2022-11-26T21:10:43.146576
| 2020-07-31T19:31:11
| 2020-07-31T19:31:11
| 281,900,220
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 695
|
r
|
plot4.R
|
## plot4.R: total PM2.5 emissions from coal combustion sources in the US, by year.
setwd("C:/Users/comp/Documents/CourseraR/Exploratory Data Analysis/US air pollution case study")

# Load the NEI emissions records and the source classification lookup table.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

# Source codes whose short name mentions coal, and the NEI records using them.
coal_codes <- SCC[grepl("Coal", SCC$Short.Name), 1]
coal_records <- NEI[NEI$SCC %in% coal_codes, ]

library(dplyr)

# Sum coal-combustion emissions for each year.
yearly_totals <- coal_records %>%
  select(Emissions, year) %>%
  group_by(year) %>%
  summarise(Total_emissions = sum(Emissions))

# Draw the annual totals as a connected line-and-point plot into plot4.png.
png(file = "plot4.png")
par(bg = "grey")
with(yearly_totals,
     plot(year, Total_emissions, col = "purple", type = "o", pch = 16, cex = 2,
          lwd = 2, xlab = "Years", ylab = "Total PM2.5 emissions (in tons)",
          main = "Total PM2.5 emissions in the US from Coal combustion sources"))
dev.off()
|
0272d515e212fae24204f6773e77a6dca54d400f
|
45a8db12eb70b9deaf130e3aa1822fd0f152ceec
|
/sources/ICCATPostTraitment.R
|
1df0d8c5ebe79f6e60c2bfb21390f117b3d4b468
|
[] |
no_license
|
MarieEtienne/ICCAT-BFT
|
b23f8ae7dda7c33635e7765b348a26d7c35a8ef3
|
80122e36a45113e4c8f0a27897c3515062b4b8ca
|
refs/heads/master
| 2021-01-10T20:33:00.675198
| 2014-02-04T13:23:43
| 2014-02-04T13:23:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,130
|
r
|
ICCATPostTraitment.R
|
####################################################################################
## ##
## Post treatment on MLE results obtained using iscam on bfte data ##
## Authors: Marie-Pierre Etienne marie.etienne@agroparistech.fr ##
## Date: Aug. 10, 2013 ##
## Date: Jan, 11 2014 ##
## ##
## ##
####################################################################################
# Project root; assumes the ICCAT/ICCAT-BFT checkout lives under $HOME.
main.dir= file.path(Sys.getenv("HOME"), "ICCAT/ICCAT-BFT")
## rep where to find mcmc outputs
# Four run directories: {inflated, reported} x {high-R0, high-Rinit}.
rep<-unlist(lapply(c(file.path('bfte/2012/vpa','inflated'),file.path('bfte/2012/vpa','reported')),
function(d) {file.path(d, c("high-R0", "high-Rinit"))}))
# Short labels built from path components 4 and 5, e.g. "inflatedhigh-R0".
repNames <- lapply(strsplit(rep, "/"), function(d) {paste(d[4], d[5], sep="")})
# Fixed colour palette indexed by gear/scenario codes in the plots below.
palette(c("black", "red", "green3", "blue4", "maroon4", "magenta", "orangered",
"gray"))
# All figures are written into this directory.
outdir <- file.path(main.dir, "Report/figure")
# Loads Info (survey specification and names, and -- presumably -- nit, sage,
# nage, syr, nyr, CAA, compositionCatch used below; TODO confirm contents).
load(file.path(main.dir, 'Report','RData','Info.RData'))
setwd(main.dir)
# Source every helper of the iSCAM R code base (provides read.admb, used below).
iSCAMR <- dir("../iSCAM/src/r-code/R/")
for(f in iSCAMR)
source(paste("../iSCAM/src/r-code/R/", f, sep=""), echo=T)
# NOTE(review): attach() places Info's elements on the search path; explicit
# Info$... access would avoid masking surprises and is preferred.
attach(Info)
RDataFiles<- readLines(file.path(main.dir,'Report', 'RDataSave', 'datafile.out'))
nFiles=length(rep)
# One ADMB report object per run directory.
res <- lapply(rep, function(d){read.admb(ifile=file.path(d,'ICCAT'))})
## reading bug in abundance index
##last index is split on 2 lines
survey=list()
# Column 8 of the survey specification holds the gear/survey names.
gear.names <- Info$surveySpecification[,8]
# One figure overlaying all normalised abundance indices (one series per gear).
# BUG FIX: the original call passed `width` twice (width=11, width=12), which
# errors in R with "formal argument 'width' matched by multiple actual
# arguments"; keep the last value written.
pdf(file="ICCAT-Abundance.pdf", width=12, height=8)
# NOTE(review): nit, gear.list and iSCAMsurvey are expected to come from the
# attached Info list -- confirm, since gear.list is also reassigned later.
for( i in 1:nit)
{
  # rescale each index by its own maximum so the series are comparable
  survey[[i]] <- iSCAMsurvey[iSCAMsurvey$gear==gear.list[i],]
  survey[[i]][,2] <- survey[[i]][,2]/ max(survey[[i]][,2])
  if(i == 1 ){
    plot(survey[[i]][,2]~survey[[i]][,1],type="l",xlim=c(1945,2011), ylim=c(0,1.6), lty=gear.list[i], col=gear.list[i], xlab="Year", ylab="Normalised abundance index")
  } else {
    lines(survey[[i]][,2]~survey[[i]][,1], lty=gear.list[i], col=gear.list[i])
  }
}
# (dropped the stray trailing comma the original legend() call carried)
legend("topleft", legend=paste( Info$surveyName), cex=0.9, col=gear.list, lty=gear.list)
dev.off()
# Reshape the catch-at-age matrix CAA (years x ages) into long format
# (Year, Age, Catch). BUG FIX: the original data.frame(NA, ncol=3, nrow=...)
# did not preallocate -- ncol/nrow are not data.frame() arguments, so it built
# a 1-row frame with columns "NA.", "ncol", "nrow" and relied on out-of-bounds
# row assignment to grow it. Preallocate a proper matrix-backed frame instead.
CAAReformat <- data.frame(matrix(NA_real_, nrow=(nage-sage+1)*(nyr-syr+1), ncol=3))
for( a_ind in sage:nage){
  for(yr_ind in syr:nyr ){
    # rows grouped by year, ages ascending within each year
    CAAReformat[(yr_ind-syr)*(nage-sage+1)+a_ind-sage+1, ] <- c(yr_ind, a_ind, CAA[yr_ind-syr+1, a_ind-sage+1])
  }
}
# Bubble plot of catch at age over time; bubble area proportional to catch.
pdf(file="ICCAT-Catch.pdf", width=10, height=8)
names(CAAReformat) <- c("Year", "Age", "Catch")
radius <- sqrt( CAAReformat$Catch/ pi )
with(CAAReformat,
     symbols(Year, Age, circles=radius, inches=0.35, fg="white", bg="red", ylab="Age", xlab="Year")
)
dev.off()
# Average normalised catch-at-age composition per gear, overlaid in one plot.
gear.list <- unique(compositionCatch[,2])
selectivity <- list()
age <- sage:nage
for( i in 1:na_gear)
{
  # rows of the composition table belonging to this gear
  ind <- which(compositionCatch[,2]==gear.list[i])
  selectivity[[i]] <- compositionCatch[ind,3:(nage-sage+3)]
  # normalise each year's composition to proportions, then average over years
  selectivity[[i]] <- selectivity[[i]] / apply(selectivity[[i]], 1, sum)
  selectivity[[i]] <- apply(selectivity[[i]], 2, mean)
  if(i == 1 ){
    # BUG FIX: the x axis is age, but the original labelled it "Year"
    plot(selectivity[[i]]~age,type="l",xlim=c(sage, nage), ylim=c(0,1), lty=gear.list[i], col=gear.list[i], xlab="Age", ylab="Average normalised catch at age")
  } else {
    lines(selectivity[[i]]~age, lty=gear.list[i], col=gear.list[i])
  }
}
legend("topleft", legend=Info$surveyName, col=gear.list, lty=gear.list)
name.list=c("Comm", Info$surveyName)
age <- sage:nage
par(mfcol=c(2,2))
for( i in 1:ng)
{
ind <- which(compositionCatch[,2]==gear.list[i])
if(length(ind)>3)
{
seltmp<- compositionCatch[ind,]
seltmp[,3:(3+nage-sage)] <- seltmp[,3:(3+nage-sage)] / apply(seltmp[,3:(3+nage-sage)], 1, sum)
CAAtmp <- data.frame(NA, ncol=3, nrow=(nage-sage+1)*(nyr-syr+1))
for( a_ind in sage:nage){
for(yr_ind in syr:nyr ){
j= which(seltmp[,1]==yr_ind)
if(length(j)>0)
CAAtmp[(yr_ind-syr)*(nage-sage+1)+a_ind-sage+1, ] <- c(yr_ind, a_ind, seltmp[j,a_ind-sage+3])
}
}
names(CAAtmp)=c("Year", "Age", "Catch")
CAAtmp <- CAAtmp[!is.na(CAAtmp[,1]),]
radius <- sqrt( CAAtmp$Catch/ pi )
radius <- (radius)/max(radius, na.rm=T)
pdf(file.path(outdir, paste(name.list[gear.list[i]],'.pdf', sep='')), width=10, height=8)
with(CAAtmp,
symbols(Year, Age, circles=radius, inches=0.2, fg="white", bg=i, ylab="Age", xlab="Year", main =paste("Gear", name.list[gear.list[i]]))
)
dev.off()
}
}
# Estimated selectivities (log_sel) from each model run, overlaid with the
# observed mean catch composition per gear.
selectivity <- lapply(res, function(d) {d$log_sel})
gear.list=lapply(selectivity, function(d) {unique(d[,1])})
# keep only the first row per gear (selectivity rows are repeated by year)
selectivity <- lapply(selectivity, function(d) { d[ c(1,which(diff(d[,1])!=0)+1), ]})
# back-transform from the log scale.
# NOTE(review): the /10 rescaling is unexplained here -- presumably it matches
# an internal iSCAM scaling; confirm against the iSCAM report format.
selectivity <- lapply(selectivity, function(d) {
r <- cbind(d[,1],exp(d[,2:ncol(d)])/10)
return(r)
})
ngear <- lapply(res, function(d) {d$ngear})
# l is a counter shared across the lapply below via <<- (one pass per run);
# a for loop over seq_along(selectivity) would avoid the global side effect.
l<-1
lapply(selectivity, function(d)
{
ng<- nrow(d)
#pdf(file=file.path(outdir, paste("ICCAT-Selectivity-", repNames[i]",.pdf", sep="")), width=10, paper="a4r")
for( i in 1:ng)
{
if(i==1){
# NOTE(review): yla= relies on partial argument matching to ylab -- spell it out.
plot(sage:nage, d[d[,1]==gear.list[[l]][i], 2:(nage-sage+2)], "l", col=gear.list[[l]][i],
lty=gear.list[[l]][i], ylim=c(0,1), yla="Selectivity", xlab="Age")
# overlay the observed mean composition for this gear, if any
ind = which(compositionCatch[,2]==gear.list[[l]][i])
if(length(ind)>0)
points(sage:nage, apply(compositionCatch[ind,],2, mean)[3:(nage-sage+3)],
col=gear.list[[l]][i], cex=0.7, pch=19 )
}
else{
lines(sage:nage, d[d[,1]==gear.list[[l]][i], 2:(nage-sage+2)], "l",
col=gear.list[[l]][i], lty=gear.list[[l]][i])
ind = which(compositionCatch[,2]==gear.list[[l]][i])
if(length(ind)>0)
points(sage:nage, apply(compositionCatch[ind,],2, mean)[3:(nage-sage+3)],
col=gear.list[[l]][i], cex=0.7, pch=19 )
}
}
legend("topleft", legend=paste(Info$surveyName), lty=gear.list[[l]], col=gear.list[[l]])
l<<-l+1
})
# NOTE(review): no pdf() device is opened above (the call is commented out),
# so this dev.off() closes whatever graphics device is currently active -- verify.
dev.off()
library(grid)
library(ggplot2)
i <- 1
lapply(res, function(d){
nyr <- length(d$yr)
df <- data.frame(Fstatus=d$Fstatus[1,], Bstatus = d$Bstatus[1:nyr], Year=d$yr)
p<- ggplot() + xlab("SpawningBiomass / Bmsy") + ylab ("F/Fmsy") + ylim(c(0,2.025)) +
xlim(c(0,5.5)) + geom_path(data=df, aes(y=Fstatus, x=Bstatus, col=Year), arrow=arrow(type="open", length = unit(0.1, "inches") )) +
geom_vline(aes(xintercept=1)) + geom_hline(aes(yintercept=1))
print(p)
ggsave(filename=file.path(outdir,paste("ICCAT-KobePlot", repNames[i],".pdf", sep="")), width=14, units="cm", height=10)
i <<- i+1
}
)
pdf(file=file.path(outdir,"ICCAT-SelectivityByGear.pdf"), width=10, heigh=14)
par( oma = c( 0, 0, 3, 0 ), mfcol=c(1,1))
split.screen(figs=c(3,2))
ind.scr =1
for(i in 1:ng){
screen(ind.scr)
ind = which(compositionCatch[,2]==gear.list[i])
if(length(ind)>0){
ind.scr <- ind.scr +1
plot(res$age, selectivity[selectivity[,1]==gear.list[i], 2:(nage-sage+2)], "l", col=gear.list[i], lty=gear.list[i], ylim=c(0,1), yla="Selectivity", xlab="Age", main=name.list[gear.list[i]])
for(j in ind)
points(res$age, compositionCatch[j,3:(nage-sage+3)], col=gear.list[i], cex=0.7, pch=19 )
}
}
mtext("Selectivity at age", outer=TRUE)
close.screen(all=TRUE)
dev.off()
par( oma = c( 2, 2, 0, 0 ), mfcol=c(1,1), mar=c(2, 2, 1, 1))
split.screen(figs=c(3,2))
ind.scr =1
for(i in 1:ng){
screen(ind.scr)
ind = which(compositionCatch[,2]==gear.list[i] & compositionCatch[,1]<=1980)
if(length(ind)>0){
if(ind.scr>=5){ x.axt="s"} else{x.axt="n"}
plot(res$age, selectivity[selectivity[,1]==gear.list[i], 2:(nage-sage+2)], "l", col=gear.list[i], lty=gear.list[i], ylim=c(0,1),
ylab="", xlab="", xaxt=x.axt, yaxt="n")
if(ind.scr>=5){ print(ind.scr); mtext("Age", side=1, line=2, adj=0.5)}
# if(!(ind.scr%%2)){ print(ind.scr); mtext("Selectivity", side=2, line=2; adj=1)}
ind.scr <- ind.scr +1
for(j in ind)
points(res$age, compositionCatch[j,3:(nage-sage+3)], col=gear.list[i], cex=0.7, pch=19 )
}
}
close.screen(all.screens=T)
par( oma = c( 2, 2, 0, 0 ), mfcol=c(1,1), mar=c(2, 2, 1, 1))
split.screen(figs=c(3,2))
ind.scr =1
for(i in 1:ng){
screen(ind.scr)
ind = which(compositionCatch[,2]==gear.list[i] & compositionCatch[,1]>=1980)
if(length(ind)>0){
if(ind.scr>=5){ x.axt="s"} else{x.axt="n"}
plot(res$age, selectivity[selectivity[,1]==gear.list[i], 2:(nage-sage+2)], "l", col=gear.list[i], lty=gear.list[i], ylim=c(0,1),
ylab="", xlab="", xaxt=x.axt, yaxt="n")
if(ind.scr>=5){ print(ind.scr); mtext("Age", side=1, line=2, adj=0.5)}
# if(!(ind.scr%%2)){ print(ind.scr); mtext("Selectivity", side=2, line=2; adj=1)}
ind.scr <- ind.scr +1
for(j in ind)
points(res$age, compositionCatch[j,3:(nage-sage+3)], col=gear.list[i], cex=0.7, pch=19 )
}
}
close.screen(all.screens=T)
### collect results for all scenarios
resTable <- matrix(NA, ncol=nFiles, nrow=7)
resTable[1,] <-unlist(lapply(res, function(d) {(log(d$ro))}))
resTable[2,] <-unlist(lapply(res, function(d) {((d$steepness))}))
resTable[3,] <-unlist(lapply(res, function(d) {(d$fmsy)}))
resTable[4,] <-unlist(lapply(res, function(d) {log(d$msy)}))
resTable[5,] <-unlist(lapply(res, function(d) {log(d$bmsy)}))
resTable[6,] <-unlist(lapply(res, function(d) {(log(d$Bstatus[length(d$yr)]))}))
resTable[7,] <-unlist(lapply(res, function(d) {((d$Fstatus[1,length(d$yr)]))}))
resTable <-cbind(Name=c("logRO", "h", "fmsy", "msy", "bmsy", "Bstatus", "Fstatus"),as.data.frame(resTable))
colnames(resTable)[2:(nFiles+1)]=repNames
xtable(resTable, digits=4)
i <- 1
p<- ggplot() + xlab("Years") + ylab ("recruits") + ylim(c(0,6.1e6))
lapply(res, function(d){
nyr <- length(d$yrs)
df <- data.frame(rt=d$rt[1:(nyr-4)], yr=d$yrs[1:(nyr-4)])
p <<- p +geom_line(data=df, aes(y=rt, x=yr), col=i)
i <<- i+1
}
)
print(p+ scale_color_manual(labels=unlist(namesRep), values=1:nFiles))
ggsave(filename=file.path(outdir,paste("Recruits.pdf", sep="")), width=14, units="cm", height=10)
i <- 1
p<- ggplot() + xlab("Years") + ylab ("Spawning biomass") + ylim(c(0,6e8))
lapply(res, function(d){
nyr <- length(d$yrs)
df <- data.frame(sbt=d$sbt[1:(nyr)], yr=d$yrs[1:(nyr)])
p <<- p +geom_line(data=df, aes(y=sbt, x=yr), col=i)
i <<- i+1
}
)
print(p+ scale_color_manual(labels=unlist(namesRep), values=1:nFiles))
ggsave(filename=file.path(outdir,paste("Spawning.pdf", sep="")), width=14, units="cm", height=10)
detach(Info)
j=1
g.names <- gear.names
lapply(res,
function(d){
yr <- d$yr
p <- ggplot()+ xlim(range(yr))
i=1
prov <- data.frame(pit = d$pit[i,], it=d$it[i,], iyr=d$iyr[i,], gear=rep(g.names[1], length(d$it[i,]) ) )
norm<- max(prov$it, na.rm=T)
prov$pit <- prov$pit /norm
prov$it <- prov$it /norm
df <- prov
## bug in reading abundance, last line split on 2 lines
for(i in 2:(ng-1))
{
prov <- data.frame(pit = d$pit[i,], it=d$it[i,], iyr=d$iyr[i,], gear=rep(g.names[i], length(d$it[i,]) ) )
if(i==(ng-1) )
{
prov2 <- data.frame(pit = d$pit[i+1,],
it=d$it[i+1,], iyr=d$iyr[i+1,],
gear=rep(g.names[i], length(d$it[i,]) ) )
prov <- rbind(prov, prov2)
}
norm<- max(prov$it, na.rm=T)
prov$pit <- prov$pit /norm
prov$it <- prov$it /norm
df <- rbind(df, prov)
}
p <-ggplot() + geom_line(data=df,aes( x=iyr, y=it, col=gear)) +
geom_line(data=df,aes(x=iyr, y=pit, col=gear), linetype="dotted", lwd=2 )
print(p)
print(file.path(outdir, paste(repNames[[j]], "Abundance.pdf", sep='')))
ggsave(file.path(outdir, paste(repNames[[j]], "Abundance.pdf", sep='')), width=15, height=10, units="cm")
j <<- j+1
}
)
j=1
g.names <- gear.names
lapply(res,
function(d){
yr <- d$yr
p <- ggplot()+ xlim(range(yr))
i=1
prov <- data.frame(pit = d$pit[i,], it=d$it[i,], iyr=d$iyr[i,], gear=rep(g.names[1], length(d$it[i,]) ) )
norm<- max(prov$it, na.rm=T)
prov$pit <- prov$pit /norm
prov$it <- prov$it /norm
df <- prov
## bug in reading abundance, last line split on 2 lines
for(i in 2:(ng-1))
{
prov <- data.frame(pit = d$pit[i,], it=d$it[i,], iyr=d$iyr[i,], gear=rep(g.names[i], length(d$it[i,]) ) )
if(i==(ng-1) )
{
prov2 <- data.frame(pit = d$pit[i+1,],
it=d$it[i+1,], iyr=d$iyr[i+1,],
gear=rep(g.names[i], length(d$it[i,]) ) )
prov <- rbind(prov, prov2)
}
norm<- max(prov$it, na.rm=T)
prov$pit <- prov$pit /norm
prov$it <- prov$it /norm
df <- rbind(df, prov)
}
p <-ggplot() + geom_line(data=df,aes( x=iyr, y=it, col=gear)) +
geom_line(data=df,aes(x=iyr, y=pit, col=gear), linetype="dotted", lwd=2 )
print(p)
print(file.path(outdir, paste(repNames[[j]], "Abundance.pdf", sep='')))
ggsave(file.path(outdir, paste(repNames[[j]], "Abundance.pdf", sep='')), width=15, height=10, units="cm")
j <<- j+1
}
)
j=1
lapply(res,
function(d){
yr <- d$yr
p <- ggplot()+ xlim(range(yr))
df <- data.frame(ct = d$ct[1,], obs_ct=d$obs_ct[1,], yr=d$yr)
p <-ggplot() + geom_line(data=df,aes( x=yr, y=ct)) +
geom_line(data=df,aes(x=yr, y=obs_ct), linetype="dotted", lwd=2 )
print(p)
ggsave(file.path(outdir, paste(repNames[[j]], "Catch.pdf", sep='')), width=15, height=10, units="cm")
j <<- j+1
}
)
|
3a093ee5ff84f9b94072605e4f0d869feac51d11
|
26614f5bbb32ecf54e9f6bea228b01012e15c8af
|
/tests/test__cv.models__general__model.with.offset.r
|
80291b2e5fb549fc44aec3b9ac279ce1bfd48a85
|
[
"MIT"
] |
permissive
|
Marchen/cv.models
|
3291eba7c32fbe5783a01fc5e6707cdc6a3ca001
|
70af64f72933a4172d229413ff43034a53c93163
|
refs/heads/master
| 2021-03-24T12:23:02.833670
| 2020-08-19T09:23:42
| 2020-08-19T09:23:42
| 78,902,959
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,950
|
r
|
test__cv.models__general__model.with.offset.r
|
library(cv.models)
library(testthat)
library(gbm)
#-----------------------------------------------------------------------------
# Create test data.
#-----------------------------------------------------------------------------
#	Build the simulated dataset used by the offset tests.
#
#	A fixed seed (12345) makes repeated calls return identical data. The
#	RNG calls are issued in exactly the same order as before, so the
#	generated values are unchanged.
create.test.data <- function() {
	set.seed(12345)
	# Predictor, an integer-valued offset in 1..9, and a grouping index in
	# 1..99 (the %/% 1 truncates the uniform draws to whole numbers).
	x <- rnorm(1000, 1000, 100)
	offset <- runif(1000, 1, 10) %/% 1
	random <- runif(1000, 1, 100) %/% 1
	# Responses whose expected value scales with the offset.
	y.norm <- rnorm(1000, x) * offset
	y.pois <- rpois(1000, x) * offset
	# The same responses with an added group-level random effect, indexed
	# by the `random` column.
	y.norm.with.random <- rnorm(1000, x) * offset + rnorm(100)[random]
	y.pois.with.random <- rpois(1000, x + rnorm(100)[random]) * offset
	# Assemble in the original column order; `random` becomes a factor.
	data.frame(
		x = x,
		offset = offset,
		random = factor(random),
		y.norm = y.norm,
		y.pois = y.pois,
		y.norm.with.random = y.norm.with.random,
		y.pois.with.random = y.pois.with.random
	)
}
#-----------------------------------------------------------------------------
# Test the model with offset (more correct model) can have
# higher performance than the model without offset.
#-----------------------------------------------------------------------------
#	Check that adding the (correct) offset term improves cross-validated
#	model performance.
#
#	Fits both model specifications with cv.models and expects the model
#	with the offset to have error metrics no worse (<=) and correlation
#	metrics no worse (>=) than the model without it.
#
#	Args:
#		call.with.offset: unevaluated model call including the offset term.
#		call.without.offset: unevaluated model call without the offset term.
#		...: forwarded to cv.models (e.g. n.trees for gbm).
test.offset <- function(call.with.offset, call.without.offset, ...) {
	test_that(
		paste(
			"Test the model with offset having higher performance than the",
			"model without offset."
		), {
			# Run cv.models.
			cv.no.offset <- cv.models(call.without.offset, ...)
			cv.offset <- cv.models(call.with.offset, ...)
			# Check results.
			errors <- c(
				"mse", "rmse", "sd.mse", "sd.rmse", "sd.r.squared",
				"sd.spearman", "sd.kendall", "sd.q.squared"
			)
			cors <- c("r.squared", "spearman", "kendall", "q.squared")
			metrics.no.offset <- extract.metrics(cv.no.offset)
			# BUG FIX: this previously read extract.metrics(cv.no.offset),
			# which compared the no-offset model with itself and made both
			# expectations below vacuously true.
			metrics.offset <- extract.metrics(cv.offset)
			expect_true(
				all(metrics.no.offset[errors] >= metrics.offset[errors])
			)
			expect_true(
				all(metrics.no.offset[cors] <= metrics.offset[cors])
			)
		}
	)
}
#-----------------------------------------------------------------------------
# Test runner.
#
# Currently, glm and gbm were tested.
#-----------------------------------------------------------------------------
#	Build the paired (with-offset / without-offset) model calls for each
#	supported fitter and run test.offset() on each pair.
#	substitute() keeps the calls unevaluated; cv.models evaluates them
#	during cross-validation.
run.test <- function() {
	d <- create.test.data()
	# One entry per fitter; each entry holds the two quoted model calls.
	test.data <- list(
		glm = list(
			offset = substitute(
				glm(
					y.pois ~ x + offset(log(offset)), family = poisson,
					data = d
				)
			),
			no.offset = substitute(
				glm(y.pois ~ x, family = poisson, data = d)
			)
		),
		gbm = list(
			offset = substitute(
				gbm(
					y.pois ~ x + offset(log(offset)), n.cores = 1,
					distribution = "poisson", n.trees = 100, data = d
				)
			),
			no.offset = substitute(
				gbm(
					y.pois ~ x, distribution = "poisson", n.trees = 100,
					data = d, n.cores = 1
				)
			)
		)
	)
	for (i in names(test.data)) {
		# gbm predictions need n.trees forwarded to cv.models as well.
		if (i == "gbm") {
			test.offset(
				test.data[[i]]$offset, test.data[[i]]$no.offset, n.trees = 100
			)
		} else {
			test.offset(test.data[[i]]$offset, test.data[[i]]$no.offset)
		}
	}
}
# Execute the whole suite when this file is sourced.
run.test()
|
62ea7d591d063ebea579e1b0083cd8b3f6a5b129
|
f36522e48e95f36f6d8e925758c517c0e89425a7
|
/man/polynomial.Rd
|
6458a0a2cdbe50d9497c276070a725891f8b9db5
|
[] |
no_license
|
cran/polynom
|
afe8d5750653752c83caa41cac777a223f834755
|
6d0ae2f3fa19eb0487d68aacc3f5638e39ff2284
|
refs/heads/master
| 2023-04-08T03:28:37.103246
| 2022-04-11T02:00:02
| 2022-04-11T02:00:02
| 17,698,644
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,001
|
rd
|
polynomial.Rd
|
\name{polynomial}
\alias{polynomial}
\alias{as.polynomial}
\alias{is.polynomial}
\alias{as.character.polynomial}
\alias{print.polynomial}
\alias{coef.polynomial}
\title{Polynomials}
\description{
Construct, coerce to, test for, and print polynomial objects.
}
\usage{
polynomial(coef = c(0, 1))
as.polynomial(p)
is.polynomial(p)
\method{as.character}{polynomial}(x, decreasing = FALSE, \dots)
\method{print}{polynomial}(x, digits = getOption("digits"), decreasing = FALSE, \dots)
}
\arguments{
\item{coef}{numeric vector, giving the polynomial coefficients in
\emph{in}creasing order.}
\item{p}{an arbitrary \R object.}
\item{x}{a \code{polynomial} object.}
\item{decreasing}{a logical specifying the order of the terms;
in increasing (default) or decreasing powers.}
\item{digits}{the number of significant digits to use for printing.}
\item{\dots}{potentially further arguments passed to and from other methods.}
}
\details{
\code{polynomial} constructs a polynomial from its coefficients,
i.e., \code{p[1:k]} specifies the polynomial
\deqn{p_1 + p_2 x + p_3 x^2 + \dots + p_k x^{k-1}.}{p[1] + p[2]* x +
p[3]* x^2 + ... + p[k]* x^(k-1).}
Internally, polynomials are simply numeric coefficient vectors of
class \code{"polynomial"}. Several useful methods are available for
this class, such as coercion to character (\code{as.character()}) and
function (\code{\link{as.function.polynomial}}), extraction of
the coefficients (\code{coef()}), printing (using \code{as.character}),
plotting (\code{\link{plot.polynomial}}), and computing sums and
products of arbitrarily many polynomials.
\code{as.polynomial} tries to coerce its arguments to a polynomial.
  \code{is.polynomial} tests whether its argument is a polynomial (in
  the sense that it has class \code{"polynomial"}).
}
\examples{
polynomial(1:4)
p <- as.polynomial(c(1,0,3,0))
p
print(p, decreasing = TRUE)
stopifnot(coef(p) == c(1,0,3))
polynomial(c(2,rep(0,10),1))
}
\keyword{symbolmath}
|
aa91e2282ad5f2537a2197f4807b488ab283797c
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/BVS/R/enumerateBVS.R
|
f192b21f449f46dcfc3e6d6988c3532f4d6dec25
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,095
|
r
|
enumerateBVS.R
|
# Exhaustively enumerate and fit all 2^p Bayesian variable-selection models.
#
# `data` holds the outcome plus p predictors; `which.ind` selects which rows
# of the fitBVS result matrix hold coefficients (one per predictor for common
# variants, a single row for rare variants, or one per region when rare
# variants are grouped into multiple regions).
#
# Returns a list with per-model fitness, log prior model probability, a
# 0/1-string model identifier, coefficients, and the alpha used.
enumerateBVS = function(data,forced=NULL,cov=NULL,a1=0,rare=FALSE,mult.regions=FALSE,regions=NULL,hap=FALSE,inform=FALSE){
	p = dim(data)[2]-1
	if(rare==FALSE){
		which.ind = 1:p}
	if(rare==TRUE & mult.regions==FALSE){
		which.ind = 1}
	if(rare==TRUE & mult.regions==TRUE){
		which.ind = 1:length(unique(regions))}
	#### create all possible models
	# Each predictor is in (1) or out (0); expand.grid builds all 2^p rows.
	model.type=0:1
	all.models <- lapply(vector("list", p), function(v) { model.type } )
	all.models <- expand.grid(all.models)
	##Get results for all models
	# fitBVS (defined elsewhere in the package) is applied to each model row;
	# its result is stacked column-wise: coefficient rows, then fitness, then
	# the log prior model probability.
	results <- apply(all.models,1,fitBVS,data=data,forced=forced,cov=cov,a1=a1,rare=rare,mult.regions=mult.regions,
	                 regions=regions,hap=hap,inform=inform)
	coef = t(results[which.ind,])
	# With a single coefficient row the slice is already a vector, so no
	# transpose is needed.
	if(rare==TRUE & mult.regions==FALSE){
		coef = results[which.ind,]
	}
	fitness = results[(length(which.ind)+1),]
	logPrM = results[length(which.ind)+2,]
	which = all.models
	alpha = rep(a1,dim(all.models)[1])
	# Collapse each model's 0/1 indicator row into a single identifier string.
	which = apply(which,1,paste,collapse="")
	results = list(fitness,logPrM,which,coef,alpha)
	names(results) = c("fitness","logPrM","which","coef","alpha")
	return(results)
}
|
072ebe4b4393946e8955ada2b48f47ace001d3e9
|
04ca21e146db0f4e27b38475661b10be7b61a77c
|
/code_II2020/clase_5oct.R
|
4ef7a181379464effd13a69def050e9a481db942
|
[] |
no_license
|
AdrianEnriquez4505/EST-383
|
5c2f1a67777db735ad5ecc8e584b593dc60eed6b
|
a6227a1afc3c50c2ed8beeeace508ec5dc24147f
|
refs/heads/master
| 2023-04-18T18:41:25.754012
| 2021-05-04T23:39:31
| 2021-05-04T23:39:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,421
|
r
|
clase_5oct.R
|
# Classroom script: exploring the 2019 Bolivian election results with ggplot2.
rm(list=ls())
# Load the `computo` data frame with the official vote counts.
load(url('https://raw.githubusercontent.com/AlvaroLimber/EST-383/master/data/oct20.RData'))
names(computo)
names(computo)[18]<-"MAS"
# filter down to a results table for President and Vice-President only
head(computo)
bdp<-computo[computo$Elección=="Presidente y Vicepresidente", ]
bdp<-computo[computo$Elección==unique(computo$Elección)[1], ]
sum(bdp$`Votos Válidos`)
sum(bdp$Inscritos)
### GGPLOT2
install.packages("ggplot2")
#The grammar of graphics is an answer to a question: what is a statistical graphic?
# Timeline of R plotting systems:
#base graphics 1983
#grid 2000
#lattice 1993
#ggplot 2005
#ggvis 2014
#plotly
## Data (dataframe), aesthetics (x, y, z, color, size) and geometry (layers, shapes)
library(ggplot2)
plot(bdp$CC,bdp$MAS)
ggplot(bdp,aes(x=CC,y=MAS))+geom_point()
# One-variable geometries on the CC vote counts.
p1<-ggplot(bdp,aes(x=CC))
p1+geom_boxplot()
p1+geom_density()
p1+geom_histogram()
# Two-variable geometries.
p2<-ggplot(bdp,aes(x=CC,y=MAS))
p2+geom_point()
p2+geom_line() # for temporal data
bdaux<-data.frame(x=2000:2020,y=rnorm(21,60,15))
ggplot(bdaux,aes(x=x,y=y))+geom_line()
unique(bdp$País)
ggplot(bdp[bdp$País=="Bolivia",],aes(x=Departamento))+geom_bar()
# Keep domestic polling places only, then aggregate votes to municipality.
bdp<-bdp[bdp$País=="Bolivia",]
aggregate(bdp$MAS,by=list(bdp$Departamento,bdp$Provincia,bdp$Municipio),sum)
bdmun<-aggregate(bdp[,13:25],by=list(dep=bdp$Departamento,pro=bdp$Provincia,
                                     mun=bdp$Municipio),sum)
ggplot(bdmun,aes(MAS,CC))+geom_point()
# Convert vote counts to percentages of valid votes.
bdmun[,5:13]<-(bdmun[,5:13]/bdmun$`Votos Válidos`)*100
ggplot(bdmun,aes(MAS,CC))+geom_point()
ggplot(bdmun,aes(MAS,PDC))+geom_point()
# Extra aesthetics: color/shape by department, point size by valid votes.
ggplot(bdmun,aes(MAS,CC,colour=dep,size=`Votos Válidos`))+geom_point()
ggplot(bdmun,aes(MAS,CC,shape=dep,size=`Votos Válidos`))+geom_point()
ggplot(bdmun,aes(MAS,CC))+geom_point(colour="blue")
ggplot(bdmun,aes(MAS,CC))+geom_point(aes(colour=dep))
# Faceting and smoothers.
ggplot(bdmun,aes(MAS,CC))+geom_point(colour="blue")+facet_wrap(~dep)
ggplot(bdmun,aes(MAS,CC))+geom_point(colour="blue")+geom_smooth()+facet_wrap(~dep)
ggplot(bdmun,aes(MAS,CC))+geom_point(colour="blue")+geom_smooth(method = "lm")
# Categorical-axis geometries.
ggplot(bdmun,aes(CC,dep))+geom_point()
ggplot(bdmun,aes(dep,CC))+geom_boxplot()
ggplot(bdmun,aes(dep,CC))+geom_violin()
ggplot(bdmun,aes(dep,CC))+geom_jitter()
# Final polished scatter plot with labels and fixed axis limits.
ggplot(bdmun,aes(MAS,CC))+geom_point(colour="brown",size=2,alpha=1/3)+xlim(0,100)+
  ylim(0,100)+xlab("Organización Política: MAS")+ylab("Organización Política: CC")+
  ggtitle("MAS vs CC, elecciones 20 octubre 2019")
|
f0f7bb26303fd92330888d82e5e35fdc0f984b67
|
87e0e27810347db6c2d27846fcaf266eb0c5d8c3
|
/R/helpers.R
|
d9bdc6814231fa367ebe929c0fa51f01191f2777
|
[] |
no_license
|
ucsf-bhhi/coc-data
|
bc245f1d44fef05fdba7976b3ebf570b788a7c99
|
b215a8731c1c0ec2e9cfce1732eec99f058d086e
|
refs/heads/main
| 2023-09-01T17:14:11.053770
| 2021-10-13T22:38:49
| 2021-10-13T22:38:49
| 379,758,960
| 1
| 0
| null | 2021-10-13T19:21:22
| 2021-06-24T00:11:47
|
R
|
UTF-8
|
R
| false
| false
| 9,799
|
r
|
helpers.R
|
#' Fetches ACS table data from the Census Bureau API
#'
#' A wrapper around [tidycensus::get_acs()] which fetches data from the Census
#' Bureau API, adds a column with the year of the data, and removes the unneeded
#' margin of error columns.
#'
#' @param ... Parameters passed to [tidycensus::get_acs()].
#' @param quiet Boolean to suppress status messages.
#'
#' @return A data frame with the requested ACS data.
#' * `year`: Year (numeric)
#' * `fips`: Geographic unit FIPS code (character)
#' * Additional columns with requested data
#' @seealso [tidycensus::get_acs()]
fetch_acs = function(..., quiet = TRUE) {
  f = function(...) {
    # put the arguments in a list
    dots = list(...)
    # the year argument is optional in get_acs and defaults to 2019.
    # If the call doesn't specify the year we still want a year column,
    # so fall back to the same default get_acs uses.
    if (is.null(dots[["year"]])) {
      yr = 2019
    } else {
      # if it was specified, grab it for the year column
      yr = dots[["year"]]
    }
    # call get_acs
    get_acs(...) %>%
      # we only want the fips code and estimates;
      # rename the GEOID column as fips
      select(fips = GEOID,
             # keep the variable & estimate columns when get_acs runs in tidy mode
             matches("variable"),
             matches("estimate"),
             # in wide mode the estimate columns always end in E, so keep those
             ends_with("E", ignore.case = FALSE),
             # NAME also ends in E but we don't want it, so explicitly drop it
             -matches("NAME")) %>%
      # BUG FIX: anchor the pattern so only the *trailing* "E" suffix is
      # stripped. The unanchored "E" removed the first E anywhere in a
      # column name, which mangles user-supplied variable names that
      # contain an E before the suffix.
      rename_with(~ str_remove(.x, "E$"), ends_with("E", ignore.case = FALSE)) %>%
      # add the year column to the data
      mutate(year = yr) %>%
      # put the year column after the fips column
      relocate(year, .after = fips)
  }
  # suppressMessages() hides tidycensus's download progress chatter
  if (quiet) {
    f(...) %>% suppressMessages()
  }
  else {
    f(...)
  }
}
#' Write combined dataset to disk
#'
#' Flexibly writes the combined data set to disk. The writer is looked up by
#' name at run time, so it can support many filetypes as long as they are of
#' the form `output_function(data, file_path)`.
#' @param data A data frame with the combined dataset.
#' @param output_function A character string with the function that will
#'   actually write the file.
#' @param extension A character string with the extension for the output file.
#' @param output_dir A character string with the path to the output directory.
#' @param file_name A character string with the file name.
#'
#' @return Invisibly returns a character string with the output file's path.
write_dataset <- function(data, output_function, extension,
                          output_dir = "output_data",
                          file_name = "coc_data") {
  # make sure output directory exists
  dir_create(output_dir)
  file_path <- path(output_dir, file_name, ext = extension)
  # Resolve the writer by name and invoke it directly; do.call() replaces
  # the previous hand-built call()/eval() pair.
  do.call(output_function, list(data, file_path))
  # BUG FIX: the original `invisible(return(file_path))` returned *visibly*,
  # because return() exits the function before invisible() is applied.
  # Making invisible() the last expression gives the documented behavior
  # while still letting targets monitor the returned path.
  invisible(file_path)
}
#' Summarise every metric column of the combined CoC dataset.
#'
#' Pivots all non-identifier columns to long form, then computes per-variable
#' counts, missingness, central tendency, range, and selected quantiles.
#' Numeric results are formatted for display with format_values(); N is
#' comma-formatted separately so it stays an integer-styled string.
#'
#' @param data The combined CoC data frame (one row per CoC-year).
#' @return A tibble with one row per variable and formatted summary columns.
#' @export
build_summary_stats = function(data) {
	data %>%
		pivot_longer(-c(coc_number, coc_name, coc_category, year), names_to = "Variable", values_to = "values") %>%
		group_by(Variable) %>%
		summarise(
			N = n(),
			# `.names = "{.fn}"` makes each list name below the output column name.
			across(
				values,
				list(
					`Share missing` = ~ sum(is.na(.x)) / N,
					`Mean` = ~ mean(.x, na.rm = TRUE),
					`Median` = ~ median(.x, na.rm = TRUE),
					`Min` = ~ min(.x, na.rm = TRUE),
					`Max` = ~ max(.x, na.rm = TRUE),
					`10th` = ~ quantile(.x, 0.1, na.rm = TRUE),
					`25th` = ~ quantile(.x, 0.25, na.rm = TRUE),
					`75th` = ~ quantile(.x, 0.75, na.rm = TRUE),
					`90th` = ~ quantile(.x, 0.9, na.rm = TRUE),
					`99th` = ~ quantile(.x, 0.99, na.rm = TRUE)
				),
				.names = "{.fn}"
			)
		) %>%
		mutate(
			# Format all numeric summaries except N, which gets integer commas.
			across(c(where(is.numeric), -N), format_values),
			N = scales::comma(N, accuracy = 1)
		)
}
# Format numbers for display: two decimals for magnitudes of at most one,
# whole numbers with thousands separators otherwise. NA stays NA.
format_values = function(x) {
  magnitude <- abs(x)
  case_when(
    magnitude > 1 ~ scales::comma(x, accuracy = 1, trim = FALSE),
    magnitude <= 1 ~ scales::comma(x, accuracy = 0.01, trim = FALSE)
  )
}
# Return the two-digit state FIPS codes whose numeric value is below
# `fips_filter`. The default of 60 keeps the 50 states plus DC and drops
# the outlying territories (AS = 60 and above). Codes are returned in
# their first-appearance order, as in tidycensus::fips_codes.
get_state_fips <- function(fips_filter = 60) {
  codes <- unique(tidycensus::fips_codes$state_code)
  codes[as.numeric(codes) < fips_filter]
}
# Fetch tract-level ACS data for each requested state and row-bind the
# per-state results into a single data frame. Extra arguments are
# forwarded to fetch_acs() (and on to tidycensus::get_acs()).
fetch_acs_tracts = function(year, variables, states = get_state_fips(), ...) {
  fetch_one_state = function(state_fips) {
    fetch_acs("tract", state = state_fips, year = year, variables = variables, ...)
  }
  map_dfr(states, fetch_one_state)
}
#' Moves Alaska and Hawaii for compact 50 state map
#'
#' Moves, scales, and rotates Alaska and Hawaii to make a compact 50 state map for
#' data visualization.
#'
#' @param shapefile A shapefile with CoC boundaries.
#' @param smooth A numeric parameter for shapefile simplification.
#' @param rotate_ak Rotation parameter for Alaska, in degrees.
#' @param scale_ak Scale factor for Alaska.
#' @param shift_ak A vector with the horizontal and vertical shift for Alaska.
#' @param rotate_hi Rotation parameter for Hawaii, in degrees
#' @param scale_hi Scale factor for Hawaii.
#' @param shift_hi A vector with the horizontal and vertical shift for Hawaii
#'
#' @return A spatial data frame with adjusted representations of Alaska and Hawaii.
build_display_map = function(
  shapefile, smooth = 0.005,
  rotate_ak = -27, scale_ak = 0.4, shift_ak = c(-500000, -3250000),
  rotate_hi = -25, scale_hi = 1, shift_hi = c(5000000, -1400000)
) {
  shapefile %>%
    # normalize the geometry column name so move_state() can rely on "Shape"
    rename("Shape" = contains("geometry")) %>%
    # the first two characters of the CoC number are the state abbreviation
    mutate(st = str_sub(coc_number, 1, 2)) %>%
    # drop the outlying territories, which are not part of the display map
    filter(!(st %in% c("AS", "GU", "MP", "PR", "VI"))) %>%
    # simplify polygon detail for faster rendering; keep_shapes preserves
    # small CoCs that would otherwise be dropped entirely
    ms_simplify(keep = smooth, keep_shapes = TRUE) %>%
    # reposition Alaska and Hawaii under/next to the lower 48
    move_state("AK", rotate_ak, scale_ak, shift_ak) %>%
    move_state("HI", rotate_hi, scale_hi, shift_hi) %>%
    select(-st)
}
#' Move, scale, rotate a state on a map
#'
#' Helps move states like Alaska and Hawaii so the map-based visualizations can be
#' more compact. This function simplifies changing the position of the state
#' (shifting), changing the size of the state (scaling), and rotating the state.
#'
#' @param map A spatial data frame with the map data.
#' @param state A string with the abbreviation of the state to adjust (ie. "AK").
#' @param rotation A numeric with rotation adjustment given in degrees.
#' @param scale A numeric with the scale factor for the state.
#' @param shift A numeric vector with the horizontal and vertical adjustments for
#'   the state's position.
#'
#' @return The same spatial data frame but with the adjusted state.
#'
#' @keywords internal
move_state = function(map, state, rotation = 0, scale = 1, shift = c(0,0)) {
  new_state = map %>%
    filter(st == state) %>%
    # round-trip through sp because maptools::elide() handles the rotation
    as_Spatial() %>%
    elide(rotate = rotation) %>%
    st_as_sf() %>%
    rename(Shape = geometry) %>%
    mutate(
      # scale and translate the geometry directly via sfc arithmetic
      Shape = Shape * scale,
      Shape = Shape + shift
    )
  # splice the transformed state back in with the untouched states
  map %>%
    filter(st != state) %>%
    bind_rows(new_state)
}
# Persist the prepared display maps as an RDS file inside `output_dir`,
# creating the directory if needed. Invisibly returns the written path so
# a targets pipeline can track the file.
save_maps = function(maps, output_dir = "maps") {
  # ensure the destination directory exists before writing
  dir_create(output_dir)
  rds_file = path(output_dir, "coc_display_maps.rds")
  write_rds(maps, rds_file)
  invisible(rds_file)
}
# Attach human-readable variable labels (via labelled::set_variable_labels)
# to every column of the combined CoC dataset. Labels are what downstream
# tables and exports display in place of the raw column names.
label_dataset = function(data) {
  data %>%
    set_variable_labels(
      coc_number = "CoC number",
      coc_name = "CoC name",
      coc_category = "CoC category",
      year = "Year",
      # homelessness counts and rates
      overall_homeless = "Unhoused people",
      homeless_rate_total_pop = "Homelessness rate",
      homeless_rate_in_poverty = "Homelessness rate (poverty)",
      homeless_per_1000_total_pop = "Unhoused people per 1,000 people",
      homeless_per_1000_in_poverty = "Unhoused people per 1,000 people (poverty)",
      # population and poverty
      coc_pop = "Population",
      coc_poverty_pop = "Population below poverty line",
      coc_poverty_rate = "Poverty rate",
      # rental market conditions
      avg_renter_share = "% of households who rent",
      share_rent_over_30_pct_inc = "% rent-burdened (30%)",
      share_rent_over_50_pct_inc = "% extremely rent burdened (50%)",
      median_rent_burden = "Median rent burden",
      gross_vacancy_rate = "Gross vacancy rate",
      rental_vacancy_rate = "Rental vacancy rate",
      # evictions
      eviction_filings = "Eviction filings",
      evictions = "Evictions",
      eviction_filing_rate = "Eviction filing rate",
      eviction_rate = "Eviction rate",
      missing_evictions_rate = "% of CoC missing eviction data",
      # fair market rents and Zillow rent index
      avg_fmr0 = "Avg. studio FMR",
      avg_fmr1 = "Avg. 1-bedroom FMR",
      avg_fmr2 = "Avg. 2-bedroom FMR",
      avg_fmr3 = "Avg. 3-bedroom FMR",
      avg_fmr4 = "Avg. 4-bedroom FMR",
      pct_coc_na_fmr = "% of CoC missing FMR",
      coc_rent_zillow = "Avg. Zillow Rent Index",
      coc_share_na_rent_zillow = "% of CoC missing Zillow Rent Index",
      # labor market
      coc_unemployment_rate = "Unemployment rate",
      share_na_coc_unemployment_rate = "% of population missing unemployment rate",
      # safety net participation
      shr_hh_with_snap = "% with SNAP",
      shr_hh_with_pub_assist = "% with public assistance",
      shr_hh_with_snap_or_pub_assist = "% with SNAP or public assistance",
      shr_hh_with_ssi = "% with SSI",
      shr_with_medicaid = "% with Medicaid",
      # income
      household_income = "Median household income",
      family_income = "Median family income",
      individual_earnings = "Median individual earnings"
    )
}
|
608347de2259b259a4a60f4e20f0a5248a60da53
|
c0fe24f3a60479a06ff9d7ef888bd9556d55d249
|
/003ProteomEx_Others_FromWenhhao/SF11/ProteomEx_KRQN_ratio_SF11A_20220811.R
|
8b90368c0dbefe4fe9c2f026765168bbf8fef689
|
[
"BSD-2-Clause"
] |
permissive
|
lilulu777/ProteomEx
|
fd1adb3e37fd64419b9aff218862134180ccb87e
|
00672411ae51f04318bfe0ca1c8db87aa81284b5
|
refs/heads/main
| 2023-04-15T23:42:53.664902
| 2022-10-31T08:19:03
| 2022-10-31T08:19:03
| 559,337,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,192
|
r
|
ProteomEx_KRQN_ratio_SF11A_20220811.R
|
# use variable modifications of both AC and ProteomEx
library(tidyverse)
library(magrittr)
# require(ggsignif)
# NOTE(review): geom_signif() is used below but ggsignif is only referenced in
# the commented-out require() above -- confirm the package is attached elsewhere.
rm(list = ls())
# Fixed color palette: one color per sample-preparation workflow.
my_colors <- c(`In-solution` = '#064F89',
               PCT = '#E6B429',
               `proExM-MS` = '#8FC31F',
               ProteomEx = '#DD704A',
               `ProteomEx-2` = '#DD704A')
# Map from single-letter codes embedded in the file names to group labels.
group_rename <- c('In-solution', 'PCT', 'proExM-MS', 'ProteomEx')
names(group_rename) <- c('c', 'b', 'g', 'a')
# All FragPipe peptide.tsv result files under the LFQ search directory.
files <- list.files('//172.16.13.136/ProteomEx/ProteomEx_LFQ_20220606/para_search/ProteomEx_3variable_LFQ/', pattern = '^peptide\\.tsv$', full.names = T, recursive = T)
# For each result file, compute the fraction of identified peptides whose
# sequence contains at least one K, R, N, or Q residue.
rlt_ls <- lapply(files, function(f){
  df <- read.delim(f, check.names = F)
  data.frame(
    Ratio = sapply(c('K', 'R', 'N', 'Q'), function(pattern){
      sum(str_detect(df$Peptide, pattern))
    }) / nrow(df)
  )
})
df_rlt <- Reduce(cbind, rlt_ls)
# Name each column after its parent folder (sample name), dropping a _T suffix.
colnames(df_rlt) <- str_split(files, '/') %>% lapply(tail, 2) %>% sapply(head, 1) %>% str_replace('_T$', '')
# Reshape to long format (one row per file x amino acid), ratios in percent,
# and derive the workflow code letter from the sample name.
df_rlt %<>%
  `*`(100) %>%
  t() %>%
  as.data.frame() %>%
  rownames_to_column('File') %>%
  pivot_longer(-File, names_to = 'Amino acid', values_to = 'Peptide ratio (%)') %>%
  mutate(Group = str_extract(File, '[abcSg]\\d') %>% str_replace_all('_', '') %>% str_replace_all('\\d', ''))
# Translate the single-letter codes into workflow names.
df_rlt$Group %<>% sapply(function(e){
  switch(e,
         a = 'ProteomEx',
         S = 'ProteomEx-2',
         b = 'PCT',
         c = 'In-solution',
         g = 'proExM-MS')}) %>% unname()
# Fix the display order of the workflows.
df_rlt$Group %<>% factor(levels = c('In-solution', 'PCT', 'proExM-MS', 'ProteomEx', 'ProteomEx-2'),
                         ordered = T)
df_rlt %<>% arrange(Group)
# Per-(group, amino acid) mean and sd, joined back onto the replicate rows.
df_bar <- df_rlt %>%
  group_by(Group, `Amino acid`) %>%
  summarise(
    average = mean(`Peptide ratio (%)`, na.rm = T),
    sd = sd(`Peptide ratio (%)`, na.rm = T),
    .groups = 'drop'
  ) %>%
  left_join(df_rlt, ., by = c('Group', 'Amino acid')) %>%
  arrange(`Amino acid`, Group)
# Restrict the palette to the groups actually present in the data.
this_colors <- my_colors[sapply(names(my_colors), function(e) e %in% df_bar$Group)]
# Keep mean/sd only on the first replicate row of each (amino acid, group)
# pair, so the geom_bar/geom_errorbar layers are not drawn once per replicate.
gmpair_ls <- df_bar %>% distinct(`Amino acid`, Group) %>%
  t() %>%
  as.data.frame() %>%
  as.list()
for(i in seq_along(gmpair_ls)){
  gmpair <- gmpair_ls[[i]]
  # BUG FIX: after t(), gmpair[1] is the amino acid and gmpair[2] is the
  # group (distinct() listed `Amino acid` first). The indices were swapped,
  # so no rows ever matched and duplicated rows kept their mean/sd. The
  # analogous loop later in this script already uses the correct order.
  matched_pos <- which(df_bar$Group == gmpair[2] & df_bar$`Amino acid` == gmpair[1])
  for(j in matched_pos[-1]){
    df_bar[j, 'average'] <- NaN
    df_bar[j, 'sd'] <- NaN
  }
}
# Base plot: per-group bars of the mean ratio, faceted by amino acid, with
# error bars (mean +/- sd), jittered replicate points, and value labels.
p <- ggplot(data = df_bar) +
  facet_grid(. ~`Amino acid`) +
  geom_bar(aes(x = Group, color = Group, weight = average), fill = 'white', position = 'dodge') +
  geom_text(
    aes(label = round(average, 2), x = Group, y = max * 1.02),
    # place each label just above the highest replicate of its bar
    data = df_bar %>% group_by(`Amino acid`, Group) %>% summarise(max = max(`Peptide ratio (%)`), .groups = 'drop') %>% left_join(df_bar) %>% drop_na(),
    position = position_dodge(0.9), size = 2,
    vjust = 0
  )+
  geom_errorbar(aes(x = Group, y = average, ymin = average - sd, ymax = average + sd, color = Group), position = position_dodge(0.9), width = 0.5)+
  geom_jitter(aes(x = Group, y = `Peptide ratio (%)`, color = Group), size = 2)+
  # scale_fill_brewer(palette = "Set2") +
  # scale_color_brewer(palette = "Set2") +
  scale_fill_manual(values = this_colors) +
  scale_color_manual(values = this_colors) +
  labs(x = "Amino acid", y = "Peptide with certain amino acid ratio (%)") +
  # theme_minimal()+
  theme(
    axis.text = element_text(size = 12, color = "black"),
    axis.text.x = element_blank(),
    axis.ticks.x = element_blank(),
    axis.line = element_line(colour = "black"),
    panel.background = element_blank(),
  )
# Variant 1: pairwise t-tests with exact p-values printed above the brackets.
p1 <- p +
  geom_signif(aes(x = Group, y = `Peptide ratio (%)`),
              data = df_rlt,
              comparisons = combn(unique(df_bar$Group), 2, simplify = F),
              # comparisons = list(
              #     c('In-solution', 'ProteomEx'),
              #     c('PCT', 'ProteomEx'),
              #     c('proExM-MS', 'ProteomEx')
              # ),
              step_increase = 0.1,
              #map_signif_level = T,
              map_signif_level = function(p) sprintf("%.3g", p),
              # test = function(x1, x2) t.test(x1, x2, alternative = 'less')) # one-tailed
              test = t.test)
ggsave('ProteomEx_KRQN_ratio_SF11A_20221028.pdf', p1, width = 8, height = 4)
ggsave('ProteomEx_KRQN_ratio_SF11A_20221028_view.pdf', p1, width = 16, height = 16)
# Export the underlying numbers alongside the figure.
df_bar %>%
  arrange(`Peptide ratio (%)`, Group) %>%
  rio::export('ProteomEx_KRQN_ratio_SF11A_20221028.xlsx')
# Variant 2: same comparisons, but p-values shown as significance stars.
p2 <- p +
  geom_signif(aes(x = Group, y = `Peptide ratio (%)`),
              data = df_rlt,
              comparisons = combn(unique(df_bar$Group), 2, simplify = F),
              # comparisons = list(
              #     c('In-solution', 'ProteomEx'),
              #     c('PCT', 'ProteomEx'),
              #     c('proExM-MS', 'ProteomEx')
              # ),
              step_increase = 0.1,
              map_signif_level = function(p) {
                if(p < 0.001) '***'
                else if(p < 0.01) '**'
                else if (p < 0.05) '*'
                else ''
              },
              # test = function(x1, x2) t.test(x1, x2, alternative = 'less')) # one-tailed
              test = t.test) # two-tailed
ggsave('ProteomEx_KRQN_ratio_SF11A_signifmark_20221028_view.pdf', p2, width = 16, height = 16)
ggsave('ProteomEx_KRQN_ratio_SF11A_signifmark_20221028.pdf', p2, width = 8, height = 4)
# Repeat the whole figure with a subset of groups: drop 'ProteomEx-2'.
# only a/b/c/g
df_rlt %<>% filter(!(Group %in% c('ProteomEx-2')))
# Per (group, amino acid) mean and sd, joined back onto the replicate-level
# table so summary stats and raw points live in one data frame.
df_bar <- df_rlt %>%
  group_by(Group, `Amino acid`) %>%
  summarise(
    average = mean(`Peptide ratio (%)`, na.rm = T),
    sd = sd(`Peptide ratio (%)`, na.rm = T),
    .groups = 'drop'
  ) %>%
  left_join(df_rlt, ., by = c('Group', 'Amino acid')) %>%
  arrange(`Amino acid`, Group)
# Restrict the palette to the groups actually present after filtering.
this_colors <- my_colors[sapply(names(my_colors), function(e) e %in% df_bar$Group)]
# One list element per distinct (amino acid, group) pair: c(<amino acid>, <group>).
gmpair_ls <- df_bar %>% distinct(`Amino acid`, Group) %>%
  t() %>%
  as.data.frame() %>%
  as.list()
# Keep the summary stats (average/sd) only on the first row of every
# (group, amino acid) pair; later duplicates get NaN so each bar, label and
# errorbar is drawn a single time.
dup_rows <- which(duplicated(df_bar[, c('Group', 'Amino acid')]))
for (j in dup_rows) {
  df_bar[j, 'average'] <- NaN
  df_bar[j, 'sd'] <- NaN
}
# Same faceted bar chart as above, rebuilt on the filtered group subset:
# per-group mean bars, mean labels, +/- sd error bars, jittered replicates.
p <- ggplot(data = df_bar) +
  facet_grid(. ~`Amino acid`) +
  # `weight = average` makes geom_bar draw bar heights equal to the group mean.
  geom_bar(aes(x = Group, color = Group, weight = average), fill = 'white', position = 'dodge') +
  # Mean labels sit just above the highest replicate of each bar (max * 1.02).
  geom_text(
    aes(label = round(average, 2), x = Group, y = max * 1.02),
    data = df_bar %>% group_by(`Amino acid`, Group) %>% summarise(max = max(`Peptide ratio (%)`), .groups = 'drop') %>% left_join(df_bar) %>% drop_na(),
    position = position_dodge(0.9), size = 2,
    vjust = 0
  )+
  geom_errorbar(aes(x = Group, y = average, ymin = average - sd, ymax = average + sd, color = Group), position = position_dodge(0.9), width = 0.5)+
  geom_jitter(aes(x = Group, y = `Peptide ratio (%)`, color = Group), size = 2)+
  # scale_fill_brewer(palette = "Set2") +
  # scale_color_brewer(palette = "Set2") +
  scale_fill_manual(values = this_colors) +
  scale_color_manual(values = this_colors) +
  labs(x = "Amino acid", y = "Peptide with certain amino acid ratio (%)") +
  # theme_minimal()+
  theme(
    axis.text = element_text(size = 12, color = "black"),
    axis.text.x = element_blank(),
    axis.ticks.x = element_blank(),
    axis.line = element_line(colour = "black"),
    # NOTE(review): trailing comma inside theme() relies on ggplot2/rlang
    # tolerating empty arguments -- confirm it parses on the target version.
    panel.background = element_blank(),
  )
# p1: pairwise significance brackets labelled with numeric two-sided t-test
# p-values (3 significant digits), for the filtered group subset.
p1 <- p +
  geom_signif(aes(x = Group, y = `Peptide ratio (%)`),
              data = df_rlt,
              comparisons = combn(unique(df_bar$Group), 2, simplify = F),
              # comparisons = list(
              #   c('In-solution', 'ProteomEx'),
              #   c('PCT', 'ProteomEx'),
              #   c('proExM-MS', 'ProteomEx')
              # ),
              step_increase = 0.1,
              #map_signif_level = T,
              map_signif_level = function(p) sprintf("%.3g", p),
              # test = function(x1, x2) t.test(x1, x2, alternative = 'less')) # one-tailed
              test = t.test)
# Publication-size and large "view" versions, plus the underlying numbers.
ggsave('ProteomEx_KRQN_ratio_SF11A_abcgOnly_20221028.pdf', p1, width = 8, height = 4)
ggsave('ProteomEx_KRQN_ratio_SF11A_abcgOnly_20221028_view.pdf', p1, width = 16, height = 16)
df_bar %>%
  arrange(`Peptide ratio (%)`, Group) %>%
  rio::export('ProteomEx_KRQN_ratio_SF11A_abcgOnly_20221028.xlsx')
# p2: same brackets labelled with star notation instead of numeric p-values.
p2 <- p +
  geom_signif(aes(x = Group, y = `Peptide ratio (%)`),
              data = df_rlt,
              comparisons = combn(unique(df_bar$Group), 2, simplify = F),
              # comparisons = list(
              #   c('In-solution', 'ProteomEx'),
              #   c('PCT', 'ProteomEx'),
              #   c('proExM-MS', 'ProteomEx')
              # ),
              step_increase = 0.1,
              # Map p-values to conventional star levels; empty string when n.s.
              map_signif_level = function(p) {
                if(p < 0.001) '***'
                else if(p < 0.01) '**'
                else if (p < 0.05) '*'
                else ''
              },
              # test = function(x1, x2) t.test(x1, x2, alternative = 'less')) # one-tailed
              test = t.test) # two-tailed
ggsave('ProteomEx_KRQN_ratio_SF11A_signifmark_abcgOnly_20221028_view.pdf', p2, width = 16, height = 16)
ggsave('ProteomEx_KRQN_ratio_SF11A_signifmark_abcgOnly_20221028.pdf', p2, width = 8, height = 4)
df_bar %<>% arrange(`Amino acid`, Group)
# t-test: for every amino acid, run Welch's two-sided t-test on the peptide
# ratio between every pair of groups and collect the p-values.
an_vec <- unique(df_rlt$`Amino acid`)
pvalue_ls <- list()
for(i in seq_along(an_vec)){
  an <- an_vec[i]
  # All unordered group pairs, e.g. list(c(g1, g2), c(g1, g3), ...).
  pair_ls <- combn(unique(df_rlt$Group), 2, simplify = F)
  rlt <- sapply(pair_ls, function(pair){
    dfsub <- df_rlt %>% filter(`Amino acid` == an, Group %in% pair)
    x1 <- dfsub %>% filter(Group == pair[1]) %>% pull(`Peptide ratio (%)`)
    x2 <- dfsub %>% filter(Group == pair[2]) %>% pull(`Peptide ratio (%)`)
    # var.equal = F -> Welch's t-test (unequal variances).
    t.test(x1, x2, var.equal = F)$p.value
  })
  names(rlt) <- sapply(pair_ls, function(e) str_c(e, collapse = ' versus '))
  pvalue_ls[[i]] <- rlt
}
# Assemble a (pair x amino acid) p-value matrix, then export it both in wide
# and long (tidy) layout together with the plotted summary statistics.
df_p <- Reduce(cbind, pvalue_ls)
colnames(df_p) <- an_vec
df_p2 <- df_p %>% as.data.frame() %>% rownames_to_column('T-test')
df_p1 <- df_p2 %>% pivot_longer(-`T-test`, names_to = 'Amino acid', values_to = 'p-value')
rio::export(list(df_p1, df_p2), 'ProteomEx_peptide_ratio_with_certain_amino_acid_20220811.xlsx')
list(stat = df_bar,
     p_value1 = df_p1,
     p_value2 = df_p2) %>% rio::export('ProteomEx_KRQN_ratio_SF11_v2_20220811.xlsx')
|
2063f58303a845722552b3bb35e6f7e2c7e69c01
|
f66a05f17990e5c3c1afc11750ed0a14216e0564
|
/analisis/file1.R
|
f4c3c0062846cddbe1dc422f768bd9e681da0681
|
[] |
no_license
|
karinaortega1507/ExtraccionPreprocesamientoDatos
|
82ef254086e6aaac005ee3c8896a7491b2b2f3f1
|
c68db2212dfdb0507c883a375c5dab7e76780c6f
|
refs/heads/main
| 2023-03-02T11:42:00.837726
| 2021-02-03T15:43:28
| 2021-02-03T15:43:28
| 332,232,370
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,082
|
r
|
file1.R
|
# Install/load packages.
# NOTE(review): install.packages() inside a script re-installs on every run;
# consider guarding with requireNamespace().
install.packages("dplyr")
library(ggplot2)
library("dplyr")
library(MASS)
# 1. Import the raw data
datos <- read.csv("analisis/angular.csv")
View(datos)
# 2. Drop rows containing NA
datos<-datos[complete.cases(datos),] # keep only complete rows (no NA)
View(datos)
sapply(datos, mode)
sapply(datos, class)
# 3. Store the cleaned and transformed data
# 3.1. Save the transformed data (salary columns coerced to numeric)
datos_listos<-transform(datos, sueldo_anual = as.numeric(sueldo_anual),
                        costo_por_hora = as.numeric(costo_por_hora))
View(datos_listos)
saveRDS(datos_listos,file="misDatosParaGraficar.RDS") # persist the data
# 4. Data analysis
# 4.1. Read back the generated RDS file
mi_data <- readRDS(file = "misDatosParaGraficar.RDS")
View(mi_data)
# Is there a relationship between salary and the number of job openings per country?
ggplot(mi_data, mapping= aes(x=pais, y=sueldo_anual, color = pais)) +
  geom_boxplot() + # draw the box plot
  xlab("País")+
  ylab("Sueldo Anual")+
  ggtitle("Diagrama de Cajas Sueldo Anual")
ggsave("plot5.png", width = 18, height = 14)
# Mean, median, min and max salary plus the number of job openings per country.
sueldo_x_pais<-mi_data %>%
  group_by(pais)%>%
  summarise(Media_sueldo=mean(sueldo_anual),Mediana_sueldo=median(sueldo_anual), Sueldo_minimo=min(sueldo_anual),Sueldo_maximo=max(sueldo_anual), Numero_plazas=length(pais))%>%
  print()
write.csv(x = sueldo_x_pais, file = "sueldoXpais.csv", row.names = FALSE)
# Scatter plot: mean annual salary vs number of job openings per country.
ggplot(sueldo_x_pais, aes(Media_sueldo, Numero_plazas, colour = pais)) + geom_point()+
  xlab("Sueldo anual promedio")+
  ylab("Plazas de trabajo")+
  ggtitle("Diagrama de Dispersión")
ggsave("plot1.png", width = 6, height = 6)
cor(x=sueldo_x_pais$Media_sueldo, y=sueldo_x_pais$Numero_plazas)
# Mean hourly rate plus the number of job openings per country.
costoH_x_pais<-mi_data %>%
  group_by(pais)%>%
  summarise(Media_costo_hora=mean(costo_por_hora), Numero_plazas=length(pais))%>%
  print()
write.csv(x = costoH_x_pais, file = "sueldoXHora_pais.csv", row.names = FALSE)
# Scatter plot: mean hourly rate vs number of job openings per country.
ggplot(costoH_x_pais, aes(Media_costo_hora , Numero_plazas, colour = pais)) +
  geom_point()+
  xlab("Salario por hora")+
  ylab("Plazas de trabajo")+
  ggtitle("Diagrama de Dispersión")
ggsave("plot2.png", width = 6, height = 6)
cor(x=costoH_x_pais$Media_costo_hora, y=costoH_x_pais$Numero_plazas)
# Bar chart: country vs mean annual salary.
ggplot(sueldo_x_pais, aes(x=pais, y=Media_sueldo , fill=pais)) +
  geom_bar(stat="identity") +
  theme(axis.title.x = element_text(face="bold", size=3))+
  xlab("País")+
  ylab("Sueldo Anual Promedio")+
  ggtitle("Sueldo Anual por país")
ggsave("plot3.png", width = 13, height = 13)
|
b9e02ecdae184baffd6abfd1be90c99d86190f76
|
b7430f25a26ef1e67dd877db6faffd584795f2f7
|
/teste01.R
|
31db1bc6aa5ed9c2bdc883e6db29e4924e181aba
|
[] |
no_license
|
walterjrbr/rstudio01
|
4d9f4c27b5205711334bbdd3156a58f63da1ce60
|
91f8eadac0c79e7ff9eb56c1deecba25b015155f
|
refs/heads/master
| 2020-05-22T18:28:06.680178
| 2019-05-13T20:21:07
| 2019-05-13T20:21:07
| 186,471,808
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 803
|
r
|
teste01.R
|
# Check the main indicators for item 1.04.
library(tidyverse)
library(ggplot2)
# NOTE(review): setwd() hard-codes a machine-specific path; prefer
# project-relative paths so the script is portable.
setwd("/home/lab2")
dados=read.csv("R/PENSE_AMOSTRA1_ALUNOESCOLA.CSV",header=TRUE, sep=";")
# Keep the student id plus the survey variables of interest.
dados1=subset(dados, select = c("aluno","VE01P45","VE01P47","VE01P48","VE01P11","VE01P13","VE01P49"))
dados2=subset(dados, select = c("aluno","VE01P45"))
print(head(dados2))
# Variables that influence eating at school.
str(dados1)
summary(dados1)
# Smoothed trend of each survey variable against the student id.
ggplot(dados1) + geom_smooth(aes(x = aluno, y = VE01P45))
ggplot(dados1) + geom_smooth(aes(x = aluno, y = VE01P47))
ggplot(dados1) + geom_smooth(aes(x = aluno, y = VE01P48))
ggplot(dados1) + geom_smooth(aes(x = aluno, y = VE01P11))
ggplot(dados1) + geom_smooth(aes(x = aluno, y = VE01P13))
ggplot(dados1) + geom_smooth(aes(x = aluno, y = VE01P49))
|
4a764925faa38178aa7614dfbd1df57431136b21
|
d29f91aac330118fd6de857229ed07f939bec032
|
/plot4.R
|
f0d45f1a44f3a5f9f78c0d6f0553208af34d1420
|
[] |
no_license
|
bjw2119/ExData_Plotting1
|
6e2ac0bc406344ee97bd47a1ae14d3edc32a9369
|
04c9508c4b8a1973ce5cae64965de4783d19b92b
|
refs/heads/master
| 2021-01-19T00:35:38.259765
| 2015-09-12T03:33:06
| 2015-09-12T03:33:06
| 42,337,456
| 0
| 0
| null | 2015-09-11T23:51:24
| 2015-09-11T23:51:23
| null |
UTF-8
|
R
| false
| false
| 1,709
|
r
|
plot4.R
|
# Grab column labels from the file's top row.
collabs<-read.table("household_power_consumption.txt", nrows = 1, sep=";", colClasses = "character")
# Read in only the data from Feb 1 and Feb 2 2007 (thanks to Chad Junkermeier for the piping example)
powerdata<- read.table(pipe('grep "^[1-2]/2/2007" "household_power_consumption.txt"'), sep = ";", col.names = collabs[1,], colClasses = c("character", "character", "numeric", "numeric","numeric","numeric","numeric","numeric","numeric"))
# Create new column of combined date and time variable.
# Fix: the original called lubridate::dmy_hms() without library(lubridate),
# so the script errored on a clean session. Base as.POSIXct() with an explicit
# day/month/year format needs no extra package.
powerdata$datetime <- as.POSIXct(paste(powerdata$Date, powerdata$Time), format = "%d/%m/%Y %H:%M:%S")
# Open png device
png(filename = "plot4.png", width=480, height=480, units="px")
# Set parameters for 2 panels per row on 2 rows
par(mfrow=c(2,2))
# First panel: global active power over time
plot(powerdata$Global_active_power~powerdata$datetime, type="n", xlab="", ylab="Global Active Power")
lines(powerdata$datetime, powerdata$Global_active_power)
# Second panel: voltage over time
plot(powerdata$Voltage~powerdata$datetime, type="n", ylab = "Voltage", xlab = "datetime")
lines(powerdata$datetime, powerdata$Voltage)
# Third panel: the three sub-metering series with a legend
plot(powerdata$Sub_metering_1 ~powerdata$datetime, type="n", xlab="", ylab="Energy sub metering")
lines(powerdata$datetime, powerdata$Sub_metering_1)
lines(powerdata$datetime, powerdata$Sub_metering_2, col="red")
lines(powerdata$datetime, powerdata$Sub_metering_3, col="purple")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), col = c("black", "red", "purple"), bty="n")
# Fourth panel: global reactive power over time
plot(powerdata$Global_reactive_power~powerdata$datetime, type = "n", ylab="Global_reactive_power", xlab="datetime")
lines(powerdata$datetime, powerdata$Global_reactive_power, lwd=1)
# Close png device
dev.off()
|
f0d44f86dd5b90c6bf6dd9591c5bad2bbe973c62
|
7393b5938e34085eb5a6c84e0ec75c4dda391f59
|
/inst/solutions/solution-15.R
|
893c8c3b9becf38a22e8c473fedcb137b97ed394
|
[] |
no_license
|
zross/reportsWS
|
7bf8744e519bda2ba03f1aa6af40aa992b5d40f2
|
f75b1bb64b178ccaed06498783e5134bddefb3fd
|
refs/heads/master
| 2020-02-26T13:12:31.674237
| 2015-05-05T10:54:16
| 2015-05-05T10:54:16
| 37,918,597
| 0
| 3
| null | 2015-06-23T12:47:37
| 2015-06-23T12:47:37
| null |
UTF-8
|
R
| false
| false
| 1,068
|
r
|
solution-15.R
|
# Solution 15 - A shiny function
#
#
# Parameterize the function below. Notice that the function
# uses `faithful`, a data set that comes with base R.
#
# 1. Define an argument named `vec` for the function.
# + `vec` should take a vector of numbers
# 2. Modify the histogram call so that it plots a histogram of `vec` instead of `faithful$eruptions`
# 3. Run the function definition at the command line. Then try running
# + `myShinyFun(vec = rnorm(500))`
# + `myShinyFun(vec = faithful$eruptions)`
# + `myShinyFun(vec = iris$Sepal.Length)`
# Launch a small Shiny app: a slider chooses the number of histogram bins,
# and the histogram of `vec` is redrawn whenever the slider moves.
myShinyFun <- function(vec) {
  # UI: a sidebar slider for the bin count plus the histogram output area.
  app_ui <- fluidPage(
    sidebarLayout(
      sidebarPanel(
        sliderInput("n_breaks", label = "Number of bins:",
                    min = 10, max = 50, value = 25)
      ),
      mainPanel(plotOutput("hist"))
    )
  )
  # Server: render the density-scaled histogram of `vec`.
  app_server <- function(input, output) {
    output$hist <- renderPlot({
      hist(vec, breaks = input$n_breaks,
           probability = TRUE, xlab = "Duration (minutes)",
           main = "Geyser eruption duration")
    })
  }
  shinyApp(server = app_server, ui = app_ui)
}
|
8ed6e0825358d2c0b9eaafb8c5fe61c8acc9ed52
|
6fb4692d5065289cf625f757ab065e7247a65b9c
|
/1.R
|
b70878fcc020556b2a53f16e927bbb7a1c412993
|
[] |
no_license
|
babjineelam/parents-feedback
|
ec3ad6610462f721036b9ecd7872abe1cab94639
|
c2b661e8a47a90885b4a00bb6676844ceacfe32f
|
refs/heads/master
| 2021-01-25T08:07:51.155541
| 2017-06-08T16:36:31
| 2017-06-08T16:36:31
| 93,721,911
| 1
| 0
| null | 2017-06-08T16:36:32
| 2017-06-08T07:49:29
|
R
|
UTF-8
|
R
| false
| false
| 17
|
r
|
1.R
|
# Assign 2 to v and echo it to the console.
v <- 2
print(v)
|
f85cb8d1143388d20a2babe442d2c30b58372f47
|
e6f7c85f7bab4f39723bb3460c9c00b9eb781081
|
/obesity.R
|
68d392cdd26d0c7ba4374561fc1d5b743500434e
|
[] |
no_license
|
ashah999/Advance-Data-Mining
|
7b56d6e6834370866697c822efc38401e4ef7417
|
1fa3400473325ba47ab27689e3acedcc27745705
|
refs/heads/master
| 2020-06-14T00:38:05.892789
| 2019-08-16T21:26:38
| 2019-08-16T21:26:38
| 194,838,155
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,378
|
r
|
obesity.R
|
# Load all the packages required for the analysis
library(dplyr) # Data Manipulation
library(Amelia) # Missing Data: Missings Map
library(ggplot2) # Visualization
library(scales) # Visualization
library(caTools) # Prediction: Splitting Data
library(car) # Prediction: Checking Multicollinearity
library(ROCR) # Prediction: ROC Curve
library(e1071) # Prediction: SVM, Naive Bayes, Parameter Tuning
library(rpart) # Prediction: Decision Tree
library(rpart.plot) # Prediction: Decision Tree
library(randomForest) # Prediction: Random Forest
library(caret) # Prediction: k-Fold Cross Validation
# NOTE(review): machine-specific setwd()/paths make the script non-portable.
setwd("C:/Users/Anish/Downloads/eating-health-module-dataset1")
data<-read.csv("C:/Users/Anish/Downloads/eating-health-module-dataset1/ehresp_2014.csv")
str(data)
#Checking for Missing Values (NA or empty string per column)
colSums(is.na(data)|data=='')
head(data$eeincome1)
# Basic BMI summary statistics.
BMImean <-mean(data$erbmi)
BMImedian <- median(data$erbmi)
BMImedian
BMImean
BMImax <- max(data$erbmi)
BMIMin <- min(data$erbmi)
BMImax
BMIMin
# NOTE(review): as.double() here does not modify `data` (result discarded).
as.double(data$erbmi)
plot(data$erbmi)
length(data$erbmi)
# Negative codes in the survey are sign-flipped wholesale.
temp <- abs(data)
#Creating new cloumn by taking mean of BMI .
#Adult BMI chart showing ranges "obese I: BMI 30–34.9," "obese II: BMI 35–39.9" and "obese III: BMI ≥ 40."
#If your BMI is less than 18.5, it falls within the underweight range.
#If your BMI is 18.5 to <25, it falls within the normal.
#If your BMI is 25.0 to <30, it falls within the overweight range. If your BMI is 30.0 or higher
# Binary target: BMI >= 26.5 labelled "Yes" (obese), otherwise "No".
temp$obease = ifelse(temp$erbmi>=26.5,"Yes","No")
write.csv(temp,file="main.csv")
table(temp$obease)
hist(temp$erbmi)
ggplot(temp,aes(temp$obease,temp$erbmi)) +
geom_boxplot(aes(fill=factor(temp$obease)),alpha=0.5) +
ggtitle("BMI distribution against Obesity")
maxbmi <- max(data[!is.na(temp$erbmi),]$erbmi)
temp$range <- cut(temp$erbmi,breaks =c(0,18.5,25,30,maxbmi),labels = c("0-18.5","18.5-25","25-30","30"))
table(temp$range)
prop.table(table(temp$range))
colSums(is.na(temp)|temp=='')
#filter(data, is.na(data$obease)==TRUE|data$obease=='')
#data$obease[is.na(data$obsese)] <- "Yes"
#colSums(is.na(data)|data=='')
#table(data$obease)
as.numeric(temp$erbmi)
a<-write.csv(temp,file="main1.csv")
str(temp)
final <- read.csv("final.csv")
set.seed(1975)
index <- sample(1:dim(final)[1],dim(temp)[1]* .75 ,replace = FALSE)
training <-final[index,]
testing <-final[-index,]
model <- glm(erbmi ~ euexfreq + euwgt + euhgt + ertpreat + eufastfdfrq, data = temp)
plot(model)
par(mfrow = c(2,2))
plot(model)
avPlots(model)
library(corrplot)
corrplot.mixed(corr = cor(temp[,3:37]),tl.pos = "lt")
colSums(is.na(data)|data=='')
###########################################Naive Bayes#########################################################
library(e1071)
library(pROC)
# Outcome must be a factor for classification with naiveBayes()/caret.
testing$obease <- as.factor(testing$obease)
training$obease <- as.factor(training$obease)
#For Tunning Naive Bayes
#search_grid <- expand.grid(usekernel = c(TRUE, FALSE), fL = 0:5, adjust = seq(0, 5, by = 1))
# Fit on the training set; column 21 (the label) is dropped from the test
# predictors before predicting.
nb <- naiveBayes(training, training$obease)
nbpredict <- predict(nb, newdata = testing[, -21])
caret::confusionMatrix(nbpredict, testing$obease, positive = "Yes")
# Fix: the original `auc(naiveBayes)` passed the e1071 *function object* to
# pROC::auc, which can only error; the AUC of the actual predictions is
# computed below after the ROC curve.
str(nbpredict)
str(testing$obease)
# Plot the ROC curve for the Naive Bayes predictions.
nb1 <- prediction(as.numeric(nbpredict), testing$obease)
roc_nb <- performance(nb1, measure = "tpr", x.measure = "fpr")
plot(roc_nb)
# pROC::auc needs a numeric predictor, so convert the factor predictions.
auc(testing$obease, as.numeric(nbpredict))
library(mlr)
#Create a classification task for learning on obease Dataset and specify the target feature
task = makeClassifTask(data = training, target = "obease")
#Initialize the Naive Bayes classifier
selected_model = makeLearner("classif.naiveBayes")
#Train the model
NB_mlr = train(selected_model, task)
#Read the model learned
NB_mlr$learner.model
#Predict on the dataset without passing the target feature
predictions_mlr = as.data.frame(predict(NB_mlr, newdata = testing[,-21]))
##Confusion matrix to check accuracy
table(predictions_mlr[,1],testing$obease)
caret::confusionMatrix(predictions_mlr[,1], testing$obease, positive = "No")
####################################Feature Selection ##############################################################
# 1 ] Using Boruta: all-relevant feature selection on the training set.
library(Boruta)
boruta_output <- Boruta(training$obease ~ ., data=na.omit(training), doTrace=0)
names(boruta_output)
boruta_signif <- getSelectedAttributes(boruta_output, withTentative = TRUE)
print(boruta_signif)
dat <- boruta_signif
# Resolve tentative attributes, then re-extract the confirmed set.
roughFixMod <- TentativeRoughFix(boruta_output)
boruta_signif <- getSelectedAttributes(roughFixMod)
print(boruta_signif)
imps <- attStats(roughFixMod)
imps2 = imps[imps$decision != 'Rejected', c('meanImp', 'decision')]
head(imps2[order(-imps2$meanImp), ]) # descending sort
# 2] Random Forest classifier with variable importance.
library(randomForest)
final$obease <- as.factor(final$obease)
model1 <- randomForest(training$obease ~ ., data = training, importance = TRUE,ntree=100)
model1
# NOTE(review): `predTrain` is actually predicted on the *testing* set despite
# its name; the two predict() calls below are identical.
predTrain <- predict(model1, testing, type = "class")
caret::confusionMatrix(testing$obease, predTrain)
# Checking classification accuracy
table(predTrain, testing$obease)
predValid <- predict(model1, testing, type = "class")
# Checking classification accuracy
mean(predValid == testing$obease)
table(predValid,testing$obease)
importance(model1)
varImpPlot(model1)
# Plot the ROC curve for the random-forest predictions.
rf <- prediction(as.numeric(predTrain), testing$obease)
roc_rf <- performance(rf, measure = "tpr", x.measure = "fpr")
plot(roc_rf)
auc(testing$obease, predTrain)
#######################################Decision Tree###########################################################
# C5.0 decision tree, first boosted (trials=10), then plain.
library(C50)
c50model <- C5.0(training$obease ~., data=training, trials=10)
plot(c50model)
summary(c50model)
#indexdecision <- sample(1:length(temp),length(temp)*.30, replace= FALSE)
#training_tree <- temp[indexdecision]
#testing_tree <- temp[-indexdecision]
cFifity <- C5.0(training$obease ~ .,data = training)
cFifity
c <- predict(cFifity,testing[,-21])
caret :: confusionMatrix(c,testing$obease,positive = "No")
# Plot the ROC curve for the single C5.0 tree.
dt <- prediction(as.numeric(c), testing$obease)
roc_dt <- performance(dt, measure = "tpr", x.measure = "fpr")
plot(roc_dt)
auc(testing$obease, c)
# Winnowing feature-selection variant.
# NOTE(review): this predict() drops column 38 while the earlier one dropped
# column 21 -- confirm which column holds the label.
cFiftyWinnow <- C5.0(training$obease ~ ., data = training, control = C5.0Control(winnow = TRUE))
c <- predict(cFiftyWinnow,testing[,-38])
caret :: confusionMatrix(testing$obease,c,positive = "Yes")
# Tune C5.0 via caret with 5 x 10-fold repeated cross-validation on Kappa.
control <- trainControl(method="repeatedcv", number=10, repeats=5) #5 x 10-fold cv
metric <- "Kappa"
temp <- as.data.frame(temp)
# NOTE(review): this trains on `temp` (the raw recoded data, numeric obease
# column from abs()) rather than on `training` -- confirm intent.
optimModel <- train(temp$obease~., data=temp, method="C5.0", metric=metric, trControl=control)
plot(optimModel)
str(training)
str(testing)
######################################Support Vector Machine (SVM)################################
library(kernlab)
# Default e1071 SVM (RBF kernel) on all predictors.
svm_model <- svm (obease ~., data=training)
summary (svm_model)
pred1 <- predict (svm_model, testing)
library (caret)
caret::confusionMatrix (pred1, testing$obease)
# Plot the ROC curve for the SVM predictions and report the AUC.
pr <- prediction(as.numeric(pred1), testing$obease)
roc <- performance(pr, measure = "tpr", x.measure = "fpr")
plot(roc)
auc(testing$obease, pred1)
|
bf6b69178ddc4b6c35f5a4c00019705cc57c47b8
|
d1f7dfcd4f4c5d673081d95393c7ff3956dee39c
|
/modules/weights_ui.R
|
716975cb0752e1c13ca2eb67722a5fb9d9fafe7d
|
[
"MIT"
] |
permissive
|
rfsaldanha/tobler
|
bbde1a52e86a7c2f21aa080d2368ccf0a59f287e
|
b103a563ce8782095ff259767c342387cf89c820
|
refs/heads/master
| 2022-02-03T04:14:25.727182
| 2022-01-28T12:07:17
| 2022-01-28T12:07:17
| 208,253,774
| 11
| 2
|
MIT
| 2021-07-24T13:40:21
| 2019-09-13T11:45:58
|
R
|
UTF-8
|
R
| false
| false
| 7,016
|
r
|
weights_ui.R
|
# Shiny UI (argonDash) for the "Spatial Weights Matrix" tab: five builder
# sub-tabs (contiguity, inverse distance, k-nearest, Baumont, Stakhovych)
# plus a results section showing the current primary/secondary matrices.
# NOTE(review): several calls below carry a trailing comma before the closing
# parenthesis (e.g. after actionButton(...) and inside uiOutput(..., )).
# Whether that is tolerated depends on the receiving function -- confirm.
weights_tab <- argonTabItem(
  tabName = "weights",
  argonH1(display = 3, "Spatial Weights Matrix"),
  argonRow(
    argonColumn(
      width = 12,
      argonTabSet(
        id = "weights-tab",
        card_wrapper = TRUE,
        horizontal = TRUE,
        circle = FALSE,
        size = "lg",
        width = 12,
        #iconList = lapply(X = 1:3, FUN = argonIcon, name = "atom"),
        # Tab 1: rook/queen contiguity of a chosen order and coding scheme.
        argonTab(
          tabName = "Spatial Contiguity Matrix",
          active = TRUE,
          h3("Spatial Contiguity Matrix"),
          radioButtons(inputId = "weights_contiguity_radio",
                       label = "Contiguity matrix options",
                       choices = list("Rook" = 1, "Queen" = 2),
                       selected = 1),
          numericInput(inputId = "weights_contiguity_order", label = "Order", min = 1, value = 1),
          radioButtons(inputId = "weights_contiguity_style",
                       label = "Coding scheme style",
                       choices = list("Binary coding" = "B",
                                      "Row standardised" = "W",
                                      "Globally standardised" = "C"),
                       selected = "W"
          ),
          actionButton(inputId = "weights_contiguity_create", label = "Create and use as primary matrix"),
          actionButton(inputId = "weights_contiguity_create_secondary", label = "Create and use as secondary matrix"),
        ),
        # Tab 2: inverse-distance weights with distance bounds and a power.
        argonTab(
          tabName = "Inverse Distance Matrix",
          active = FALSE,
          h3("Inverse Distance Matrix"),
          numericInput(inputId = "weights_inverse_distance_lower_bound", label = "Lower Distance bound (km)", min = 0, value = 0),
          numericInput(inputId = "weights_inverse_distance_upper_bound", label = "Upper Distance bound (km)", min = 1, value = 10000),
          numericInput(inputId = "weights_inverse_distance_power", label = "Power", min = 1, max = 2, value = 1),
          radioButtons(inputId = "weights_inverse_distance_style",
                       label = "Coding scheme style",
                       choices = list("Binary coding" = "B",
                                      "Row standardised" = "W",
                                      "Globally standardised" = "C"),
                       selected = "W"
          ),
          actionButton(inputId = "weights_inverse_distance_create", label = "Create and use as primary matrix"),
          actionButton(inputId = "weights_inverse_distance_create_secondary", label = "Create and use as secondary matrix")
        ),
        # Tab 3: k-nearest-neighbour weights.
        argonTab(
          tabName = "K-Nearest Neighbors Matrix",
          active = FALSE,
          h3("K-Nearest Neighbors Matrix"),
          numericInput(inputId = "weights_k_nearest_k", label = "k", min = 1, value = 1),
          radioButtons(inputId = "weights_k_nearest_style",
                       label = "Coding scheme style",
                       choices = list("Binary coding" = "B",
                                      "Row standardised" = "W",
                                      "Globally standardised" = "C"),
                       selected = "W"
          ),
          actionButton(inputId = "weights_k_nearest_create", label = "Create and use as primary matrix"),
          actionButton(inputId = "weights_k_nearest_create_secondary", label = "Create and use as secondary matrix")
        ),
        # Tab 4: Baumont (2004) procedure -- pick k by maximising Moran's I
        # on OLS residuals over k-nearest matrices.
        argonTab(
          tabName = "Baumont (2004) procedure",
          active = FALSE,
          h3("Baumont (2004) procedure"),
          p("This procedure will regress a model by OLS and test the residuals for spatial autocorrelation (Moran's Test) using several K-Nearest neighbors matrixes, varing k from 1 to the selected maximum k. The matrix with the higher Moran's I will be selected."),
          tags$a(href="https://hal.archives-ouvertes.fr/hal-01525664/document", target="_blank", "Baumont (2004) paper link."),
          uiOutput("weights_baumont_dependent_variable_UI"),
          uiOutput("weights_baumont_idependent_variable_UI"),
          numericInput(inputId = "weights_baumont_max_k", label = "Max k", min = 5, value = 20),
          radioButtons(inputId = "weights_baumont_style",
                       label = "Coding scheme style",
                       choices = list("Binary coding" = "B",
                                      "Row standardised" = "W",
                                      "Globally standardised" = "C"),
                       selected = "W"
          ),
          actionButton(inputId = "weights_baumont_create", label = "Create and use as primary matrix"),
          actionButton(inputId = "weights_baumont_create_secondary", label = "Create and use as secondary matrix")
        ),
        # Tab 5: Stakhovych-Bijmolt (2009) general specification procedure --
        # pick the matrix minimising AIC across several spatial models.
        argonTab(
          tabName = "General specification procedure",
          p("This adapted procedure will create several spatial weights matrixes (Queen, Rook, Inverse Distance and K-Nearest Neighbors with k = 1, 5, 10, 15 and 20) and estimate several spatial models (SAR, SEM, SAC, SLX, SDM, SDEM) with those matrixes. The final matrix will be selected observing the minimum AIC obtained among all models."),
          tags$a(href="https://doi.org/10.1111/j.1435-5957.2008.00213.x", target="_blank", "Stakhovych-Bijmolt (2009) paper link."),
          uiOutput("weights_stakhovych_dependent_variable_UI"),
          uiOutput("weights_stakhovych_idependent_variable_UI"),
          radioButtons(inputId = "weights_stakhovych_style",
                       label = "Coding scheme style",
                       choices = list("Binary coding" = "B",
                                      "Row standardised" = "W",
                                      "Globally standardised" = "C"),
                       selected = "W"
          ),
          active = FALSE,
          actionButton(inputId = "weights_stakhovych_create", label = "Create and use as primary matrix"),
          actionButton(inputId = "weights_stakhovych_create_secondary", label = "Create and use as secondary matrix")
        )
      )
    )
  ),
  br(),br(),
  # Results section: summaries and plots of the primary/secondary matrices,
  # filled in server-side via the uiOutput placeholders.
  argonRow(
    h3("Current spatial matrix configuration"),
    argonColumn(
      width = 12,
      argonTabSet(
        id = "weights-results-tab",
        card_wrapper = TRUE,
        horizontal = TRUE,
        circle = FALSE,
        size = "lg",
        width = 12,
        argonTab(
          tabName = "Primary weights matrix",
          active = TRUE,
          h3("Primary weights matrix"),
          uiOutput("matrix_info1_UI", ),
          uiOutput("matrix_info2_UI", ),
          uiOutput("matrix_info3_UI"),
          uiOutput("matrix_plot_UI")
        ),
        argonTab(
          tabName = "Secondary weights matrix",
          active = FALSE,
          h3("Secondary weights matrix"),
          uiOutput("matrix_secondary_info1_UI", ),
          uiOutput("matrix_secondary_info2_UI", ),
          uiOutput("matrix_secondary_info3_UI"),
          uiOutput("matrix_secondary_plot_UI")
        )
      )
    )
  )
)
|
a8298ad58dc4ed47dd7fa4f0b7d39b187d5e678e
|
4369f3f66f35082d36b98f89e44118c5d3751009
|
/week_3/Week 3加強.R
|
9f353e929d8a830fa9792bac593b7bc94549a7d8
|
[] |
no_license
|
PeterChiu1202/Politics-and-Information
|
b55ea995b3b83aa90719f6b5231d8e603f02cf17
|
137f5b662740c56d32b895328b3f64bb5ce4f159
|
refs/heads/master
| 2021-04-06T08:18:38.381137
| 2018-04-16T05:53:05
| 2018-04-16T05:53:05
| 124,852,139
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 434
|
r
|
Week 3加強.R
|
# install.packages('rvest')
library(rvest)
# Scrape article titles from the Liberty Times baseball section.
title=read_html("http://sports.ltn.com.tw/baseball")
title=html_nodes(title,".boxTitle .listA .list_title")
title=html_text(title) # keep only the text
# title=iconv(title,"UTF-8")
title
# Scrape the matching article URLs (href attributes of the list links).
url=read_html("http://sports.ltn.com.tw/baseball")
url=html_nodes(url,".boxTitle .listA a")
url=html_attr(url,"href")
url
# NOTE(review): unfinished scaffolding -- this loop evaluates and discards a
# string literal nine times and never uses `i`; presumably pagination was
# intended. Confirm and complete or remove.
for (i in c(1:9)) {
  "http://sports.ltn.com.tw/baseball/7"
}
|
803df762d8649bbba37129ad0bd1fa19421a83cb
|
b8536379258ff810885878d0ea9bbfc940de30fd
|
/R-Scripts/Clustering/GitHub/SOM.R
|
1d24a921edb37fc35eac1b9cd159da3f690f59e7
|
[] |
no_license
|
SajjadZaidi/ComparitiveStudyOfTextMining
|
32e88834e505cbec3fd7bcdae59e0ca29bb7cca0
|
9b8e6873fcf8f28e4b5993af3c2fecb7c5a6f953
|
refs/heads/master
| 2021-08-24T09:42:23.948358
| 2017-12-09T03:07:04
| 2017-12-09T03:07:04
| 113,623,889
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,157
|
r
|
SOM.R
|
library("kohonen")
#Add Confusion Matrix
# read arff file
set.seed(800)
start_time <- Sys.time()
git = read.csv("D:/Masters/Sem2/671/Project/DataSetAndStudy/DataSetsForStudy/5-Github/Git.csv", stringsAsFactors=F)
# scale data
git.sc = scale(git[, 2:7])
# build grid
git.grid = somgrid(xdim = 5, ydim=5, topo="hexagonal")
# build model
git.som = som(git.sc, grid=git.grid, rlen=100, alpha=c(0.05,0.01))
git.som
plot(git.som, type="changes")
# A bunch different Visulaization
plot(git.som, type="count")
plot(git.som, type="dist.neighbours")
plot(git.som, type="codes")
coolBlueHotRed <- function(n, alpha = 1) {rainbow(n, end=4/6, alpha=alpha)[n:1]}
#plot(iris.som, type = "property", property = iris.som$codes[,5], main=names(iris.som$data)[4], palette.name=coolBlueHotRed)
## use hierarchical clustering to cluster the codebook vectors
groups = 2
git.hc = cutree(hclust(dist(unlist(git.som$codes))), groups)
# plot
plot(git.som, type="codes", bgcol=rainbow(groups)[git.hc])
#cluster boundaries
add.cluster.boundaries(git.som, git.hc)
end_time <- Sys.time()
TotalTime=end_time - start_time
TotalTime
object_size(gitCluster)
ptm <- proc.time()
ptm
|
46148d8529231c07e4961ef8528a9560f1eddf7b
|
a8d0d4c55c48d6e7b70ac6de5f41ef1ab36c9df2
|
/packages/knockoff/tests/testthat/test_stats.R
|
8f3a0c027d54aa0873bef6b5d4d80318d7c4ee51
|
[] |
no_license
|
maximilian-aigner/pdm
|
6ba345de761b93b29761ec0ad8870a80167b3e37
|
fab3b21b8e6b7d4cc611f2ea3fd02ca028c53233
|
refs/heads/master
| 2022-07-20T07:29:30.480829
| 2018-07-09T06:03:45
| 2018-07-09T06:03:45
| 125,624,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,424
|
r
|
test_stats.R
|
# Knockoff feature statistics must be antisymmetric: swapping an original
# column with its knockoff copy flips the sign of that variable's statistic
# and leaves the others unchanged. Verified on a random fixed-design problem.
test_that('Statistics obey antisymmetry property', {
  n = 10; p = 5;
  prob = random_problem(n, p)
  knock.variables = create.fixed(prob$X)
  X = knock.variables$X
  Xk = knock.variables$Xk
  G = cbind(X, Xk)
  y = prob$y
  i = sort(sample(p, sample(p))) # Indices to swap.
  # Swap each selected original column with its knockoff counterpart.
  G_swap = G
  G_swap[,c(i,i+p)] <- G[,c(i+p,i)]
  # Statistic on swapped data equals the original, negated exactly at the
  # swapped indices (up to numerical tolerance).
  expect_antisymmetric <- function(stat) {
    orig = 1:p; ko = (p+1):(2*p);
    expect_equal(stat(G[,orig],G[,ko],y),
                 stat(G_swap[,orig],G_swap[,ko],y) * ifelse(1:p %in% i, -1, 1),tolerance = 1e-3)
  }
  expect_antisymmetric(stat.forward_selection)
  stats_fs_omp = function(X,Xk,y) stat.forward_selection(X, Xk, y, omp=FALSE)
  expect_antisymmetric(stats_fs_omp)
  stats_lasso_diff = function(X,Xk,y) stat.lasso_lambdadiff(X, Xk, y, nlambda=100000)
  expect_antisymmetric(stats_lasso_diff)
  stats_lasso_signed_max = function(X,Xk,y) stat.lasso_lambdasmax(X, Xk, y, nlambda=100000)
  expect_antisymmetric(stats_lasso_signed_max)
})
# For an orthonormal design, each variable's lasso entry value equals the
# absolute least-squares coefficient |X'y|.
test_that('Finding the max lambda in lasso works for orthonormal design', {
  n = 30; p = 10; amplitude = 3.5;
  X = qr.Q(qr(rnorm_matrix(n,p)))
  beta = amplitude * rnorm(p)
  y = X %*% beta + rnorm(n)
  beta_ls = as.vector(t(X) %*% y)
  # expect_equal(lasso_max_lambda_lars(X, y), abs(beta_ls))
  expect_equal(lasso_max_lambda_glmnet(X, y, nlambda = 1e4, intercept=F, standardize=F), abs(beta_ls),
               tolerance = 1e-3)
})
|
9c1f13d5a74f428353c0fe347c50a9c1481b31f1
|
af153a41d83bc7194a4b2a4e22ba0c82780f0389
|
/CourseraDataScience/pollutantmean.R
|
d963ed9fb6a8fdb8278fbbbaa532896df87fda0e
|
[] |
no_license
|
d25higgins/datasciencecoursera
|
45fb26f7da8c07be6e0ef38a417e70ccb558de2d
|
46110700bd5ca0ce81aecb2b7bd3828d894f4919
|
refs/heads/master
| 2016-09-06T01:05:06.297867
| 2014-12-22T18:46:33
| 2014-12-22T18:46:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,807
|
r
|
pollutantmean.R
|
## ==================================================================
## ProgrammingAssignment #1 - pollutantmean.R
## ==================================================================
## Author: d25higgins
## Incept: Dec 6, 2014
## Description:
## ==================================================================
## A function that loads pollution data from a directory of .csv
## files and returns the mean for the pollutant accross all of the
## monitors while ignoring missing data.
## ==================================================================
## Edit History:
##
## ==================================================================
## Mean of a pollutant across monitor files, ignoring missing values.
##
## directory: folder containing the per-monitor CSV files ("001.csv", ...).
## pollutant: name of the pollutant column to average (any column name works,
##            e.g. "sulfate" or "nitrate").
## id:        integer vector of monitor ids to include (default 1:332).
##
## Returns the mean of the pollutant over all requested monitors, rounded to
## 3 decimal places, with NA observations removed.
pollutantmean <- function(directory, pollutant, id = 1:332){
  ## Collect the pollutant column from every requested monitor file.
  ## Indexing with monData[[pollutant]] generalizes to any column name and
  ## fixes the original bug where an unrecognized pollutant silently reused
  ## the previous monitor's data; lapply also avoids growing a vector with
  ## append() inside a loop.
  per_monitor <- lapply(id, function(i) {
    monData <- read.csv(buildFileName(directory, i))
    monData[[pollutant]]
  })
  ## calculate and return the mean across all monitors
  round(mean(unlist(per_monitor), na.rm = TRUE), digits = 3)
}
## Drop missing observations from a vector, preserving the original order.
cleanPollutantData <- function(v) {
  v[!is.na(v)]
}
## Build the path to a monitor's CSV file, zero-padding the id to three
## digits, e.g. buildFileName("specdata", 7) -> "specdata/007.csv".
## sprintf("%03d", ...) replaces the previous paste/substr juggling and,
## unlike it, does not truncate ids with more than three digits.
buildFileName <- function(directory, i) {
  file.path(directory, sprintf("%03d.csv", i))
}
|
727bf713054e72e24127c718fcb58048cc4f9de1
|
a815407aeafc5224b546fbe905af757f40bed1d3
|
/R/import_norm.R
|
a9d645f49a72001b493c12fbaaedfb8d9969cee7
|
[] |
no_license
|
iaradsouza1/MicroarrayMethods
|
a51de58e2e8a527af3bd3f50e32eedc593024360
|
e717ec81551daacebb9dfe804a761c29371139bb
|
refs/heads/master
| 2021-11-24T09:03:44.411844
| 2021-10-29T17:27:44
| 2021-10-29T17:27:44
| 166,096,496
| 0
| 1
| null | 2020-06-25T23:34:12
| 2019-01-16T19:17:58
|
R
|
UTF-8
|
R
| false
| false
| 831
|
r
|
import_norm.R
|
#' Import and normalize
#'
#' Import CEL files (Affymetrix) and normalize with the RMA method.
#'
#' @param celfiles_path Path to the directory holding the .CEL files.
#' @param pheno_data File name with metadata information from the experiment.
#' @param sep Field-separator format of \code{pheno_data}.
#' @param filenames A vector containing the names of .CEL files, if only a
#'   subset of files needs to be imported. (optional)
#' @param ... Other parameters passed to the \code{rma()} function. See
#'   \code{?rma}.
#'
#' @return An ExpressionSet object with normalized expression values.
#' @export
#'
#' @examples
import_norm <- function(celfiles_path, pheno_data, sep, filenames = NULL, ...) {
  ## Load the raw probe-level data, then RMA-normalize it.
  raw_data <- import_celfiles(
    celfiles_path = celfiles_path,
    pheno_data = pheno_data,
    sep = sep,
    filenames = filenames
  )
  affy::rma(raw_data, ...)
}
|
2c8f667e5b6c508b8bd84a56e4a118458ad80f4b
|
65f98b3f7e3842afe26ab5a2ccf0e80ed2a2de63
|
/R/write_SIAMESE_input.R
|
c469855ab5a7180f3210e16e415781bac8de16eb
|
[] |
no_license
|
nroming/IDA
|
4dd46faa4c9679869bfd2aa30e590d7015f18b07
|
87b4d815051aa6074993d7870ce04e5f205e1f7b
|
refs/heads/master
| 2021-01-08T05:29:53.759564
| 2020-02-28T08:03:23
| 2020-02-28T08:03:23
| 241,926,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,852
|
r
|
write_SIAMESE_input.R
|
#' Creates an output file in excel format that can be used as SIAMESE downscaling input
#'
#' @param df A compatible dataframe
#' @param filename path of the Excel file where to write to (e.g. "test.xlsx")
#' @param source source_id from which scenario should be taken (e.g. "AR5")
#' @param scen Name of the scenario for which to output the data
#' @param scale_scen Name of the scenario whose GDP and population data should
#'   be used for downscaling
#' @param region Region for which the downscaling should be done
#' @param subregions A named list with the name of the list entry being the name
#'   of the subregion and the actual entries of the list being the iso3c codes
#'   of the countries. All countries not explicitly mentioned will end up in
#'   ROR (Rest of Region).
#' @param share Output absolute values or regional shares (default: shares)
#' @return NONE; called for its side effect of writing \code{filename}.
#' @import openxlsx dplyr
#' @export
# NOTE(review): dcast() below comes from reshape2, which is not listed in
# @import -- confirm it is imported elsewhere in the package. `map_reg` is
# also read from the enclosing environment (presumably package data) -- verify.
write_SIAMESE_input <- function(df, filename, source, scen, scale_scen = "SSP2",
                                region, subregions, share = TRUE){
  # keep only the rows from the relevant data sources
  tmp <- filter(df, source_id %in% c(source, "SSP", "IEA_2014"))
  # turn the list of countries by subregions into a vectors of country codes
  countries <- filter(map_reg, MESSAGE == region) %>% select(IDA) %>%
    unlist(use.names = TRUE)
  # variable vector (used for renaming): short SIAMESE name -> long IAM name
  vars <- c("Pop" = "Population",
            "Y" = "GDP",
            "Q_bio" = "Primary Energy|Biomass",
            "Q_coal" = "Primary Energy|Coal",
            "Q_gas" = "Primary Energy|Gas",
            "Q_oil" = "Primary Energy|Oil",
            "Q_nuc" = "Primary Energy|Nuclear",
            "Q_ren" = "Primary Energy|Non-Biomass Renewables")
  # strip the literal "|Total" suffix from variable names
  tmp$variable <- gsub("|Total", "", tmp$variable, fixed = TRUE)
  # filter IAM data
  df_iam <- filter(tmp, source_id == source, scenario == scen,
                   spatial == region, variable %in% vars)
  # if the IAM scenario lacks an aggregate non-biomass renewables variable,
  # build it from the individual renewable sources
  if(!("Primary Energy|Non-Biomass Renewables" %in% unique(df_iam$variable))){
    # NOTE(review): `new_var <- "..."` inside the call passes the string as the
    # second *positional* argument and leaks a `new_var` variable as a side
    # effect; `new_var = "..."` was probably intended -- confirm against the
    # signature of aggregate_variable() before changing.
    df_iam_re <- aggregate_variable(filter(tmp, scenario == scen, spatial == region),
                                    new_var <- "Primary Energy|Non-Biomass Renewables",
                                    vars = c("Primary Energy|Wind",
                                             "Primary Energy|Geothermal",
                                             "Primary Energy|Hydro",
                                             "Primary Energy|Solar"))
    df_iam <- rbind(df_iam, df_iam_re)
  }
  # filter IEA energy data
  df_energy <- filter(tmp, source_id == "IEA_2014", variable %in% vars,
                      spatial %in% countries)
  # filter SSP population and GDP data
  df_ssp <- filter(tmp, source_id == "SSP", scenario == scale_scen,
                   variable %in% vars,
                   model %in% c("OECD Env-Growth", "IIASA-WiC POP"),
                   spatial %in% countries)
  # remove temporary data
  rm(tmp)
  # reduce the regional map so that it only contains the region of interest with
  # its countries
  map_reg <- select(map_reg, IDA, MESSAGE) %>% filter(MESSAGE == region)
  # rename variables from the long IAM names to the short SIAMESE names;
  # try(..., silent = TRUE) skips variables absent from a given data set
  for(v in 1:length(vars)){
    try(df_iam <- rename_var(df_iam, var = vars[v], new_name = names(vars)[v]), silent = TRUE)
    try(df_ssp <- rename_var(df_ssp, var = vars[v], new_name = names(vars)[v]), silent = TRUE)
    try(df_energy <- rename_var(df_energy, var = vars[v], new_name = names(vars)[v]), silent = TRUE)
  }
  # get country/or sub-region level data, restricted to decadal steps 2010-2100
  df_country <- rbind(df_ssp, df_energy)
  df_country <- filter(df_country, spatial %in% map_reg$IDA, temporal %in% seq(2010, 2100, 10))
  if(share){
    # we need to compute region totals in order to compute country or subregion
    # shares
    df_region <- group_by(df_country, temporal, variable) %>%
      summarise(total = sum(value, na.rm = TRUE)) %>% ungroup()
    # country value as share of the regional total ...
    df_country <- inner_join(df_country, df_region) %>%
      mutate(value = value/total) %>%
      select(-total)
    # ... rescaled to the IAM scenario's regional values
    reg_values <- rename(df_iam, region = value) %>% select(temporal, variable, region)
    df_country <- inner_join(df_country, reg_values) %>%
      mutate(value = value * region) %>%
      select(-region)
  }
  # include another column to categorise the countries based on whether they
  # belong to the subregion of interest or the remainder of the region
  df_country$subregion <- NA
  for(reg in names(subregions)){
    df_country[df_country$spatial %in% subregions[reg][[1]], "subregion"] <- reg
  }
  # # if NAs remain, these are filled with "ROR" - Rest Of Region
  df_country[is.na(df_country$subregion), "subregion"] <- "ROR"
  # aggregate countries to subregions and reshape to one column per variable
  df_country <- group_by(df_country, temporal, subregion, variable) %>%
    summarise(value = sum(value, na.rm = TRUE)) %>% ungroup()
  df_country <- dcast(df_country, subregion + temporal ~ variable)
  df_country <- rename(df_country, time = temporal)
  # fill up with zeros - just because in the current (2016-05) state, that is what
  # the emulator expects
  df_country[is.na(df_country)] <- 0
  # reshape the regional IAM data the same way
  df_iam <- filter(df_iam, temporal %in% seq(2010, 2100, 10))
  df_iam <- dcast(df_iam, temporal ~ variable)
  df_iam <- rename(df_iam, time = temporal)
  # create openxlsx workbook with one sheet per (sub)region plus metadata
  wb <- createWorkbook()
  # add worksheet
  addWorksheet(wb, sheetName = "metadata")
  addWorksheet(wb, sheetName = region)
  for(reg in names(subregions)){
    addWorksheet(wb, sheetName = reg)
  }
  addWorksheet(wb, sheetName = "ROR")
  # write metadata describing how the file was produced
  meta <- data.frame(name = c("scenario", "scaling scenario", "share", "region", "subregions"),
                     item = c(scen, scale_scen, share, region, " "))
  # add subregions
  for(reg in names(subregions)){
    meta <- rbind(meta, data.frame(name = reg,
                                   item = paste(unlist(subregions[reg],
                                                       use.names = FALSE), collapse = ", ")))
  }
  # find countries in ROR
  countries_ror <- setdiff(countries, unlist(subregions, use.names = FALSE))
  # add ROR information
  meta <- rbind(meta, data.frame(name = "ROR", item = paste0(countries_ror, collapse = ", ")))
  writeData(wb, sheet = "metadata", x = meta, colNames = FALSE)
  # provenance line (user, host, date) below the metadata table
  message <- paste("File generated by", Sys.info()["user"], "on", Sys.info()["nodename"], "at", date())
  writeData(wb, sheet = "metadata", x = message,
            startRow = dim(meta)[1] + 2)
  # write data
  writeData(wb, sheet = region, x = df_iam)
  for(reg in names(subregions)){
    writeData(wb, sheet = reg,
              x = filter(df_country, subregion == reg) %>% select(-subregion))
  }
  writeData(wb, sheet = "ROR", x = filter(df_country, subregion == "ROR") %>% select(-subregion))
  # write out to excel file
  saveWorkbook(wb, file = filename, overwrite = TRUE)
}
|
5d0a9f11e8fd304b8b023af5d2c62b921069e722
|
530f2465d447e630fa8430768e915e54b62d8caa
|
/R/sourceFunctions.R
|
e69db514c7943d34bfaa94d36444df4f3796dec4
|
[] |
no_license
|
dnegrey/spork
|
bf9cabf7dac81214c756a311b747e43e62c7e69a
|
3c3b0f8fff368aee9da079f7a5801cad53ad16c7
|
refs/heads/master
| 2021-01-10T13:01:04.612721
| 2016-04-28T02:42:39
| 2016-04-28T02:42:39
| 52,160,224
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 745
|
r
|
sourceFunctions.R
|
#' @title Source a directory of R functions
#' @description \code{sourceFunctions} sources all of the \code{*.R} files
#'   found in the specified directory
#' @param dir path containing the files to source
#' @param recursive logical value; recurse into sub-directories?
#' @return The \code{source} function is applied to any \code{*.R} files found
#'   in \code{dir}; the list of results is returned invisibly.
#' @examples
#' write("foo <- function(){'Hello, world!'}", "foo.R")
#' sourceFunctions(".")
#' @seealso \code{\link{source}, \link{list.files}}
#' @export
sourceFunctions <- function(dir, recursive = TRUE) {
  ## `pattern` is a regular expression: "\\.R$" matches only names ending in a
  ## literal ".R". (The previous ".R$" left the dot unescaped, so it also
  ## matched any file whose name merely ends in "R", e.g. "fooR".)
  f <- list.files(
    dir,
    pattern = "\\.R$",
    full.names = TRUE,
    recursive = recursive
  )
  invisible(
    lapply(f, source)
  )
}
|
9d2f294631f3a7a2b133df8f0c70af136fa7ac7a
|
3c7122be9073fd05934f6c192232b15fbbb16a0d
|
/2018-07-31-upload_data_into_Postgresql.R
|
77e9a5b450c347a41211d803310d427e42c5601b
|
[] |
no_license
|
qihaowei89/my_work_script
|
ac2bad26c0f7f5cc2763fe0deb0b952264775638
|
bb3daf53b9064329ba398b9117cdfc0b4c61655b
|
refs/heads/master
| 2020-03-26T08:22:22.368313
| 2018-08-14T10:11:28
| 2018-08-14T10:11:28
| 144,698,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,684
|
r
|
2018-07-31-upload_data_into_Postgresql.R
|
## Interactive scratch session: basic RPostgreSQL round-trips against a local
## "mydb" database, then a remote/local "ancestry" database.
library(RPostgreSQL)
# install.packages("RPostgreSQL")
con = dbConnect(PostgreSQL(),host="127.0.0.1",user="postgres",password="wqh123",dbname="mydb")
dbListTables(con)
# write the built-in mtcars data set and read it back
dbWriteTable(con,"mtcars",mtcars)
dbListTables(con)
dbListFields(con,"mtcars")
dbReadTable(con,"mtcars")
res = dbSendQuery(con,"select * FROM mtcars where cyl =4 order by mpg")
# You can fetch all results:
dbFetch(res)
dbClearResult(res)
# Or a chunk at a time (n = 5 rows per fetch)
res <- dbSendQuery(con, "SELECT * FROM mtcars WHERE cyl = 4")
while(!dbHasCompleted(res)){
  chunk <- dbFetch(res, n = 5)
  print(nrow(chunk))
}
# Clear the result
dbClearResult(res)
# Disconnect from the database
dbDisconnect(con)
# NOTE(review): everything from here to the reconnect below uses `con` AFTER
# dbDisconnect(con) -- these calls will fail unless re-run out of order.
dbListTables(con)
dbListFields(con,"test0729")
dbReadTable(con,"test")
dbSendStatement(con,"drop table test0729;")
dbSendStatement(con,"create table test (id int, name varchar(20));insert into test values (1,'tom');")
# remote "ancestry" database on 192.168.1.205
conn=dbConnect(PostgreSQL(),host="192.168.1.205",port=5440,dbname="ancestry",user="postgres",password="123456")
a = dbListTables(conn)
dbDisconnect(conn)
# NOTE(review): dbReadTable/dbSendStatement on `conn` after dbDisconnect(conn).
dbReadTable(conn,a[12]) %>% tail(20)
dbSendStatement(conn,"")
## Main session: local copy of the "ancestry" database used by the rest of
## the script.
library(stringr)
library(RPostgreSQL)
con = dbConnect(PostgreSQL(),host="127.0.0.1",user="postgres",password="wqh123",dbname="ancestry")
dbListTables(con)
## Parse a haplogroup result file (columns: id, three "Group(n)" candidate
## columns, final call) into the flat data.frame layout used by the
## y_group / mt_group database tables.
## NOTE(review): the str_extract/sapply calls below operate on data.frame
## columns (group[2:4]) and the paste0 on a one-column data.frame (group[5]);
## this appears to assume exactly one data row per file -- TODO confirm.
trans_to_use = function(out_file){
  group = read.table(out_file,stringsAsFactors = F)
  ## "Name(3)" -> candidate count "3" (digit inside the parentheses)
  group_num = str_extract(group[2:4],pattern = "\\(\\d{1}\\)") %>% sapply(FUN = function(n) gsub(pattern = "\\((\\d)\\)", replacement = "\\1",x = n,perl = T))
  ## "Name(3)" -> group name "Name" (text before the parentheses)
  group_class = group[2:4] %>% sapply(FUN = function(n) gsub(pattern = "(.?)\\(\\d\\)", replacement = "\\1",x = n,perl = T))
  ## empty names are stored as the literal string "NULL"
  group_class[group_class == ""] = "NULL"
  group_use = data.frame(id=group[[1]],
                         final=group[[5]],
                         png=paste0(group[5],".png"),
                         group1=group_class[1],
                         num1=group_num[1],
                         group2=group_class[2],
                         num2=group_num[2],
                         group3=group_class[3],
                         num3=group_num[3],stringsAsFactors = F)
  return(group_use)
}
## NOTE(review): leftover debug assignments used to step through AddValues2Db
## interactively; they shadow base::file and base::table in the global
## environment and can be removed.
file = region_results_use
table = "region_results"
## Append the rows of `file` to database table `table` (via the global
## connection `con`), first deleting any rows whose key values already exist
## so that re-running the script does not duplicate a sample's data.
## Keys: "region_results" is keyed on "sample", all other tables on "id".
AddValues2Db <- function(file, table) {
  if (table == "region_results") {
    index <- "sample"
  } else {
    index <- "id"
  }
  ## "locations" is a static lookup table: skip it entirely.
  ## (The original used break(), which is an error outside a loop; an early
  ## return is the intended "skip" behavior -- TODO confirm.)
  if (table == "locations") {
    return(invisible(NULL))
  }
  header <- colnames(file)
  ## Position of the key column in `file`.
  ## (The original computed which(index %in% header), which is always 1
  ## whenever the key exists; matching against the header is the intent.)
  a <- which(header == index)
  if (index %in% header) {
    ## Key values already stored in the database table.
    id <- dbSendStatement(con, sprintf('select %s from %s;', index, table)) %>%
      dbFetch(n = -1) %>% unique() %>% '[['(1)
    if (any(id %in% unique(file[, a]))) {
      if (any(grepl("_", id))) {
        ## Keys look like "<prefix>_<id>": delete by the id suffix with LIKE.
        id_tmp <- unique(file[, a]) %>% str_split(pattern = "_", simplify = TRUE) %>% '['(2)
        dbSendStatement(con, sprintf('delete from %s where %s like %s%s%s ', table, index, "'%", id_tmp, "'"))
      } else {
        dbSendStatement(con, sprintf('delete from %s where %s = %s ', table, index, unique(file[, a])))
      }
    }
  }
  ## Append the (now de-duplicated) rows.
  dbWriteTable(con, table, file, append = TRUE, row.names = FALSE)
}
## One-time schema setup for the "ancestry" database, disabled behind
## `if (FALSE)` so sourcing this script does not try to re-create existing
## tables. Flip to TRUE (or run the DDL by hand) on a fresh database.
if(FALSE){
  dbSendStatement(con,
                  "CREATE TABLE REGION_RESULTS( ID SERIAL PRIMARY KEY NOT NULL,
                  SAMPLE TEXT NOT NULL,
                  REGION TEXT NOT NULL,
                  PERCENT INTEGER);
                  CREATE TABLE LOCATIONS( ETHNIC TEXT PRIMARY KEY NOT NULL,
                  LOCAL TEXT NOT NULL,
                  OCEANIA TEXT NOT NULL);
                  CREATE TABLE NAMES ( LOCATION TEXT PRIMARY KEY NOT NULL,
                  CHINESE TEXT NOT NULL);
                  CREATE TABLE CONTENTS( ID INTEGER PRIMARY KEY NOT NULL,
                  COVER_PAGE TEXT NOT NULL,
                  REGION_PAGE TEXT NOT NULL,
                  OUTLINE TEXT NOT NULL);
                  CREATE TABLE Y_DESCRIPTION( GROUPS TEXT PRIMARY KEY NOT NULL,
                  DESCRIPTION TEXT NOT NULL);
                  CREATE TABLE SAMPLE_GENDER( SAMPLE TEXT PRIMARY KEY NOT NULL,
                  GENDER TEXT NOT NULL);
                  CREATE TABLE Y_GROUP (ID TEXT PRIMARY KEY NOT NULL,
                  FINAL TEXT NOT NULL,
                  PNG TEXT NOT NULL,
                  group1 TEXT ,
                  num1 INT NOT NULL,
                  group2 TEXT ,
                  num2 INT NOT NULL,
                  group3 TEXT ,
                  num3 INT NOT NULL
                  );
                  CREATE TABLE MT_GROUP(ID TEXT PRIMARY KEY NOT NULL,
                  FINAL TEXT NOT NULL,
                  PNG TEXT NOT NULL,
                  group1 TEXT ,
                  num1 INT NOT NULL,
                  group2 TEXT ,
                  num2 INT NOT NULL,
                  group3 TEXT ,
                  num3 INT NOT NULL);")
}
dbListTables(con)
## Load one sample's region percentages; row 58 is dropped (presumably a
## totals/footer row -- TODO confirm) and fractions are converted to integer %.
region_results = read.table("/home/wqh/B10_10003858.region.out",sep = "\t",header = T,stringsAsFactors = F)
region_num = round(as.numeric(region_results[,2][-58])*100)
region_results_use = data.frame(sample="B10_10003858",region=as.character(region_results[-58,1]),percent=region_num,stringsAsFactors = F)
## Static lookup data: report contents and ethnic-name translations.
content = read.table("/home/wqh/content_2.csv",sep = ",",header = T,stringsAsFactors = F)
names_db = read.table("/home/wqh/ethnic_name.csv",sep=",",header=F,stringsAsFactors = F)
head(names_db)
colnames(names_db) = c("ethnic","ethnic_ch","local","local_ch","oceania","oceania_ch")
## spaces -> underscores in ethnic names
## NOTE(review): %<>% is exported by magrittr, which is not library()-ed here
## -- confirm it is attached in the session.
names_db$ethnic %<>% sapply(FUN=function(n) str_replace_all(string = n,pattern = " ",replacement = "_"),simplify = T)
locations = names_db[,c(1,3,5)]
## NOTE(review): `names` shadows base::names in the global environment.
names = names_db[,c(1,2)]
colnames(names) = c("location","chinese")
## Per-sample Y and MT haplogroup calls.
y_group = trans_to_use("/home/wqh/B10_10003858.Y.out")
MT_group = trans_to_use("/home/wqh/B10_10003858.MT.out")
dbReadTable(con,"region_results")
## NOTE(review): dbWriteTable(append) immediately followed by AddValues2Db
## (which also writes the same data) inserts the rows twice unless the
## function's delete step removes the first copy -- verify this is intended.
dbWriteTable(con,"region_results",region_results_use,append=T,row.names=F)
AddValues2Db(file = region_results_use,table = "region_results")
dbReadTable(con,"region_results")
# dbSendStatement(con,"drop table y_group")
dbWriteTable(con,"content",content ,append=T,row.names=F)
dbReadTable(con,"content")
AddValues2Db(file =content, table="content")
dbWriteTable(con,"locations",locations,append=T,row.names=F)
AddValues2Db(file =locations, table="locations")
dbReadTable(con,"locations")
dbWriteTable(con,"names",names,append=T,row.names=F)
dbReadTable(con,"names")
AddValues2Db(file =y_group, table="y_group")
dbWriteTable(con,"y_group",y_group,append=T,row.names=F)
dbReadTable(con,"y_group")
dbWriteTable(con,"mt_group",MT_group,append=T,row.names=F)
dbReadTable(con,"mt_group")
|
577d44c4102f62b38833f463e5d79b38d1e39c2f
|
f14b7c62e8037a6e6811ee4286e113d8cd6c5621
|
/ting2/man/ting2-package.Rd
|
cf36600bebabf932538d1d8a4d8eb9229feee474
|
[] |
no_license
|
tinggao0716/WeatherStations
|
c6d17c8dea3deb991b1766523aec8625356baf3a
|
c67ab0a1dbf86fe5c8ca047a207e47baaa391a08
|
refs/heads/master
| 2016-09-16T13:22:01.758997
| 2014-03-19T21:35:46
| 2014-03-19T21:35:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,510
|
rd
|
ting2-package.Rd
|
\name{ting2-package}
\alias{ting2-package}
\alias{ting2}
\docType{package}
\title{
Weather Station data
}
\description{
Organize, visualize and model weather station conditions.
}
\details{
\tabular{ll}{
Package: \tab ting2\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2014-03-19\cr
License: \tab What license is it under?\cr
Depends: \tab methods\cr
}
The typical workflow (shown, commented out, under \examples) is: fetch
weather-station metadata from the web, retrieve and cache station conditions
as RDS files, then plot station locations and interpolated conditions.
}
\author{
Ting Gao <tinggao0716@gmail.com>
}
\references{
SODA
}
\keyword{ package }
\examples{
library(XML)
library(ggplot2)
library(gridExtra)
library(RgoogleMaps)
library(ggmap)
## get weather stations object (S4 class)
#source("station-from-web.R")
#location <- c(37.786289,-122.405234)
#dist_km <- 30
#filename <- paste("./data/", location[1], "_", location[2],
# "_", dist_km, ".rds", sep="")
#df <- Stations(location, dist_km)
#PwsStations <- WeatherStations(df)
#saveDf(PwsStations, filename)
#ws <- loadDf(filename)
#cat("Weather stations got")
#
## Plot stations
#png("ws.png")
#plot(ws)
#dev.off()
#cat("Weather stations plotted")
#
## get weather conditions for station list
#source("cond_util.R")
#startDate <- "2014-03-12"
#endDate <- "2014-03-13"
##weatherConditions <- getCond(startDate, endDate, getDf(ws))
#cond_file <- "./data/conditions.rds"
##saveRDS(weatherConditions, cond_file)
#wc <- readRDS(cond_file)
#cat("Conditions obtained")
#
##approximate station weather data for a particular time
#Inptime <- "2014-03-12 12:00"
#InptimeCond <- approxCond(Inptime, getDf(ws), wc)
#cat("approximate conditions obtained")
#
##list out conditions for reference
#names(InptimeCond)[!(names(InptimeCond) %in% c("Id", "Lat", "Lon"))]
#
##plot a weather condition for all stations
##plot will be in pdf
#plotCond(InptimeCond, "temperatureC")
#plotCond(InptimeCond, "humidity")
#cat("Conditions plotted")
#
## Plot for question 5
#startDt <- "2014-03-15"
#endDt <- "2014-03-17"
##weatherCond5 <- getCond(startDt, endDt, getDf(ws))
#cond_file <- "./data/conditions_5.rds"
##saveRDS(weatherCond5, cond_file)
#wc5 <- readRDS(cond_file)
#cat("Conditions for question 5 obtained")
#
#source("computation_util.R")
#stationId <- getDf(ws)$Id[1]
#timeSeq <- seq(as.POSIXct(startDt),to=as.POSIXct(endDt),by="hour")
#plotByInterp(wc5, stationId, timeSeq)
}
|
bf6ed4846b3aa119515bbdb6e3194b0892c56489
|
aed2befcda06b44324e887a55c289e32e5c7c8ae
|
/fireDataHardTest.R
|
1194247d98426d45af71d1dd028c575be3441a60
|
[] |
no_license
|
benubah/gsoc-test
|
8f95c3074c15df91e68c2d9c13d8d7840ce89314
|
39db428b67e5b6972377aa3ec938a54277ea7291
|
refs/heads/master
| 2021-04-12T11:30:51.323308
| 2019-04-01T02:06:12
| 2019-04-01T02:06:12
| 126,233,627
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,528
|
r
|
fireDataHardTest.R
|
# For upload function
\examples{
#example uploading mtcars data-frame to firebase
upload(x = mtcars, projectURL = "https://firedata-b0e54.firebaseio.com/", directory = "main")
\donttest{
# Upload example where write access is restricted to registered users only.
# Replace the token argument with the idToken obtained by calling the auth() function.
upload(x = mtcars, projectURL = "https://firedata-b0e54.firebaseio.com/", directory = "main", token = "your token")
}
}
# For download function
\examples{
#example downloading mtcars data-frame from firebase using fireData
download(projectURL = "https://firedata-b0e54.firebaseio.com/", fileName = "main/-KxwWNTVdplXFRZwGMkH")
\donttest{
# Download example where read access is restricted to registered users only.
# Replace the token argument with the idToken obtained by calling the auth() function.
download(projectURL = "https://firedata-b0e54.firebaseio.com/", fileName = "main/-KxwWNTVdplXFRZwGMkH", token = "")
}
}
# For resetPassword function
\examples{
\donttest{
# reset password example.
resetPassword(projectAPI = "AIzaSyAjZLO9-CRV3gObpwdFz-k8AiTOxHSBmdc", email = "useYourOwn@email.com")
}
}
# For dataBackup function
\examples{
#database backup example
dataBackup(projectURL = "https://firedata-efa5a.firebaseio.com", secretKey = "2bYA6k72wKna90MqPGa6yuMG7jAysoDJZwJqYXsm", "test.json")
\donttest{
dataBackup(projectURL = "your project URL", secretKey = "your secret Key", "yourfile.json")
}
}
|
fe84d7ddeec122f12a449952049d84a11a372dba
|
0b0e58d6578ea085b65407ea8702db7e60780ba5
|
/skyline_graph_v2.r
|
4eefc9244eb717f86775df9fd9ea6d67f1e05eda
|
[] |
no_license
|
hug-fr/skyline
|
0fa19e44b7923452b86d422292446b639fc152f9
|
a10ef4c62fa1a644d9112513d211af23d7a6bdb8
|
refs/heads/master
| 2021-03-12T20:00:48.256762
| 2015-06-05T07:57:21
| 2015-06-05T07:57:21
| 22,676,846
| 0
| 0
| null | null | null | null |
WINDOWS-1252
|
R
| false
| false
| 1,818
|
r
|
skyline_graph_v2.r
|
# Load attribute data via SQL (was: "INTRODUCTION DONNEES ATTRIBUTAIRES SQL")
library (RODBC)
library (plotrix)
library (reshape)
# ODBC connection to the "PostgreSQL35W" DSN
ch = odbcConnect("PostgreSQL35W",uid="hug",pwd="hug")
# Skyline angles per station and azimuth; the /* */ block is an earlier,
# commented-out variant of the query.
sql=
"
/*
select a.gid, nom_station, azimut, angle from stations.station_meteo_skyline a
join stations.geo_station_meteofrance b on a.gid=b.gid
*/
select gid, gid nom_station, azimut, angle from stations.station_meteo_skyline_v2
"
data=sqlQuery(ch, paste(sql, collapse=' '))
close(ch)
# Common y-axis limit: max skyline angle rounded up to the next multiple of 20.
mylim <- ceiling(max(data[,4])/20)*20
#data2 <- cast(data, nom_station ~ azimut, value = "angle")
# One PNG per station: a radial (polar) skyline plot plus a flat
# azimuth-vs-angle profile.
for (sta in unique(data[,1])){
  png(
    file=paste0("C:/Users/hugues.francois/Desktop/github/skyline/graph_",sta,".png"),
    width = 800, height=600, res=150
  )
  # layout: column 1 = title strip, rows = radial plot (2) over profile (3)
  mylay<-layout(matrix(c(
    1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
    1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3)
    ,18,2))
  #layout.show(mylay)
  par(mar = c(0,0,0,0))
  plot.new()
  # panel 1: station name as the plot title
  mtext(unique(data[data[,1]==sta,2]), cex=1, line =-4)
  par(mar=c(2,2,0,1), cex.axis=.6)
  # reshape this station's rows to one row with one column per azimuth
  data2 <- cast(data[data[,1]==sta,2:4], nom_station ~ azimut, value = "angle")
  # panel 2: polar skyline; angles are inverted (mylim - angle) so a high
  # horizon draws close to the center
  radial.plot(
    mylim-data2[,2:length(data2)],
    labels=c("N","NE","E","SE","S","SW","W","NW"),
    rp.type="p",
    radial.lim=c(0,mylim),
    radial.labels=rev(pretty(c(0,mylim))),
    boxed.radial=F,
    grid.unit ="°",
    line.col="#648bda",
    lwd = 2,
    start=pi/2,
    clockwise = T,
    poly.col="#648bda50"
  )
  par(mar=c(2,4,0,1))
  # panel 3: flat azimuth (x) vs skyline angle (y) profile
  plot(data[data[,1]==sta,3], data[data[,1]==sta,4],
       type = "l",
       col="#648bda",
       lwd = 2,
       axes=F,
       ylim=c(0,mylim),
       xlab = NA,
       ylab = NA,
       xlim=c(0,360)
  )
  # custom axes: tick marks drawn separately from the (shifted) labels
  axis(side = 1, tck = -.02, labels = NA)
  axis(side=1, line = -.8, lwd = 0, cex.axis =.7, font.lab=2)
  mtext(side=1, line=1.2, cex=.6, "Azimuts (degrees)")
  axis(side = 2, tck = -.02, labels = NA)
  axis(side=2, line = -.6, lwd = 0, cex.axis =.7, font.lab=2)
  mtext(side=2, line=1.4, cex=.6, "Skyline angle (degrees)")
  dev.off()
}
|
5aed371bfdc577047fe6f9e089dd9bf7ec6c12b1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/aspace/examples/CF.Rd.R
|
30ac81d4f91d956af6b1973d6d980a0ae80d6c7c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 367
|
r
|
CF.Rd.R
|
## Auto-extracted example code from the aspace CF (Central Feature) help page.
## `activities` is presumably a point data set shipped with aspace -- confirm.
library(aspace)
### Name: CF
### Title: Central Feature (CF) Calculator
### Aliases: CF
### Keywords: arith

### ** Examples

## CF example: writes the central feature of `activities` to CF_Output.txt
CF(id=1, filename="CF_Output.txt", points=activities)
## CF to shapefile example (exclude the comments below to run script)
## shp <- convert.to.shapefile(cfloc,cfatt,"id",5)
## write.shapefile(shp, "CF_Shape", arcgis=T)
|
fa2e76a13d92c900353ad2f7f8743b36fcdc058b
|
afa0f52844fd96b0a783e31901e6d1ec03ff54e9
|
/Rfuns/ExpCompareBetweenGroup.R
|
a8f8229a5cbe7665be61b23807038f18b7fff5e4
|
[] |
no_license
|
juwonk/jwbi
|
daf8d6317646019a94ccb20fd8ff5928d3937637
|
0497028c37a8fa6549ae103ad92fbbd5d48362b0
|
refs/heads/master
| 2020-04-11T20:57:57.350665
| 2018-12-17T07:30:07
| 2018-12-17T07:30:07
| 162,090,017
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,881
|
r
|
ExpCompareBetweenGroup.R
|
####################################################
###### Expression compare between group ############
####################################################

####################################################
###### Mission 1. KDM expression GBM vs LGG ########
####################################################
###
## Compare mean KDM-family gene expression between TCGA GBM and LGG cohorts.
## Requires TCGA_GBM_assay / TCGA_LGG_assay (count matrices) and
## TCGA_GBM_gene / TCGA_LGG_gene (per-row gene annotation) in the workspace.
data_raw <- list()
data_raw[[1]]<-as.data.frame(TCGA_GBM_assay)
data_raw[[2]]<-as.data.frame(TCGA_LGG_assay)
colData <- list()
colData[[1]] <- TCGA_GBM_gene
colData[[2]] <- TCGA_LGG_gene
## NOTE(review): result of this which() is not assigned -- interactive check only.
which(colData[[2]]$external_gene_name %in% c("JMJD1C", "PHF2", "PHF8", "KDM4A", "KDM4B", "KDM4C", "KDM4D"))

## normalization: rescale every sample (column) so its mean matches the
## grand mean of all column means within that cohort
for(j in 1:2){
  m<-mean(colMeans(data_raw[[j]]))
  for(i in 1:ncol(data_raw[[j]])){
    m2<-colMeans(data_raw[[j]][i])
    data_raw[[j]][i]<-data_raw[[j]][i]*m/m2
  }
}

## normalization check: all column means should now be (near) identical
colMeans(data_raw[[1]])

data <- data_raw
## Extract the KDM-family rows and label them by gene symbol.
KDMs<-list()
for(i in 1:2){
  data[[i]]$symbol <- colData[[i]]$external_gene_name
  KDMs[[i]] <- data[[i]][which(data[[i]]$symbol %in% c("JMJD1C", "PHF2", "PHF8", "KDM4A", "KDM4B", "KDM4C", "KDM4D")),]
  rownames(KDMs[[i]]) <- KDMs[[i]]$symbol
  ## drop the helper `symbol` column (last column) again
  KDMs[[i]] <- KDMs[[i]][,1:(ncol(data[[i]])-1)]
}
## All samples of a cohort share one column label so melt() groups them.
colnames(KDMs[[1]])<-replicate(ncol(KDMs[[1]]), "GBM") #paste("GBM", 1:ncol(KDMs[[1]]),sep = "_")
colnames(KDMs[[2]])<-replicate(ncol(KDMs[[2]]), "LGG") #paste("LGG", 1:ncol(KDMs[[2]]),sep = "_")

dim(KDMs[[1]])
## sanity check: both cohorts must have the genes in the same row order
table(rownames(KDMs[[1]])==rownames(KDMs[[2]]))
KDMs_table<-cbind(KDMs[[1]], KDMs[[2]])
KDMs_table$symbols <- rownames(KDMs_table)
## long format: one row per (gene, sample) with cohort in `variable`
KDMs_melted<-melt(KDMs_table)
KDMs_melted$variable <- factor(KDMs_melted$variable, levels = c("GBM", "LGG"))
## Grouped bar chart of expression per gene, GBM vs LGG side by side.
ggplot(KDMs_melted, aes(x=KDMs_melted$symbols, y=KDMs_melted$value)) +
  geom_col(aes(fill=KDMs_melted$variable), position = "dodge") +
  ggtitle("GBM(174) vs LGG(529) on KDMs") +
  xlab("Genes") + ylab("mean(counts)") +
  scale_fill_discrete(name="Group")
|
a5b1af74ed46e1e81ce7a4ae1dfe7bea0f938c42
|
e395de81cb996f9f82dbc98db8d4e8b46bcc0d18
|
/ui.R
|
e26fe5d6bc829c39a18418efd728233beb676fae
|
[] |
no_license
|
mhoban/tides
|
3abe4dd2b3522247b045b0af8d198b51604bc440
|
2b77336a03db21fe2d739ca26b2a5ced401817d7
|
refs/heads/master
| 2022-12-27T18:37:28.283222
| 2020-10-13T03:22:00
| 2020-10-13T03:22:00
| 296,749,559
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,666
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(stringr)
library(tidyverse)
library(waiter)
library(lubridate)
library(plotly)
# Define UI for application that draws a histogram
## UI: a date-range picker on the left, and the tide graph plus tide/moon
## tables (rendered by server.R) on the right.
shinyUI(fluidPage(
  use_waiter(),
  # use_waitress(),

  # Application title
  titlePanel("Kāne‘ohe Bay Tide Predictions"),

  # Sidebar with a date-range input (up to 5 years either side of today, HST)
  sidebarLayout(
    sidebarPanel(
      dateRangeInput(
        "daterange",
        "Date Range",
        start = today(tzone = "HST"),
        end = today(tzone = "HST"),
        min = today(tzone = "HST") - years(5),
        max = today(tzone = "HST") + years(5)
      ),
      # h5("hover over the graph"),
      # h5("to see data points"),
      # h5("(you might have to click on it)"),
      #tags$small("clicking the 'autoscale' button will make the plot look bad"),
      # Legend explaining the moonrise/moonset glyphs drawn on the graph
      h5("On the graph:"),
      div("Moonrise looks like this: ",tags$img(src="img/first_quarter.png",style="width: 50px; height: 50px")),
      div("Moonset looks like this: ",tags$img(src="img/first_quarter_set.png",style="width: 50px; height: 25px")),
      div("Moon phases won't be perfect, or even particularly accurate, ok?")
    ),

    # Main panel: interactive tide plot plus tide and moon-phase tables
    mainPanel(
      plotlyOutput("tidegraph"),
      tableOutput("tidetable"),
      tableOutput("moonphase")
    )
  )
))
|
5c6f9735552c774891ced8f4ceb67c1e3650f59a
|
c57bc21130d865deea21bf10a59abd873f75b9f2
|
/DailyDashboard/DailyStandup.R
|
981d1201e46a164928583ba8f3514591f2dbb166
|
[] |
no_license
|
ygg/bl-centara
|
88b1630016d4f0d5b1fd618b9308a2a67a60db5c
|
e3624982fc21221f103161abff2a138a09fb5c55
|
refs/heads/master
| 2021-01-13T16:54:12.044998
| 2017-01-22T12:25:42
| 2017-01-22T12:25:42
| 79,711,482
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,672
|
r
|
DailyStandup.R
|
library(shiny)
library(dplyr)
library(shinydashboard)
library(ggvis)

## `kpi` (daily KPI data frame) must already be in the workspace; the readRDS
## line below is the usual way to load it.
# kpi <- readRDS('../../AdHoc/Budget.rds')

kpi %>%
  glimpse()

## KPI names offered in the UI drop-down.
## NOTE(review): "UDC" appears twice in this vector -- likely one of them was
## meant to be a different KPI; confirm.
kpi_list <- c("UWC","NetGW","Bonus","NetEGW","APD","NDC","NVC","UDC","UDC","Wager","WD","Dep","TRTP")
month_list <- 1:12
## Dashboard UI: KPI and month selectors above a tab box that shows the
## daily trend and the month-to-date cumulative view.
ui <- dashboardPage(
  dashboardHeader(title = "Track Daily KPI"),
  dashboardSidebar(
    menuItem("Dashboard", tabName = "dashboard", icon = icon("dashboard")),
    # NOTE(review): the "widgets" tab has no matching tabItem in the body.
    menuItem("Widgets", icon = icon("th"), tabName = "widgets",
             badgeLabel = "new", badgeColor = "green")
  ),

  dashboardBody(
    tabItems(
      tabItem(tabName = "dashboard",
              fluidRow(
                box(
                  # input$aaa: which KPI column of `kpi` to plot
                  selectInput('aaa','Select KPI' , choices = kpi_list, selected=c("UWC"))
                )
                ,box(
                  # input$mm: calendar month to filter on
                  selectInput('mm' , 'Month' , choices = month_list)
                )
              ),
              fluidRow(
                # column(1, htmlOutput("trend_ui"))
                # ,column(1, offset=5,htmlOutput("csum_ui"))
                #
                tabBox(
                  title = "KPI Trend",
                  # The id lets us use input$tabset1 on the server to find the current tab
                  id = "tabset1", height = "250px",
                  tabPanel("Trend", htmlOutput("trend_ui")),
                  tabPanel("Cumulative", offset=5,htmlOutput("csum_ui"))
                )
              )
      )
    )
  )
)
## Server: builds two reactive data sets (daily and cumulative per-day KPI
## totals, one smoothed line per year) and binds ggvis plots to the UI.
server <- function(input, output) {
  ## NOTE(review): leftover from the shinydashboard template -- `histdata` and
  ## `input$slider` are not defined anywhere in this app; no "plot1" output
  ## exists in the UI either. Candidate for removal.
  output$plot1 <- renderPlot(
    {
      data <- histdata[seq_len(input$slider)]
      hist(data)
    }
  )

  # Make selection: daily totals of the chosen KPI for the chosen month,
  # grouped by calendar year for year-over-year comparison.
  data <- reactive({
    # copy the selected KPI column into a fixed name for the pipeline below
    kpi$kpi <- kpi[,input$aaa]
    kpi %>%
      filter(month_of_year==input$mm) %>%
      mutate(year = as.factor(year_of_calendar)) %>%
      group_by(year,day_of_month) %>%
      arrange(year,day_of_month) %>%
      summarise(kpi = sum(kpi))
    # mutate(kpi =cumsum(kpi))
  })
  data %>%
    ggvis(x=~day_of_month,y=~kpi) %>%
    layer_smooths(stroke=~year) %>%
    bind_shiny("trend")
  output$trend_ui <- renderUI({
    ggvisOutput("trend")
  })

  ## Cumulative variant: same selection as `data` plus a running cumsum.
  ## NOTE(review): duplicates the pipeline above except for the final
  ## mutate(); could be derived from data() instead.
  data_ccum <- reactive({
    kpi$kpi <- kpi[,input$aaa]
    kpi %>%
      filter(month_of_year==input$mm) %>%
      mutate(year = as.factor(year_of_calendar)) %>%
      group_by(year,day_of_month) %>%
      arrange(year,day_of_month) %>%
      summarise(kpi = sum(kpi)) %>%
      mutate(kpi =cumsum(kpi))
  })
  data_ccum %>%
    ggvis(x=~day_of_month,y=~kpi) %>%
    layer_smooths(stroke=~year) %>%
    bind_shiny("csum")
  output$csum_ui <- renderUI({
    ggvisOutput("csum")
  })
}
shinyApp(ui, server)

##
##  forecast
##
## NOTE(review): unreachable in a deployed app (shinyApp blocks); presumably
## the start of planned forecasting work.
library(forecast)
|
3d84c9fb91695fa81d821bf751446ee6529aa922
|
7cd151dc58a4aa414338b8fbadb465585ee198b0
|
/plot4.R
|
e1a28caa2a05661d70f79f7461b0100d58722320
|
[] |
no_license
|
Jing666/ExData_Plotting1
|
8adfe1cd548a0bacb9cacedf6deffe71de6876f2
|
85d0c6a1a755287a0829afaf2955bb41ae93e00d
|
refs/heads/master
| 2021-01-15T16:48:49.079961
| 2015-07-13T12:54:57
| 2015-07-13T12:54:57
| 38,987,608
| 0
| 0
| null | 2015-07-13T02:38:43
| 2015-07-13T02:38:43
| null |
UTF-8
|
R
| false
| false
| 1,725
|
r
|
plot4.R
|
######################################################
# Exploratory data analysis class project 1
# chart 4: 2x2 panel of household power measurements
#          over 2007-02-01 .. 2007-02-02
# July 2015
######################################################

## Read the raw text file; "?" encodes missing values.
## (Was `na.string = "?"`, which only worked via partial argument matching;
## the real argument name is `na.strings`.)
myfile <- read.table("c:/coursera/Exploratory data analysis/household_power_consumption.txt",
                     header = TRUE, sep = ";", na.strings = "?")

## Parse the date column (dd/mm/yyyy).
myfile$Date <- as.Date(myfile$Date, format = "%d/%m/%Y")

## Keep only the two days of interest.
plotfile1 <- myfile[myfile$Date >= "2007-02-01" & myfile$Date <= "2007-02-02", ]

## Ensure the measurement columns (3:9) are numeric.
plotfile1[, 3:9] <- lapply(plotfile1[, 3:9], function(x) { as.numeric(as.character(x)) })

## Combine date and time into a single POSIXct timestamp for the x axis.
date_time <- paste(plotfile1$Date, plotfile1$Time, sep = " ")
plotfile1$DateTime <- as.POSIXct(date_time)

png(filename = "c:/coursera/Exploratory data analysis/plot4.png")
par(mfrow = c(2, 2))

## Panel 1: global active power.
plot(plotfile1$DateTime, plotfile1$Global_active_power,
     type = "l",
     ylab = "Global Active Power (kilowatts)",
     xlab = "")

## Panel 2: voltage.
plot(plotfile1$DateTime, plotfile1$Voltage,
     type = "l",
     ylab = "Voltage",
     xlab = "datetime")

## Panel 3: the three sub-metering series plus a legend.
plot(plotfile1$DateTime, plotfile1$Sub_metering_1,
     type = "l",
     col = "black",
     ylab = "Energy Sub metering",
     xlab = "")
lines(plotfile1$DateTime, plotfile1$Sub_metering_2,
      type = "l",
      col = "red")
lines(plotfile1$DateTime, plotfile1$Sub_metering_3,
      type = "l",
      col = "blue")
legend("topright", names(plotfile1[, 7:9]), lty = "solid", col = c('black', 'red', 'blue'))

## Panel 4: global reactive power.
## (The y-axis label previously read "Voltage" -- a copy/paste slip.)
plot(plotfile1$DateTime, plotfile1$Global_reactive_power,
     type = "l",
     ylab = "Global_reactive_power",
     xlab = "datetime")

dev.off()
|
e9c090bc6e870e4007670fa4ac9fac352fb03c71
|
860598c2cb818a9abd51e858abe648ab761fed07
|
/run_analysis.R
|
a3d6679fbe4921b6b276d7fa22b3327f05db1b4e
|
[] |
no_license
|
sschanel/tidy-data
|
b2f53b35cf99f38392a957e5981429c3047ad828
|
61a50b1eb710145e8c18010021c9a28847ee0313
|
refs/heads/master
| 2016-09-08T01:54:25.903977
| 2015-04-26T23:07:16
| 2015-04-26T23:07:16
| 34,630,560
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,468
|
r
|
run_analysis.R
|
library(plyr)
library(dplyr)
library(reshape2)
## cleanupName: make a raw measurement name human-readable.
## Example: "tBodyAcc-mean()-X" -> "TimeBodyAccMeanX"
cleanupName <- function(name) {
  # Ordered substitution table: pattern -> replacement.
  subs <- list(
    c("^t",       "Time"),  # time-domain prefix
    c("^f",       "FFT"),   # frequency-domain prefix
    c("-std",     "Std"),
    c("-mean",    "Mean"),
    c("\\(\\)|-", "")       # strip leftover "()" and "-"
  )
  for (s in subs) {
    name <- gsub(s[[1]], s[[2]], name)
  }
  name
}
## tidyMeans
## Downloads the UCI HAR data set and outputs a tidy data set of the means
## of all the mean/std measurements, per subject and activity.
##
## Side effects: downloads "UCI HAR Dataset.zip" into the working
## directory, unzips it there, and writes the result to "./means.txt".
## Returns: a data frame with columns Subject, Activity, Measurement, Mean.
## Requires plyr, dplyr and reshape2 (loaded at the top of this file).
tidyMeans <- function() {
## For reproducibility (mentioned in the week 1 lectures), download the file
## here in the script.
## NOTE(review): method="curl" assumes a curl binary is on the PATH —
## confirm for the target platform.
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
destfile="UCI HAR Dataset.zip",
method="curl")
## Unzip the data set.
unzip("UCI HAR Dataset.zip")
## (1) Merge the data sets
## First load everything up.
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
## rbind the X, y and subject tables (train rows first, then test rows,
## so the three tables stay row-aligned)
X <- rbind(X_train, X_test)
y <- rbind(y_train, y_test)
subject <- rbind(subject_train, subject_test)
## (4) Label the data set with descriptive variable names
## Easier to follow if we do this sooner than later.
## Load the labels from the features.txt file.
features <- read.table("./UCI HAR Dataset/features.txt")
## Label the columns (second column of features.txt holds the names)
names(X) <- features[,2]
## (2) Extract only the measurements on the mean and standard deviation
## Only keep columns that contain the text "-mean" or "-std"
X_cols <- grep("(\\-mean)|(\\-std)", names(X))
X <- X[,X_cols]
## cleanup the names while we're at it (back to 4)
names(X) <- sapply(names(X), cleanupName)
## (3) Use descriptive activity names to name the activities
## We have labels - they are in the activity_labels.txt file.
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
## Rename columns so we can do an natural join
names(activity_labels) <- c("activityID", "activity")
names(y) <- c("activityID")
names(subject) <- c("subject")
y_labeled <- inner_join(y, activity_labels)
## Add the subject and activity columns
X <- cbind(subject, select(y_labeled, activity), X)
## (5) Create a tidy data set with the average of each variable
## for each activity and each subject
## Melt this thing so the remaining columns (not subject or activity)
## are pivoted into variable and value.
melted <- melt(X, id=c("subject", "activity"))
## ddply the melted data frame with a summary column for the mean of each
## kind of measurement
means <- ddply(melted, .(subject, activity, variable), summarize, mean=mean(value))
## give the columns some better names
names(means) = c("Subject", "Activity", "Measurement", "Mean")
## write it into the working directory
write.table(means, "./means.txt", row.names=FALSE)
## return it as a value
means
}
|
6a2a738fefb0b4f65ababba557f977c08daec688
|
9951be18da40a04eaea834221b0078c673e01287
|
/mac/BPPFormatUI.r
|
8db22a5f8c93b4e9da362bec380740c3af6719ba
|
[] |
no_license
|
MatthewCallis/bppformat
|
6a5ad19f50314c14e23ea78abe66b3ad98651fc6
|
06122598ca27be82e7aa61d0c3015586ab595ba9
|
refs/heads/master
| 2021-01-19T07:56:48.865218
| 2013-05-02T22:49:14
| 2013-05-02T22:49:14
| 9,818,430
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 299
|
r
|
BPPFormatUI.r
|
//-------------------------------------------------------------------------------
//
//	File:
//		BPPFormatUI.r
//
//	Description:
//		Dialog for the BPPFormat Mac project.
//		NOTE: despite the .r extension this is a Rez resource-definition
//		source file (classic Mac OS), not R code. It currently only pulls
//		in the standard resource type definitions.
//
//-------------------------------------------------------------------------------
#include "Types.r"
// end BPPFormatUI.r
|
699f593357d85359da498d6708a049a809efe5bf
|
0b55af6f2fc71dbd11501e51db353fedbfbd2bb1
|
/man/agent_sim.Rd
|
369772be9994a0a931f04c391d615ec5361c41a0
|
[] |
no_license
|
brandmaier/agentSim
|
dab9c838b76846ed07e61a64dad99aaf32369207
|
2fe7708a1eb078ff204fd0fccc4959afd898c8bf
|
refs/heads/master
| 2023-01-05T20:42:33.681368
| 2020-10-24T09:59:23
| 2020-10-24T09:59:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 748
|
rd
|
agent_sim.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation.R
\name{agent_sim}
\alias{agent_sim}
\title{Set up an agentSim simulation}
\usage{
agent_sim(
agents = NULL,
time_total = NULL,
environment = list(),
referee = NULL
)
}
\arguments{
\item{agents}{A collection of agents to simulate, e.g. created via \code{agents_from_param_table}.}
\item{time_total}{Number of time ticks to simulate}
}
\value{
a simulation
}
\description{
Set up an agentSim simulation
}
\examples{
#Example simulation in which each agent prints its own id to the console
init_pop <- tibble::tibble(
id = c(1:50),
x = runif(50, 0, 1),
y = runif(50, 0, 1)
)
agent_task <- create_agent_task({
print(agent$id)
})
sim <- agents_from_param_table(init_pop) \%>\%
set_task(agent_task) \%>\%
agent_sim(10)
}
|
720dfe3e29501d3f4d75a224e5071a4df5e76acd
|
f133eaaa60ae28cad49fcf4ad043c7584f84d0a7
|
/benchmarkvalues.R
|
c910fa4b29679f0dade300839809b7674433b1eb
|
[] |
no_license
|
3841marklid/itusoneanalysis
|
51967456dd3813998a9d690bc12836d45c7e4ea0
|
91058d2bf55badca2a6a8cd03f7c90f3312cacd1
|
refs/heads/master
| 2021-01-19T01:01:35.007280
| 2016-08-02T04:15:04
| 2016-08-02T04:15:04
| 64,668,121
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,240
|
r
|
benchmarkvalues.R
|
### Ideal relative heart rate (RHR) thresholds from VO2, per worker.
### NOTE(review): this script expects the data frames `Vo2` and
### `Heat.vs.Hr` plus the cutoffs `fit_thresholdworker1`..`fit_thresholdworker4`
### to already exist in the workspace (presumably created by an earlier
### script) — confirm before running.
##worker1
age1 <- Vo2[1,"Age.Worker.1"] #worker's age
absmaxhr1=209-.7*(age1) #maximum heart-rate equation: HRmax = 209 - 0.7*age
rhr1=((Heat.vs.Hr$Worker.1.HR)/absmaxhr1)*100 #relative heart rate as % of max
vo2rhr1 <- data.frame(Relative.Heart.Rate=rhr1, Vo2.Abs=Vo2$VO2.Abs.Worker.1) #data frame with just vo2 and rhr
vo2w1=vo2rhr1$Vo2.Abs #vector of vo2 values
plot(rhr1,Vo2$VO2.Abs.Worker.1) #plot vo2 vs rhr
abline(h=fit_thresholdworker1,untf=FALSE,col="red") #vo2 cutoff line; anything below is considered "unsafe"
model1 <- lm(vo2w1 ~ rhr1, data=vo2rhr1) #linear model vo2 ~ rhr
abline(model1,col="green") #overlay the fitted line
equation1=coef(model1) #model coefficients
paste('y=',coef(model1)[[2]],'*x','+',coef(model1)[[1]]) #fitted linear equation as text
slope1=coef(model1)[[2]] #slope from model
intercept1=coef(model1)[[1]] #intercept from model
maxrhr1=(fit_thresholdworker1-intercept1)/slope1 #rhr where the fit crosses the vo2 cutoff
maxhr1=(maxrhr1*(absmaxhr1)/100) #corresponding absolute heart rate
summary(model1)
##worker2 (same pipeline as worker 1)
age2 <- Vo2[1,"Age.Worker.2"] #this calls the age
absmaxhr2=209-.7*(age2) #maximum hr equation
rhr2=((Heat.vs.Hr$Worker.2.HR)/absmaxhr2)*100 #calculates relative heart rate index
vo2rhr2 <- data.frame(Relative.Heart.Rate=rhr2, Vo2.Abs=Vo2$VO2.Abs.Worker.2) #create new data frame with just vo2 and rhr
vo2w2=vo2rhr2$Vo2.Abs #create index for vo2 values
plot(rhr2,Vo2$VO2.Abs.Worker.2) #plot vo2 vs rhr
abline(h=fit_thresholdworker2,untf=FALSE,col="red") #create the cutoff value for vo2. Anything less than 2.4 is considered "unsafe"
model2 <- lm(vo2w2 ~ rhr2, data=vo2rhr2) #create a linear model
abline(model2,col="green") #plot the model
equation2=coef(model2) #display the coefficients of the model
paste('y=',coef(model2)[[2]],'*x','+',coef(model2)[[1]]) #output the linear equation
slope2=coef(model2)[[2]] #pulls slope from model
intercept2=coef(model2)[[1]] #pulls intercept from model
maxrhr2=(fit_thresholdworker2-intercept2)/slope2 #find where the plot drops below the acceptable vo2 value
maxhr2=(maxrhr2*(absmaxhr2)/100) #calculate corresponding heart rate
summary(model2) # p value is less than 2.2e-16 and r squared adjusted is .95. Can assume data is significant.
##worker3 (same pipeline as worker 1)
age3 <- Vo2[1,"Age.Worker.3"] #this calls the age
absmaxhr3=209-.7*(age3) #maximum hr equation
rhr3=((Heat.vs.Hr$Worker.3.HR)/absmaxhr3)*100 #calculates relative heart rate index
vo2rhr3 <- data.frame(Relative.Heart.Rate=rhr3, Vo2.Abs=Vo2$VO2.Abs.Worker.3) #create new data frame with just vo2 and rhr
vo2w3=vo2rhr3$Vo2.Abs #create index for vo2 values
plot(rhr3,Vo2$VO2.Abs.Worker.3) #plot vo2 vs rhr
abline(h=fit_thresholdworker3,untf=FALSE,col="red") #create the cutoff value for vo2. Anything less than 2.4 is considered "unsafe"
model3 <- lm(vo2w3 ~ rhr3, data=vo2rhr3) #create a linear model
abline(model3,col="green") #plot the model
equation3=coef(model3) #display the coefficients of the model
paste('y=',coef(model3)[[2]],'*x','+',coef(model3)[[1]]) #output the linear equation
slope3=coef(model3)[[2]] #pulls slope from model
intercept3=coef(model3)[[1]] #pulls intercept from model
maxrhr3=(fit_thresholdworker3-intercept3)/slope3 #find where the plot drops below the acceptable vo2 value
maxhr3=(maxrhr3*(absmaxhr3)/100) #calculate corresponding heart rate
summary(model3)
##worker4 (same pipeline as worker 1)
age4 <- Vo2[1,"Age.Worker.4"] #this calls the age
absmaxhr4=209-.7*(age4) #maximum hr equation
rhr4=((Heat.vs.Hr$Worker.4.HR)/absmaxhr4)*100 #calculates relative heart rate index
vo2rhr4 <- data.frame(Relative.Heart.Rate=rhr4, Vo2.Abs=Vo2$VO2.Abs.Worker.4) #create new data frame with just vo2 and rhr
vo2w4=vo2rhr4$Vo2.Abs #create index for vo2 values
plot(rhr4,Vo2$VO2.Abs.Worker.4) #plot vo2 vs rhr
abline(h=fit_thresholdworker4,untf=FALSE,col="red") #create the cutoff value for vo2. Anything less than 2.4 is considered "unsafe"
model4 <- lm(vo2w4 ~ rhr4, data=vo2rhr4) #create a linear model
abline(model4,col="green") #plot the model
equation4=coef(model4) #display the coefficients of the model
paste('y=',coef(model4)[[2]],'*x','+',coef(model4)[[1]]) #output the linear equation
slope4=coef(model4)[[2]] #pulls slope from model
intercept4=coef(model4)[[1]] #pulls intercept from model
maxrhr4=(fit_thresholdworker4-intercept4)/slope4 #find where the plot drops below the acceptable vo2 value
maxhr4=(maxrhr4*(absmaxhr4)/100) #calculate corresponding heart rate
summary(model4)
## Collect the per-worker maximum safe heart rates into one data frame.
Max_HR_Values <- data.frame(Worker.1=maxhr1,Worker.2=maxhr2,Worker.3=maxhr3,Worker.4=maxhr4)
## This section estimates the ideal heat index given relative heart rate.
#worker1
plot(rhr1,Heat.vs.Hr$Worker.1.Heat)
# fix: the original regressed on `worker1heat`, a variable never defined
# in this script; use the same heat-index column the plot above uses.
worker1heat <- Heat.vs.Hr$Worker.1.Heat
modelheat1 <- lm(worker1heat ~ rhr1)
abline(modelheat1)
heatvsrhrslope1=coef(modelheat1)[[2]]  #slope of heat ~ rhr fit
heatvsrhrintercept1=coef(modelheat1)[[1]]  #intercept of heat ~ rhr fit
maxheat1=((heatvsrhrslope1)*maxrhr1)+heatvsrhrintercept1  #heat index at the max safe rhr
summary(modelheat1) #p value is .871. We can safely assume no relationship exists between heat index and rhr. Therefore, the 74 high heat index cannot be reliable.
|
23248195ae6752dff3157e2fdeb07ca9ba5ed88b
|
30ee5256f363954bcacf452ab94249ddf04b270e
|
/randomforest.r
|
5005dcd4b7806eed5c8d4d5164e671397557014d
|
[] |
no_license
|
tiborh/r
|
70c45812347a65786c5bf95eccc7376f8caf7f72
|
8de2d56608b2e52faaf554f3cc955a456c58f57f
|
refs/heads/master
| 2022-11-01T05:54:55.451584
| 2022-10-27T15:15:37
| 2022-10-27T15:15:37
| 36,147,838
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 652
|
r
|
randomforest.r
|
#!/usr/bin/env Rscript
# Random-forest demo on the readingSkills data set (from the party
# package), adapted from tutorialspoint. Fits a forest predicting
# nativeSpeaker and writes the error-rate plot to a PNG under IMG.DIR.
source("common.r")  # provides stop.if.not.installed() and IMG.DIR
## source:
## https://www.tutorialspoint.com/r/r_random_forest.htm
# NOTE(review): nothing here calls library(party)/library(randomForest);
# presumably stop.if.not.installed() (from common.r) also attaches the
# packages, otherwise randomForest() below would fail — confirm.
stop.if.not.installed(c("party","randomForest"))
print(str(readingSkills))  # readingSkills ships with the party package
cat("\n1. Creating a forest:\n")
output.forest <- randomForest(nativeSpeaker ~ age + shoeSize + score,
data = readingSkills)
cat("\n2. Printing the result:\n")
print(output.forest)
cat("\n3. Printing the importance of each predictor:\n")
print(importance(output.forest,type = 2))
fn <- file.path(IMG.DIR,"randomForest_plot.png")  # IMG.DIR from common.r
png(file=fn)
plot(output.forest)
dev.off()
cat("Random Forest output has been written to:",fn,"\n")
|
1d0fdbca7223ab814c8bf30533640330b2c49362
|
e155ec2079871717911417d41c5f15afb559a2ea
|
/clase 26 de septiembre 2016, formatos de fechas.R
|
01e875820eca2c5f46e64ab9988091dd09aa30d2
|
[] |
no_license
|
Moni105/Programacion_Actuarial_lll_OT16
|
c433315850d4d8f6d04def9fe267adab7d68d896
|
bd5b856669907aa002ef4f0c8344fdb3190177ed
|
refs/heads/master
| 2020-12-25T14:33:18.084755
| 2016-12-05T02:44:13
| 2016-12-05T02:44:13
| 65,914,580
| 0
| 0
| null | null | null | null |
ISO-8859-2
|
R
| false
| false
| 863
|
r
|
clase 26 de septiembre 2016, formatos de fechas.R
|
# Class notes (26 Sep 2016): closures, lexical scoping, and date formats.
# Comments translated to English; lines that did not parse or errored in
# the original notes are fixed or commented out (marked "fix:").

# Function factory: returns a function that raises its argument to the
# power `n`, captured in the returned closure's environment.
hacer.potencia <- function(n){
  potencia <- function(x){
    x^n
  }
  potencia
}
cubica <- hacer.potencia(3)   # x^3
cuadrada <- hacer.potencia(2) # x^2
cubica(3)    # 27
cuadrada(2)  # 4
# Functions that create functions: each closure carries its own `n`
# in its enclosing environment.
ls(environment(cubica))
get("n",environment(cubica))
ls(environment(cuadrada))
get("n",environment(cuadrada))
# Lecture aside on evaluation order:
# algebraic:            3 + 3 x 3 = 12
# left-to-right chain:  3 + 3 x 3 = 18
y <- 10
f <- function(x){
  y <- 2        # local y shadows the global inside f
  y^2 + g(x)
}
g <- function(x){
  x*y           # lexical scoping: g sees the global y (10)
}
f(3)  # 2^2 + 3*10 = 34
# in VBA 10, in R 34
# VBA uses dynamic scoping
# fix: the next two lines were stray notes, not valid R code
# ("prueba a 4 espacios" = "4-space test"; "tabulacion" = "tab")
# prueba a 4 espacios
# tabulacion
# workflow reminder: read data, split it, process it, generate a report
# Date formats
x <- as.Date("1987-06-022") # note: the trailing "2" is ignored by the parser
x
unclass(x)  # internal representation: days since 1970-01-01
unclass(as.Date("1970-01-02")) # one day after the epoch -> 1
date()
as.POSIXct    # fix: was `as.POSIXct.date`, not available here; inspect the generic instead
as.POSIXlt(x) # fix: was `as.POSIXlt()` with no argument, which errors
date()
x <- as.Date("1987-06-022") # fix: was `as.date()`, which does not exist in base R
as.POSIXct(x)
as.POSIXct(x)
|
f8b174781feee1df9b1388862019ee17e3a0f16e
|
f7c0693382b13c2ec3872b9342a5820c77f78778
|
/man/mdl_courses.Rd
|
42eace8c5104f27798c1ae528f7b1ffd663f066a
|
[
"MIT"
] |
permissive
|
NAlcan/moodleR
|
2542c32d259dc9d3f8b3729219fd42b7e8f6b162
|
c505ffcd1c50a5f7e8e6ab02adb939ae65e521f7
|
refs/heads/main
| 2023-04-06T13:44:07.308081
| 2021-04-21T16:57:52
| 2021-04-21T16:57:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 494
|
rd
|
mdl_courses.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mdl_courses.R
\name{mdl_courses}
\alias{mdl_courses}
\title{Get Courses}
\usage{
mdl_courses(con = mdl_get_connection())
}
\arguments{
\item{con}{a database connection object}
}
\value{
A dbplyr reference object.
}
\description{
Returns a reference to the (cached) course table, with the most relevant columns selected.
}
\details{
For convenience a join with the category table is made, and "category_name" added
}
|
4bb166a6feae4b8dda3107b08e6cbf0c97e359c5
|
5227e8fb4619e3b4212613f3f779df8b7c3706b2
|
/man/ploteQTL.Rd
|
5a31094c3efa247302d5fe02d7236b0d0f1e907a
|
[] |
no_license
|
phamasaur/qtlpvl
|
5a5b930b4f12d67c7cc00840a63e9725da3dbc2b
|
9c199f47c21a8deb5a50cda5c8b5423c49efa15c
|
refs/heads/master
| 2023-04-19T05:30:01.716383
| 2015-08-26T05:32:19
| 2015-08-26T05:32:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
rd
|
ploteQTL.Rd
|
% Generated by roxygen2 (4.1.1.9000): do not edit by hand
% Please edit documentation in R/ploteQTL.R
\name{ploteQTL}
\alias{ploteQTL}
\title{eQTL plot}
\usage{
ploteQTL(marker.info, probepos, phenonames, markers, chr1, pos1, main = "",
plot.chr = c(1:19, "X"), add = FALSE, col = "black")
}
\arguments{
\item{marker.info}{information of markers}
\item{probepos}{probe position}
\item{phenonames}{names of probes to be plotted}
\item{markers}{marker names of eQTL}
\item{chr1}{chr of eQTLs}
\item{pos1}{pos of eQTLs}
\item{main}{main title of plot}
\item{plot.chr}{chromosomes from which probes are to be plotted}
\item{add}{start a new plot or add to an existing one.}
\item{col}{color of dots}
}
\description{
eQTL plot
}
|
90299af7c8f2d8f77e56afe51c9aece027835582
|
0cd9d5c94b659ba91d8e5e3a7b9f7538341e7a04
|
/man/listAnnotations.Rd
|
c4ec6d15121d164ebcf8aed3edef2545634c948b
|
[] |
no_license
|
Przemol/WB.WS259
|
c9f5e11e999527773e1e939d1b1e4fb1cf3b614a
|
e7836f8f6762d3d9cb4b9d9fc145b142430511c4
|
refs/heads/master
| 2021-01-01T04:49:15.775477
| 2017-07-17T17:26:26
| 2017-07-17T17:26:26
| 97,256,528
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 254
|
rd
|
listAnnotations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_functions.R
\name{listAnnotations}
\alias{listAnnotations}
\title{listAnnotations}
\usage{
listAnnotations()
}
\value{
annotations table
}
\description{
listAnnotations
}
|
b3c4d8ace50f20b335fc254e76e5cfb99efd7b95
|
75e25e706517e28b2baf63c0c78e26daf883e924
|
/ejemplo_mirt.R
|
65eb18204f9aad49bb7737ecc6f0d96cfee0b618
|
[] |
no_license
|
Cabrotfe/prueba3
|
25ed09b4f153d2a5f02c50e05928d06e07ba66df
|
02888b3fd915b68dd4ff7f4bdf7462c8d67f90b5
|
refs/heads/master
| 2023-02-01T04:48:04.606220
| 2020-12-16T02:33:03
| 2020-12-16T02:33:03
| 321,827,422
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,342
|
r
|
ejemplo_mirt.R
|
# Load packages ----------------------------------------------------------
pacman::p_load(tidyverse, mirt, stringr)
# Simulate data ----------------------------------------------------------
# 10 dichotomous 2PL items, N = 1000 respondents.
datos= simdata(a=runif(10,0.8,1.2), d=rnorm(10,0,1), itemtype = "2PL", N =1000)
datos = data.frame(datos)
# Modeling and plots -----------------------------------------------------
empirical_plot(datos, which.items = 1:10, smooth = F)
# Model ------------------------------------------------------------------
# Unidimensional Rasch model with standard errors.
model=mirt(itemtype = "Rasch", model = 1, SE = T,data=datos)
coef(model, IRTpars = T, simplify=T)
coef(model, IRTpars = T, simplify=F)
plot(model, type = "trace")
# Fit statistics ---------------------------------------------------------
M2(model)
itemfit(model)
itemfit(model, empirical.plot = 9, empirical.CI = .95)
personfit(model)
# Order items by difficulty ----------------------------------------------
# Column 2 of the IRT-parameterized item table holds the b parameters.
dif=coef(model, IRTpars=T, simplify =T)$items[,2] ## difficulties
# it_orden: reorder the item columns of `data` by ascending difficulty.
#
# Fixes two defects in the original:
#  * it indexed the global `datos` instead of the `data` argument, so it
#    only worked for that one object;
#  * it required dplyr (mutate/arrange/%>%); this version uses base
#    order() only, with identical results.
#
# data:  data frame of item responses (one column per item)
# coefs: difficulty per column, same order as names(data); coerced to
#        numeric exactly as the original did
# returns: `data` with its columns sorted by increasing difficulty
it_orden <- function(data, coefs){
  orden <- order(as.numeric(as.character(coefs)))
  data[, names(data)[orden], drop = FALSE]
}
# Reorder the simulated data by estimated difficulty and inspect the raw
# item totals (%>% comes from the tidyverse load at the top).
hola=it_orden(datos, coefs = dif)
hola %>% colSums()
|
d0e6e90f813e5337d26de06aacc65e7f72e28881
|
278477474fe4117f01bcf5ad101225e49f37e72d
|
/Engine/AnalysisResults/Tests/enrich_distance_timezone.R
|
8cf43dd0e0538798ae143f527034898c60ef0a9f
|
[] |
no_license
|
esteevanderwalt/TwitterAnalysis
|
ff967c432a88e84f0df502f4bfa9897f19cdd88b
|
2858a30f355cde4d0ec0d3e2cea52378415bf9a8
|
refs/heads/master
| 2020-09-22T12:04:31.670706
| 2019-05-07T15:48:37
| 2019-05-07T15:48:37
| 67,336,784
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,110
|
r
|
enrich_distance_timezone.R
|
suppressMessages(library(RODBC))
options(scipen=999)  # suppress scientific notation (numbers are pasted into SQL below)
#LINUX
# SECURITY NOTE(review): database credentials are hard-coded in source;
# move them to environment variables or a credential store.
myconn<-odbcConnect("SAPHANA", uid="SYSTEM", pwd="oEqm66jccx", believeNRows=FALSE, rows_at_time=1, DBMSencoding="UTF-8")
# Convert angles in degrees to radians (vectorized).
deg2rad <- function(deg) {
  deg * pi / 180
}
# Geodesic (great-circle) distance in km between two points given as
# longitude/latitude pairs IN DEGREES, using the Haversine formula.
# (fix: the old comment said "radian latitude/longitude", but the function
# converts its inputs from degrees.)
#
# Generalized: pmin() replaces min() so the function now also works
# elementwise on coordinate vectors; scalar behavior is unchanged. The
# degree->radian conversion is inlined so the function is self-contained.
haversine <- function(long1, lat1, long2, lat2) {
  rad <- function(deg) deg * pi / 180  # degrees -> radians
  long1 <- rad(long1)
  lat1 <- rad(lat1)
  long2 <- rad(long2)
  lat2 <- rad(lat2)
  R <- 6371 # Earth mean radius [km]
  delta.long <- (long2 - long1)
  delta.lat <- (lat2 - lat1)
  a <- sin(delta.lat/2)^2 + cos(lat1) * cos(lat2) * sin(delta.long/2)^2
  # pmin guards against sqrt(a) creeping fractionally above 1 from
  # floating-point error; elementwise, so vector inputs are preserved
  # (min() would collapse them to a single value).
  c <- 2 * asin(pmin(1, sqrt(a)))
  R * c # distance in km
}
# Row callback for apply(): compute the haversine distance between a
# user's geocoded profile location (longitude/latitude) and the centroid
# of the user's timezone (lon/lat), then persist it into the DISTANCE_TZ
# column of table `t` for that user.
#
# x:      one row as delivered by apply() (a character vector — hence the
#         as.numeric() coercions)
# myconn: open RODBC connection
# t:      fully-qualified name of the table to UPDATE
#
# SECURITY NOTE(review): the UPDATE statement is built by string
# concatenation; a screenname containing a quote breaks the SQL and this
# is injectable. Prefer a parameterized query if the driver allows it.
hf_latlon <- function(x, myconn, t) {
d <- haversine(as.numeric(x["longitude"]),as.numeric(x["latitude"]),as.numeric(x["lon"]),as.numeric(x["lat"]))
#print(d)
sql <- paste("update ",t," set DISTANCE_TZ=",d,sep="")
sql <- paste(sql," where ID='",x["user_id"] ,"'",sep="")
sql <- paste(sql, " and SCREENNAME='",x["screenname"],"'",sep="")
sqlQuery(myconn, sql)
}
# For every user in table `t1` whose free-text location AND timezone both
# geocode successfully (joins against TWITTER.SMP_LOCATION), compute the
# km distance between the two geocoded points and write it into the
# enrichment table `t2` — one UPDATE per row via hf_latlon().
#
# myconn: open RODBC connection
# t1:     source user table (must have ID, SCREENNAME, LOCATION, TIMEZONE)
# t2:     target enrichment table (receives DISTANCE_TZ)
getl <- function(myconn, t1, t2) {
table1 <- t1
table2 <- t2
# Active query below; a case-sensitive variant for a differently-cased
# location table is kept commented out for reference.
sql1 <- paste("SELECT U.ID, U.SCREENNAME, L.LONGITUDE AS LONG1, L.LATITUDE AS LAT1, TZ.LONGITUDE AS LONG2, TZ.LATITUDE AS LAT2 FROM ",table1," U JOIN TWITTER.SMP_LOCATION L ON LOWER(TRIM(L.LOCATION)) = LOWER(TRIM(U.LOCATION)) AND L.LATITUDE IS NOT NULL JOIN TWITTER.SMP_LOCATION TZ ON LOWER(TRIM(TZ.LOCATION)) = LOWER(TRIM(U.TIMEZONE)) AND TZ.LATITUDE IS NOT NULL WHERE U.TIMEZONE IS NOT NULL AND U.LOCATION IS NOT NULL",sep="")
#sql1 <- paste("SELECT U.ID, U.SCREENNAME, L.\"lon\" AS LONG1, L.\"lat\" AS LAT1, TZ.\"lon\" AS LONG2, TZ.\"lat\" AS LAT2 FROM ",table1," U JOIN TWITTER.SMP_LOCATION_C L ON UPPER(TRIM(L.\"location\")) = UPPER(TRIM(U.LOCATION)) AND L.\"lat\" IS NOT NULL JOIN TWITTER.SMP_LOCATION_C TZ ON UPPER(TRIM(TZ.\"location\")) = UPPER(TRIM(U.TIMEZONE)) AND TZ.\"lat\" IS NOT NULL WHERE U.TIMEZONE IS NOT NULL AND U.LOCATION IS NOT NULL",sep="")
#' ###Load data
#+ get_data
tl <- system.time(data.latlon <- sqlQuery(myconn, sql1) )
#latlon vs timezone geo (deception)
data.latlon_clean <- na.omit(data.frame(data.latlon$ID, data.latlon$SCREENNAME, data.latlon$LONG1, data.latlon$LAT1, data.latlon$LONG2, data.latlon$LAT2))
colnames(data.latlon_clean) <- c("user_id", "screenname", "longitude", "latitude", "lon", "lat")
# apply() over a mixed-type data frame coerces rows to character —
# hf_latlon converts the coordinate fields back with as.numeric().
apply(data.latlon_clean, 1, hf_latlon, myconn, table2)
}
# Enrich the fake-user table; the commented-out calls below are earlier
# runs over dated user snapshots, kept for reference.
getl(myconn, "TWITTER.zz_fake_users", "TWITTER.zz_fake_users_enrich")
#getl(myconn, "TWITTER.tweets2_users_20170106", "TWITTER.zz_users_enrich_20170106")
#getl(myconn, "TWITTER.tweets2_users_20170418", "TWITTER.zz_users_enrich_20170418")
#getl(myconn, "TWITTER.tweets2_users_20170429", "TWITTER.zz_users_enrich_20170429")
#getl(myconn, "TWITTER.tweets2_users_20170517", "TWITTER.zz_users_enrich_20170517")
#getl(myconn, "TWITTER.tweets2_users_20170527", "TWITTER.zz_users_enrich_20170527")
close(myconn)
|
93564931aad3e6364641868cbf3f4f54184db75d
|
aae46958c9b9ca7b33fd2e530f8cfc713d546560
|
/community/gbif/gbif_data.R
|
27e3bdec1d439346c6b79316a5927c4390f58bc6
|
[] |
no_license
|
EnquistLab/PFTC4_Svalbard
|
a38f155c5f905af74b7e265ace7dc256eaa4e2f9
|
f372df13dc8002f347e2fe8810c4034ed02f06fc
|
refs/heads/master
| 2022-02-16T09:20:34.170459
| 2022-01-28T14:14:50
| 2022-01-28T14:14:50
| 130,364,868
| 2
| 13
| null | 2021-05-19T11:11:34
| 2018-04-20T13:19:53
|
HTML
|
UTF-8
|
R
| false
| false
| 1,274
|
r
|
gbif_data.R
|
library("rgbif")
library("tidyverse")
library("mapdata")
name_backbone("Sphagnum warnstorfii")
if(!exists("gbif/sj.rdata")){
load("gbif/sj.rdata")
}else{#slow
sj <- occ_search(
taxonKey = 7707728,
hasCoordinate = TRUE,
country = "SJ",
limit = 100000,
decimalLatitude = "74,82",
decimalLongitude = "8,36"
)
bryo <- occ_search(
taxonKey = 35,
hasCoordinate = TRUE,
country = "SJ",
limit = 100000,
decimalLatitude = "74,82",
decimalLongitude = "8,36"
)
save(sj, bryo, file = "gbif/sj.rdata")
}
sj$data <- sj$data %>% filter(basisOfRecord != "FOSSIL_SPECIMEN")
sj$data %>% count(name) %>% print(n = Inf)
mp <- map_data("worldHires", region = "Norway:Svalbard")
sj$data %>% filter(grepl("Salix", name)) %>% count(name) %>% arrange(desc(n))
sj$data %>%
filter(genus == "Silene") %>%
mutate(name = gsub("^(\\w)\\w+ (.*)$", "\\1.\\2", name)) %>%
ggplot(aes(x = decimalLongitude, decimalLatitude)) +
geom_map(data = mp, map = mp, aes(map_id = region), inherit.aes = FALSE, fill = "grey80") +
geom_point() +
coord_quickmap() +
lims(x = c(10, 34), y = c(74.6, 80.2)) +
labs(x = "", y = "") +
theme_minimal() +
facet_wrap(~ name, ncol = 7)
#bryo
bryo$data %>% count(name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.