blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fb5d4aa8ec543c6016bb5cdad53a28f83d2298a5
|
a2c432551444129daffeb20a9cdac645ed343deb
|
/R/app_ui.R
|
586d87ffa454940a845172b49be858e14f71573f
|
[
"MIT"
] |
permissive
|
ScottSobel/EDA
|
036e41b27653654bc5cc5249474d1269f7ddbaa9
|
9989f5fa3a9df07aefd2b50b09900bac413c7e7a
|
refs/heads/master
| 2023-02-22T05:22:50.528139
| 2021-01-23T01:12:34
| 2021-01-23T01:12:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,261
|
r
|
app_ui.R
|
#' App UI
#'
#' Assembles the complete user interface: external resources (CSS/JS)
#' followed by the dashboard page built from the module-level UI helpers
#' defined elsewhere in this package.
#'
#' @return tagList for app's UI
#' @export
#' @importFrom shiny tagList
#' @importFrom shinydashboardPlus dashboardPagePlus
app_ui <- function() {
  # Resolve external resources first so addResourcePath() runs before the
  # dashboard components are built (same evaluation order as before).
  resources <- add_external_resources()
  dashboard <- shinydashboardPlus::dashboardPagePlus(
    header = header_ui(),
    sidebar = sidebar_ui(),
    body = body_ui(),
    rightsidebar = right_sidebar_ui(),
    # footer = footer_ui(),
    # title = "OW EDA",
    skin = "black" #,
    # enable_preloader = TRUE,
    # loading_duration = 2
  )
  shiny::tagList(resources, dashboard)
}
#' Add External Resources for owEDA
#'
#' Registers the package's installed `app/www` directory under the "www"
#' URL prefix, then returns the head tags that load the app's JS/CSS
#' dependencies.
#'
#' @return invisible
#' @export
#' @importFrom shinyjs useShinyjs
#' @importFrom shinyWidgets useSweetAlert useShinydashboardPlus
#' @importFrom shiny addResourcePath tags
add_external_resources <- function() {
  # Side effect: map the bundled static assets to the "www" URL prefix.
  shiny::addResourcePath(
    "www", system.file("app/www", package = "owEDA")
  )
  head_contents <- list(
    shinyjs::useShinyjs(),
    shinyWidgets::useSweetAlert(),
    shinyWidgets::useShinydashboardPlus(),
    # shinyCleave::includeCleave(country = "us"),
    shiny::tags$link(rel = "stylesheet", type = "text/css", href = "www/styles.css"),
    shiny::tags$script(src = "www/custom.js")
  )
  do.call(shiny::tags$head, head_contents)
}
|
3357483307006738de38b647256c8a96f402150b
|
f2e968433a4091d4479cca636a465534edfdf829
|
/RScripts/KNearestNeighbour.R
|
bfe4201569d6a982f0c9ec896454f3a96e05438d
|
[] |
no_license
|
ChandanBP/MachineLearning
|
be614ec1e9959abb59b0f823c93ce39bb312f873
|
a27995de832a8a6ae5ceb11e602c37b5f00c637a
|
refs/heads/master
| 2020-05-29T18:50:08.887709
| 2016-09-07T16:14:14
| 2016-09-07T16:14:14
| 47,040,143
| 1
| 1
| null | 2016-03-04T00:36:28
| 2015-11-28T21:29:28
|
Java
|
UTF-8
|
R
| false
| false
| 789
|
r
|
KNearestNeighbour.R
|
# K-nearest-neighbour classification with repeated random train/test splits.
#
# Usage: Rscript KNearestNeighbour.R <csv-file>
# The CSV is expected to hold the class label in column 9.
#
# NOTE(review): knn() is given *all* columns of the data frame as features,
# including column 9 (the label itself) -- this leaks the target into the
# feature set and inflates accuracy. Confirm whether that is intended;
# fixing it (dropping the label column from trainingData/testData) changes
# the reported results, so it is flagged here rather than changed.
library(class)

args <- commandArgs(TRUE)
fileContents <- read.csv(file = args[1], header = TRUE, sep = ",")

n_reps <- 10      # number of random splits to average over
label_col <- 9    # column holding the class label

# One random training fraction (10%-90% of the rows) per repetition.
randomNumber <- runif(n_reps, 0.1, 0.9)
totalAccuracy <- 0

for (i in seq_len(n_reps)) {
  val <- floor(randomNumber[i] * nrow(fileContents))
  index <- sample(nrow(fileContents), size = val)
  trainingData <- fileContents[index, ]
  testData <- fileContents[-index, ]
  targetTrainingData <- fileContents[index, label_col]
  testTrainingData <- fileContents[-index, label_col]

  knnModel <- knn(trainingData, testData, as.factor(targetTrainingData),
                  k = 11, prob = TRUE)
  summary(knnModel)

  # Confusion matrix; percentage of correctly classified test rows.
  tab <- table(testTrainingData, knnModel)
  accuracy <- 100 * sum(diag(tab)) / sum(tab)
  totalAccuracy <- totalAccuracy + accuracy
}

averageAccuracy <- totalAccuracy / n_reps
# Top-level visible value: Rscript prints this summary line.
paste("Average accuracy =", averageAccuracy, "%", sep = "")
|
dca7fe946a4bbe372fb2d509d2160249d78627ef
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/splithalf/man/splithalf.Rd
|
55b69b090387e71d34d936f1fc4ef47d6b160ca6
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,883
|
rd
|
splithalf.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splithalf.R
\name{splithalf}
\alias{splithalf}
\title{Internal consistency of task measures via a permutation split-half reliability approach}
\usage{
splithalf(
data,
outcome = "RT",
score = "difference",
conditionlist = FALSE,
halftype = "random",
permutations = 5000,
var.RT = "latency",
var.ACC = "accuracy",
var.condition = FALSE,
var.participant = "subject",
var.trialnum = "trialnum",
var.compare = "congruency",
compare1 = "Congruent",
compare2 = "Incongruent",
average = "mean",
plot = FALSE,
round.to = 2
)
}
\arguments{
\item{data}{specifies the raw dataset to be processed}
\item{outcome}{indicates the type of data to be processed, e.g. response time or accuracy rates}
\item{score}{indicates how the outcome score is calculated, e.g. most commonly the difference score between two trial types. Can be "average", "difference", "difference_of_difference", and "DPrime"}
\item{conditionlist}{sets conditions/blocks to be processed}
\item{halftype}{specifies the split method; "oddeven", "halfs", or "random"}
\item{permutations}{specifies the number of random splits to run - 5000 is good}
\item{var.RT}{specifies the RT variable name in data}
\item{var.ACC}{specifies the accuracy variable name in data}
\item{var.condition}{specifies the condition variable name in data - if not specified then splithalf will treat all trials as one condition}
\item{var.participant}{specifies the subject variable name in data}
\item{var.trialnum}{specifies the trial number variable}
\item{var.compare}{specifies the variable that is used to calculate difference scores (e.g. including congruent and incongruent trials)}
\item{compare1}{specifies the first trial type to be compared (e.g. congruent trials)}
\item{compare2}{specifies the second trial type to be compared (e.g. incongruent trials)}
\item{average}{use mean or median to calculate average scores?}
\item{plot}{gives the option to visualise the estimates in a raincloud plot. defaults to FALSE}
\item{round.to}{sets the number of decimals to round the estimates to; defaults to 2}
}
\value{
Returns a data frame containing permutation based split-half reliability estimates
splithalf is the raw estimate of the bias index
spearmanbrown is the spearman-brown corrected estimate of the bias index
Warning: if there are missing data (e.g. one condition's data missing for one participant), the output will include details of the missing data and return a data frame containing the NA data. Warnings will be displayed in the console.
}
\description{
This function calculates split half reliability estimates via a permutation approach for a wide range of tasks
The (unofficial) version name is "This function gives me the power to fight like a crow"
}
\examples{
## see online documentation for examples
}
|
289212e37610e6e55cbae5a6fe25e571e977a2e6
|
7a71d8d557a253fae611ffb44eda9ede3780b1a7
|
/shiny_inverse/server.R
|
ffbd0f131695048e0c667ab460507f0fb4ba6836
|
[] |
no_license
|
lromang/shiny_apps
|
b67e0bca9d9e1e304f5e2a35ee53ff0e6681f9f5
|
2447aa66f7708ce26606ffa895ae1df714e8eb5e
|
refs/heads/master
| 2020-05-26T18:23:03.286378
| 2015-08-24T17:28:03
| 2015-08-24T17:28:03
| 41,316,685
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,680
|
r
|
server.R
|
library(shiny)
library(ggplot2)

# Shared theme so all three plots use identical styling; previously this
# theme() block was copy-pasted into each renderPlot.
app_theme <- theme(panel.background = element_blank(),
                   axis.text = element_text(colour = "#1565C0"),
                   axis.title = element_text(colour = "#1565C0"),
                   legend.title = element_text(colour = "#1565C0"))

shinyServer(function(input, output) {

  # Build a data frame stacking input$sim draws from rexp() (label "exp")
  # on top of draws simulated by inverse-transform sampling of a Uniform
  # (label "sim"). Called inside each renderPlot so every plot
  # re-simulates when inputs change, exactly as the duplicated original
  # code did.
  make_data <- function() {
    exp <- data.frame(value = rexp(n = input$sim, rate = input$lam),
                      label = rep("exp", input$sim))
    sim <- data.frame(value = -log(runif(input$sim))/input$lam,
                      label = rep("sim", input$sim))
    rbind(exp, sim)
  }

  # Histogram comparing the two samples.
  output$exp_hist <- renderPlot({
    ggplot(make_data(), aes(x = value, fill = label)) +
      geom_histogram(alpha = .7) +
      app_theme +
      scale_fill_manual(name = "Distribución",
                        values = c("#1565C0", "#2196F3"))
  })

  # Kernel-density comparison of the same two samples.
  output$exp_dens <- renderPlot({
    ggplot(make_data(), aes(x = value, fill = label)) +
      geom_density(alpha = .7) +
      app_theme +
      scale_fill_manual(name = "Distribución",
                        values = c("#1565C0", "#2196F3"))
  })

  # Q-Q plot of the inverse-transform sample only.
  output$qqplot <- renderPlot({
    data <- data.frame(value = -log(runif(input$sim))/input$lam,
                       label = rep("sim", input$sim))
    ggplot(data, aes(sample = value)) +
      stat_qq(alpha = .7, col = "#1565C0") +
      app_theme
  })
})
|
a32b2992bc28a39ad71b21e64a78b93ed829c177
|
8ec8b44a804a9b0580e352bba3f8a5d1736169ed
|
/CCC_automation_script.R
|
d542e8b3a6052188cbd69729c0fab02d899fb6dd
|
[] |
no_license
|
jenitivecase/CCC_automation
|
e2f3dd641a3ebc36fc0840e83b1345dadf607137
|
b3e9f1456b4d35b21cd9626b5f52360d2cf5d2a2
|
refs/heads/master
| 2021-07-15T21:00:32.548692
| 2017-10-20T17:11:59
| 2017-10-20T17:11:59
| 107,448,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,907
|
r
|
CCC_automation_script.R
|
# CCC automation: merge last year's and this year's school data exports
# into a single password-protected workbook per school.
#
# NOTE(review): read.xlsx(), %>%, mutate(), bind_rows() etc. are not
# loaded in this file -- presumably options.R / load_packages() attaches
# the needed packages (openxlsx or XLConnect, dplyr, lubridate); confirm
# before running this standalone.
source("K:/AscendKC/Corp/R_and_D/1-USERS/Jennifer Brussow/options.R")
needed_packages <- c("XLConnect", "excel.link")
sapply(needed_packages, load_packages)
#steps for creation
### 1. Pull all datasets from Report Manager individually using school ID (ex. 14)
# and date range (7/1 – 6/30)
#going to read in sample file for now. can add SQL query later if desired.
old_fname <- "Bakersfield 2016.xlsx"
new_fname <- "14.xlsx"
# Tag each file's rows with the year they represent: new file = current
# year, old file = previous year.
new_data <- read.xlsx(new_fname) %>%
  mutate(Year = lubridate::year(Sys.Date()))
old_data <- read.xlsx(old_fname) %>%
  mutate(Year = lubridate::year(Sys.Date())-1)
# Only the first 27 columns (plus the Year tag) are carried forward.
cols_keep <- c(names(old_data)[1:27], "Year")
# Derive school name / ID from the two file names.
school_name <- gsub(" 2016.xlsx", "", old_fname)
school_id <- gsub(".xlsx", "", new_fname)
### 2. Copy 2017 “original” file into same-school 2016 “from school” file.
#overwrite names to match old data file
names(new_data)[1:27] <- names(old_data)[1:27]
#put the compatible rows together
# Dates arrive as Excel serial numbers; 1899-12-30 is Excel's day-zero
# origin, hence the explicit origin in both conversions.
synthesized <- bind_rows(old_data[cols_keep], new_data[cols_keep]) %>%
  mutate(TEAS.Date = as.Date(TEAS.Date, origin = as.Date("1899-12-30", format = "%Y-%m-%d"))) %>%
  mutate(Birthdate = as.Date(as.numeric(Birthdate), origin = as.Date("1899-12-30", format = "%Y-%m-%d")))
# Collect users appearing more than once (for manual review; not written
# out anywhere below).
duplicates <- synthesized %>%
  group_by(User.ID) %>%
  filter(length(User.ID) > 1) %>%
  ungroup() %>%
  arrange(User.ID)
### 3. Save new file as “Schoolname_id#_toschool17”. If multiple “original” files
# exist for the school (separate file ids, same name), check for them and merge
# into the new “to school’ file as well. This is now the working file for the school.
### 4. Hide two columns (M & N).
### 5. Delete rows prior to 2015 send out (green color).
### 6. Delete columns prior to Fall 2014.
### 7. Create columns for dataset AF and AG as Fall 16 and Spring 17.
### 8. Copy validation from columns AD/AE and “Paste special => formatting only”
# into columns AF and AG.
### 9. Color all data from 2017 purple.
### 10. Format all cells from 2017 with ‘all borders’.
### 11. Extend school ID (column A) through the new data (so all values in column A
# should match the school ID used in the file name).
### 12. Copy formatting in one line of last year’s data, then “Paste special =>
# formatting only” into all rows of this year’s data (in purple).
### 13. Re-save and password protect with formula password (include the school id
# used in the filename).
#set up fname & pw
filename <- paste0(school_name, "_", school_id, "_toschool", lubridate::year(Sys.Date()), ".xlsx")
pw <- paste0("kr76_", school_id)
#apply password on save
xl.save.file(synthesized, filename = filename, row.names = FALSE, col.names = TRUE,
             password = pw)
# Alternative COM-based save, kept for reference (Windows/Excel only):
# eApp <- COMCreate("Excel.Application")
# wk <- eApp$Workbooks()$Open(Filename="file.xlsx")
# wk$SaveAs(Filename="file.xlsx", Password="mypassword")
|
284c36f5061cf6fe066c84ee896d914622070692
|
16de1ce5ff1c870ca724fa18c6bee09105386531
|
/R/machinery.r
|
9ba2eb2e8ff953e28162c18fc245581e4b467e49
|
[
"MIT"
] |
permissive
|
cran/crayon
|
bfd4889ba3bea7bfd2f97b2d32311f4cf3377026
|
54fadb66830d0a926224a4bbe6f216e4229ff201
|
refs/heads/master
| 2022-10-31T22:08:31.506968
| 2022-09-29T15:20:24
| 2022-09-29T15:20:24
| 24,388,706
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,325
|
r
|
machinery.r
|
## ----------------------------------------------------------------------

# Template body shared by every crayon style function. The styles are not
# free variables: make_crayon() stores them on the function object itself
# (attribute "_styles"), and they are recovered here via sys.function()
# at call time.
crayon_template <- function(...) {
  my_styles <- attr(sys.function(), "_styles")
  text <- mypaste(...)
  nc <- num_ansi_colors()
  if (nc > 1) {
    # Apply styles innermost-first so the first listed style is outermost.
    for (st in rev(my_styles)) {
      if (!is.null(st$palette)) st <- get_palette_color(st, nc)
      # Replace any embedded close sequence with a re-open, so nested
      # styled substrings do not terminate the outer style early.
      text <- st$open %+%
        gsub_(st$close, st$open, text, fixed = TRUE, useBytes = TRUE) %+%
        st$close
    }
  }
  text
}
# Matches "#RRGGBB" or "#RRGGBBAA" hex color strings.
hash_color_regex <- "^#([A-Fa-f0-9]{6}|[A-Fa-f0-9]{8})$"

# Is `x` the name of one of the predefined ANSI styles?
is_builtin_style <- function(x) {
  is_string(x) && x %in% names(builtin_styles)
}
#' @importFrom grDevices colors
# A value is an R color when it is a single non-NA string that is either
# a "#RRGGBB(AA)" hex string or a known grDevices color name.
is_r_color <- function(x) {
  if (is.character(x) && length(x) == 1 && !is.na(x)) {
    grepl(hash_color_regex, x) || x %in% grDevices::colors()
  } else {
    FALSE
  }
}
# TRUE for a numeric matrix with 3 rows (RGB) or 4 rows (RGBA),
# one column per color.
is_rgb_matrix <- function(x) {
  if (!is.matrix(x) || !is.numeric(x)) {
    return(FALSE)
  }
  nrow(x) %in% c(3, 4)
}
#' @importFrom grDevices col2rgb
# Convert an R color name (or hex string) to an ANSI style by going
# through its RGB representation.
ansi_style_from_r_color <- function(color, bg, num_colors, grey) {
  style_from_rgb(col2rgb(color), bg, num_colors, grey)
}

# multicolor depends on this name, apparently -- keep the alias so that
# package does not break.
style_from_r_color <- ansi_style_from_r_color
# Pick the basic (eight-color) ANSI style whose reference RGB value is
# closest, by squared Euclidean distance, to `rgb`.
style_8_from_rgb <- function(rgb, bg) {
  # Background and foreground styles have separate reference palettes.
  ansi_cols <- if (bg) ansi_bg_rgb else ansi_fg_rgb
  dist <- colSums((ansi_cols - as.vector(rgb)) ^ 2 )
  # [1] breaks ties deterministically: take the first minimizer.
  builtin_name <- names(which.min(dist))[1]
  builtin_styles[[builtin_name]]
}
# Build a style from an RGB value: use true ANSI-256 when the terminal
# supports at least 256 colors, otherwise fall back to the nearest of
# the basic eight colors.
style_from_rgb <- function(rgb, bg, num_colors, grey) {
  if (num_colors >= 256) {
    ansi256(rgb, bg, grey)
  } else {
    style_8_from_rgb(rgb, bg)
  }
}
#' Create an ANSI color style
#'
#' Create a style, or a style function, or both. This function
#' is intended for those who wish to use 256 ANSI colors,
#' instead of the more widely supported eight colors.
#'
#' @details
#' The crayon package comes with predefined styles (see
#' [styles()] for a list) and functions for the basic eight-color
#' ANSI standard (`red`, `blue`, etc., see \link{crayon}).
#'
#' There are no predefined styles or style functions for the 256 color
#' ANSI mode, however, because we simply did not want to create that
#' many styles and functions. Instead, `make_style()` can be
#' used to create a style (or a style function, or both).
#'
#' There are two ways to use this function: \enumerate{
#'   \item If its first argument is not named, then it returns a function
#'     that can be used to color strings.
#'   \item If its first argument is named, then it also creates a
#'     style with the given name. This style can be used in
#'     [style()]. One can still use the return value
#'     of the function, to create a style function.
#' }
#'
#' The style (the \code{...} argument) can be anything of the
#' following: \itemize{
#'   \item An R color name, see [colors()].
#'   \item A 6- or 8-digit hex color string, e.g. `#ff0000` means
#'     red. Transparency (alpha channel) values are ignored.
#'   \item A one-column matrix with three rows for the red, green
#'     and blue channels, as returned by `col2rgb` (in the base
#'     grDevices package).
#' }
#'
#' `make_style()` detects the number of colors to use
#' automatically (this can be overridden using the `colors`
#' argument). If the number of colors is less than 256 (detected or given),
#' then it falls back to the color in the ANSI eight color mode that
#' is closest to the specified (RGB or R) color.
#'
#' See the examples below.
#'
#' @param ... The style to create. See details and examples below.
#' @param bg Whether the color applies to the background.
#' @param grey Whether to specifically create a grey color.
#'   This flag is included because ANSI 256 has a finer color scale
#'   for greys than the usual 0:5 scale for R, G and B components.
#'   It is only used for RGB color specifications (either numerically
#'   or via a hex string) and is ignored on eight-color ANSI
#'   terminals.
#' @param colors Number of colors, detected automatically
#'   by default.
#' @return A function that can be used to color strings.
#'
#' @family styles
#' @export
#' @examples
#' ## Create a style function without creating a style
#' pink <- make_style("pink")
#' bgMaroon <- make_style(rgb(0.93, 0.19, 0.65), bg = TRUE)
#' cat(bgMaroon(pink("I am pink if your terminal wants it, too.\n")))
#'
#' ## Create a new style for pink and maroon background
#' make_style(pink = "pink")
#' make_style(bgMaroon = rgb(0.93, 0.19, 0.65), bg = TRUE)
#' "pink" %in% names(styles())
#' "bgMaroon" %in% names(styles())
#' cat(style("I am pink, too!\n", "pink", bg = "bgMaroon"))
make_style <- function(..., bg = FALSE, grey = FALSE,
                       colors = num_colors()) {
  args <- list(...)
  stopifnot(length(args) == 1)
  style <- args[[1]]
  # A named argument means we also register the style under that name.
  orig_style_name <- style_name <- names(args)[1]
  stopifnot(is.character(style) && length(style) == 1 ||
            is_rgb_matrix(style) && ncol(style) == 1,
            is.logical(bg) && length(bg) == 1,
            is.numeric(colors) && length(colors) == 1)
  # Resolve the style spec: builtin name, R color, or RGB matrix.
  ansi_seqs <- if (is_builtin_style(style)) {
    # "red" with bg = TRUE means the builtin "bgRed" style.
    if (bg && substr(style, 1, 2) != "bg") {
      style <- "bg" %+% capitalize(style)
    }
    if (is.null(style_name)) style_name <- style
    builtin_styles[[style]]
  } else if (is_r_color(style)) {
    if (is.null(style_name)) style_name <- style
    ansi_style_from_r_color(style, bg, colors, grey)
  } else if (is_rgb_matrix(style)) {
    style_from_rgb(style, bg, colors, grey)
  } else {
    stop("Unknown style specification: ", style)
  }
  # Side effect: register the style only when the caller named it.
  if (!is.null(orig_style_name)) define_style(orig_style_name, ansi_seqs)
  make_crayon(structure(list(ansi_seqs), names = style_name))
}
# Wrap a named list of ANSI sequences into a callable crayon function:
# a copy of crayon_template carrying its styles in the "_styles"
# attribute, classed "crayon".
make_crayon <- function(ansi_seq) {
  styled_fn <- crayon_template
  attr(styled_fn, "_styles") <- ansi_seq
  class(styled_fn) <- "crayon"
  styled_fn
}
#' @include styles.r
#'
#' @usage
#' ## Simple styles
#' red(...)
#' bold(...)
#' # ...
#'
#' ## See more styling below
#'
#' @param ... Strings to style.
#' @name crayon
#
#' @details
#'
#' Crayon defines several styles, that can be combined. Each style in the list
#' has a corresponding function with the same name.
#'
#' @section General styles:
#'
#' \itemize{
#' \item reset
#' \item bold
#' \item blurred (usually called \sQuote{dim}, renamed to avoid name clash)
#' \item italic (not widely supported)
#' \item underline
#' \item inverse
#' \item hidden
#' \item strikethrough (not widely supported)
#' }
#'
#' @section Text colors:
#'
#' \itemize{
#' \item black
#' \item red
#' \item green
#' \item yellow
#' \item blue
#' \item magenta
#' \item cyan
#' \item white
#' \item silver (usually called \sQuote{gray}, renamed to avoid name clash)
#' }
#'
#' @section Background colors:
#'
#' \itemize{
#' \item bgBlack
#' \item bgRed
#' \item bgGreen
#' \item bgYellow
#' \item bgBlue
#' \item bgMagenta
#' \item bgCyan
#' \item bgWhite
#' }
#'
#' @section Styling:
#'
#' The styling functions take any number of character vectors as arguments,
#' and they concatenate and style them: \preformatted{ library(crayon)
#' cat(blue("Hello", "world!\n"))
#' }
#'
#' Crayon defines the \code{\%+\%} string concatenation operator, to make it easy
#' to assemble strings with different styles. \preformatted{ cat("... to highlight the " \%+\% red("search term") \%+\%
#' " in a block of text\n")
#' }
#'
#' Styles can be combined using the `$` operator: \preformatted{ cat(yellow$bgMagenta$bold('Hello world!\n'))
#' } See also [combine_styles()].
#'
#' Styles can also be nested, and then inner style takes
#' precedence: \preformatted{ cat(green(
#' 'I am a green line ' \%+\%
#' blue$underline$bold('with a blue substring') \%+\%
#' ' that becomes green again!\n'
#' ))
#' }
#'
#' It is easy to define your own themes: \preformatted{ error <- red $ bold
#' warn <- magenta $ underline
#' note <- cyan
#' cat(error("Error: subscript out of bounds!\n"))
#' cat(warn("Warning: shorter argument was recycled.\n"))
#' cat(note("Note: no such directory.\n"))
#' }
#'
#' @aliases
#' reset bold blurred italic underline inverse hidden strikethrough
#' black red green yellow blue magenta cyan white silver
#' bgBlack bgRed bgGreen bgYellow bgBlue bgMagenta bgCyan bgWhite
#'
#' @export reset bold blurred italic underline inverse hidden strikethrough
#' @export black red green yellow blue magenta cyan white silver
#' @export bgBlack bgRed bgGreen bgYellow bgBlue bgMagenta bgCyan bgWhite
#'
#' @seealso [make_style()] for using the 256 ANSI colors.
#' @examples
#' cat(blue("Hello", "world!"))
#'
#' cat("... to highlight the " %+% red("search term") %+%
#' " in a block of text")
#'
#' cat(yellow$bgMagenta$bold('Hello world!'))
#'
#' cat(green(
#' 'I am a green line ' %+%
#' blue$underline$bold('with a blue substring') %+%
#' ' that becomes green again!'
#' ))
#'
#' error <- red $ bold
#' warn <- magenta $ underline
#' note <- cyan
#' cat(error("Error: subscript out of bounds!\n"))
#' cat(warn("Warning: shorter argument was recycled.\n"))
#' cat(note("Note: no such directory.\n"))
#'
NULL
#' ANSI escape sequences of crayon styles
#'
#' You can use this function to list all available crayon styles,
#' via `names(styles())`, or to explicitly apply an ANSI
#' escape sequence to a string.
#'
#' @return A named list. Each list element is a list of two
#'   strings, named \sQuote{open} and \sQuote{close}.
#'
#' @seealso [crayon()] for the beginning of the crayon manual.
#' @export
#' @examples
#' names(styles())
#' cat(styles()[["bold"]]$close)
styles <- function() {
  data_env$my_styles
}

# Package-level style registry, kept in a private environment so that
# define_style() / drop_style() can mutate it after load.
data_env <- new.env(parent = emptyenv())
data_env$my_styles <- structure(list(), names = character())

# At load time: register every builtin style and bind a same-named style
# function (e.g. `red`, `bold`) into the crayon namespace.
sapply(names(builtin_styles), function(style) {
  data_env$my_styles[[style]] <- builtin_styles[[style]]
  assign(style, make_style(style), envir = asNamespace("crayon"))
})
# Add (or overwrite) a named style in the registry consulted by styles().
define_style <- function(name, ansi_seq) {
  data_env$my_styles[[name]] <- ansi_seq
}
#' Remove a style
#'
#' @param style The name of the style to remove. No error is given
#'   for non-existing names.
#' @return Nothing.
#'
#' @family styles
#' @export
#' @examples
#' make_style(new_style = "maroon", bg = TRUE)
#' cat(style("I am maroon", "new_style"), "\n")
#' drop_style("new_style")
#' "new_style" %in% names(styles())
drop_style <- function(style) {
  # Assigning NULL removes the list element; silently a no-op for
  # unknown names.
  data_env$my_styles[[style]] <- NULL
  invisible()
}
|
9e06113f59139cb75f4e29b811d76457d4f519b1
|
56587ef8f52bfeb5a610e40ba6bf0a0a66daf1d1
|
/man/stable_mle_fit.Rd
|
4c738f892206b989b285ca07ffdbc2fef8659652
|
[] |
no_license
|
cran/stabreg
|
d7e1fb39c19f7d3578ec292ef090d91c6217bb5d
|
3b5eec750b6fa9426921f8f5728e7685b678e891
|
refs/heads/master
| 2020-12-22T19:07:08.652322
| 2019-06-06T13:20:03
| 2019-06-06T13:20:03
| 236,902,021
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 427
|
rd
|
stable_mle_fit.Rd
|
\name{stable_mle_fit}
\alias{stable_mle_fit}
\title{Fit a stable distribution to a sample using maximum likelihood}
\usage{
stable_mle_fit(x, init_vals, trace)
}
\arguments{
\item{x}{sample vector}
\item{init_vals}{initial guess for parameters. Defaults to NULL in which case these are set to defaults}
\item{trace}{trace level}
}
\description{
Fit a stable distribution to a sample using maximum likelihood
}
|
e436445b3a043b83a89baaa5bf8d9427ccd8bcac
|
f4adbfce7b1679f8a21cb0a96831e347550bf1a6
|
/man/letour.Rd
|
482e64223de88b1b73a421d1cd14b103f28782a9
|
[
"MIT",
"LicenseRef-scancode-public-domain"
] |
permissive
|
smnnlt/sportsdata
|
22a00e4fc3aad6a4ee4a6dad8b3ab2be49393491
|
9846b0308f001aecc7acb3032a064bffeda7c67e
|
refs/heads/master
| 2023-07-28T02:33:44.038811
| 2021-09-16T21:12:57
| 2021-09-16T21:12:57
| 391,653,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 732
|
rd
|
letour.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{letour}
\alias{letour}
\title{All riders of the Tour de France}
\format{
A data frame with 9452 rows and 8 variables:
\describe{
\item{year}{year of holding}
\item{name}{name of the rider}
\item{rank}{final position in classement generale}
\item{distance}{total distance in km}
\item{pace}{individual average pace in km/h}
\item{team}{name of the rider's team}
\item{time}{total time in seconds}
\item{stages}{number of stages}
}
}
\source{
\url{https://github.com/camminady/LeTourDataSet}
}
\usage{
letour
}
\description{
A dataset containing the individual results for all riders of the Tour de
France.
}
\keyword{datasets}
|
f2382b101f2f71472570783663305b20975031c4
|
b5c8b2c86015d336be972deeccb902edfa2e21a8
|
/learn_shiny.R
|
47837c4b7912361f0f64974e465fad721a34cdab
|
[
"MIT"
] |
permissive
|
pstessel/medicare
|
d1b4fcf5da9cb3dc999f885205717800067501e6
|
3618764c5e960632766ed855aa0704e17caf7a13
|
refs/heads/master
| 2021-01-10T02:57:42.890094
| 2015-07-01T19:29:09
| 2015-07-01T19:29:09
| 37,549,045
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 316
|
r
|
learn_shiny.R
|
# Walkthrough notes for the RStudio shiny "census app" lesson.
# NOTE(review): setwd() with an absolute, machine-specific path makes this
# script non-portable -- acceptable for personal notes, not shared code.
setwd("/Volumes/HD2/Users/pstessel/Documents/Git_Repos/medicare")
library(shiny)
counties <- readRDS("census-app/data/counties.rds")
head(counties)
library(maps)
library(mapproj)
# helpers.R presumably defines percent_map() -- confirm against the app.
source("census-app/helpers.R")
# counties is re-read here; redundant with the read above but harmless.
counties <- readRDS("census-app/data/counties.rds")
percent_map(counties$white, "darkgreen", "% white")
|
1612ce1ed01386e7682ea565ad4e673b574cb286
|
d1a2e883603f49df6d071f536a565276410fab44
|
/app-data.R
|
6ef2eb67df53de617f238142734db946e81e23f3
|
[] |
no_license
|
nicholasviau/r-notes
|
802ff1680bfeb227ded0f366f0159bebcf2cee43
|
78fae3daca5a9519a315c5e3af90a2b3576225d6
|
refs/heads/master
| 2022-11-20T12:55:40.291600
| 2020-07-20T21:01:54
| 2020-07-20T21:01:54
| 281,221,832
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 922
|
r
|
app-data.R
|
## Make Data for the Book
# Scratch/notes script: downloads a Gutenberg text, writes a few lines
# out as individual files, then experiments with reading them back.
library(tidyverse)
library(gutenbergr)
# 768 is the Gutenberg ID for Wuthering Heights.
wuthering_heights <- gutenberg_download(768, meta_fields = "title")
# Keep five lines and give them zero-padded ids "01".."05".
wuthering_heights <- wuthering_heights %>%
  slice(7:11) %>%
  mutate(id = paste0("0", 1:5)) %>%
  select(id, text)
# Write each row's text to ./data/texts/<id>.txt (.x = id, .y = text,
# matching the two-column frame above).
wuthering_heights %>%
  pwalk(
    ~write_file(x = .y, path = paste0("./data/texts/", .x, ".txt"))
  )
## Read in
library(tidytext)
all_texts <- list.files("./data/texts", full.names = TRUE)
# One row per file: full text plus the file path as id (result not saved).
map_dfr(all_texts, ~{
  tibble(txt = read_file(.x),
         id = .x)
})
# Same, but tokenized into words with the bare filename (result not saved).
map_dfr(all_texts,
        ~ tibble(txt = read_file(.x)) %>%
          mutate(filename = basename(.x)) %>%
          unnest_tokens(word, txt))
write_file("hello", "./data/texts/test.txt")
# NOTE(review): the pattern here is the empty string "", which inserts the
# replacement between every character -- likely not what was intended;
# confirm the intended pattern. Result is also not assigned.
wuthering_heights %>%
  mutate(text = str_replace_all(text, "", "\\n"))
# Join all lines and split into sentences on . ? or ! (result not saved).
pull(wuthering_heights, text) %>%
  str_c(collapse = " ") %>%
  str_split(pattern = "\\.|\\?|\\!")
# NOTE(review): slice() with no row argument returns zero rows --
# presumably an unfinished experiment.
wuthering_heights %>%
  slice()
|
cd59b34c065e1ca67d2f8ac90bf240e07fcfb5b0
|
d27df9c1aa48d2019afe8ddb15b8ea8d1af31770
|
/R/logistic_fun.R
|
0feb3c516fa8f5218e1123d968e1383e3e3204e9
|
[] |
no_license
|
MangoTheCat/mangoTraining
|
67d274faa9296c28631ca93891edcd82ec321e43
|
1a40f2e6a997768103fc03cb23a239ef529cdaaf
|
refs/heads/master
| 2022-03-17T16:27:55.419039
| 2021-04-27T09:48:44
| 2021-04-27T09:48:44
| 45,688,569
| 9
| 7
| null | 2020-05-18T08:48:02
| 2015-11-06T15:04:39
|
R
|
UTF-8
|
R
| false
| false
| 810
|
r
|
logistic_fun.R
|
#' Function to fit logistic model
#'
#' Simple logistic function as used in Mango training materials. Note: This function has be renamed using tidyverse-style snake_case
#' naming conventions. However the original name of the function has been kept to ensure backwards compatibility with the book SAMS
#' Teach Yourself R in 24 Hours (ISBN: 978-0-672-33848-9).
#'
#' @param Dose The dose value to calculate at
#' @param E0 Effect at time 0
#' @param EC50 50\% of maximum effect
#' @param Emax Maximum effect
#' @param rc rate constant
#'
#' @return Numeric value/vector representing the response value.
#'
#' @examples logistic_fun(Dose = 50)
#'
#' @export
logistic_fun <- function(Dose, E0 = 0, EC50 = 50, Emax = 1, rc = 5) {
  # Four-parameter logistic: baseline E0 plus a sigmoid rising toward
  # E0 + Emax, centred at EC50 with steepness controlled by rc.
  scaled <- (EC50 - Dose) / rc
  sigmoid <- 1 / (1 + exp(scaled))
  E0 + Emax * sigmoid
}
# Original (camelCase) name retained for backwards compatibility.
logisticFun <- logistic_fun
|
ce9033410bea0cac4b7fef34f33c74b5f5b4eea9
|
4c728658dcdf38c23429974b773e161d6c3370e8
|
/Modules/ui.R
|
0840c37a3f6bb815f911f8cbfc095c519fda6d75
|
[] |
no_license
|
FelipheStival/RegressaoLinear
|
bda81288c71308c82aa655360951aa3e8e421e04
|
8b2b0138a76c41394a104d8214dcc77d2b667c73
|
refs/heads/master
| 2022-12-13T11:51:11.356021
| 2020-09-09T19:59:45
| 2020-09-09T19:59:45
| 268,523,334
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 469
|
r
|
ui.R
|
# Top-level UI: application title, MathJax support, and a sidebar layout
# whose input and plot panels are built by helpers defined elsewhere.
ui = fluidPage(
  #================ Page setup ===========================
  titlePanel(NOME_APLICACAO),
  withMathJax(),
  #================ Input interface ======================
  sidebarLayout(
    createInputsUI(),
    #=======================================================
    #============ Plot interface ===========================
    createPlotUI()
  )
  #=======================================================
)
|
5b972b86d788d35a8cc56f4af24faa0a9c1ef05d
|
b950792250db982d82a114cbe071128c522ba7e7
|
/C05_preparation_df5_878067.R
|
873db9f96be76b2d767b952ab801b32de4c57a4a
|
[] |
no_license
|
SilviaRanieri/Digital-marketing
|
67d2c9bdeb2d810f7fd3cc84464de7109ad3ec3b
|
5241baf37ce790324031021d347e3eac93529205
|
refs/heads/main
| 2023-08-14T12:30:03.848295
| 2021-09-30T12:40:22
| 2021-09-30T12:40:22
| 412,053,884
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
r
|
C05_preparation_df5_878067.R
|
## Dataset 5
# df5: Categorization of the marketing email communications
# Variables are:
#- `ID_CAMP`: identify the email campaign (**Key**);
#- `TYP_CAMP`: identify the type email campaign.
#- `CHANNEL_CAMP`: channel of campaign submission.
# NOTE(review): %>% and select() require dplyr/magrittr -- presumably
# loaded by an earlier script in the pipeline; confirm.
#### FIRST LOOK of df_5 ####
str(df_5_camp_cat)
summary(df_5_camp_cat)
#### START CLEANING df_5 ####
df_5_camp_cat_clean <- df_5_camp_cat
#### CLEANING LOW VARIANCE in df_5 ####
# CHANNEL_CAMP is dropped because it carries (near-)constant values.
df_5_camp_cat_clean <- df_5_camp_cat_clean %>%
  select(-CHANNEL_CAMP)
df_5_camp_cat_clean
#### FINAL REVIEW df_5_clean ####
str(df_5_camp_cat_clean)
summary(df_5_camp_cat_clean)
#Information about this file:
#Type of campaign distribution: Product (43.8%), Personalized (19.9%), National (17.6%), Newsletter (12.8%) and Local (0.69%).
|
fb6a881a35e8bc20a8c6f8e02d8486b210be7965
|
d8b49ccd3f3b41532f9092973a3f7e835f4eb166
|
/Assignment1.R
|
c5fae50a65bf78a280ae73290e5565f795d538cf
|
[] |
no_license
|
b-thi/STAT-852
|
31bb56e9c1a4bb9c3d35177b0b1883c9d3d8bdeb
|
52ed492192e37d113771798f2110c3b1f105562f
|
refs/heads/master
| 2022-03-14T14:10:16.837232
| 2019-11-23T21:53:35
| 2019-11-23T21:53:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,318
|
r
|
Assignment1.R
|
################################################################################
# #
# #
# Assignment 1 - Barinder Thind - STAT 852 #
# #
# #
################################################################################
#############
# #
# Libraries #
# #
#############
library(tidyverse)
library(GGally)
library(MASS)
library(leaps)
##############
# #
# Lecture 2a #
# #
##############
##############
# #
# Question 1 #
# #
##############
# First, I take tom's code and make a function except add some
# parameters in correspondance to what changes in the questions. Namely,
# the parameters are sample size, beta1, and beta2
# Monte Carlo study of prediction MSE for three nested regression models.
#
# Simulates `reps` training sets from y = beta_1*x1 + beta_2*x2 + N(0,1)
# noise (x3 is pure noise), fits models with 1, 2 and 3 predictors, and
# returns the average mean squared prediction error of each model over a
# fixed grid of test points.
#
# @param sample_size Number of observations per simulated training set.
# @param beta_1 True coefficient on x1.
# @param beta_2 True coefficient on x2.
# @return data.frame with one column `MSPE` and rows "Model 1".."Model 3".
mspe_q1 <- function(sample_size, beta_1, beta_2) {
  # Fixed seed: every call with the same arguments is reproducible.
  set.seed(392039853)
  reps <- 200        # Number of simulated data sets
  N <- sample_size   # Training-sample size
  # Grid of test points; the true mean uses only x1 and x2 (beta_3 = 0).
  test <- expand.grid(x1 = c(.1,.3,.5,.7,.9), x2 = c(.1,.3,.5,.7,.9), x3=c(.1,.3,.5,.7,.9))
  mu <- beta_1*test$x1 + beta_2*test$x2
  # Rows are replicates; columns are the grid points for each of the
  # three models, concatenated (model 1 | model 2 | model 3).
  save.pred <- matrix(data=NA, ncol=3*nrow(test), nrow=reps)
  # Per-replicate residual-variance estimates, one column per model.
  save.sig <- matrix(data=NA, ncol=3, nrow=reps)
  # The for loop manages the replicate index itself; the original's
  # manual `counter <- 1` / `counter <- counter + 1` were dead code.
  for (counter in seq_len(reps)) {
    # Uniform predictors, standard normal errors. Generation order
    # (x1, x2, x3, ep) is kept identical so results match the original.
    x1 <- runif(n=N)
    x2 <- runif(n=N)
    x3 <- runif(n=N)
    ep <- rnorm(n=N)
    y <- beta_1*x1 + beta_2*x2 + ep
    # Model 1: x1 only.
    reg1 <- lm(y~x1)
    sig1 <- sum(resid(reg1)^2) / reg1$df.residual
    pred1 <- predict(reg1, newdata = test)
    # Model 2: x1 + x2 (the true model).
    reg2 <- lm(y~x1 + x2)
    sig2 <- sum(resid(reg2)^2) / reg2$df.residual
    pred2 <- predict(reg2, newdata=test)
    # Model 3: adds the pure-noise predictor x3.
    reg3 <- lm(y~x1 + x2 + x3)
    sig3 <- sum(resid(reg3)^2) / reg3$df.residual
    pred3 <- predict(reg3, newdata=test)
    save.pred[counter,] <- c(pred1, pred2, pred3)
    save.sig[counter,] <- c(sig1, sig2, sig3)
  }
  # Bias, variance and MSE of the prediction at each grid point.
  mean.pred <- apply(save.pred, MARGIN=2, FUN=mean)
  bias <- mean.pred - rep(mu, times=3)
  # Renamed from `var` to avoid shadowing stats::var inside this scope.
  pred_var <- apply(save.pred, MARGIN=2, FUN=var)
  MSE <- bias^2 + pred_var
  # Average MSE over grid points, per model.
  model <- rep(c(1,2,3), each=nrow(test))
  mse_by_model <- vapply(1:3, function(m) mean(MSE[model == m]), numeric(1))
  df <- data.frame(MSPE = mse_by_model)
  row.names(df) <- c("Model 1", "Model 2", "Model 3")
  df
}
# Run the Question 1 simulation under each scenario.  Each call returns a
# 3-row data.frame of per-model MSPEs; the column is renamed per scenario so
# the results can be stacked into one comparison table below.
# Getting the original baseline: N = 20, beta1 = 1, beta2 = 1
orig_results <- mspe_q1(20, 1, 1)
colnames(orig_results) <- "MSPE_original"
# (a) N = 100: larger sample size
a_results <- mspe_q1(100, 1, 1)
colnames(a_results) <- "MSPE_a"
# (b) N = 10: smaller sample size
b_results <- mspe_q1(10, 1, 1)
colnames(b_results) <- "MSPE_b"
# (c) beta1 = 2: stronger x1 effect
c_results <- mspe_q1(20, 2, 1)
colnames(c_results) <- "MSPE_c"
# (d) beta1 = 0.5: weaker x1 effect
d_results <- mspe_q1(20, 0.5, 1)
colnames(d_results) <- "MSPE_d"
# (e) beta2 = 2: stronger x2 effect
e_results <- mspe_q1(20, 1, 2)
colnames(e_results) <- "MSPE_e"
# (f) beta2 = 0.5: weaker x2 effect
f_results <- mspe_q1(20, 1, 0.5)
colnames(f_results) <- "MSPE_f"
# (g) beta1 = beta2 = 2: both effects stronger
g_results <- mspe_q1(20, 2, 2)
colnames(g_results) <- "MSPE_g"
### Putting together in a table: one row per scenario, one column per model
q1_table <- t(do.call("cbind", list(orig_results, a_results, b_results,
                                    c_results, d_results, e_results,
                                    f_results, g_results)))
### Looking at table
q1_table
##############
# #
# Question 2 #
# #
##############
# First, I take tom's code and make a function except add some
# parameters in correspondance to what changes in the questions. Namely,
# the parameters are sample size, beta1, and beta2
# Simulation study of model selection by AIC / BIC / AICc (Q2).
#
# Simulates `reps = 200` data sets of size `sample_size` from
# y = beta_1*x1 + beta_2*x2 + N(0,1) error, fits four nested models
# (intercept only; +x1; +x2; +x3) to each, and counts how often each model
# minimizes AIC, BIC, and AICc.
#
# NOTE(review): despite the name, this function tallies information-criterion
# selections; it does not compute an MSPE. Name kept for caller compatibility.
#
# Args:
#   sample_size: number of observations per simulated data set.
#   beta_1: true coefficient on x1.
#   beta_2: true coefficient on x2.
# Returns:
#   list(model.aic, model.bic, model.aicc): tables of selection counts,
#   indexed by number of predictors (0-3).
mspe_q2 <- function(sample_size, beta_1, beta_2) {
  set.seed(392039853)  # fixed seed for reproducibility
  reps <- 200
  N <- sample_size
  # AIC, BIC, and small-sample-corrected AICc for one fitted lm.
  # N is read from the enclosing function frame.
  info_criteria <- function(fit) {
    aic <- extractAIC(fit, k = 2)[2]
    bic <- extractAIC(fit, k = log(N))[2]
    # AICc = AIC + 2p(p+1)/(N - p - 1), with p = fit$rank.
    aicc <- aic + 2 * fit$rank * (fit$rank + 1) / (N - fit$rank - 1)
    c(aic, bic, aicc)
  }
  # Columns: aic0..aic3, bic0..bic3, aicc0..aicc3 (model 0 = intercept only),
  # matching the original column layout exactly.
  save.ic <- matrix(data = NA, ncol = 12, nrow = reps)
  # `for` manages the index; the original's dead `counter` bookkeeping is gone.
  for (counter in seq_len(reps)) {
    # RNG draws kept in the original order so results are unchanged.
    x1 <- runif(n = N)
    x2 <- runif(n = N)
    x3 <- runif(n = N)
    ep <- rnorm(n = N)
    y <- beta_1 * x1 + beta_2 * x2 + ep
    fits <- list(lm(y ~ 1), lm(y ~ x1), lm(y ~ x1 + x2), lm(y ~ x1 + x2 + x3))
    ic <- vapply(fits, info_criteria, numeric(3))  # 3 x 4: rows AIC, BIC, AICc
    save.ic[counter, ] <- c(ic[1, ], ic[2, ], ic[3, ])
  }
  # For each criterion, count how often each model attains the minimum:
  # max.col of the negated values finds the per-row minimum; subtracting 1
  # maps column index 1..4 to model number 0..3.
  model.aic <- table(max.col(-save.ic[, 1:4]) - 1)
  model.bic <- table(max.col(-save.ic[, 5:8]) - 1)
  model.aicc <- table(max.col(-save.ic[, 9:12]) - 1)
  return(list(model.aic = model.aic, model.bic = model.bic, model.aicc = model.aicc))
}
# Getting the original
orig2_results <- mspe_q2(20, 1, 1)
# Printing Plots
par(mfrow=c(1,3))
barplot(orig2_results$model.aic,xlab="Model Number",ylab="Number chosen",main="AIC",ylim=c(0,150))
barplot(orig2_results$model.aicc,xlab="Model Number",ylab="Number chosen",main="AICc",ylim=c(0,150))
barplot(orig2_results$model.bic,xlab="Model Number",ylab="Number chosen", main="BIC",ylim=c(0,150))
# (a)
a2_results <- mspe_q2(100, 1, 1)
# Printing Plots
par(mfrow=c(1,3))
barplot(a2_results$model.aic,xlab="Model Number",ylab="Number chosen",main="AIC",ylim=c(0,150))
barplot(a2_results$model.aicc,xlab="Model Number",ylab="Number chosen",main="AICc",ylim=c(0,150))
barplot(a2_results$model.bic,xlab="Model Number",ylab="Number chosen", main="BIC",ylim=c(0,150))
# (b)
b2_results <- mspe_q2(10, 1, 1)
# Printing Plots
par(mfrow=c(1,3))
barplot(b2_results$model.aic,xlab="Model Number",ylab="Number chosen",main="AIC",ylim=c(0,150))
barplot(b2_results$model.aicc,xlab="Model Number",ylab="Number chosen",main="AICc",ylim=c(0,150))
barplot(b2_results$model.bic,xlab="Model Number",ylab="Number chosen", main="BIC",ylim=c(0,150))
# (c)
c2_results <- mspe_q2(20, 2, 1)
# Printing Plots
par(mfrow=c(1,3))
barplot(c2_results$model.aic,xlab="Model Number",ylab="Number chosen",main="AIC",ylim=c(0,150))
barplot(c2_results$model.aicc,xlab="Model Number",ylab="Number chosen",main="AICc",ylim=c(0,150))
barplot(c2_results$model.bic,xlab="Model Number",ylab="Number chosen", main="BIC",ylim=c(0,150))
# (d)
d2_results <- mspe_q2(20, 0.5, 1)
# Printing Plots
par(mfrow=c(1,3))
barplot(d2_results$model.aic,xlab="Model Number",ylab="Number chosen",main="AIC",ylim=c(0,150))
barplot(d2_results$model.aicc,xlab="Model Number",ylab="Number chosen",main="AICc",ylim=c(0,150))
barplot(d2_results$model.bic,xlab="Model Number",ylab="Number chosen", main="BIC",ylim=c(0,150))
# (e)
e2_results <- mspe_q2(20, 1, 2)
# Printing Plots
par(mfrow=c(1,3))
barplot(e2_results$model.aic,xlab="Model Number",ylab="Number chosen",main="AIC",ylim=c(0,150))
barplot(e2_results$model.aicc,xlab="Model Number",ylab="Number chosen",main="AICc",ylim=c(0,150))
barplot(e2_results$model.bic,xlab="Model Number",ylab="Number chosen", main="BIC",ylim=c(0,150))
# (f)
f2_results <- mspe_q2(20, 1, 0.5)
# Printing Plots
par(mfrow=c(1,3))
barplot(f2_results$model.aic,xlab="Model Number",ylab="Number chosen",main="AIC",ylim=c(0,150))
barplot(f2_results$model.aicc,xlab="Model Number",ylab="Number chosen",main="AICc",ylim=c(0,150))
barplot(f2_results$model.bic,xlab="Model Number",ylab="Number chosen", main="BIC",ylim=c(0,150))
# (g) Both slopes doubled: beta1 = 2, beta2 = 2.
g2_results <- mspe_q2(20, 2, 2)
# Printing selection-count barplots for each information criterion.
# (Fixed user-facing typo in the AIC x-axis label: "Model Numebr".)
par(mfrow=c(1,3))
barplot(g2_results$model.aic,xlab="Model Number",ylab="Number chosen",main="AIC",ylim=c(0,150))
barplot(g2_results$model.aicc,xlab="Model Number",ylab="Number chosen",main="AICc",ylim=c(0,150))
barplot(g2_results$model.bic,xlab="Model Number",ylab="Number chosen", main="BIC",ylim=c(0,150))
##############
# #
# Lecture 2b #
# #
##############
##############
# #
# Question 1 #
# #
##############
# Reading in data
prostate <- read.table("Prostate.csv", header=TRUE, sep=",", na.strings=" ")
# Splits the (globally defined) `prostate` data frame into two random halves,
# runs best-subsets regression (leaps::regsubsets) on each half, and for each
# half reports the model chosen by three criteria -- sMSE (training MSE), BIC,
# and MSPE (error predicting the *other* half) -- with the variables in each
# chosen model and its training/test errors.
#
# Args:
#   seed_chosen: RNG seed controlling the random half-split.
# Returns:
#   A 6-row data.frame (2 training sets x 3 criteria).
#
# NOTE(review): `prostate` is read from the calling environment; the `set`
# column added below only modifies this function's local copy.
subset_halves_function <- function(seed_chosen) {
  # Splitting data in half using random uniform selection to make two "set"s.
  set.seed(seed_chosen)
  prostate$set <- ifelse(runif(n=nrow(prostate)) > 0.5, yes=2, no=1)
  # All subsets regression using the "regsubsets" function from "leaps"
  # Note: default is to limit to 8-variable models. Add nvmax argument to increase.
  # Columns 2:9 are the predictors, column 10 (lpsa) the response.
  allsub1 <- regsubsets(x=prostate[which(prostate$set==1),2:9],
                        y=prostate[which(prostate$set==1),10], nbest=1)
  allsub2 <- regsubsets(x=prostate[which(prostate$set==2),2:9],
                        y=prostate[which(prostate$set==2),10], nbest=1)
  # Store summary() so we can see BICs (not comparable across different data sets)
  summ.1 <- summary(allsub1)
  summ.2 <- summary(allsub2)
  # Fitting the models in succession from smallest to largest.
  # Fit one-var model. then update to 2-var model. Could keep going.
  # Each time computing sample-MSE (sMSE), BIC, and mean squared pred. error (MSPE).
  # results1 columns: number of variables, sMSE, BIC, MSPE (fit on set 1,
  # MSPE evaluated on set 2).
  results1 <- matrix(data=NA, nrow=9, ncol=4)
  # Intercept-only baseline (row 1, 0 variables).
  mod1 <- lm(lpsa ~ 1, data=prostate[which(prostate$set==1),])
  sMSE <- summary(mod1)$sigma^2
  BIC <- extractAIC(mod1, k=log(nrow(prostate[which(prostate$set==1),])))
  pred2 <- predict(mod1, newdata=prostate[which(prostate$set==2),])
  MSPE <- mean((pred2-prostate[which(prostate$set==2),]$lpsa)^2)
  results1[1,] <- c(0, sMSE, BIC[2], MSPE)
  #Get rid of superfluous variables so that I can call the right variables into the data set each time.
  # Also move response to 1st column to be included every time below.
  prostate2 <- prostate[,c(10,2:9)]
  # Best v-variable model for v = 1..8, columns flagged by summ.1$which.
  for(v in 1:8){
    mod1 <- lm(lpsa ~ ., data=prostate2[which(prostate$set==1), summ.1$which[v,]])
    sMSE <- summary(mod1)$sigma^2
    BIC <- extractAIC(mod1, k=log(nrow(prostate2[which(prostate$set==1),])))
    pred2 <- predict(mod1, newdata=prostate2[which(prostate$set==2),])
    MSPE <- mean((pred2-prostate2[which(prostate$set==2),]$lpsa)^2)
    results1[v+1,] <- c(v, sMSE, BIC[2], MSPE)
  }
  ##########
  # Repeat for second data set (roles of the halves swapped).
  # Fitting the models in succession from smallest to largest.
  # Fit one-var model. then update to 2-var model. Could keep going.
  # Each time computing sample-MSE (sMSE), BIC, and mean squared pred. error (MSPE).
  results2 <- matrix(data=NA, nrow=9, ncol=4)
  mod1 <- lm(lpsa ~ 1, data=prostate[which(prostate$set==2),])
  sMSE <- summary(mod1)$sigma^2
  BIC <- extractAIC(mod1, k=log(nrow(prostate[which(prostate$set==2),])))
  pred2 <- predict(mod1, newdata=prostate[which(prostate$set==1),])
  MSPE <- mean((pred2-prostate[which(prostate$set==1),]$lpsa)^2)
  results2[1,] <- c(0, sMSE, BIC[2], MSPE)
  #Get rid of superfluous variables so that I can call the right variables into the data set each time.
  # Also move response to 1st column to be included every time below.
  prostate2 <- prostate[,c(10,2:9)]
  for(v in 1:8){
    mod1 <- lm(lpsa ~ ., data=prostate2[which(prostate$set==2), summ.2$which[v,]])
    sMSE <- summary(mod1)$sigma^2
    BIC <- extractAIC(mod1, k=log(nrow(prostate2[which(prostate$set==2),])))
    pred2 <- predict(mod1, newdata=prostate2[which(prostate$set==1),])
    MSPE <- mean((pred2-prostate2[which(prostate$set==1),]$lpsa)^2)
    results2[v+1,] <- c(v, sMSE, BIC[2], MSPE)
  }
  # Here, I begin to organize the data as ideally, I return the table as it
  # is required in the homework.
  # First, I figure out the vars chosen: column 2 = sMSE, 3 = BIC, 4 = MSPE;
  # which.min picks the row (model size) minimizing each criterion.
  vars_chosen <- c(results1[which.min(results1[,2]), 1],
                   results1[which.min(results1[,3]), 1],
                   results1[which.min(results1[,4]), 1],
                   results2[which.min(results2[,2]), 1],
                   results2[which.min(results2[,3]), 1],
                   results2[which.min(results2[,4]), 1])
  # Now I do the same for the training error which is the sMSE
  train_error <- c(results1[which.min(results1[,2]), 2],
                   results1[which.min(results1[,3]), 2],
                   results1[which.min(results1[,4]), 2],
                   results2[which.min(results2[,2]), 2],
                   results2[which.min(results2[,3]), 2],
                   results2[which.min(results2[,4]), 2])
  # Lastly, I do the same to find the test error (MSPE)
  test_error <- c(results1[which.min(results1[,2]), 4],
                  results1[which.min(results1[,3]), 4],
                  results1[which.min(results1[,4]), 4],
                  results2[which.min(results2[,2]), 4],
                  results2[which.min(results2[,3]), 4],
                  results2[which.min(results2[,4]), 4])
  # Human-readable variable lists for each chosen model ([-1] drops the
  # intercept column of the `which` matrix).
  # NOTE(review): if a criterion ever selects the 0-variable model,
  # summ.*$which[0, ] below would fail -- confirm that case cannot occur.
  var_names <- c(paste(names(which(summ.1$which[vars_chosen[1], -1] == TRUE)), collapse = ", "),
                 paste(names(which(summ.1$which[vars_chosen[2], -1] == TRUE)), collapse = ", "),
                 paste(names(which(summ.1$which[vars_chosen[3], -1] == TRUE)), collapse = ", "),
                 paste(names(which(summ.2$which[vars_chosen[4], -1] == TRUE)), collapse = ", "),
                 paste(names(which(summ.2$which[vars_chosen[5], -1] == TRUE)), collapse = ", "),
                 paste(names(which(summ.2$which[vars_chosen[6], -1] == TRUE)), collapse = ", "))
  # Now put it all together
  final_table <- data.frame(training_set = c(rep(1, 3), rep(2, 3)),
                            criterion = c(rep(c("sMSE", "BIC", "MSPE"), 2)),
                            num_vars = vars_chosen,
                            vars_chosen = var_names,
                            training_error = train_error,
                            test_error = test_error)
  return(final_table)
}
##### (a) #####
# Running code
subset_halves_function(120401002)
##### (b) #####
# Running code
subset_halves_function(9267926)
##### (c) #####
## (i) ##
## (ii) ##
##############
# #
# Question 2 #
# #
##############
# Reading in Data
abalone <- read.csv("Abalone.csv", as.is = T, header = T)
# Looking at data
head(abalone)
str(abalone)
# Creating male/female variable [assuming 0 = male and 1 = female]
str(abalone$Sex)
abalone$male <- ifelse(abalone$Sex == 0, 1, 0)
abalone$female <- ifelse(abalone$Sex == 1, 1, 0)
# Dropping sex variable
abalone <- abalone[,-1]
# Looking at data again
str(abalone)
##### (a) #####
# Creating scatterplot of all variable
ggpairs(abalone)
## Here, we can see that the variable shell seems to have the strongest correlation
## with the rings (our response) variable. Additionally, there seems to be a moderate
## correlation with a numebr of other variables such as length, diameter, height, whole,
## and viscera. In fact, most variables exhibit some mild correlation.
## With respect to multicollinearity, there is a large potential for this issue. In fact,
## we see a strong correlation between length and a number of other variables such as diameter,
## height, whole, shucked, and viscera. This rings true as well for the relationship between
## these variables with each other as well. The potential for this issue is clearly evident
## from the pairs plot.
## (i) ##
# Fixing the height variable
abalone <- abalone[(0 < abalone$Height)&(abalone$Height < 0.5), ]
# Let's look at the pairs plot again to see that Height has been "fixed"
ggpairs(abalone)
##### (b) #####
# Setting seed
set.seed(29003092)
# Creating data sets
abalone$set <- ifelse(runif(n=nrow(abalone)) <= 0.75, yes = 1, no = 2)
abalone_1 <- abalone[which(abalone$set == 1), -11]
abalone_2 <- abalone[which(abalone$set == 2), -11]
##### (c) #####
# Creating lm object [minimal model]
fit <- lm(Rings ~ 1, data = abalone_1)
# Doing forward stepwise regression
step(fit,
direction = "forward",
scope = (~ Length + Diameter + Height + Whole + Shucked + Viscera + Shell + male + female),
k = log(nrow(abalone_1)))
## The variables are: Shell, Shucked, Height, male, Whole, Viscera, and Diameter
##### (d) #####
# Creating lm object [minimal model]
fit <- lm(Rings ~ 1, data = abalone_1)
# Doing forward stepwise regression
step(fit,
direction = "both",
scope = (~ Length + Diameter + Height + Whole + Shucked + Viscera + Shell + male + female),
k = log(nrow(abalone_1)))
##### (e) #####
# Creating lm object [minimal model]
fit <- lm(Rings ~ 1, data = abalone_1)
# Doing forward stepwise regression
step_no_penalty <- step(fit,
direction = "forward",
scope = (~ Length + Diameter + Height + Whole +
Shucked + Viscera + Shell + male + female),
k = 0)
# Penalty term to be added:
BIC_penalty <- function(k) {return(log(nrow(abalone))*k)}
# BIC Values
BIC_values <- c(7424.8, 5876.02, 5423.15, 5215.6, 5141.32, 5079.17,
5015.91, 4972.63, 4971.12, 4970.62)
# Getting true BIC values
true_BIC <- c()
for (i in 1:10) {
true_BIC[i] <- BIC_values[i] + BIC_penalty(i - 1)
}
# Looking at true values
true_BIC
# Finding true minimum model
which.min(true_BIC)
## 8th model has the lowest BIC value with the appropriate error term
## The 8th model which is as follows: Rings ~ Shell + Shucked + Height + male + Whole + Viscera + Diameter
## which is the same as the stepwise selection in the beginning
##### (f) #####
# Running all subsets regression on the training half: column 8 (Rings) is
# the response; the remaining columns are the candidate predictors.
allsub_training <- regsubsets(x = abalone_1[, c(1:7, 9:10)],
                              y = abalone_1[, 8],
                              nbest = 1)
## (i) ##
summary_training <- summary(allsub_training)
summary_training$bic
# Best model has 7 variables - they are the same as the model from before,
# but the BIC values look different.
# NOTE(review): leaps reports BIC on a different scale than extractAIC
# (presumably relative to the null model) -- confirm before comparing.
# (Removed a stray `` token here that would raise a zero-length variable
# name error when this script is sourced.)
## (ii) ##
plot(allsub_training)
##### (g) #####
|
9f87f7db719a1cd897745b276342153f07f52e06
|
dbc2af76893a0b669f2d9a032980c2111bfbc4d5
|
/man/deriv_phi.Rd
|
c16dc2d28fb9e9bfa569a4d16c16f5eeafadd67f
|
[
"MIT"
] |
permissive
|
thomasblanchet/gpinter
|
e974de36c0efd4c8070fb9b8cc0311bb10c356df
|
0ce91dd088f2e066c7021b297f0ec3cecade2072
|
refs/heads/master
| 2022-11-28T11:18:10.537146
| 2022-11-22T16:22:40
| 2022-11-22T16:22:40
| 72,655,645
| 19
| 5
| null | 2017-04-19T08:25:44
| 2016-11-02T15:51:21
|
R
|
UTF-8
|
R
| false
| true
| 702
|
rd
|
deriv_phi.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distribution-functions.R
\name{deriv_phi}
\alias{deriv_phi}
\title{Derivative of the interpolation function from generalized Pareto
interpolation}
\usage{
deriv_phi(dist, x, ...)
}
\arguments{
\item{dist}{A \code{gpinter_dist_orig} object, as returned by
\code{tabulation_fit} or \code{share_fit}.}
\item{x}{The function evaluation point(s).}
\item{...}{Ignored.}
}
\value{
The value of the derivative of the interpolation at \code{x}.
}
\description{
This function is the first derivative of \code{phi} applied
to objects of class \code{gpinter_dist_orig}.
}
\author{
Thomas Blanchet, Juliette Fournier, Thomas Piketty
}
|
9acfa8834a8b6df05193f05c21cfd110721a4857
|
9c6d817b0ae4527ab2b14e30ff1a5cb75895e014
|
/man/plotMedianPhaseLag.Rd
|
b8bd822b02f8c4afa25decc9045ec1be8554b004
|
[
"MIT"
] |
permissive
|
mc30/wasp
|
7d11a47b9ba79e8d8c643f017e49ea3c4d71b53b
|
9cbf95b409c9d38859ce822e0d7e0e00d7854f18
|
refs/heads/master
| 2022-12-15T03:42:56.659903
| 2020-09-12T17:15:49
| 2020-09-12T17:15:49
| 106,267,620
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 672
|
rd
|
plotMedianPhaseLag.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phase.R
\name{plotMedianPhaseLag}
\alias{plotMedianPhaseLag}
\title{Plot median phase lags averaged over time}
\usage{
plotMedianPhaseLag(slideMat, perc = 0.95, ylim = c(-pi, pi),
ylab = "Median phase lag from other locations", xlab = "", ...)
}
\arguments{
\item{slideMat}{A matrix of phase angles.}
\item{perc}{Percentage envelope for quantiles around the median.}
\item{ylim}{Limits for the y axis; defaults to \code{c(-pi, pi)}.}

\item{ylab}{Label for the y axis.}

\item{xlab}{Label for the x axis.}
\item{\dots}{Additional graphical parameters.}
}
\description{
Plot median phase lags averaged over time.
}
\author{
Mikhail Churakov (\email{mikhail.churakov@pasteur.fr}).
}
|
ee8dfafb945c96d18285998687fc933429693e10
|
ccd0bdbd9df4e2fe0ad17f650d0fadb23f0ffdf5
|
/ShinyApps/Shiny_Modules/Curiosity_feature_exploration/bivariate_num_num/server.R
|
52498c413256cf094c2ca18025853848573cbc35
|
[] |
no_license
|
dzhwoo/R-Scripts
|
ada196ac20c27d9042ed56ded1b8ee3e6510fd8a
|
4c512609b0db8a4a62930f8baadeeb8d639ff5fc
|
refs/heads/master
| 2021-01-23T06:44:50.678766
| 2015-07-10T18:48:40
| 2015-07-10T18:48:40
| 38,253,097
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,072
|
r
|
server.R
|
library(shiny)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
  # Expression that generates a histogram. The expression is
  # wrapped in a call to renderPlot to indicate that:
  #
  # 1) It is "reactive" and therefore should re-execute automatically
  # when inputs change
  # 2) Its output type is a plot
  #
  # NOTE(review): hard-coded absolute Windows path -- this read happens once
  # at app start-up and will fail on any other machine; consider a relative
  # path or a file-upload input.
  data<-read.csv("C:/Users/dwoo57/Google Drive/Career/Data Mining Competitions/Kaggle/Walmart - Inventory and weather prediction/Experiments/Gamma/Train/Explanatory_Analysis/train_dept92_yoy_sales_unemployment.csv")
  output$distPlot <- renderPlot({
    x <- faithful[, 2]  # Old Faithful Geyser data
    bins <- seq(min(x), max(x), length.out = input$bins + 1)
    # draw the histogram with the specified number of bins
    hist(x, breaks = bins, col = 'skyblue', border = 'white')
    # Simple regression of weekly sales on unemployment for the diagnostics
    # below.  NOTE(review): each plot call below replaces the previous one in
    # this single output slot; only the last plot is shown to the user.
    fit<-lm(Weekly_Sales ~ Unemploy, data = data)
    # how to do multiple plots
    # C for consistency
    # diagnostic 1a: Check for constant variance, variance of errors should be constant along the line
    plot(Weekly_Sales ~ Unemploy, data = data)
    abline(fit,col='skyblue')
    # diagnostic 1b: Scatter plot of residuals
    # diagnostic 2: errors should be normally distributed - as in centered and now skewed, some type of steady state
    plot(fit)
    # diagnostic 3: residual analysis but residual is rescaled. All values are positive.
    #This checks for consistency but since before residuals can be both -ve and +ve
    #diagnostic 4: leverage is a measure of how much each point influences the regression.
    #useful for picking out points. Make sure no points are outside of cook's distance
    # further aways from zero and more of them, means more influence
  })
  # Second output: duplicates the Old Faithful histogram above.
  output$distPlotB <- renderPlot({
    x <- faithful[, 2]  # Old Faithful Geyser data
    bins <- seq(min(x), max(x), length.out = input$bins + 1)
    # draw the histogram with the specified number of bins
    hist(x, breaks = bins, col = 'skyblue', border = 'white')
  })
})
|
4a959f3ce1aafb129d063d7745c0f21cc3bf1a89
|
07621e6cbd78a4faebae40d206d986a256f3041b
|
/Train_RFmodel.R
|
5e45643e2a462b7fee7031e595831d3bff80ccab
|
[
"MIT"
] |
permissive
|
attilagk/MosaicForecast
|
37d039bf9ce20c2bfab59d4c6c211fa4b1ad1770
|
5cb8af1fe31a96d1bf67f4f27d58aa798a07d868
|
refs/heads/master
| 2020-07-07T22:08:19.355692
| 2020-01-10T20:42:19
| 2020-01-10T20:42:19
| 203,488,745
| 0
| 0
|
MIT
| 2019-08-21T02:16:58
| 2019-08-21T02:16:58
| null |
UTF-8
|
R
| false
| false
| 6,597
|
r
|
Train_RFmodel.R
|
#!/usr/bin/env Rscript
.libPaths( c( .libPaths(), "/n/data1/hms/dbmi/park/yanmei/tools/R_packages/") )
args = commandArgs(trailingOnly=TRUE)
if (length(args)!=4) {
stop("Rscript Train_RFmodel.R trainset prediction_model type_model(Phase|Refine) type_variant(SNP|INS|DEL)
Note:
The \"Phase\" model indicates the RF model trained on phasing (hap=2, hap=3, hap>3);
The \"Refine\" model indicates the RF model trained on Refined-genotypes from the multinomial logistic regression model (het, mosaic, repeat, refhom)
", call.=FALSE)
} else if (length(args)==4) {
input_file <- args[1]
prediction_model <- args[2]
type <- as.character(args[3])
type_variant <- as.character(args[4])
}
library(caret)
library(e1071)
set.seed(123)
my_chrXY <- function(x){
!(strsplit(x,"~")[[1]][2]=="X"||strsplit(x,"~")[[1]][2]=="Y")
}
if (type=="Phase") {
#head demo/trainset
#id dp_p conflict_num mappability type length GCcontent ref_softclip alt_softclip querypos_p leftpos_p seqpos_p mapq_p baseq_p baseq_t ref_baseq1b_p ref_baseq1b_t alt_baseq1b_p alt_baseq1b_t sb_p context major_mismatches_mean minor_mismatches_mean mismatches_p AF dp mosaic_likelihood het_likelihood refhom_likelihood althom_likelihood mapq_difference sb_read12_p dp_diff phase validation pc1 pc2 pc3 pc4 phase_model_corrected
#1465~2~213242167~T~C 0.281242330831645 0 0.625 SNP 0 0.428571428571429 0.0150375939849624 0.00826446280991736 0.809467316642184 0.845437840198746 0.529485771832939 1 1.10459623063158e-05 4.39561488489149 8.75803415232249e-05 3.92264997745045 0.193506568120142 0.193506568120142 0.613465093083099 TAG 0.00370927318295739 0.0115151515151515 3.61059951117257e-20 0.476377952755905 254 0.0980449728144787 0.901955027185521 0 0 0 0.801304551054221 11.2142857142857 hap=2 het 1.06829805132481 -3.94107582807268 -1.47931744929006 -2.99768009916148 het
input <- read.delim(input_file, header=TRUE)
input <- input[apply(input,1,my_chrXY),]
input$mapq_p[is.na(input$mapq_p)]<-1
all_train <- input
all_train <- subset(input, phase != "notphased")
all_train$phase <- as.factor(as.character(all_train$phase))
all_train <-all_train[!is.na(all_train$mosaic_likelihood),]
#all_train.2 <- subset(all_train, select=-c(althom_likelihood, id, validation, dp_p, pc1, pc2, pc3, pc4, phase))
#all_train.2 <- subset(all_train, select=c(querypos_p,leftpos_p,seqpos_p,mapq_p,baseq_p,baseq_t,ref_baseq1b_p,ref_baseq1b_t,alt_baseq1b_p,alt_baseq1b_t,sb_p,context,GCcontent,major_mismatches_mean,minor_mismatches_mean,mismatches_p,AF,dp,mapq_difference,sb_read12_p,dp_diff,mosaic_likelihood,het_likelihood,refhom_likelihood,phasing))
if (type_variant=="SNP"){
all_train.2 <- subset(all_train, select=c(querypos_p,leftpos_p,seqpos_p,mapq_p,baseq_p,baseq_t,ref_baseq1b_p,ref_baseq1b_t,alt_baseq1b_p,alt_baseq1b_t,sb_p,context,major_mismatches_mean,minor_mismatches_mean,mismatches_p,AF,dp,mapq_difference,sb_read12_p,dp_diff,mosaic_likelihood,het_likelihood,refhom_likelihood,phase,conflict_num,mappability, ref_softclip, alt_softclip, indel_proportion_SNPonly, alt2_proportion_SNPonly))
}else if (type_variant=="INS"||type_variant=="DEL"){
all_train.2 <- subset(all_train, select=c(querypos_p,leftpos_p,seqpos_p,mapq_p,baseq_p,baseq_t,ref_baseq1b_p,ref_baseq1b_t,alt_baseq1b_p,alt_baseq1b_t,sb_p,GCcontent,major_mismatches_mean,minor_mismatches_mean,mismatches_p,AF,dp,mapq_difference,sb_read12_p,dp_diff,mosaic_likelihood,het_likelihood,refhom_likelihood,phase,conflict_num,mappability,length,ref_softclip,alt_softclip))
}
control <- trainControl(method="repeatedcv", number=10, repeats=3, search="grid")
tunegrid <- expand.grid(.mtry=30)
metric <- "Accuracy"
rf_gridsearch <- train(phase ~., data=all_train.2, method="rf", metric=metric,tuneGrid=tunegrid, trControl=control,na.action=na.exclude)
saveRDS(rf_gridsearch,file=prediction_model)
#input$prediction_phasing <- predict(rf_gridsearch, input)
#write.table(input, "test.prediction",sep="\t",quote=FALSE,row.names=FALSE, col.names=TRUE)
} else if (type=="Refine"){
input <- read.delim(input_file, header=TRUE)
input <- input[apply(input,1,my_chrXY),]
input$mapq_p[is.na(input$mapq_p)]<-1
all_train <- input
all_train <- subset(input, phase != "notphased")
all_train$phase <- as.factor(as.character(all_train$phase))
all_train <-all_train[!is.na(all_train$mosaic_likelihood),]
#if(sum(all_train$MAF==".")>0){
# all_train$MAF<-0
#}
#all_train$MAF[is.na(all_train$MAF)]<-0
if (type_variant=="SNP"){
all_train.2 <- subset(all_train, select=c(querypos_p,leftpos_p,seqpos_p,mapq_p,baseq_p,baseq_t,ref_baseq1b_p,ref_baseq1b_t,alt_baseq1b_p,alt_baseq1b_t,sb_p,context,major_mismatches_mean,minor_mismatches_mean,mismatches_p,AF,dp,mapq_difference,sb_read12_p,dp_diff,mosaic_likelihood,het_likelihood,refhom_likelihood,phase_model_corrected,conflict_num,mappability, ref_softclip, alt_softclip, indel_proportion_SNPonly, alt2_proportion_SNPonly))
}else if (type_variant=="INS" || type_variant=="DEL"){
all_train.2 <- subset(all_train, select=c(querypos_p,leftpos_p,seqpos_p,mapq_p,baseq_p,baseq_t,ref_baseq1b_p,ref_baseq1b_t,alt_baseq1b_p,alt_baseq1b_t,sb_p,GCcontent,major_mismatches_mean,minor_mismatches_mean,mismatches_p,AF,dp,mapq_difference,sb_read12_p,dp_diff,mosaic_likelihood,het_likelihood,refhom_likelihood,phase_model_corrected,conflict_num,mappability,length,ref_softclip,alt_softclip))
}
#all_train.2 <- subset(all_train, select=c(querypos_p,leftpos_p,seqpos_p,mapq_p,baseq_p,baseq_t,ref_baseq1b_p,ref_baseq1b_t,alt_baseq1b_p,alt_baseq1b_t,sb_p,context,GCcontent,major_mismatches_mean,minor_mismatches_mean,mismatches_p,AF,dp,mapq_difference,sb_read12_p,dp_diff,mosaic_likelihood,het_likelihood,refhom_likelihood,phase_corrected,MAF,repeats,ECNT,HCNT))
all_train.2$sb_p[all_train.2$sb_p=="Inf"]<- 100
all_train.2$sb_read12_p[all_train.2$sb_read12_p=="Inf"]<- 100
control <- trainControl(method="repeatedcv", number=10, repeats=3, search="grid")
tunegrid <- expand.grid(.mtry=30)
metric <- "Accuracy"
rf_gridsearch <- train(phase_model_corrected ~., data=all_train.2, method="rf", metric=metric,tuneGrid=tunegrid, trControl=control,na.action=na.exclude)
saveRDS(rf_gridsearch,file=prediction_model)
#write.table(input, "test.prediction",sep="\t",quote=FALSE,row.names=FALSE, col.names=TRUE)
}
|
19f72a339a194a72ae2af3e4e565a70f820b1656
|
a116eb289277d5a2f86f87f89f580deb6686130a
|
/R/IO-methods.R
|
50540434194776387982670f3df37d0180f9c67f
|
[] |
no_license
|
zachcp/MibigDomainData
|
770c01a02145b1854eee1e7fe541206a5382ec86
|
3263e4657a0cd8614416d4e2f2026985ac0fb2ba
|
refs/heads/master
| 2020-04-17T23:14:37.287750
| 2016-08-28T23:02:19
| 2016-08-28T23:02:19
| 66,304,693
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,937
|
r
|
IO-methods.R
|
#' Load BLAST tabular output files.
#'
#' Reads tabular BLAST output into a \code{\link[data.table]{data.table}}
#' and sets a key on the requested index columns (default: QueryID).
#'
#' @param blastfile. Required. Path location of a blastfile.
#' @param indexcols. Optional. List of columnvalues to set the index on.
#' @importFrom data.table fread
#' @importFrom data.table setkeyv
#'
#' @export
load_blast <- function(blastfile, indexcols = c("QueryID")) {
  # Standard 12-column BLAST outfmt-6 field names; also the only names
  # accepted in `indexcols`.
  column_names <- c("QueryID", "SubjectID", "Perc.Ident",
                    "Alignment.Length", "Mismatches", "Gap.Openings", "Q.start", "Q.end",
                    "S.start", "S.end", "E", "Bits")
  # Reject any requested index column that is not a known field name.
  if (!all(indexcols %in% column_names)) {
    stop(paste("bad values in the indexcols. only valid column names can be used:", paste(column_names, collapse = " ")))
  }
  blast_table <- fread(input=blastfile, header=FALSE, col.names = column_names)
  setkeyv(blast_table, cols = indexcols)
  return(blast_table)
}
#' Read USEARCH/VSEARCH UC Files
#'
#' UC files are output from Robert Edgar's USEARCH program as well as the
#' USEARCH clone, VSEARCH. The UC output file can be used as output for
#' blast-like searches as well as clustering; each usage fills the columns
#' slightly differently, so consult the documentation at
#' \url{http://drive5.com/usearch/manual/opt_uc.html}.
#' All fields are imported except columns 6 and 7, which are dummy columns
#' preserved in the UC format only for backwards compatibility.
#'
#' @importFrom data.table fread
#' @seealso \url{http://drive5.com/usearch/manual/opt_uc.html}
#' @export
load_uc_file <- function(ucfile) {
  # Names for the 8 retained fields (original columns 6 and 7 are dropped).
  field_names <- c("record.type", "cluster.number", "seqlength.or.clustersize",
                   "percent.id", "strand", "compressed.alignment",
                   "QueryID", "TargetID")
  uc_table <- fread(ucfile, drop = c(6,7))
  names(uc_table) <- field_names
  return(uc_table)
}
|
0e474d135e50646c427ef6ea072aadb8777a339f
|
4a2bff98ad5d6ad7ce3be718aba4137ee294a7e6
|
/ClassificarBigSmall.R
|
4c7153e3e24c06cfaaf5a91dc96a070dcc5f7007
|
[] |
no_license
|
pmeno/TG_2
|
33162ea92d968e5561091c072c7df3567435fec5
|
b476d9b164915cfb156e863077604528f7a9a41d
|
refs/heads/main
| 2023-06-11T21:04:22.298308
| 2021-07-09T22:07:28
| 2021-07-09T22:07:28
| 356,704,897
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 166
|
r
|
ClassificarBigSmall.R
|
# Classify firms as Big ('B') or Small ('S') by market value.
#
# Adds a column S_B to `dados` via data.table's `:=`: 'B' when
# valorDeMercado is at or above the median, 'S' otherwise, computed
# within each `year` group.
#
# NOTE(review): `:=` modifies the data.table by reference, so the caller's
# object is updated as well as the returned one -- confirm this is intended.
ClassificarBigSmall <- function(dados = data.table())
{
  dados <- dados[, S_B := ifelse(valorDeMercado >= median(valorDeMercado), 'B', 'S'), by = year]
  dados
}
|
40d9ec48dd6f098867a6b0937e97b43d11e402ef
|
807b8cf8c5d0062836b815bee8d2b7585aa8c20f
|
/man/dbData.Rd
|
8267c407a8bef304328ec139f0d69065add9d164
|
[] |
no_license
|
heike/dbData
|
0da89e56e62f8199a3626e630a7d51dfb593c5bb
|
19fb752aacc6563e9d0d01254d81fbc6c146130c
|
refs/heads/master
| 2020-12-24T18:03:41.536904
| 2014-10-19T04:38:10
| 2014-10-19T04:38:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,237
|
rd
|
dbData.Rd
|
\name{dbData}
\alias{dbData}
\title{Function to get sufficient statistics of variables from an SQL database}
\usage{
dbData(data, vars = list(), binwidth = -1, where = "")
}
\arguments{
\item{data}{dataDB object}
\item{vars}{list of variable names}
\item{binwidth}{vector of bin sizes for each variable. -1
for minimal binwidth}
\item{where}{character string with conditional statement
for SQL query}
}
\description{
Function to get sufficient statistics of variables from
an SQL database
}
\examples{
connect <- dbConnect(dbDriver("MySQL"), user="2009Expo",
password="R R0cks", port=3306, dbname="baseball",
host="headnode.stat.iastate.edu")
pitch <- new("dataDB", co=connect, table="Pitching")
names(pitch)
head(pitch, n=10)[,1:8]
pitch.stats <- dbData(vars=list("H", "SO"), pitch)
require(ggplot2)
qplot(H, SO, alpha=Freq, data=pitch.stats)
qplot(H, SO, fill=Freq, data=dbData(pitch, list("SO", "H"),
binwidth=c(10,50)), geom="tile")
qplot(H, SO, fill=Freq, data=dbData(pitch, list("SO", "H", "yearID"),
binwidth=c(10,50, -1)), facets=~yearID, geom="tile")
qplot(H, SO, fill=Freq, data=dbData(pitch, list("SO", "H", "yearID"),
binwidth=c(10,50, -1), where="yearID > 1990"), facets=~yearID, geom="tile")
}
|
3ff53d51211d59e4e33d3487784c023a04306f40
|
291dc3a874304b9fb1d88713f23dc6bc8bb3c3a2
|
/R/topo_correct.R
|
d6de9ccb756470bbcb51db8556e51fd689d49297
|
[] |
no_license
|
atkinsjeff/endoR
|
e4bdd392ae086ec7c8b3367d1310a1533a12f282
|
0b9c6df8c1ee688de926a0749edeefcbba125bba
|
refs/heads/master
| 2021-01-14T23:48:32.123681
| 2020-02-24T17:44:36
| 2020-02-24T17:44:36
| 242,801,972
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,227
|
r
|
topo_correct.R
|
#' Topographically correct a LiDAR point cloud
#'
#' \code{topo_correct} estimates a ground surface for a scan and subtracts
#' it from the point heights, returning a height-normalized \code{LAS}
#' object.
#'
#' NOTE(review): the original roxygen header here described an unrelated
#' "effective number of layers" function (calc_enl); it has been rewritten
#' to match this function's actual behavior.
#'
#' @param scan a matrix or data frame whose first three columns are X, Y, Z
#' @param resolution grid cell size used for the terrain rasters
#' @param plane if TRUE, a robust-plane fit is attempted -- but see the
#'   NOTE below: that branch is currently unreachable.
#'
#' @keywords enl
#' @return a LAS object with the interpolated terrain subtracted
#' @export
topo_correct<-function(scan, resolution = 2, plane = FALSE){
  # Build a LAS object from the first three (X, Y, Z) columns; the CRS
  # string is a hard-coded equidistant-cylindrical projection.
  las<-LAS(scan[,1:3])
  crs(las)<-"+proj=eqc +lat_ts=0 +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +a=6371007 +b=6371007 +units=m +no_defs"
  # r <- raster(xmn=-200, xmx=200, ymn=-200, ymx=200, resolution = resolution)
  # Raster extent padded by one cell around the point cloud's bounding box.
  r <- raster(xmn=floor(min(las@data$X-resolution)), xmx=ceiling(max(las@data$X+resolution)), ymn=floor(min(las@data$Y-resolution)), ymx=ceiling(max(las@data$Y+resolution)), resolution = resolution)
  # Approximate ground per cell as the 1st percentile of Z.
  topo<-grid_metrics(las, quantile(Z, 0.01), r)
  plot(topo, col = viridis(250))
  crs(topo)<-"+proj=eqc +lat_ts=0 +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +a=6371007 +b=6371007 +units=m +no_defs"
  slope<-terrain(topo, opt = "slope", unit = "degrees", neighbors = 8)
  plot(slope)
  # Mask implausibly steep cells (> 40 degrees) before surface fitting.
  topo[slope>40]<-NA
  setMinMax(topo)
  topo.df<-as.data.frame(rasterToPoints(topo))
  colnames(topo.df)<-c("X","Y","Z")
  # Window sizes and thresholds for the progressive morphological filter.
  ws <- seq(3,12, 3)
  th <- seq(0.1, 1.5, length.out = length(ws))
  topo.las<-LAS(topo.df)
  crs(topo.las)<-"+proj=eqc +lat_ts=0 +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +a=6371007 +b=6371007 +units=m +no_defs"
  # topo.las@data$Classification<-2
  # Classify ground points, then interpolate a terrain raster from them
  # via k-nearest-neighbour inverse-distance weighting.
  ground<-lasground(topo.las, pmf(ws, th), last_returns = FALSE)
  # plot(ground, color = "Classification")
  topo.las.r<-grid_terrain(ground, res = resolution, knnidw(k = 21))
  plot(topo.las.r)
  # Height-normalize: subtract the terrain raster from the point cloud.
  las<- las - topo.las.r
  return(las)
  # NOTE(review): everything below is unreachable -- it sits after
  # return(las), so the `plane = TRUE` option has no effect. Confirm whether
  # the plane fit should run (and return something) before the return above.
  if(plane == TRUE) {
    topo_pts<-as.data.frame(rasterToPoints(topo))
    colnames(topo_pts)[3]<-"z"
    topo_pts$r<-sqrt(topo_pts$x^2 + topo_pts$y^2)
    # ggplot(topo_pts, aes(x, y, fill = z)) + geom_raster() + scale_fill_viridis()
    # Robust linear plane fit, down-weighting points far from the origin.
    plane<-rlm(z~x+y,
               data = na.omit(topo_pts),
               weights = 1/r,
               scale.est = "Huber")
    topo_pts$z_pred<-predict(plane,new.data = topo_pts)
  }
}
|
eaca6fa013f38b3333dd801f79b038778c686b09
|
1ea23f3dfb7617a0f4fcb995f453932a68d7619d
|
/R/mwlsr.R
|
08f6ea843e69216087a6f0bba69c75644abc3fdf
|
[] |
no_license
|
PfaffLab/mwlsr
|
d4e418ba824b264c8dd7a97ab33a8a746e452430
|
05db35454d75cb26edb69e3f5470b191c3be01f4
|
refs/heads/master
| 2020-03-17T16:21:57.095988
| 2018-06-04T23:04:47
| 2018-06-04T23:04:47
| 133,745,714
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 33,767
|
r
|
mwlsr.R
|
#' mwlsr
#'
#' Multiple Weighted Least Squares Regression (mwlsr). Used to fit gaussian
#' glm against multiple responses simultaneously.
#'
#' @param data Input response matrix with responses in columns
#' @param design Design matrix. See \link{model.matrix}
#' @param weights Weights matrix
#' @param scale.weights If TRUE then weights are scaled (default behavior)
#' @param data.err Additional per-response-value uncertainty that should be
#' considered in the final sum of squared residual. Useful if your response
#' values have some knowm measurement uncertainty that you'd like to
#' have considered in the models.
#' @param coef.method Method used to compute coefficients. This setting is
#' passed to \link{mols.coefs} or \link{wls.coefs}
#' @param coef.tol Tolerance setting for svd based coefficient calculation.
#' Passed to \link{mols.coefs} or \link{wls.coefs}
#' @param coefs.only Stop at the coefficient calculation and return only
#' the coefficients of the models.
#'
#' @return List with the following elements:
#' \item{coefficients}{Model coefficients}
#' \item{residuals}{Residuals of the fit}
#' \item{fitted.values}{Fitted values. Same dimension as the input response matrix.}
#' \item{deviance}{Sum of squared residuals}
#' \item{dispersion}{deviance / df.residual}
#' \item{null.deviance}{Sum of squared residuals for the NULL model (intercept only)}
#' \item{weights}{Weights matrix}
#' \item{prior.weights}{Weights matrix pre-scaling}
#' \item{weighted}{TRUE if fit was a weighted fit}
#' \item{df.residual}{Degrees of freedom of the model. \code{nrows(data) - ncol(design)}}
#' \item{df.null}{Degrees of freedom of the null model. \code{nrows(data) - 1}}
#' \item{y}{Input data matrix}
#' \item{y.err}{Input \code{data.err} matrix}
#' \item{X}{Design matrix}
#' \item{x}{If design matrix was based on factor levels then this will be a
#' factor vector that matches the original grouping vector}
#' \item{intercept}{TRUE if the fit has an Intercept}
#' \item{coef.hat}{If the fit has an Intercept then this is a matrix of
#' modified coefficients that represent the per-group averages. This is
#' calculated by adding the Intercept coefficients to each of the other
#' coefficients. This only makes sense if your design was based on a single
#' multi-level factor}
#'
#' @export
#' @examples
#' # Using the iris data.
#' design <- model.matrix(~Species, data=iris)
#' fit <- mwlsr(iris[, 1:4], design)
#' # test data association with the Species factor
#' result <- mwlsr.Ftest(fit)
#' print(table(result$F.padj < 0.05))
mwlsr <- function(data, design, weights=NULL, scale.weights=TRUE, data.err=NULL,
  coef.method=c("chol", "ginv", "svd", "qr"), coef.tol=1e-7, coefs.only=FALSE) {
  # --- coerce inputs to matrices -------------------------------------------
  if(!inherits(data, "matrix")) {
    if(inherits(data, "data.frame")) {
      data <- as.matrix(data)
    } else {
      # single response vector
      data <- matrix(data)
      colnames(data) <- "response"
    }
  }
  # NULL-safe coercion: matrix(NULL) errors, so only coerce when supplied
  if(!is.null(weights) && !inherits(weights, "matrix")) {
    weights <- matrix(weights)
  }
  if(!is.null(data.err) && !inherits(data.err, "matrix")) {
    data.err <- matrix(data.err)
  }
  if(nrow(data) != nrow(design)) {
    stop("Design and data do not match")
  }
  # --- initialize dimensions and bookkeeping -------------------------------
  coef.method <- match.arg(coef.method)
  n <- nrow(design)
  p <- ncol(design)
  num.fits <- ncol(data)
  df.null <- n
  df.residual <- n-p
  use.weights <- FALSE
  intercept <- FALSE
  coef.names <- colnames(design)
  # an intercept column costs one null degree of freedom; guard against
  # design matrices without column names
  if(!is.null(coef.names) && grepl("intercept", coef.names[1], ignore.case=TRUE)) {
    intercept <- TRUE
    df.null <- n-1
  }
  # default prior per-value uncertainty: none
  if(is.null(data.err)) {
    data.err <- data*0
  }
  if(is.null(weights)) {
    # unweighted fit: unit weights keep the formulas below uniform
    weights0 <- weights <- matrix(1, ncol=ncol(data), nrow=nrow(data))
  } else {
    # BUGFIX: all.equal() returns a character vector on mismatch, so the
    # previous '!all.equal(...)' raised a type error instead of this message
    if(!identical(dim(data), dim(weights))) {
      stop("Weights matrix doesn't match data dimension")
    }
    weights0 <- weights
    if(scale.weights) {
      # normalize each weight column to mean 1
      weights <- sweep(weights, 2, colMeans(weights), "/")
    }
    use.weights <- TRUE
  }
  if(use.weights) {
    # weighted coefficients must be solved one response at a time
    blist <- lapply(seq_len(num.fits), function(i) {
      drop(wls.coefs(design, data[, i], weights=weights[, i],
        method=coef.method, tol=coef.tol))
    })
    coefficients <- do.call(cbind, blist)
  } else {
    # no weights: all responses share a single pseudo-inverse
    coefficients <- mols.coefs(design, data, method=coef.method, tol=coef.tol)
  }
  rownames(coefficients) <- colnames(design)
  colnames(coefficients) <- colnames(data)
  if(coefs.only) {
    return(coefficients)
  }
  # for intercept designs, also report per-group means: the intercept is
  # added back into each non-intercept coefficient
  coefsHat <- NULL
  if(intercept && ncol(design) > 1) {
    coefsHat <- coefficients
    for(i in 2:nrow(coefsHat)) {
      coefsHat[i, ] <- coefsHat[i, ]+coefsHat[1, ]
    }
  }
  # fitted values and residuals
  fitted.values <- design %*% coefficients
  residuals <- data-fitted.values
  # null deviance: about the weighted column mean for intercept models,
  # about zero for no-intercept models
  if(intercept) {
    null.deviance <- colSums(weights * sweep(data, 2, colSums(weights*data)/colSums(weights), "-")^2)
  } else {
    null.deviance <- colSums(weights * data^2)
  }
  # prior per-value uncertainty cannot be explained by any model, so it is
  # added to both the null and residual deviances
  null.deviance.err <- colSums(weights * data.err)
  null.deviance <- null.deviance + null.deviance.err
  deviance <- colSums(weights * residuals^2) + null.deviance.err
  # per-fit dispersion, scaled by the weighted residual degrees of freedom
  wfactor <- df.residual*colSums(weights)/n
  dispersion <- deviance/wfactor
  # annotate the matrices and vectors
  dimnames(residuals) <- dimnames(fitted.values) <- dimnames(data)
  names(dispersion) <- names(deviance) <- names(null.deviance) <- colnames(data)
  # recover the grouping factor when the design encodes one
  x <- NULL
  if(ncol(design) > 1) {
    try(x <- mwlsr.design2factor(design))
  }
  # assemble a glm-like result list
  lout <- list(
    coefficients=coefficients,
    residuals=residuals,
    fitted.values=fitted.values,
    deviance=deviance,
    dispersion=dispersion,
    null.deviance=null.deviance,
    weights=weights,
    prior.weights=weights0,
    weighted=use.weights,
    df.residual=df.residual,
    df.null=df.null,
    y=data, y.err=data.err, X=design, x=x,
    intercept=intercept)
  if(intercept) {
    lout$coef.hat <- coefsHat
  }
  class(lout) <- c("mwlsr", class(lout))
  return(lout)
}
#' print.mwlsr
#'
#' Override of generic \link{print} method for mwlsr objects.
#'
#' @export
print.mwlsr <- function(x, ...) {
  # Emit a short banner followed by the names of the list members.
  cat("\nMultiple LS regression result (list)\n\nMembers:\n")
  print(names(x))
}
#' mwlsr.rSquared
#'
#' Calculate r-squared for each model in fit
#'
#' @param fit Result of mwlsr fit
#' @return Input mwlsr object (list) with \code{rquared} element attached
#' @export
#'
mwlsr.rSquared <- function(fit) {
  # R^2 = 1 - RSS/TSS, computed per response from the stored deviances.
  fit$rsquared <- 1 - (fit$deviance / fit$null.deviance)
  return(fit)
}
#' mwlsr.Fstatistic
#'
#' Calculate F-statistic for each model.
#'
#' @param fit mwlsr fit object
#' @return Input mwlsr object with \code{F} and \code{F.pval} elements
#' attached
#' @export
#'
mwlsr.Fstatistic <- function(fit) {
  # Make sure r-squared values are available first.
  if (is.null(fit$rsquared)) {
    fit <- mwlsr.rSquared(fit)
  }
  # Model df = (n - 1) - residual df; F = (R2/df1) / ((1-R2)/df2).
  df.model <- (nrow(fit$X) - 1) - fit$df.residual
  r2 <- fit$rsquared
  fit$F <- (r2 * fit$df.residual) / ((1 - r2) * df.model)
  fit$F.pval <- pf(fit$F, df.model, fit$df.residual, lower.tail = FALSE)
  return(fit)
}
#' mwlsr.Ftest
#'
#' Calculates F-statistic and p-values for all models in the fit. Returns
#' a table of the results. This is only sensible if your design included
#' an intercept.
#'
#' @param fit mwlsr fit object
#' @return data.frame with F-test results
#' @export
mwlsr.Ftest <- function(fit) {
  # Compute the F statistics if they are not already on the fit.
  if (is.null(fit$F)) {
    fit <- mwlsr.Fstatistic(fit)
  }
  # Variable labels: coefficient column names, or positional indices.
  vid <- colnames(fit$coefficients)
  if (is.null(vid)) {
    vid <- 1:ncol(fit$coefficients)
  }
  # One row per response: deviances, R^2, F statistic and BH-adjusted p.
  tab <- data.frame(
    varid = vid,
    df.null = fit$df.null,
    df = fit$df.residual,
    null.deviance = fit$null.deviance,
    deviance = fit$deviance,
    change = fit$null.deviance - fit$deviance,
    r2 = fit$rsquared,
    F = fit$F,
    F.pval = fit$F.pval,
    F.padj = p.adjust(fit$F.pval, method = "BH"))
  rownames(tab) <- vid
  tab
}
#' mwlsr.overallFstatistic
#'
#' Calculates F-statistic and p-value for all models. In this test we
#' sum all of the residual deviance and compare it to the total sum of
#' null deviance.
#'
#' @param fit mwlsr fit object
#' @return Vector containing the results
#' @export
mwlsr.overallFstatistic <- function(fit) {
  # Pool residual and null deviance across all responses, then run a
  # single F-test of the full model against the null model.
  rss <- sum(fit$deviance)
  tss <- sum(fit$null.deviance)
  df.model <- fit$df.null - fit$df.residual
  F <- ((tss - rss) / df.model) / (rss / fit$df.residual)
  pval <- pf(F, df.model, fit$df.residual, lower.tail = FALSE)
  out <- c(fit$df.null, fit$df.residual, tss, rss, tss - rss, F, pval)
  names(out) <- c("df.null", "df.residual", "null.deviance", "deviance", "change", "F", "pval")
  return(out)
}
#' mwlsr.coefStats
#'
#' Calculate coefficient standard errors, t-values and p-values. Results
#' are appended to the input mwlsr object.
#'
#' @param fit mwlsr fit object
#' @return mwlsr fit object with coefficient statistics results appended
#' @export
mwlsr.coefStats <- function(fit) {
  # Standard errors come from the diagonal of the inverse cross-product
  # of the design, scaled by each model's dispersion.
  if (fit$weighted) {
    # Weighted fit: each response has its own X'WX, so compute the
    # inverse diagonal once per model.
    n.fits <- ncol(fit$coefficients)
    sccm <- sapply(seq_len(n.fits), function(j) {
      xtwx <- t(fit$X) %*% diag(fit$weights[, j]) %*% fit$X
      diag(chol2inv(chol(xtwx)))
    })
    fit$sccm <- sccm
    fit$coef.stderr <- sqrt(sccm %*% diag(fit$dispersion))
  } else {
    # Unweighted fit: one inverse cross-product shared by all responses.
    sccm <- diag(chol2inv(chol(crossprod(fit$X))))
    fit$sccm <- sccm
    fit$coef.stderr <- sqrt(sccm %*% matrix(fit$dispersion, 1))
  }
  # Two-sided t-test for each coefficient against zero.
  fit$coef.tvals <- fit$coefficients / fit$coef.stderr
  fit$coef.pvals <- 2 * pt(abs(fit$coef.tvals), fit$df.residual, lower.tail = FALSE)
  dimnames(fit$coef.stderr) <- dimnames(fit$coef.tvals) <-
    dimnames(fit$coef.pvals) <- dimnames(fit$coefficients)
  return(fit)
}
#' mwlsr.groupStats
#'
#' If your design was based on a single multi-level factor then you can
#' use this function to calculate per-group deviance, variance (dispersion),
#' and standard error. Can be handy if you need to calculate group
#' level variances.
#'
#' @param fit mwlsr fit object
#' @return mwlsr fit object with group-level statistics appended
#' @importFrom MASS ginv
#' @export
mwlsr.groupStats <- function(fit) {
	# per-observation weighted squared residuals and weighted prior errors
	wresid <- fit$weights * fit$residuals^2
	werr <- fit$weights * fit$y.err
	# sum of weighted squared residuals per group plus the sum of the weighted
	# errors per group (t(X) %*% m sums rows of m within each design column)
	group.deviance <- (t(fit$X) %*% wresid) + (t(fit$X) %*% werr)
	# fraction of each group's deviance contributed by the residuals, as
	# opposed to the prior errors
	group.rel <- (t(fit$X) %*% wresid)/group.deviance
	# make group dispersions
	# per-group df = group size - 1; singleton groups would give df 0, so
	# clamp them to 1 to avoid division by zero
	n <- colSums(fit$X)-1
	if(any(n==0)) {
		n[n==0] <- 1
	}
	# ginv(diag(n)) %*% m divides row i of m by n[i]
	group.dispersion <- ginv(diag(n)) %*% group.deviance
	n <- colSums(fit$X)
	# NOTE(review): this divides dispersion by the group size; a standard
	# error of the mean would usually be sqrt(dispersion/n) -- confirm intent
	group.stderr <- ginv(diag(n)) %*% group.dispersion
	rownames(group.rel) <- rownames(group.stderr) <- rownames(group.deviance) <- rownames(group.dispersion) <- colnames(fit$X)
	fit$group.deviance <- group.deviance
	fit$group.dispersion <- group.dispersion
	fit$group.stderr <- group.stderr
	fit$group.rel <- group.rel
	return(fit)
}
#' mwlsr.contrastModelMatrix
#'
#' Transforms a design matrix to a contrast matrix. Instead of calculating
#' the contrast result directly, as in \link{mwlsr.contrastTest},
#' this method would be used to create "reduced" model with a contrast
#' matrix and then you can evaluate significant associations with and LRT.
#' The code for this function is based on code within edgeR::glmLRT.
#'
#' @param design Full design matrix
#' @param contrast Single column contrast matrix indicating which levels
#' of the full design to contrast against one other.
#' @return New, reduced, design matrix
#'
#' @export
mwlsr.contrastModelMatrix <- function(design, contrast) {
	##
	# NOTE: the bulk, if not all, of this code is from edgeR::glmLRT
	contrast0 <- contrast
	contrast <- as.matrix(contrast)
	if(nrow(contrast) != ncol(design)) stop("contrast does not match design matrix dimension")
	coef.names <- colnames(design)
	nlibs <- ncol(design)
	# QR-decompose the contrast; its rank is the number of independent
	# contrasts being removed from the design
	qrc <- qr(contrast)
	ncontrasts <- qrc$rank
	if(ncontrasts==0) stop("contrasts are all zero")
	# indices of the coefficients that the contrast(s) span
	coef <- 1:ncontrasts
	if(ncontrasts > 1) {
		coef.name <- paste("LR test on", ncontrasts, "degrees of freedom")
	} else {
		# single contrast: build a readable label such as "1*A -1*B"
		contrast <- drop(contrast)
		i <- contrast != 0
		coef.name <- paste(paste(contrast[i], coef.names[i], sep="*"), collapse=" ")
	}
	# rotate the design so the contrast directions occupy the leading
	# columns, then drop those columns to obtain the reduced design
	Dvec <- rep.int(1, nlibs)
	Dvec[coef] <- diag(qrc$qr)[coef]
	Q <- qr.Q(qrc, complete=TRUE, Dvec=Dvec)
	design <- design %*% Q
	design0 <- design[, -coef]
	colnames(design0) <- paste("coef", 1:ncol(design0), sep="")
	# carry the originating contrast and its label along as attributes
	attr(design0, "contrast") <- contrast0
	attr(design0, "coef.name") <- coef.name
	return(design0)
}
#' mwlsr.contrastTest
#'
#' Contrast one or more levels of the design factors against one or more
#' other levels. This kind of test uses the full model's dispersions
#' as a basis for the comparison of the means of two conditions. Useful for
#' comparing levels of a single multi-level factor, such as different
#' groups in an RNA-Seq experiment, to one another to check for statistical
#' difference in means.
#'
#' @param fit mwlsr fit object
#' @param contrast Single-column contrast matrix
#' @param coef Coefficient to test (for intercept models)
#' @param ncomps Sidak post-hoc correction factor. Default behavior is
#' no correction.
#' @param squeeze.var Employ limma's 'squeeze.var' method which not only
#' adjusts the model's dispersion but also the residual degrees of freedom
#' @return data.frame with results of the test
#' @importFrom limma squeezeVar
#'
#' @export
mwlsr.contrastTest <- function(fit, contrast=NULL, coef=NULL, ncomps=NULL,
	squeeze.var=FALSE) {
	# Exactly one of 'coef' or 'contrast' must identify what to test.
	if(is.null(coef) & is.null(contrast)) {
		stop("Must specify either a coefficient or a contrast")
	}
	# figure out number of comparisons within model for Sidak correction
	# (defaults to all pairwise comparisons among the factor levels)
	if(is.null(ncomps)) {
		nconds <- length(levels(fit$x))
		ncomps <- nconds*(nconds-1)/2
	}
	# optionally moderate the dispersions with limma's empirical Bayes
	# shrinkage, which also inflates the residual degrees of freedom
	if(squeeze.var) {
		if(!require(limma)) stop("Missing package 'limma'. Cannot perform 'squeeze.var' without it.")
		out <- limma::squeezeVar(fit$dispersion, fit$df.residual)
		fit$df.prior <- fit$df.residual
		fit$df.residual <- fit$df.residual + out$df.prior
		fit$dispersion.prior <- fit$dispersion
		fit$dispersion <- out$var.pos
	}
	if(!missing(coef)) {
		# return single coefficient statistics
		if(coef > ncol(fit$X)) {
			stop("Coefficient is beyond the design dimension")
		}
		# lazily compute coefficient-level stderr/t/p if absent
		if(is.null(fit$coef.stderr) || is.null(fit$coef.tvals) || is.null(fit$coef.pvals)) {
			message("Calculating coefficient statistics...")
			fit <- mwlsr.coefStats(fit)
		}
		# reference mean is the intercept row; target adds the tested
		# coefficient on top of it
		mref <- fit$coefficients[1, ]
		mtarget <- mref+fit$coefficients[coef, ]
		tnum <- fit$coefficients[coef, ]
		tstat <- fit$coef.tvals[coef, ]
		tdenom <- fit$coef.stderr[coef, ]
		pval <- mwlsr.p.adjust(fit$coef.pvals[coef, ], n.comps=ncomps, method="sidak")
		baseMean <- (mref+mtarget)/2
	} else if(!missing(contrast)) {
		# return contrast test statistics
		coefs <- fit$coefficients
		design <- fit$X
		if(fit$intercept) {
			# use adjusted coefficients if we had an intercept
			coefs <- fit$coef.hat
			# if it is a factor based design, clear the intercept
			# indicator for rows that belong to another level
			if(all(design==0 | design==1)) {
				idx_fix <- which(rowSums(design) > 1)
				design[idx_fix, 1] <- 0
			}
		}
		contrast <- matrix(drop(contrast))
		n <- ncol(fit$coefficients)
		# numerator for t stat also the change between the
		# conditions being contrasted
		tnum <- drop(t(contrast) %*% coefs)
		if(fit$weighted) {
			# weighted - one calculation per model
			tdenom <- sapply(1:n, function(i) {
				wt <- fit$weights[, i]
				sscm <- chol2inv(chol((t(design) %*% diag(wt) %*% design)))
				rres <- t(contrast) %*% sscm %*% contrast
				return(rres)
			})
		} else {
			# no weights so we can do this more fasterer
			tdenom <- drop(t(contrast) %*% chol2inv(chol(crossprod(design))) %*% contrast)
		}
		# finish the standard error calculation
		tdenom <- sqrt(fit$dispersion * tdenom)
		# tstaistic and pvalue (two-sided t, Sidak-corrected for ncomps)
		tstat <- tnum/tdenom
		pval <- pt(abs(tstat), fit$df.residual, lower.tail=FALSE)*2
		pval <- mwlsr.p.adjust(pval, n.comps=ncomps, method="sidak")
		# make the group means: positive contrast side -> target mean,
		# negative side -> reference mean
		tmp <- contrast > 0
		ctmp <- contrast
		ctmp[tmp] <- 0
		mref <- drop(t(abs(ctmp)) %*% coefs)
		tmp <- contrast < 0
		ctmp <- contrast
		ctmp[tmp] <- 0
		mtarget <- drop(t(ctmp) %*% coefs)
		baseMean <- drop(t(abs(contrast)) %*% coefs)/2
	}
	# output table: one row per response
	dres <- data.frame(row.names=colnames(fit$coefficients), id=colnames(fit$coefficients),
		baseMean=baseMean, condA=mref, condB=mtarget, change=tnum,
		stderr=tdenom, tstat=tstat, pval=pval, padj=p.adjust(pval, method="BH"), stringsAsFactors=FALSE)
	# annotate significance direction with star strings
	dres$status <- "n.s."
	tmp <- sapply(dres$padj, pval2stars)
	mm <- tmp != "N.S."
	if(any(mm)) {
		mhat <- mm & dres$change < 0
		if(any(mhat)) {
			dres$status[mhat] <- paste("sig.neg", tmp[mhat], sep="")
		}
		mhat <- mm & dres$change > 0
		if(any(mhat)) {
			dres$status[mhat] <- paste("sig.pos", tmp[mhat], sep="")
		}
	}
	# for the single-coefficient case, label the mean columns with the
	# corresponding design column names
	if(!missing(coef)) {
		names(dres)[3:4] <- colnames(fit$X)[c(1, coef)]
	}
	# lout <- list(result=dres)
	# class(lout) <- c("mwlsrContrastResult", "list")
	# coming soon!
	# return(newDEResult(lout))
	return(dres)
}
#' mwlsr.p.adjust
#'
#' Performs multi-contrast within model p-value adjustment. This adjustment
#' is performed at each p-value and is based on the number of contrasts
#' being tested in the model.
#'
#' @param p Vector of p-values
#' @param n.comps Number of contrasts being tested within the model. Defaults
#' to the total number of possible pairwise contrasts (probably excessive)
#' @param n.levels Number of levels in the factor design (or columns of the
#' design matrix.
#' @param n.samples Required for "scheffe" method.
#' @param method Adjustment method. Sidak is the default.
#'
#' @return Adjusted p-values
#'
#' @export
mwlsr.p.adjust <- function(p, n.comps=NA, n.levels=NA, n.samples=NA, method=c("sidak", "scheffe", "bonferroni")) {
  method <- match.arg(method)
  # need at least one way of determining the number of comparisons
  if(is.na(n.comps) && is.na(n.levels)) {
    stop("You must specifiy either the number of comparisons (n.comp) or the number of factor levels in the model (n.levels)")
  }
  if(method=="sidak" || method=="bonferroni") {
    if(is.na(n.comps)) {
      # default to all pairwise comparisons among the factor levels.
      # BUGFIX: this previously assigned to 'n.comp' (typo), leaving
      # n.comps NA and producing NA-adjusted p-values.
      n.comps <- n.levels*(n.levels-1)/2
    }
  } else if(method=="scheffe") {
    if(is.na(n.levels)) {
      stop("Cannot calculate scheffe correction without total number of levels (n.levels)")
    }
    if(is.na(n.samples)) {
      stop("Cannot calculate scheffe correction without total number of samples (n.samples)")
    }
  }
  if(method=="scheffe") {
    # scheffe expects LSD t statistics, which typically exceed 1
    if(!(any(p > 1))) {
      message("WARNING: Input for scheffe correction is supposed to be the LSD (t) statistic")
    }
  }
  p0 <- switch(method,
    sidak={
      # family-wise correction: 1 - (1 - p)^m
      1 - (1 - p)^n.comps
    },
    scheffe={
      # convert the t statistic into an F statistic and return its p-value
      F <- p^2/(n.levels-1)
      df1 <- n.levels-1
      df2 <- n.samples-n.levels
      pf(F, df1, df2, lower.tail=FALSE)
    },
    bonferroni={
      p*n.comps
    })
  if(any(p0==0)) {
    idx <- p0==0
    # set to smallest double such that 1-x != 1
    p0[idx] <- .Machine$double.neg.eps
  }
  return(p0)
}
#' mwlsr.tukeyHSD
#'
#' Implementation of Tukey HSD per model. This only works for intercept
#' designs.
#'
#' @param fit mwlsr fit object
#' @return list with everything in it (TODO: explain results)
#'
#' @export
mwlsr.tukeyHSD <- function(fit) {
	X <- fit$X
	# only 0/1 indicator (factor) designs are supported
	if(!all(X==1 | X==0)) {
		stop("Unsure how to apply Tukey to this design")
	}
	terms <- colnames(X)
	if(fit$intercept) {
		terms[1] <- "Intercept"
	}
	f <- fit$x
	means <- fit$coefficients
	if(fit$intercept) {
		# if intercept then add the intercept to all of the other
		# rows so that each row is now a term mean
		for(i in 2:nrow(means)) {
			means[i, ] <- means[i, ] + means[1, ]
		}
	}
	flevels <- levels(f)
	nn <- table(f)
	df.residual <- fit$df.residual
	MSE <- fit$dispersion
	# all pairwise combinations of group indices (columns of 'pares')
	pares <- combn(1:nrow(means), 2)
	# pairwise mean differences (second minus first), one row per pair
	center <- t(apply(pares, 2, function(x) means[x[2], ]-means[x[1], ]))
	# 1/n_i + 1/n_j for each pair
	onn <- apply(pares, 2, function(x) sum(1/nn[x]))
	# Tukey standard error: sqrt(MSE/2 * (1/n_i + 1/n_j)), per pair x model
	SE <- t(sqrt(matrix(MSE/2) %*% matrix(onn, 1)))
	# half-width of the 95% simultaneous confidence interval
	width <- qtukey(0.95, nrow(means), df.residual) * SE
	est <- center/SE
	# p-values from the studentized range distribution
	pval <- ptukey(abs(est), nrow(means), df.residual, lower.tail=FALSE)
	# setup condition comparison labels ("B-A" style, second level first)
	lab0 <- apply(combn(1:length(flevels), 2), 2, function(x) flevels[rev(x)])
	lab <- apply(lab0, 2, function(x) paste(x, collapse="-"))
	# setup variable labels
	vid <- colnames(fit$coefficients)
	if(is.null(vid)) {
		vid <- as.character(1:ncol(fit$coefficients))
	}
	# setup 95% ci boundaries
	lower <- center - width
	upper <- center + width
	# build a list of tables - one for each comparison
	lres <- vector(mode="list", length=length(lab))
	names(lres) <- lab
	for(i in 1:length(lab)) {
		df <- data.frame(id=vid, change=center[i, ], lower=lower[i, ],
			upper=upper[i, ], pval=pval[i, ], stringsAsFactors=FALSE)
		names(df)[2] <- paste(lab[i], "change", sep=".")
		lres[[i]] <- df
	}
	# build a list with everything: per-pair tables plus the raw matrices
	lout <- list(results=lres, change=center, lower=lower, upper=upper, pval=pval)
	return(lout)
}
#' mols.coefs
#'
#' Multiple ordinary least squares coefficients. Used interally by
#' \link{mwlsr} to compute coefficients without weights.
#'
#' @param x Design matrix
#' @param y Response matrix
#' @param method Coefficient calculation method. \code{chol} is the fastest
#' and \code{svd} is said to be the most reliable but maybe the slowest.
#' @param tol Tolerance setting for the \code{svd} method.
#' @return Matrix of fit coefficients.
#' @importFrom MASS ginv
#' @export
mols.coefs <- function(x, y, method=c("chol", "ginv", "svd", "qr"), tol=1e-7) {
  # Solve b = (X'X)^-1 X'y for all response columns at once, using the
  # requested decomposition strategy.
  method <- match.arg(method)
  if (!inherits(x, "matrix")) {
    stop("Expected x to be a design matrix such as the output of model.matrix")
  }
  if (!inherits(y, "matrix")) {
    y <- matrix(y)
  }
  if (nrow(x) != nrow(y)) {
    stop("response dimension doesn't match design")
  }
  coefs <- switch(method,
    qr = qr.solve(x, y),
    svd = {
      # SVD route: tolerant of rank deficiency. Only singular values
      # above 'tol' are inverted; weaker directions are dropped.
      s <- svd(x)
      r <- max(which(s$d > tol))
      v1 <- s$v[, 1:r]
      sr <- diag(s$d[1:r])
      u1 <- s$u[, 1:r]
      v1 %*% (ginv(sr) %*% t(u1) %*% y)
    },
    {
      # "chol" (fastest) or "ginv" (more forgiving than solve) on the
      # cross-product matrix.
      sccm <- crossprod(x)
      xinv <- if (method == "chol") chol2inv(chol(sccm)) else ginv(sccm)
      xinv %*% t(x) %*% y
    })
  # label coefficients by design column and response column
  colnames(coefs) <- colnames(y)
  rownames(coefs) <- colnames(x)
  coefs
}
#' wls.coefs
#'
#' Calculates coefficients for a single response with weights.
#'
#' @param x Design matrix
#' @param y Response vector
#' @param weights Weights vector.
#' @param method Coefficient calculation method. See \link{mols.coefs}.
#' @param tol Tolerance for \code{svd} coefficient method.
#' @return Vector of fit coefficients
#' @importFrom MASS ginv
#' @export
wls.coefs <- function(x, y, weights=NULL, method=c("chol", "ginv", "svd", "qr"), tol=1e-7) {
  # Weighted least squares for a single response: pre-multiply the design
  # and response by sqrt(W), then solve as ordinary least squares.
  method <- match.arg(method)
  if (!inherits(x, "matrix")) {
    stop("Expected x to be a design matrix such as the output of model.matrix")
  }
  if (!inherits(y, "matrix")) {
    y <- matrix(y)
  }
  if (ncol(y) > 1) {
    stop("y matrix must only be a single response")
  }
  n.obs <- nrow(y)
  if (nrow(x) != n.obs) {
    stop("response dimension doesn't match design")
  }
  # Weights are used as supplied (no normalization here); omitted weights
  # become an identity matrix so the math below is uniform.
  if (!missing(weights)) {
    weights <- diag(drop(weights))
    if (ncol(weights) != n.obs) {
      stop("weight dimension doesn't match response")
    }
  } else {
    weights <- diag(rep(1, nrow(y)))
  }
  wt <- weights^0.5
  xhat <- wt %*% x
  yhat <- wt %*% y
  coefs <- switch(method,
    qr = qr.solve(xhat, yhat),
    svd = {
      # rank-tolerant solve: invert only singular values above 'tol'
      s <- svd(xhat)
      r <- max(which(s$d > tol))
      v1 <- s$v[, 1:r]
      sr <- diag(s$d[1:r])
      u1 <- s$u[, 1:r]
      v1 %*% (ginv(sr) %*% t(u1) %*% yhat)
    },
    {
      # "chol" (fastest) or "ginv" pseudo-inverse on the cross-product
      sccm <- crossprod(xhat)
      xinv <- if (method == "chol") chol2inv(chol(sccm)) else ginv(sccm)
      xinv %*% t(xhat) %*% yhat
    })
  coefs <- drop(coefs)
  names(coefs) <- colnames(x)
  return(coefs)
}
#
# turns a design matrix into a factor vector if the design matrix was
# based on a factor type model. it just fails otherwise.
#' mwlsr.design2factor
#'
#' Derives a single multi-level factor vector from a design matrix.
#' This would produce senseless results for regression models.
#'
#' @param X Design matrix
#' @export
mwlsr.design2factor <- function(X) {
  # Only 0/1 indicator designs can be mapped back to a factor vector.
  if (!all(X == 0 | X == 1)) {
    return(NULL)
  }
  fac.levels <- colnames(X)
  fac <- character(nrow(X))
  if (sum(X[, 1]) == nrow(X)) {
    # All-ones first column means an intercept: rows not flagged by any
    # later column stay at the baseline level, labelled "level0".
    fac[] <- "level0"
    for (j in 2:ncol(X)) {
      fac[X[, j] == 1] <- fac.levels[j]
    }
  } else {
    # No intercept: each column directly names its member rows.
    for (j in seq_len(ncol(X))) {
      fac[X[, j] == 1] <- fac.levels[j]
    }
  }
  return(factor(fac))
}
#' mwlsr.LRT
#'
#' Perform likelihood ratio test (LRT) between two models (typically a
#' full model and a reduced model). Using an F based LRT on a full
#' model compared to an intercept-only model should give the same
#' results as \link{mwlsr.Ftest}.
#'
#' @param full.m Full model mwlsr object
#' @param reduced.m Reduced model mwlsr object
#' @param test Type of test to perform. For gaussian models, which is
#' all that mwlsr can do, you should use the F-test.
#'
#' @return data.frame with results of the test for all models.
#'
#' @export
mwlsr.LRT <- function(full.m, reduced.m, test=c("F", "LRT")) {
  test <- match.arg(test)
  # Order the two fits by residual df so the full model (fewer residual
  # df) is identified correctly regardless of argument order.
  models <- list(full.m, reduced.m)
  ord <- order(vapply(models, function(m) m$df.residual, numeric(1)))
  full.m <- models[[ord[1]]]
  reduced.m <- models[[ord[2]]]
  # Variable labels from the full model's coefficient columns.
  varnames <- colnames(full.m$coefficients)
  if (is.null(varnames)) {
    varnames <- as.character(seq_len(ncol(full.m$coefficients)))
  }
  # Deviance explained by the extra terms, tested on df.test degrees of
  # freedom against the full model's dispersion.
  deviance <- reduced.m$deviance - full.m$deviance
  df.test <- reduced.m$df.residual - full.m$df.residual
  dispersion <- full.m$deviance / full.m$df.residual
  if (test == "F") {
    LR <- (deviance / df.test) / dispersion
    pval <- pf(LR, df.test, full.m$df.residual, lower.tail = FALSE)
  } else {
    LR <- deviance / dispersion
    pval <- pchisq(LR, df.test, lower.tail = FALSE)
    # edgeR defines the LR as what I have as deviance in this code. their
    # p-value looks like this:
    # pval <- pchisq(deviance, df.test, lower.tail=FALSE)
  }
  data.frame(variable = varnames, deviance = deviance,
    df = rep(df.test, length(deviance)),
    LR = LR, pval = pval, padj = p.adjust(pval, method = "BH"),
    stringsAsFactors = FALSE)
}
#' mwlsr.contrastCoefficients
#'
#' Calculate the coefficients of a contrast fit.
#'
#' @param fit mwlsr fit object
#' @param contrast Single-column contrast matrix
#' @return mwlsr fit object with results appended
#'
#' @export
mwlsr.contrastCoefficients <- function(fit, contrast) {
  contrast0 <- contrast
  cvec <- drop(contrast)
  # Split the contrast into its positive side and the magnitude of its
  # negative side; each side collapses the coefficients with positive
  # weights.
  wt.pos <- pmax(cvec, 0)
  wt.neg <- pmax(-cvec, 0)
  coef.pos <- drop(matrix(wt.pos, 1) %*% fit$coefficients)
  coef.neg <- drop(matrix(wt.neg, 1) %*% fit$coefficients)
  cout <- rbind(coef.neg, coef.pos)
  colnames(cout) <- colnames(fit$coefficients)
  # Row labels: colon-joined names of the levels on each side.
  names.pos <- paste(rownames(fit$coefficients)[which(wt.pos != 0)], collapse=":")
  names.neg <- paste(rownames(fit$coefficients)[which(wt.neg != 0)], collapse=":")
  rownames(cout) <- c(names.neg, names.pos)
  fit$contrast <- contrast0
  fit$contrasts.coefs <- cout
  return(fit)
}
#' mwlsr.makeContrast
#'
#' Create a contrast matrix for computing statistical difference
#' in means between levels of a design. For example if your model
#' is based on a single multi-level factor and you want to compare
#' the average values of one level verses another within the context
#' of the full model then you'd specify those levels in this function
#' and then call on \link{mwlsr.contrastTest} to perform the test.
#'
#' @param y First level or levels to contrast.
#' @param lvls Either the factor levels or a full length factor vector or
#' the design matrix from the mwlsr object or the mwlsr object itself.
#' @param x Factor level or levels to contrast against those specified in
#' \code{y}. If this is omitted then the level or levels specified in
#' \code{y} are contrasted against all other levels.
#' @return Single-column matrix specifying the contrast
#' @details Levels specified for \code{y} or \code{x} must match levels
#' in the design matrix associated with the model you plan to perform
#' the contrast test within.
#'
#' @export
mwlsr.makeContrast <- function(y, lvls, x=NULL) {
  # resolve 'lvls' into a character vector of design level names
  if(inherits(lvls, "mwlsr")) {
    lvls <- colnames(lvls$X)
  } else if(inherits(lvls, "factor")) {
    # BUGFIX: was 'levels(factor)' (the base function), not 'levels(lvls)',
    # which broke the factor-input branch entirely
    lvls <- levels(lvls)
  } else if(inherits(lvls, "matrix")) {
    # assuming this is a design matrix
    lvls <- colnames(lvls)
  } else if(is.null(dim(lvls)) & length(lvls) > 0) {
    lvls <- levels(factor(lvls))
  }
  # nvalid <- lvls != make.names(lvls)
  # if(any(nvalid)) {
  # 	stop("The levels must be valid names (use make.names)")
  # }
  tmp <- c(y, x)
  if(!all(tmp %in% lvls))
    stop("one or more of the specified levels for your contrast are not in the design")
  # indices of the 'y' (positive) side
  ny <- length(y)
  nyidx <- match(y, lvls)
  idx <- 1:length(lvls)
  if(!is.null(x)) {
    # explicit 'x' (negative) side
    nx <- length(x)
    nxidx <- match(x, lvls)
  } else {
    # default: contrast 'y' against all remaining levels
    nx <- length(lvls)-ny
    nxidx <- idx[-nyidx]
  }
  # each side's weights sum to +1 / -1 respectively
  ci <- rep(0, length(lvls))
  ci[nyidx] <- 1/ny
  ci[nxidx] <- -1/nx
  # build a human-readable contrast string, positive side first
  o <- order(ci, decreasing=TRUE)
  ci.tmp <- ci[o]
  lvls.tmp <- lvls[o]
  i <- ci.tmp != 0
  sz <- paste(paste(round(ci.tmp[i], 4), lvls.tmp[i], sep="*"), collapse=" ")
  ci <- matrix(ci, dimnames=list(Levels=lvls, Contrast="coefs"))
  attr(ci, "contrast") <- sz
  attr(ci, "levels") <- lvls
  return(ci)
}
# Convert a vector of p-values to star annotations:
#   p > 0.1         -> "N.S." (or "*" if p < 0.05, which cannot happen here)
#   0 < p <= 0.1    -> floor(-log10(p)) stars, capped at max.stars
#   p == 0          -> max.stars + 1 stars ("below numeric precision")
pval2stars <- function(p, max.stars=4) {
  psig <- p < 0.05
  rres <- ifelse(psig, "*", "N.S.")
  if(any(p==0)) {
    # p == 0 gets one star more than the cap to flag "smaller than representable"
    rres[p==0] <- paste(rep("*", max.stars+1), collapse="")
  }
  # BUG FIX: exclude p == 0 here. Previously p == 0 satisfied p <= 0.1, so
  # the loop below re-assigned min(max.stars, -log10(0)=Inf) = max.stars
  # stars, clobbering the special max.stars+1 marker set above.
  mm <- p <= 0.1 & p > 0
  plog <- -log10(p)
  if(any(mm)) {
    for(i in which(mm)) {
      # rep() truncates a fractional count, so this floors -log10(p)
      rres[i] <- paste(rep("*", min(c(max.stars, plog[i]))), collapse="")
    }
  }
  return(rres)
}
|
93824d8ef51806d960d48270a373ca8d9e4ec26b
|
a56d8e3fda8c1f94e96ae34a84a660856df9af75
|
/src/fun_findCommonSubjects.R
|
710ea818d6a270173e7c75bae39d9c9ea9a9180a
|
[] |
no_license
|
lchonghua/statistical_analysis
|
701a53a95b002c501d41e78fd746ce9f0c2c4c1d
|
6617c3215dd4a9a92bfc5c9b5c0e11bcbbd6385b
|
refs/heads/master
| 2020-12-02T21:00:20.431030
| 2017-07-04T18:20:55
| 2017-07-04T18:20:55
| 96,243,269
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 658
|
r
|
fun_findCommonSubjects.R
|
# Return the subject IDs present in all three path lists produced by
# getFilePaths (e.g. cd_pp_files, nd_pp_files, cd_stm_files). The subject ID
# is the second "/"-separated component of each path.
findCommonSubjects = function(pathList1, pathList2, pathList3){
  # pull the subject ID (second path component) out of each file path
  extract_ids <- function(paths) {
    vapply(strsplit(paths, "/"), `[`, character(1), 2)
  }
  ids1 <- extract_ids(pathList1)
  ids2 <- extract_ids(pathList2)
  ids3 <- extract_ids(pathList3)
  # report the per-list subject counts before intersecting
  print("ss nd stm subject counts:")
  print(c(length(ids1), length(ids2), length(ids3)))
  Reduce(intersect, list(ids1, ids2, ids3))
}
|
9bc741359f1333c8373632a5825dbacd24edec84
|
c521ef367302aebb5636c23f5b33fd6c300d8193
|
/ECG/BigData2_P2_3_HR_100-master/HR110_chulma/HR112_chulma_fileMerge_bk.R
|
7dbfb6185fed64d634ab3cf558cbba624ec3e1ad
|
[] |
no_license
|
swjo207/r-practice
|
d2ee7cc579dcaac5da9e2026d2afa478ca791cfe
|
a3477c7d61960a86175209dd73030cff18fa2c8e
|
refs/heads/master
| 2021-01-10T21:14:11.157477
| 2015-08-03T08:47:59
| 2015-08-03T08:47:59
| 38,350,375
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 975
|
r
|
HR112_chulma_fileMerge_bk.R
|
# Merge the per-file CP949-encoded CSV exports under ./chulma/busan/<year>
# into one file per year, written to ./chulma/chulma_busan_<year>.txt.
#
# Rewritten from four copy-pasted blocks that setwd()'d into each year's
# directory and back; paths are now built relative to the original working
# directory, so the script no longer mutates global state.
for (year in 2011:2014) {
  src_dir <- file.path(".", "chulma", "busan", year)
  # full.names = TRUE so read.csv gets paths relative to the current wd
  csv_files <- list.files(path = src_dir, full.names = TRUE)
  merged <- do.call("rbind", lapply(csv_files, read.csv, fileEncoding = "CP949"))
  # The original wrote to "../../chulma_busan_<year>.txt" from inside the
  # year directory, i.e. into ./chulma/ — same destination here.
  write.csv(merged, file.path(".", "chulma", paste0("chulma_busan_", year, ".txt")))
}
|
6c3043a0332c22abb15b1963fcbf7a69efb50ed4
|
0e9ccd33792d2199788ab197bfaa52273c89ae37
|
/man/print.Rd
|
c719fdc826de52deb4cffd26253274cdc3f3ba61
|
[] |
no_license
|
jaredhuling/bigFastlm
|
83e6dcef58d90afd12843db469b568c673ce9d33
|
cdf0f099454733e0336f72564c1efd8d6e98ef74
|
refs/heads/master
| 2021-01-19T10:36:33.839448
| 2017-07-14T15:09:38
| 2017-07-14T15:09:38
| 60,560,799
| 13
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 436
|
rd
|
print.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fast_big_lm.R
\name{print.bigLm}
\alias{print.bigLm}
\alias{print.summary.bigLm}
\title{print method for bigLm objects}
\usage{
\method{print}{bigLm}(x, ...)
\method{print}{summary.bigLm}(x, ...)
}
\arguments{
\item{x}{a "bigLm" or "summary.bigLm" object}
\item{...}{not used}
}
\description{
print method for bigLm objects
print method for summary.bigLm objects
}
|
40447b84b41534231fe13c834b1d2c15563ab64e
|
95e4719f4a30797093131bed398f47424112314c
|
/plot2.R
|
dae4aeb8c2ccc1727be5923eb89ad1cc8c7ac90e
|
[] |
no_license
|
andreakreif/ExData_Plotting1
|
fb12539f76028f982d14083faa1eb3bf25d38ba0
|
1069a8985075bf623460c98555dfafc33b93346e
|
refs/heads/master
| 2021-01-18T07:19:52.159131
| 2015-11-08T01:27:52
| 2015-11-08T01:27:52
| 31,631,509
| 0
| 0
| null | 2015-03-04T01:34:03
| 2015-03-04T01:34:03
| null |
UTF-8
|
R
| false
| false
| 698
|
r
|
plot2.R
|
## Plot2.R
## Andrea Reif 11/7/2015
## Exploratory Data Analysis - Course Project 1
##Read in data assuming file is in current directory
## "?" encodes missing values in the source file; all measurement columns
## are read as numeric, Date/Time as character for later parsing.
fname <- "household_power_consumption.txt"
dat <- read.csv(file=fname, na.strings="?", header=TRUE, sep=";",colClasses=c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
## Keep only the two analysis days (dates are d/m/Y in the source file).
dat <- subset(dat, Date =='1/2/2007' | Date =='2/2/2007')
## Combine Date + Time into a POSIXlt timestamp for the x axis.
dat$DateTime <- with(dat,strptime(paste(Date,Time,sep=' '),"%d/%m/%Y %H:%M:%S"))
##Create plot in png file
png(file="plot2.png")
plot(dat$DateTime,dat$Global_active_power,type="l",xlab="", ylab="Global Active Power (kilowatts)")
## NOTE(review): 'at' expects axis coordinates, but a logical vector is
## passed here (coerced to 0/1 positions) — likely not the intent; confirm
## whether this axis() call is needed at all.
axis(side=1,at=dat$Time=="00:00:00")
dev.off()
|
93988f56b0b99e27f34414c473805a7d5bd7d570
|
6f924da7cf09dd3fe38502fee0641b981faf4a81
|
/man/get_single_stock.Rd
|
6d1e99ddec80a5642da4de095a7a3fb8d9a72389
|
[] |
no_license
|
muiPomeranian/private_Rpackage
|
1ce3e3c814df14a56625b894b5d3b5d989a7188a
|
07150ba8a2de331978c89a5cf61459017666b816
|
refs/heads/master
| 2022-01-24T01:34:36.665504
| 2019-05-16T20:57:04
| 2019-05-16T20:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 639
|
rd
|
get_single_stock.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_single_stock.R
\name{get_single_stock}
\alias{get_single_stock}
\title{get the single stock from source(only closing price information)}
\usage{
get_single_stock(start, end, ticker_name, source_name)
}
\value{
closestockprice
}
\description{
get the single stock from source(only closing price information)
}
\details{
this function gets the <<single>> stock information from the source; by default it retrieves the closing price. You can customize the source (where to get the data)
}
\examples{
stock_1 = get_single_stock('2001-03-12','2003-04-22','AAPL','yahoo')
}
|
19729dd59c71fd72bbb832a0a26bc66035683e65
|
de787dcdd023016489eb211030c97126027baa6b
|
/Modules/centralesRiesgoServer.R
|
d646427c5af721054b86f7c3b3fb71264b1ccfc7
|
[] |
no_license
|
juanvf-dann/centralesRiesgoK2
|
63b4d8aa0001b7a0c26a2172bd0b46c92c2cb873
|
2d114b14844e34d08eaf9818ef2c6e87d81f0b73
|
refs/heads/master
| 2023-03-26T09:22:57.867287
| 2021-03-26T16:55:51
| 2021-03-26T16:55:51
| 351,853,298
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,160
|
r
|
centralesRiesgoServer.R
|
# MODULE UI ----
# Choices shown in the "Variable" selector rendered by variablesServer below.
LHSchoices <- c("X1", "X2", "X3", "X4")
#------------------------------------------------------------------------------#
# MODULE SERVER ----
# MODULE SERVER ----
# Shiny module server: renders the namespaced "variable" selector, "value"
# numeric input, and "NuevaEntidad" entity picker into the module's outputs.
# Relies on the global LHSchoices and tabla.entidades (defined elsewhere —
# TODO confirm tabla.entidades is loaded before this module runs).
variablesServer <- function(input, output, session){
  ns <- session$ns
  output$variable <- renderUI({
    selectInput(
      inputId = ns("variable"),
      # strsplit(ns(""), "-") recovers the module id for the label text
      label = paste0("Variable ", strsplit(x = ns(""), split = "-")),
      choices = c("Choose" = "", LHSchoices)
    )
  })
  output$value <- renderUI({
    numericInput(
      inputId = ns('value'),
      label = paste0("Value ", strsplit(x = ns(""), split = "-")),
      value = NULL
    )
  })
  output$NuevaEntidad <- renderUI({
    pickerInput(
      # BUG FIX: was ns(NuevaEntidad) — an unquoted symbol that would throw
      # "object 'NuevaEntidad' not found" at render time.
      inputId = ns("NuevaEntidad"),
      label = "Entidad",
      choices = tabla.entidades$ENTIDAD,
      selected = NULL,
      options = pickerOptions(
        liveSearch = TRUE,
        dropdownAlignRight = FALSE
      )
    )
  })
}
# UI fragment for one dynamically inserted "Entidad" box. Relies on 'id_add'
# (instance counter) and 'remove_id' (remove-button id) being in scope when
# evaluated — presumably inside an insertUI handler; TODO confirm.
fluidRow(
  box(status = "primary",
      width = "65%",
      collapsible = TRUE,
      solidHeader = TRUE,
      # BUG FIX: a comma was missing after this 'title' argument, which made
      # the whole expression a parse error.
      title = p(paste0("Entidad #", id_add), tags$span(" "),
                actionButton(remove_id, "Remove", icon = icon("trash-alt"), class = "btn-xs", title = "Update")
      ),
      # selectInput(paste0("clienteConsulta_NuevaEntidad_", id_add), "Entidad",
      #             c("Option 1", "Option 2", "Option 3")),
      # Numeric exposure facts for this entity.
      flowLayout(
        numericInput(paste0("clienteConsulta_CupoAprobado_", id_add), "Cupo aprobado", value = NA, min = 0),
        numericInput(paste0("clienteConsulta_SaldoActual_", id_add), "Saldo actual", value = NA, min = 0),
        numericInput(paste0("clienteConsulta_score_", id_add), "Score", value = NA, width = "50%", min = 0, max = 1000),
        numericInput(paste0("clienteConsulta_valorGarantias_", id_add), "valor garantías", value = NA, min = 0),
        numericInput(paste0("clienteConsulta_SaldoDann_", id_add), "Saldo en Dann Regional", value = NA, min = 0)
      ),
      # Credit-rating button groups: two previous quarters and the current one.
      radioGroupButtons(
        inputId = paste0("clienteConsulta_CCATII_", id_add),
        label = "Calificacón Trimestre II",
        choices = c("AA", "A", "BB", "B", "CC", "C", "INC"),
        status = "primary",
        justified = TRUE,
        checkIcon = list(yes = icon("ok", lib = "glyphicon"))
      ),
      radioGroupButtons(
        inputId = paste0("clienteConsulta_CCATI_", id_add),
        label = "Calificacón Trimestre I",
        choices = c("AA", "A", "BB", "B", "CC", "C", "INC"),
        status = "primary",
        justified = TRUE,
        checkIcon = list(yes = icon("ok", lib = "glyphicon"))
      ),
      radioGroupButtons(
        inputId = paste0("clienteConsulta_CCAActual_", id_add),
        label = "Calificacón Actual",
        choices = c("AA", "A", "BB", "B", "CC", "C", "INC"),
        status = "primary",
        justified = TRUE,
        checkIcon = list(yes = icon("ok", lib = "glyphicon"))
      ),
      # Yes/no toggles for write-off history as debtor / co-debtor.
      flowLayout(
        div(
          strong(p("Castigado como deudor")),
          div(style = "text-align:center;",
              prettyToggle(
                inputId = paste0("clienteConsulta_KDeudor_", id_add),
                label_on = "Sí",
                icon_on = icon("exclamation-triangle"),
                status_on = "danger",
                status_off = "success",
                label_off = "No",
                icon_off = icon("check"),
                shape = c("round"),
                bigger = TRUE
              )
          )
        ),
        div(
          strong(p("Castigado como codeudor")),
          div(style = "text-align:center;",
              prettyToggle(
                inputId = paste0("clienteConsulta_KCodeudor_", id_add),
                label_on = "Sí",
                icon_on = icon("exclamation-triangle"),
                status_on = "danger",
                status_off = "success",
                label_off = "No",
                icon_off = icon("check"),
                shape = c("round"),
                bigger = TRUE
              )
          )
        )
      )
      #
      # textInput(paste0("TXT_",id_add),"Texto" ),
      # textInput(paste0("saldo_",id_add),"Saldo" ),
  )
)
|
17332fb26126c33c7f96421390cc9e64d386902f
|
3d24b0a5e379b8a5a7bde1bba3780f2ee354de8e
|
/ui.R
|
6369200aa6343081d64abd26e1adfa4fa0c60298
|
[
"MIT"
] |
permissive
|
rivas-lab/gbe-power-app
|
702fdf73a9e379a3c77b33ea461a41f07880ae0e
|
3fb68d82a2d536523094c7fbf27256c9d20b0698
|
refs/heads/master
| 2021-03-27T11:58:34.611238
| 2018-03-24T04:49:10
| 2018-03-24T04:49:10
| 97,519,262
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,154
|
r
|
ui.R
|
library(shiny)
library(shinythemes)
# Gene symbols for the selector, loaded once when the app starts.
genes = as.character(read.table("data/shiny_genes.tsv",
header=FALSE)$V1)
# UI: one panel with the study-design inputs for the PTV association power
# calculation on the left, and plot/table output tabs on the right.
shinyUI(
bootstrapPage(
tabPanel("PTV Power",
sidebarLayout(
sidebarPanel(
selectInput("gene", "Gene", choices=genes, selectize=TRUE, selected="PCSK9"),
sliderInput("pD", "Disease prevalence", value=0.1, min=0, max=1, step=0.01),
numericInput("RRAa", "Heterozygous relative risk", value=2, min=1),
numericInput("nCase", "Number of cases", value=25000, min=0),
numericInput("nControl", "Number of controls", value=25000, min=0),
numericInput("alpha", "Type I error rate", value=2e-6, min=0, max=1),
checkboxInput("unselected", label = "Unselected controls", value = FALSE)
),
mainPanel(
#h3("PTV Association Power", style = "font-size: 32px;"),
#HTML("<p>Details"),
tabsetPanel(
tabPanel("Plot", plotOutput("plot.gene")),
tabPanel("Table", tableOutput("table.gene"))
#tabPanel("Design Summary Table", tableOutput("table.summary"))
)
)
)
)
)
)
|
606a5b9fc57f99f89f04a2b22d9a92a710f69dac
|
1230124bdac62ce04a7f62d43adca3452881edb1
|
/examples/whole_blood/genrerate_cor_medecom_and_refernce_datasets.R
|
4fcf94731b0235bc27519f4f243855b348d1036c
|
[] |
no_license
|
lutsik/DecompPipeline
|
732639b94cf5766f28489ee759a79cbd7a3c7789
|
e38e3311391d8afc4fe035cfdf71760e5390e001
|
refs/heads/master
| 2020-09-16T00:22:46.675862
| 2019-10-11T13:57:26
| 2019-10-11T13:57:26
| 89,242,755
| 1
| 1
| null | 2019-04-10T14:58:46
| 2017-04-24T13:22:28
|
R
|
UTF-8
|
R
| false
| false
| 1,325
|
r
|
genrerate_cor_medecom_and_refernce_datasets.R
|
suppressPackageStartupMessages(library(RnBeads))
library(MeDeCom)
library(pheatmap)
########## working directory /sctrach/divanshu/Correlation
# Compare a MeDeCom component matrix (Tmed) against a reference methylation
# matrix built from the generated "healthy" data set, on the 25k most
# variable CpGs of the filtered RnBeads set.
filteredrnb.set <- readRDS("./filteredrnb.set.rds")
md.res <- readRDS("./medecomoutk_5_15.rds")
a <- md.res@outputs[[1]]
Tmed <- a$T
meth.data <- meth(filteredrnb.set, row.names = TRUE)
# keep the 25,000 most variable CpGs (by per-row standard deviation)
sds <- apply(meth.data, 1, sd)
sortedsdsrownumb <- order(sds, decreasing = TRUE)
selectedmeth.data <- meth.data[sortedsdsrownumb[1:25000], ]
rnames <- rownames(selectedmeth.data)
# restrict the generated (healthy) data set to the same CpGs
generatedrnb.set <- readRDS("./completehealtyset.rds")
generatedmeth.data <- meth(generatedrnb.set, row.names = TRUE)
selectedfromgenerated <- generatedmeth.data[rnames, ]
ph <- pheno(generatedrnb.set)
# one reference column per cell type: mean methylation across its samples
a <- unique(ph$celltype)
Trefofhealthy <- matrix(data = 0, nrow = 25000, ncol = length(a))
colnames(Trefofhealthy) <- a
for (i in seq_along(a)) {
  b <- which(ph$celltype == a[i])
  if (length(b) >= 2)
    Trefofhealthy[, i] <- rowMeans(selectedfromgenerated[, b], na.rm = TRUE)
  else
    Trefofhealthy[, i] <- selectedfromgenerated[, b]
}
# BUG FIX: 'numb' was previously assigned AFTER its first use in the
# components.heatmap() call below, which failed with "object 'numb' not
# found"; the assignment now precedes both uses.
numb <- 44
MeDeCom:::components.heatmap(Tmed[[numb]], Trefofhealthy, centered = TRUE)
cormatbw_Tmedecom_and_Trefofhealthy <- cor(Trefofhealthy, Tmed[[numb]], use = "complete.obs")
png('try till success part 2 .png')
pheatmap(as.matrix(cormatbw_Tmedecom_and_Trefofhealthy))
dev.off()
|
e72091bbcf6cff5ed55b45c099db7e66af9b6d36
|
94104c24f6a879ec7135aae2d27c86270ed329ed
|
/Plot2.R
|
ae7fd59f639d07e93f8e2504d00cd029c5f1acec
|
[] |
no_license
|
u04cv12/ExData_Plotting1
|
77d9f3c6d82ea7584d2adb541e78ec79a5a76712
|
767b8cb37b41b15f9203a456a1a79774642e8784
|
refs/heads/master
| 2020-12-11T03:51:40.233532
| 2014-12-07T23:07:54
| 2014-12-07T23:07:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 442
|
r
|
Plot2.R
|
# Plot 2: line plot of global active power (kW) over the two analysis days.
# "?" encodes missing values in the source file.
power.data<-read.delim("household_power_consumption.txt",header=TRUE,sep=";",na.strings="?")
# keep only 1-2 Feb 2007 (dates are d/m/Y in the source file)
power.subset <- subset(power.data, Date == "1/2/2007" | Date == "2/2/2007")
# combine Date + Time into a POSIXlt timestamp for the x axis
datetime <- strptime(paste(power.subset$Date, power.subset$Time), format="%d/%m/%Y %H:%M:%S")
png(filename="Plot2.png", width=480, height=480)
plot(datetime, power.subset[,"Global_active_power"], type="l",ylab="Global Active Power (kilowatts)", xlab="", main="")
dev.off()
|
c41778fd9e094bb58a2fb4105fb9b599c85594ec
|
36ed93e0ab7767d73262bd38374d97e549f0b5f1
|
/man/DM.Rpart.Rd
|
078715f8a1b9088adfca6a2e90c01707788ff1eb
|
[] |
no_license
|
cran/HMP
|
16678dbeb20fdda6fcf5422a7047c3c89f4af0ce
|
30dadecea268319438aeb41a23441535624d247c
|
refs/heads/master
| 2021-01-21T01:53:01.212756
| 2019-08-31T10:00:06
| 2019-08-31T10:00:06
| 17,679,740
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,819
|
rd
|
DM.Rpart.Rd
|
\name{DM.Rpart}
\alias{DM.Rpart}
\alias{DM.Rpart.Base}
\alias{DM.Rpart.CV}
\alias{DM.Rpart.CV.Consensus}
\title{Dirichlet-Multinomial RPart}
\description{
This function combines recursive partitioning and the Dirichlet-Multinomial distribution to identify homogeneous
subgroups of microbiome taxa count data.
}
\usage{DM.Rpart(data, covars, plot = TRUE, minsplit = 1, minbucket = 1, cp = 0, numCV = 10,
numCon = 100, parallel = FALSE, cores = 3, use1SE = FALSE, lowerSE = TRUE)}
\arguments{
\item{data}{A matrix of taxonomic counts(columns) for each sample(rows).}
\item{covars}{A matrix of covariates(columns) for each sample(rows).}
\item{plot}{When 'TRUE' a tree plot of the results will be generated.}
\item{minsplit}{The minimum number of observations to split on, see \link[rpart]{rpart.control}.}
\item{minbucket}{The minimum number of observations in any terminal node, see \link[rpart]{rpart.control}.}
\item{cp}{The complexity parameter, see \link[rpart]{rpart.control}.}
\item{numCV}{The number folds for a k-fold cross validation. A value less than 2 will return the rpart result without any cross validation.}
\item{numCon}{The number of cross validations to repeat to achieve a consensus solution.}
\item{parallel}{When this is 'TRUE' it allows for parallel calculation of consensus. Requires the package \code{doParallel}.}
\item{cores}{The number of parallel processes to run if parallel is 'TRUE'.}
\item{use1SE}{See details.}
\item{lowerSE}{See details.}
}
\value{
The 3 main things returned are:
\item{fullTree}{An rpart object without any pruning.}
\item{bestTree}{A pruned rpart object based on use1SE and lowerSE's settings.}
\item{cpTable}{Information about the fullTree rpart object and how it splits.}
The other variables returned include surrogate/competing splits, error rates and a plot of the bestTree if plot is TRUE.
}
\details{
There are 3 ways to run this function. The first is setting numCV to less than 2, which will run rpart once
using the DM distribution and the specified minsplit, minbucket and cp. This result will not have any kind
of branch pruning and the objects returned 'fullTree' and 'bestTree' will be the same.
The second way is setting numCV to 2 or greater (we recommend 10) and setting numCon to less than 2. This will
run rpart several times using a k-fold cross validation to prune the tree to its optimal size. This is the best method to use.
The third way is setting both numCV and numCon to 2 or greater (We recommend at least 100 for numCon). This will
repeat the second way numCon times and build a consensus solution. This method is ONLY needed for low sample sizes.
When the argument 'use1SE' is 'FALSE', the returned object 'bestTree' is the pruned tree with the lowest MSE.
When it is 'TRUE', 'bestTree' is either the biggest pruned tree (lowerSE = FALSE) or the smallest pruned tree (lowerSE = TRUE),
that is within 1 standard error of the lowest MSE.
}
\examples{
data(saliva)
data(throat)
data(tonsils)
### Create some covariates for our data set
site <- c(rep("Saliva", nrow(saliva)), rep("Throat", nrow(throat)),
rep("Tonsils", nrow(tonsils)))
covars <- data.frame(Group=site)
### Combine our data into a single object
data <- rbind(saliva, throat, tonsils)
### For a single rpart tree
numCV <- 0
numCon <- 0
rpartRes <- DM.Rpart(data, covars, numCV=numCV, numCon=numCon)
\dontrun{
### For a cross validated rpart tree
numCon <- 0
rpartRes <- DM.Rpart(data, covars, numCon=numCon)
### For a cross validated rpart tree with consensus
numCon <- 2 # Note this is set to 2 for speed and should be at least 100
rpartRes <- DM.Rpart(data, covars, numCon=numCon)
}
}
|
613aa79d2f1d7ea3d75a6be8793d5656ee535294
|
9ef9472ccc308406a4693ad6756a37b86593ccaa
|
/man/scatterplots.Rd
|
97f497ae92075c30461afd36b934b497c55dfff1
|
[] |
no_license
|
tsekara/scatterplots
|
3b8b6031c32fce98edb814d7a7cc99e3bb0c744e
|
31a62055b205225b503a0ba4d32551ddfad765cc
|
refs/heads/master
| 2020-06-24T15:40:02.607428
| 2019-07-26T11:38:30
| 2019-07-26T11:38:30
| 199,003,846
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 375
|
rd
|
scatterplots.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scatterplots.R
\name{scatterplots}
\alias{scatterplots}
\title{scatterplots}
\usage{
scatterplots(file, directory = getwd())
}
\arguments{
\item{file}{path to the file containing the raw or normalised count matrix}
\item{directory}{directory in which to look for \code{file}; defaults to \code{getwd()}}
}
\value{
plots
}
\description{
Generates the pairwise correlation between the samples
}
\examples{
}
|
fdc400d0a15dbc39f2f60b74d5f304121e048bfe
|
768411a39703ce54c6d2e161baf63e22d6f5641c
|
/datediff2.R
|
58c01f2f34f8034b44144a63e5072a7011c31153
|
[
"MIT"
] |
permissive
|
joelonsql/coronalyzer
|
ef092df181bff32a408ffcb8fedf08661e29194e
|
4253f2fd9fbda212a8dfe2f29b2fde972052d557
|
refs/heads/master
| 2021-05-20T02:40:47.969881
| 2020-04-24T13:04:54
| 2020-04-24T13:04:54
| 252,151,329
| 15
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,156
|
r
|
datediff2.R
|
library(tidyverse)
library(plotly)
# Reporting window for the FHM (Swedish Public Health Agency) covid-19
# deaths-per-day series transcribed below.
first_date <- as.Date("2020-03-11")
last_date <- as.Date("2020-04-15")
n_days <- as.integer(last_date - first_date)
# Deaths by actual death date, as published on each successive FHM report
# date. Each data.frame below is one day's published revision of the whole
# series; stacking them lets later code measure reporting lag per death date.
data <- rbind(
data.frame(
report_date = as.Date("2020-04-02"),
death_date = seq(first_date,first_date+22,by=1),
deaths = c(0,0,1,1,2,2,1,6,7, 9,8,11,8,16,22,27,31,26,25,26,26,13,5)
),
data.frame(
report_date = as.Date("2020-04-03"),
death_date = seq(first_date,first_date+23,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,9,8,11,9,16,22,27,31,29,27,30,33,23,23,2)
),
data.frame(
report_date = as.Date("2020-04-04"),
death_date = seq(first_date,first_date+24,by=1),
deaths = c(1,0,1,1,2,2,1,6,7, 9,8,11,9,16,23,27,31,29,28,30,36,25,36,18,1)
),
data.frame(
report_date = as.Date("2020-04-05"),
death_date = seq(first_date,first_date+25,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,9,8,11,9,16,24,27,32,29,29,30,36,31,43,22,6,1)
),
data.frame(
report_date = as.Date("2020-04-06"),
death_date = seq(first_date,first_date+26,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,9,8,11,10,16,24,28,33,29,31,32,36,35,47,34,17,23,13)
),
data.frame(
report_date = as.Date("2020-04-07"),
death_date = seq(first_date,first_date+27,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,9,8,11,11,17,24,30,33,31,32,38,37,40,55,49,40,49,37,2)
),
data.frame(
report_date = as.Date("2020-04-08"),
death_date = seq(first_date,first_date+28,by=1),
deaths = c(1,0,1,1,2,2,2,6,7,10,7,11,11,18,25,29,33,31,34,38,36,42,59,54,48,58,55,36,6)
),
data.frame(
report_date = as.Date("2020-04-09"),
death_date = seq(first_date,first_date+29,by=1),
deaths = c(1,0,1,1,2,2,2,6,7,10,7,12,11,20,25,30,32,34,37,41,42,45,65,58,54,67,66,53,47,3)
),
data.frame(
report_date = as.Date("2020-04-10"),
death_date = seq(first_date,first_date+30,by=1),
deaths = c(1,0,1,1,2,2,2,6,7,10,7,12,11,20,25,30,32,34,37,41,42,47,67,64,57,75,74,60,67,20,3)
),
data.frame(
report_date = as.Date("2020-04-11"),
death_date = seq(first_date,first_date+31,by=1),
deaths = c(1,0,1,1,2,2,2,6,7,10,7,12,11,20,25,30,32,34,37,41,42,47,67,65,57,75,74,60,70,23,13,0)
),
data.frame(
report_date = as.Date("2020-04-12"),
death_date = seq(first_date,first_date+32,by=1),
deaths = c(1,0,1,1,2,2,2,6,7,10,7,12,11,20,25,30,32,34,37,41,42,47,67,65,57,75,74,60,70,24,14,8,2)
),
data.frame(
report_date = as.Date("2020-04-13"),
death_date = seq(first_date,first_date+33,by=1),
deaths = c(1,0,1,1,2,2,2,6,7,10,7,12,11,20,25,30,32,34,37,41,42,47,67,65,57,75,74,60,70,26,17,14,9,2)
),
data.frame(
report_date = as.Date("2020-04-14"),
death_date = seq(first_date,first_date+34,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,10,7,12,11,20,25,30,32,35,38,42,43,48,69,68,59,76,71,65,77,43,31,26,33,21,5)
),
data.frame(
report_date = as.Date("2020-04-15"),
death_date = seq(first_date,first_date+35,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,10,7,12,11,20,25,30,32,35,38,42,43,49,68,69,60,78,82,70,90,55,52,50,54,45,31,6)
),
data.frame(
report_date = as.Date("2020-04-16"),
death_date = seq(first_date,first_date+36,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,10,7,12,11,20,25,30,32,35,38,42,43,50,68,71,61,79,85,75,97,63,62,61,62,55,49,41,10)
),
data.frame(
report_date = as.Date("2020-04-17"),
death_date = seq(first_date,first_date+37,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,10,7,12,11,20,25,29,32,35,38,43,44,50,67,75,66,79,87,76,99,66,62,63,67,56,56,45,38,4)
),
data.frame(
report_date = as.Date("2020-04-18"),
death_date = seq(first_date,first_date+38,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,10,7,12,11,20,25,29,32,35,38,44,45,50,67,78,68,81,88,77,101,73,73,73,76,62,60,55,59,20,2)
),
data.frame(
report_date = as.Date("2020-04-19"),
death_date = seq(first_date,first_date+39,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,10,7,12,11,20,25,29,32,35,38,44,45,51,67,79,68,81,90,78,102,75,75,74,79,63,60,56,61,23,9,1)
),
data.frame(
report_date = as.Date("2020-04-20"),
death_date = seq(first_date,first_date+40,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,10,7,12,11,20,25,29,32,35,38,44,45,51,67,79,68,81,90,78,102,76,75,74,79,63,60,57,63,30,19,17,2)
),
data.frame(
report_date = as.Date("2020-04-21"),
death_date = seq(first_date,first_date+41,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,10,7,12,11,20,25,29,32,35,39,44,45,52,67,81,69,82,90,81,106,79,78,84,86,72,67,77,78,49,51,43,21,3)
),
data.frame(
report_date = as.Date("2020-04-22"),
death_date = seq(first_date,first_date+42,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,10,7,12,11,21,24,29,32,35,39,44,46,51,69,81,71,84,89,82,110,85,84,93,94,84,81,96,96,57,59,53,46,18,5)
),
data.frame(
report_date = as.Date("2020-04-23"),
death_date = seq(first_date,first_date+43,by=1),
deaths = c(1,0,1,1,2,2,1,6,7,9,8,12,11,20,23,31,32,35,39,43,47,52,69,78,71,86,91,83,111,84,89,96,95,84,89,102,99,64,63,60,54,26,26,3)
)
)
# Persist the transcribed series for reuse.
write_csv(data, "fhm.csv")
# Per death date, the deaths newly attributed by each successive report
# (difference vs the previous report's figure; 0 baseline for the first).
# NOTE(review): the result stays grouped by death_date; the later base-R
# column assignments are unaffected, but confirm if piping further.
data <- data %>%
group_by(death_date) %>%
mutate(new_deaths = deaths - coalesce(lag(deaths, order_by = report_date),0))
# Reporting lag in days between the death and the report attributing it.
data$lag_effect <- as.numeric(data$report_date - data$death_date)
min_date <- min(data$report_date)
max_date <- max(data$report_date)
# The first report has no predecessor, so its "lag" is not meaningful.
data <- data %>% mutate(lag_effect = if_else(report_date > min_date, lag_effect, 0))
# Scatter: report date vs death date, point size = newly attributed deaths,
# colour = reporting lag. (Axis labels are in Swedish.)
ggplot(data %>% filter(new_deaths > 0 & report_date > min_date)) +
geom_point(aes(x=death_date, y=report_date, size=new_deaths, color=lag_effect)) +
theme_minimal() +
labs(x = "Avliden_datum", color = "Eftersläpning", y = "Rapportdatum", size="Nya dödsfall") +
ggtitle("Folkhälsomyndigheten - Covid19 Historik Excel - Avlidna per dag") +
scale_color_gradientn(colours = terrain.colors(10)) +
scale_y_date(breaks = "1 day")
# One line per published report revision of the series.
data$report_date <- as.factor(data$report_date)
ggplot(data, aes(x=death_date)) +
geom_line(aes(y=deaths, color=report_date)) +
theme_minimal() +
ggtitle("Folkhälsomyndigheten - Covid19 - Avlidna per dag") +
labs(x = "Datum avliden", color = "Rapportdatum", y = "Antal avlidna")
# Bucket lags at 7+ days and stack the bars by lag; the labels quote FHM's
# public statements on the daily death rate for context.
data$lag_effect <- if_else(data$lag_effect < 7, data$lag_effect, 7)
data$lag_effect <- as.factor(data$lag_effect)
plot <- ggplot(data, aes(x=death_date)) +
geom_col(aes(y=new_deaths, fill=lag_effect), position = position_stack(reverse = TRUE)) +
theme_minimal() +
labs(x = "Datum avliden", fill = "Eftersläpning", y = "Antal avlidna") +
ggtitle("Folkhälsomyndigheten - Covid19 - Avlidna per dag") +
geom_label(data=data.frame(death_date=as.Date("2020-04-06")), aes(y=30, label="6/4: Fallen ligger på knappt 30 om dan."), hjust = "inward") +
geom_label(data=data.frame(death_date=as.Date("2020-04-07")), aes(y=40, label="7/4: Vi ligger på ett snitt på 40 fall per dygn."), hjust = "inward") +
geom_label(data=data.frame(death_date=as.Date("2020-04-08")), aes(y=45, label="8/4: Nu ligger vi på 45 eller högre."), hjust = "inward") +
geom_label(data=data.frame(death_date=as.Date("2020-04-20")), aes(y=60, label="20/4: Vi ligger i snitt på 60 fall om dagen."), hjust = "inward") +
scale_y_continuous(breaks = seq(0,100,by=10))
plot
ggplotly(plot)
|
5e53414a5a459a6543ef300f34c1278ee2c49341
|
e0befb94afbe9fadf27e6870d99b2d345efdd134
|
/R/un_gdp_ex.R
|
392e241ab902d8b8785a78f6be085b0636350389
|
[] |
no_license
|
cartazio/R-Samplers
|
e491bf931d8718d3cb1c7a307ec0f875fa62fafb
|
f863636b0a41ff2a996283a8ab06c072c3e8f752
|
refs/heads/master
| 2020-08-13T00:02:21.461052
| 2020-02-09T22:01:15
| 2020-02-09T22:01:15
| 214,870,115
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,053
|
r
|
un_gdp_ex.R
|
## Example
library(tidyverse)
library(janitor)
library(gridExtra)
library(stargazer)
source("./R/one_dim_ex.R")
## Read in data
## GDP column only; -99 is the dataset's missing-value code. Values are
## standardised with scale() and flattened to a plain numeric vector.
undata <- read_csv("./data/country_profiles.csv") %>%
clean_names() %>%
mutate_at(vars(3:50), as.numeric) %>%
dplyr::select(gdp = gdp_gross_domestic_product_million_current_us) %>%
na_if(-99) %>%
drop_na(.) %>%
mutate(gdp = scale(gdp)) %>%
as.matrix(.) %>% as.vector(.)
# Gauss1dim comes from one_dim_ex.R; presumably a sampler for a k-component
# 1-d Gaussian mixture — TODO confirm against one_dim_ex.R.
three_levels <- Gauss1dim(k = 3, x = undata)
df <- three_levels$TheProposalRecord %>% as_tibble(.name_repair = "unique")
# Reshape the recorded mu / pi / z draws into one column per component,
# indexed by sample number t.
all_mu_3 <- df %>% dplyr::select(grep("mu", colnames(.))) %>%
mutate(mu_num = row.names(.)) %>%
gather(key = "t", value = "mu", -mu_num) %>%
spread(mu_num, mu) %>%
mutate(t = 1:nrow(.)) %>%
rename(mu_1 = 2, mu_2 = 3, mu_3 = 4)
all_pi_3 <- df %>% dplyr::select(grep("pi", colnames(.))) %>%
mutate(pi_num = row.names(.)) %>%
gather(key = "t", value = "pi", -pi_num) %>%
spread(pi_num, pi) %>%
mutate(t = 1:nrow(.)) %>%
rename(pi_1 = 2, pi_2 = 3, pi_3 = 4)
all_z_3 <- df %>% dplyr::select(grep("z", colnames(.))) %>%
mutate(z_num = row.names(.)) %>%
gather(key = "t", value = "z", -z_num) %>%
spread(z_num, z) %>%
mutate(t = 1:nrow(.)) %>%
rename(z_1 = 2, z_2 = 3, z_3 = 4)
# Combine all posterior draws into one table keyed by t.
all_post <- left_join(all_mu_3, all_pi_3) %>%
left_join(., all_z_3)
# Trace plots of the Markov chain draws for each mu and pi component,
# arranged into a 2x3 grid and saved to hw1_plots.pdf.
mu_1 <- all_mu_3 %>%
ggplot(aes(x = t, y = mu_1)) + geom_line() +
theme_bw() +
labs(x = "Marcov Chain states", y = "MU 1")
mu_2 <- all_mu_3 %>%
ggplot(aes(x = t, y = mu_2)) + geom_line() +
theme_bw() +
labs(x = "Marcov Chain states", y = "MU 2")
mu_3 <- all_mu_3 %>%
ggplot(aes(x = t, y = mu_3)) + geom_line() +
theme_bw() +
labs(x = "Marcov Chain states", y = "MU 3")
pi_1 <- all_pi_3 %>%
ggplot(aes(x = t, y = pi_1)) + geom_line() +
theme_bw() +
labs(x = "Marcov Chain states", y = "PI 1")
pi_2 <- all_pi_3 %>%
ggplot(aes(x = t, y = pi_2)) + geom_line() +
theme_bw() +
labs(x = "Marcov Chain states", y = "PI 2")
pi_3 <- all_pi_3 %>%
ggplot(aes(x = t, y = pi_3)) + geom_line() +
theme_bw() +
labs(x = "Marcov Chain states", y = "PI 3")
pdf("hw1_plots.pdf")
grid.arrange(mu_1, mu_2, mu_3, pi_1, pi_2, pi_3, nrow = 2)
dev.off()
# deviance
# log p(x1:n, z1:n, µ1:K, pi1:K)
# prob( X, ClusterCenters, Z, PI )
# Prob (A , B , C , D) = P (A | B ,C ,D) * P(B | C ,D) * P (C | D) * P (D)
# post_pi <- all_post %>%
#   dplyr::select(pi_1, pi_2, pi_3) %>% as.matrix()
#
# sum(post_pi[1,])
#
# MCMCpack::ddirichlet(post_pi, c(1/3, 1/3, 1/3))
# Country names for the same rows used to build 'undata' (same filter as
# above, so rows line up with the fitted cluster assignments).
uncountry <- read_csv("./data/country_profiles.csv") %>%
clean_names() %>%
dplyr::select(country, gdp_gross_domestic_product_million_current_us) %>%
na_if(-99) %>%
drop_na(.) %>%
dplyr::select(country)
# Attach the final-state cluster indicators (one column per cluster) and
# print the country list for each cluster as a LaTeX table via stargazer.
labeled <- cbind(uncountry, t(three_levels$FinalState$z) %>% as_tibble())
labeled %>% filter(V1 == 1) %>% select(country) %>% stargazer(summary = F)
labeled %>% filter(V2 == 1) %>% select(country) %>% stargazer(summary = F)
labeled %>% filter(V3 == 1) %>% select(country) %>% stargazer(summary = F)
# NOTE(review): knitr::kable() is called with no argument — this errors as
# written and looks like a leftover; confirm what it was meant to render.
knitr::kable()
|
08ffdf3bde256dbd42c8bce716e4402b86333db7
|
dc3642ea21337063e725441e3a6a719aa9906484
|
/DevInit/R/P20/cwi_depth_plus_mics.R
|
298913930aa66da1dd9cfb08d170dc2520596d99
|
[] |
no_license
|
akmiller01/alexm-util
|
9bbcf613384fe9eefd49e26b0c841819b6c0e1a5
|
440198b9811dcc62c3eb531db95abef8dbd2cbc7
|
refs/heads/master
| 2021-01-18T01:51:53.120742
| 2020-09-03T15:55:13
| 2020-09-03T15:55:13
| 23,363,946
| 0
| 7
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,127
|
r
|
cwi_depth_plus_mics.R
|
library(Hmisc)
library(data.table)
library(foreign)
library(descr)
library(plyr)
# NOTE(review): hard-coded setwd() makes this script machine-specific.
setwd("D:/Documents/Data/DHSmeta")
load("DHS_cwi.RData")
fileName <- "depth_of_cwi_final.csv"
load("D:/Documents/Data/MICSmeta/global_mics_cwi.RData")
# DHS sample weights are stored scaled by 1e6; rescale to plain weights.
cwi$weights <- cwi$sample.weights/1000000
# Recode DHS two-letter country codes to ISO2 where the two schemes differ
# (e.g. DHS "BU" Burundi -> ISO "BI").
cwi$iso2[which(cwi$iso2=="BU")] <- "BI"
cwi$iso2[which(cwi$iso2=="DR")] <- "DO"
cwi$iso2[which(cwi$iso2=="IA")] <- "IN"
cwi$iso2[which(cwi$iso2=="KY")] <- "KG"
cwi$iso2[which(cwi$iso2=="LB")] <- "LR"
cwi$iso2[which(cwi$iso2=="MD")] <- "MG"
cwi$iso2[which(cwi$iso2=="MB")] <- "MD"
cwi$iso2[which(cwi$iso2=="NM")] <- "NA"
cwi$iso2[which(cwi$iso2=="NI")] <- "NE"
# Attach ISO codes to the MICS surveys by source filename (plyr::join),
# drop surveys without a code, and align the weight column name with DHS.
mics_isos <- read.csv("D:/Documents/Data/MICSmeta/isos.csv")
mics.cwi <- join(
mics.cwi
,mics_isos
,by="filename"
)
mics.cwi <- subset(mics.cwi,!is.na(iso2))
mics.cwi$weights <- mics.cwi$sample.weights
# Stack DHS and MICS records, then replace the per-survey 'year' with the
# canonical years table keyed by filename.
data <- rbind(cwi,mics.cwi)
data <- data.frame(data)
data$year <- NULL
all.years <- read.csv("D:/Documents/Data/MICSmeta/all.years.csv")
data <- join(
data
,all.years
,by="filename"
)
# Weighted percentiles of x: for each probability in 'prob', return the
# smallest x whose cumulative weight reaches prob * total weight. The result
# is named "<prob*100>%". Rows with NA in x or w are dropped when na.rm=TRUE.
weighted.percentile <- function(x,w,prob,na.rm=TRUE){
  pairs <- data.frame(x, w)
  if (na.rm) {
    pairs <- pairs[complete.cases(pairs), ]
  }
  # sort by value so cumulative weights are monotone
  pairs <- pairs[order(pairs$x), ]
  total_w <- sum(pairs$w)
  cum_w <- cumsum(pairs$w)
  # first sorted value whose cumulative weight reaches p * total_w
  cuts <- vapply(prob, function(p) {
    pairs$x[which(cum_w >= total_w * p)[1]]
  }, numeric(1))
  names(cuts) <- paste0(round(prob * 100, digits = 2), "%")
  cuts
}
# Latest available survey file per country (DHS codes + MICS dataset names).
# Commented-out entries are deliberately excluded from the analysis.
latest_surveys <- c(
  "alhr50dt", "amhr61dt", "aohr61dt", "azhr52dt", "bdhr70dt", "bfhr70dt"
  ,"bjhr61dt", "bohr51dt", "buhr61dt", "cdhr61dt", "cghr60dt"
  ,"cihr61dt", "cmhr60dt", "cohr61dt", "drhr61dt", "eghr61dt"
  ,"ethr61dt", "gahr60dt", "ghhr70dt", "gmhr60dt", "gnhr61dt", "gyhr5idt"
  ,"hnhr62dt", "hthr61dt", "iahr52dt", "idhr63dt", "johr6cdt"
  ,"kehr7hdt","khhr72dt", "kmhr61dt"
  # , "kyhr61dt"
  , "lbhr6adt", "lshr61dt"
  # ,"mbhr53dt"
  , "mdhr6hdt", "mlhr6hdt", "mvhr51dt", "mwhr71dt"
  ,"mzhr62dt", "nghr6adt", "nihr61dt", "nmhr61dt"
  # , "nphr60dt"
  ,"pehr6idt","phhr61dt","pkhr61dt"
  ,"rwhr70dt","slhr61dt","snhr70dt", "sthr50dt"
  # , "szhr51dt"
  ,"tghr61dt", "tjhr61dt", "tlhr61dt","tzhr6adt"
  # , "uahr51dt"
  ,"ughr72dt"
  # , "vnhr52dt"
  , "yehr61dt", "zmhr61dt"
  # , "zwhr62dt"
  #MICS
  ,"Afghanistan_MICS4_Datasets","Algeria_MICS4_Datasets"
  ,"Barbados_MICS4_Datasets","Belarus_MICS4_Datasets"
  ,"Belize_MICS4_Datasets","Bhutan_MICS4_Datasets"
  ,"Bosnia and Herzegovina_MICS4_Datasets","Central African Republic_MICS4_Datasets"
  ,"Chad_MICS4_Datasets","Costa Rica_MICS4_Datasets","Georgia MICS 2005 SPSS Datasets"
  ,"Guinea-Bissau MICS 2006 SPSS Datasets","Iraq_MICS4_Datasets","Jamaica_MICS4_Datasets"
  ,"Kazakhstan_MICS4_Datasets","Kosovo under UNSC res. 1244_MICS5_Datasets"
  ,"Kyrgyzstan MICS5 Datasets","Lao People's Democratic Republic_LSIS_Datasets"
  # ,"Lebanon (Palestinians)_MICS4_Datasets"
  ,"Macedonia, The former Yugoslav Republic of_MICS4_Datasets","Mauritania_MICS4_Datasets"
  ,"Moldova_MICS4_Datasets","Mongolia_MICS5_Datasets","Montenegro_MICS5_Datasets"
  ,"Nepal_MICS5_Datasets"
  # ,"Pakistan (Punjab)_MICS5_Datasets"
  ,"Serbia_MICS5_Datasets"
  # ,"Somalia (Northeast Zone)_MICS4_Datasets"
  # ,"Somalia (Somaliland)_MICS4_Datasets"
  ,"Somalia MICS 2006 SPSS Datasets"
  ,"South Sudan_MICS4_Datasets"
  ,"Sudan_MICS5_Datasets"
  ,"St.Lucia_MICS4_Datasets","State of Palestine_MICS5_Datasets","Suriname_MICS4_Datasets"
  ,"Swaziland_MICS4_Datasets","Syria MICS 2006 SPSS Datasets","Thailand_MICS4_Datasets"
  ,"Trinidad and Tobago MICS 2006 SPSS Datasets","Tunisia_MICS4_Datasets"
  ,"Turkmenistan_MICS3_Datasets","Ukraine_MICS4_Datasets","Uruguay_MICS4_Datasets"
  ,"Uzbekistan MICS 2006 SPSS Datasets","Vanuatu MICS 2007 SPSS Datasets","Viet Nam_MICS5_Datasets"
  ,"Zimbabwe_MICS5_Datasets"
)
# Keep only the latest survey per country, sorted by wealth index
cwi <- subset(data,filename %in% latest_surveys)
cwi <- cwi[order(cwi$cwi),]
# quints <- weighted.percentile(cwi$cwi,cwi$weights,prob=seq(0,1,length=6))
#
# for(i in 2:length(quints)){
#   quint <- quints[i]
#   quintName <- paste0("quint.",(i-1)*20)
#   cwi[[quintName]] <- (cwi$cwi <= quint)
# }
#
# decs <- weighted.percentile(cwi$cwi,cwi$weights,prob=seq(0,1,length=11))
# cwi$dec.50 <- (cwi$cwi <= decs[6])
# Fixed global P20 cut-off on the CWI scale.
# NOTE(review): -0.06008803 presumably comes from a previous pooled run of the
# commented-out quintile code above — confirm before reuse.
quints <- c(-0.06008803)
names(quints) <- c("20%")
cwi$quint.20 <- (cwi$cwi <= -0.06008803)
# Weighted share of each survey's population below the P20 cut-off
cwi.table <- data.table(cwi)
cwi.collapse <- cwi.table[
  ,.(p20=weighted.mean(quint.20,weights,na.rm=TRUE))
  , by=.(filename)]
# Poverty gap and squared poverty gap among those below the cut-off
# (inside the data.table expression, `cwi` refers to the CWI column)
p20.table <- data.table(subset(cwi,quint.20==TRUE))
p20.collapse <- p20.table[
  ,.(pov.gap=weighted.mean((quints[["20%"]]-cwi),weights,na.rm=TRUE)
     ,pov.gap.sqr=weighted.mean((quints[["20%"]]-cwi)*(quints[["20%"]]-cwi),weights,na.rm=TRUE))
  ,by=.(filename)]
# Join the two per-survey summaries and write the result
data <- join(cwi.collapse,p20.collapse,by="filename")
setwd("D:/Documents/Data/DHSmeta")
write.csv(data,fileName,row.names=FALSE,na="")
|
044d9dc2650300ae0ffa4e0f5d25a577c64e00a2
|
61724385280119f516c9b42d140914dcc7f5f492
|
/Erasmus/2019/Kwidzyn/MZM1.R
|
dcee28180c1e26d8d3014d18ec0294e06b2389a3
|
[] |
no_license
|
hrpunio/Z-MISC
|
bf286c4d8d03c572a4550c5423e43a6e6aa17e3a
|
3be1c83913455f733ac3760140271e92886c75d9
|
refs/heads/master
| 2021-05-07T20:36:37.185699
| 2020-02-27T20:54:08
| 2020-02-27T20:54:08
| 108,959,717
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 511
|
r
|
MZM1.R
|
# Plot three visitor series from MZM.csv: "razem" (total), "krajowi"
# (domestic), "zagraniczni" (foreign) over the `data` (date) column.
library(ggplot2)
library(ggpubr)
# Semicolon-separated CSV; NOTE(review): `na.string` only works via partial
# matching of read.csv's `na.strings` formal — spell it out for clarity.
d <- read.csv("MZM.csv", sep = ';', header=T, na.string="NA");
d
# One line per series; legend on top, no titles/axis labels
p1 <- ggplot(d, aes(x = as.Date(data))) +
geom_line(aes(y = razem, colour = "razem"), size=2) +
geom_line(aes(y = krajowi, colour = "krajowi"), size=2) +
geom_line(aes(y = zagraniczni, colour = "zagraniczni"), size=2) +
ylab(label="") +
labs(colour = "") +
theme(legend.position="top") +
ggtitle("") +
theme(plot.title = element_text(hjust = 0.5)) +
theme(legend.text=element_text(size=12));
p1
|
c5d52b0fa2c6b676a0388748556854cdf921790e
|
e12053363a0ae08f56084f1eac15fc6b0b0b4a19
|
/R/data-lists.R
|
f38ba3e07ca8ed501c18d7543abcdee6789d73d1
|
[] |
no_license
|
DrRoad/mycroftr
|
e6dad359c81a947176625dae67a845bcbee75b7f
|
d25f63582afd29eac3d185211af84640657c690a
|
refs/heads/master
| 2020-03-27T23:03:32.733218
| 2016-05-14T11:50:20
| 2016-05-14T11:50:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,149
|
r
|
data-lists.R
|
#' Stock index identifiers to request from Quandl
#'
#' @return A named list with elements \code{America}, \code{Europe} and
#'   \code{Asia}; each element is itself a named list mapping a
#'   human-readable index name to its Quandl identifier.
#'
#' @export
#'
get_stock_list <- function() {
  # Each region is assembled separately, then combined at the end.
  america <- list(
    `Dow Jones Industrial Average` = "YAHOO/INDEX_DJI",
    `S&P 500 Index` = "YAHOO/INDEX_GSPC",
    `Bovespa index` = "YAHOO/INDEX_BVSP"
    # Disabled entries:
    #`NASDAQ Composite Index` = "NASDAQOMX/COMP"
    #`MERVAL Index` = "YAHOO/INDEX_MERV"
  )
  europe <- list(
    #`FTSE 100 Index` = "",
    `EURO STOXX 50` = "YAHOO/INDEX_STOXX50E",
    `DAX` = "YAHOO/INDEX_GDAXI"
    #`CAC 40 Index` = "YAHOO/INDEX_FCHI",
    #`RTSI Index` = "YAHOO/INDEX_RTS_RS",
    #`OMX Copenhagen 20` = "NASDAQOMX/OMXC20"
  )
  asia <- list(
    `Nikkei 225` = "YAHOO/INDEX_N225",
    `Hong Kong Hang Seng Index` = "YAHOO/INDEX_HSI",
    `Shanghai Shenzhen CSI 300 Index` = "YAHOO/INDEX_SSEC"
    #`KOSPI Composite Index` = "YAHOO/INDEX_KS11",
    #`Straits Times Index` = "YAHOO/INDEX_STI",
    #`Taiwan Weighted Index` = "YAHOO/INDEX_TWII",
    #`SENSEX` = "YAHOO/INDEX_BSESN",
    #`All Ordinaries Index` = "YAHOO/INDEX_AORD"
  )
  list(America = america, Europe = europe, Asia = asia)
}
|
4695719e69c6328c686b19a072c34a2a2bbb822f
|
08313c529a4d1ffecb9f6463534a537d670d775c
|
/man/make.base.res.bw.Rd
|
79fa5f0a2864a17f849d36753fd37cabf9a4a8c0
|
[] |
no_license
|
emdann/hexamerModel
|
5a95ac93852915f51cb4f11917919d9566586e00
|
5c6bf140d9841fa0dcf572b8acf789c9b82caaf0
|
refs/heads/master
| 2021-09-21T21:03:45.694694
| 2018-08-31T12:22:46
| 2018-08-31T12:22:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 422
|
rd
|
make.base.res.bw.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare_peaks.r
\name{make.base.res.bw}
\alias{make.base.res.bw}
\title{Genomic ranges to base resolution}
\usage{
make.base.res.bw(bw)
}
\arguments{
\item{bw}{GRanges object}
}
\value{
GRanges object of base resolution track
}
\description{
Breaks GRanges object to single base resolution, giving same score to every base from a same region
}
|
299298abcb39642fea675f288ef34e5ffd912c5f
|
6b691d3f4c158b3b71927f4f5692520f4add09ae
|
/VAtask2.R
|
2c49010bb28cb871af7ee97bda9f6b0de953e7fb
|
[] |
no_license
|
Panthini/VisualAnalytics_R
|
9657a23cf080437d0e9a3d2170aea7d2ece82ca3
|
1fdafe0cfa912b0b6da47102083c007959a5e262
|
refs/heads/main
| 2023-04-20T06:09:03.478092
| 2021-05-04T07:28:59
| 2021-05-04T07:28:59
| 364,172,881
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,214
|
r
|
VAtask2.R
|
#TASK 2.1 — line chart of closing share price over time for the "big 4"
# NOTE(review): install.packages() calls inside an analysis script re-install
# on every run — better done once interactively.
library(ggplot2)
library(ggthemes)
install.packages("RColorBrewer")
library("RColorBrewer")
install.packages("dplyr")
library(dplyr)
#load the data set
bigstocks <- read.csv(file = "F:/Panthini PC/Study MS/Subjects/Sem 2/Visual Analytics/Assignments/Assignment 2/bigstocks.csv",
                      header = TRUE, sep = ",")
#parse the date column (stored as dd/mm/yyyy)
bigstocks$date <- as.Date(bigstocks$date, "%d/%m/%Y")
#subsetting the top 4 companies
big4comp <- subset(bigstocks, company == 'Apple' |
                     company == 'Amazon' |
                     company == 'Google' |
                     company == 'Facebook')
#line chart: one colored series per company
ggplot(big4comp, aes(x = date, y = close_price,
                     fill = factor(company),
                     color = factor(company))) +
  ggtitle('Shares performance of big 4 company over time') +
  xlab('Time') +
  ylab('Share Price') +
  geom_line() +
  scale_color_manual(values = c("Apple" = "orange", "Amazon" = "purple",
                                "Google" = "red", "Facebook" = "blue")) +
  theme_bw()
#TASK 2.2 — boxplots of traded volume, 2013-2015, with median labels
# NOTE(review): loading plyr after dplyr masks dplyr functions (e.g.
# summarise); keep dplyr calls before this point or use plyr:: explicitly.
library(plyr)
#re-parse the date column
# NOTE(review): bigstocks$date is already a Date from Task 2.1; as.Date on a
# Date ignores the format and returns it unchanged, so this line is redundant.
bigstocks$date <- as.Date(bigstocks$date, "%d/%m/%Y")
#create seperate column for Year to get data of year 2013 to 2015
big4comp$year = as.numeric(format(big4comp$date, "%Y"))
#subsetting the top 4 companies based on year 2013 to 2015
big4comp_2 <- subset(big4comp, year == '2013' |
                       year == '2014' |
                       year == '2015' )
#per-company median volume (used to label the boxplots)
meds <- ddply(big4comp_2, .(company), summarise, med = median(volume))
#boxplot per company; Apple highlighted via the outline colors below
ggplot(big4comp_2, aes(x = company, y = volume),
       color = factor(company)) +
  ggtitle('Distribution of Share Volume of big 4 companies',
          subtitle = 'Traded between 2013 and 2015') +
  geom_boxplot(col=c('orange', 'black', 'orange', 'orange')) +
  scale_color_manual(values = c("Apple" = "orange", "Google" = "NA",
                                "Facebook" = "NA", "Amazon" = "NA")) +
  geom_text(data = meds, aes(x = company, y = med, label = med),
            size = 3, vjust = -0.5) +
  theme_classic()
|
e795f180344b4f9bdbbd8fa375dcf582c2a5abfb
|
d38ed3b625ee57acebeae39da6dee43c92e1a7b1
|
/man/plot_tsne.Rd
|
feaf7d2f7d9316005afcdb9548125d1465efa5e2
|
[] |
no_license
|
markgene/yamatClassifier
|
2739ec98dafd5a44bdba592e04689606207a6e86
|
8dd6037db4b7db385573088862980670196a8396
|
refs/heads/master
| 2020-04-06T08:59:31.195113
| 2018-11-17T18:28:38
| 2018-11-17T18:28:38
| 157,325,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 427
|
rd
|
plot_tsne.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tsne.R
\name{plot_tsne}
\alias{plot_tsne}
\title{Plot tSNE result.}
\usage{
plot_tsne(tsne_res, ...)
}
\arguments{
\item{tsne_res}{The returned value of \code{\link[Rtsne]{Rtsne}},
which is a list.}
\item{...}{Any arguments passed to \code{\link[ggplot2]{aes}}}
}
\value{
A \code{\link[ggplot2]{ggplot}} object.
}
\description{
Plot tSNE result.
}
|
d945df553ee5ef48155473ffa75a49778a005dfd
|
7455b090d93288b0c8408ae9ffedefaebf4c386b
|
/data_extraction_script_multiple_floors.R
|
cf225a4a4f176c81177195b4756757cef5c2394e
|
[] |
no_license
|
adi-gillani/scout_restAPI
|
469ce6e8d62ae3cd98cf8801a74d0ef2d2988e70
|
0b7beb0f0a5aeb61cd75ada91a2538db20744f59
|
refs/heads/master
| 2020-09-08T00:55:25.496301
| 2019-12-30T09:14:19
| 2019-12-30T09:14:19
| 220,963,251
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,162
|
r
|
data_extraction_script_multiple_floors.R
|
# Pull footfall metrics for two store floors from the NeedInsights REST API
# (zone 9590 = ground floor, zone 9591 = first floor; window 2019-10-21 to
# 2019-11-24) and write one CSV per metric.
# NOTE(review): the API key is hard-coded in every URL — consider moving it
# to an environment variable before sharing this script.
library(jsonlite)
library(dplyr)
#use the script below to extract data from NeedInsights RestAPI
# PART 1 - Extracting data for Daily Unique Footfall Count - both first_floor and ground_floor
#extracting data for ground_floor store daily footfall
#feeding the API URL, extracting data and data framing it!
daily_ground_floor_footfall_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_count_unique_days&zone_code=9590&time_start=2019-10-21&time_stop=2019-11-24&format=json"
ground_floor_footfall_daily <- fromJSON(daily_ground_floor_footfall_url)
ground_floor_footfall_daily_df <- as.data.frame(ground_floor_footfall_daily)
#renaming columns
ground_floor_cols <- c("date", "zone_code", "zone_name", "mac_count", "ground_floor_footfall")
colnames(ground_floor_footfall_daily_df) <- ground_floor_cols
#feed, extract, frame — first_floor store
daily_first_floor_footfall_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_count_unique_days&zone_code=9591&time_start=2019-10-21&time_stop=2019-11-24&format=json"
first_floor_store_daily <- fromJSON(daily_first_floor_footfall_url)
first_floor_footfall_daily_df <- as.data.frame(first_floor_store_daily)
#renaming columns
first_floor_cols <- c("date", "zone_code", "zone_name", "mac_count", "first_floor_footfall")
colnames(first_floor_footfall_daily_df) <- first_floor_cols
#combining first_floor and ground_floor visitors (inner merge on date)
daily_footfall_daily <- merge(ground_floor_footfall_daily_df, first_floor_footfall_daily_df, by = "date")
daily_footfall_daily <- daily_footfall_daily[,c("date","first_floor_footfall","ground_floor_footfall")]
#conversion = first-floor visitors as a share of ground-floor visitors
daily_footfall_daily <- mutate(daily_footfall_daily, conversion = (as.numeric(daily_footfall_daily$first_floor_footfall)/as.numeric(daily_footfall_daily$ground_floor_footfall)))
#assigning appropriate column types
daily_footfall_daily$date <- as.Date(daily_footfall_daily$date)
daily_footfall_daily$first_floor_footfall <- as.numeric(daily_footfall_daily$first_floor_footfall)
daily_footfall_daily$ground_floor_footfall <- as.numeric(daily_footfall_daily$ground_floor_footfall)
daily_footfall_daily$conversion <- as.numeric(daily_footfall_daily$conversion)
#exporting the data to a csv
write.csv(daily_footfall_daily, "D:/Sen Heng/Scout Data - October 21 - November 24/daily_footfall_trend.csv", row.names = FALSE)
#PART 2 - Extracting data for Hourly Unique Footfall Count
#feed, extract, frame - ground_floor STORE
ground_floor_store_hourly_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_count_unique_hours&zone_code=9590&time_start=2019-10-21&time_stop=2019-11-24&format=json"
ground_floor_store_hourly <- fromJSON(ground_floor_store_hourly_url)
ground_floor_store_hourly_df <- as.data.frame(ground_floor_store_hourly)
#renaming columns
hourly_ground_floor_cols <- c("time", "zone_code", "zone_name", "count_macs", "ground_floor_footfall")
colnames(ground_floor_store_hourly_df) <- hourly_ground_floor_cols
#feed, extract, frame - first_floor STORE
first_floor_store_hourly_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_count_unique_hours&zone_code=9591&time_start=2019-10-21&time_stop=2019-11-24&format=json"
first_floor_store_hourly <- fromJSON(first_floor_store_hourly_url)
first_floor_store_hourly_df <- as.data.frame(first_floor_store_hourly)
#renaming columns
first_floor_hourly_cols <- c("time", "zone_code", "zone_name", "count_macs", "first_floor_footfall")
colnames(first_floor_store_hourly_df) <- first_floor_hourly_cols
#combining first_floor and ground_floor hourly visitors
hourly_footfall_trend <- inner_join(ground_floor_store_hourly_df, first_floor_store_hourly_df, by = "time")
hourly_footfall_trend <- hourly_footfall_trend[,c("time", "ground_floor_footfall", "first_floor_footfall")]
#calculating conversion
hourly_footfall_trend <- mutate(hourly_footfall_trend, conversion = as.numeric(hourly_footfall_trend$first_floor_footfall)/as.numeric(hourly_footfall_trend$ground_floor_footfall))
#exporting the data to a csv
write.csv(hourly_footfall_trend, "D:/Sen Heng/Scout Data - October 21 - November 24/hourly_footfall_trend.csv", row.names = FALSE)
# PART 3 - Extracting Average Duration per Week
#feed, extract, frame - ground_floor STORE
ground_floor_avg_duration_weekly_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_duration_average_weeks&zone_code=9590&time_start=2019-10-21&time_stop=2019-11-24&format=json"
ground_floor_avg_duration_weekly <- fromJSON(ground_floor_avg_duration_weekly_url)
ground_floor_avg_duration_weekly_df <- as.data.frame(ground_floor_avg_duration_weekly)
#renaming columns
ground_floor_avg_duration_weekly_cols <- c("date", "zone_code", "zone_name", "avg_duration_ground_floor", "count_macs")
colnames(ground_floor_avg_duration_weekly_df) <- ground_floor_avg_duration_weekly_cols
#feed, extract, frame - first_floor STORE
first_floor_avg_duration_weekly_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_duration_average_weeks&zone_code=9591&time_start=2019-10-21&time_stop=2019-11-24&format=json"
first_floor_avg_duration_weekly <- fromJSON(first_floor_avg_duration_weekly_url)
first_floor_avg_duration_weekly_df <- as.data.frame(first_floor_avg_duration_weekly)
#renaming columns
first_floor_avg_duration_weekly_cols <- c("date", "zone_code", "zone_name", "avg_duration_first_floor", "count_macs")
colnames(first_floor_avg_duration_weekly_df) <- first_floor_avg_duration_weekly_cols
#combining first_floor and ground_floor store avg weekly duration
avg_duration_weekly <- inner_join(ground_floor_avg_duration_weekly_df, first_floor_avg_duration_weekly_df, by = "date")
avg_duration_weekly <- avg_duration_weekly[,c("date", "avg_duration_ground_floor", "avg_duration_first_floor")]
#exporting the data to a csv
write.csv(avg_duration_weekly, "D:/Sen Heng/Scout Data - October 21 - November 24/avg_duration_weekly.csv", row.names = FALSE)
#PART 4 - Extracting Zone Visit Frequency per Week split in Bins
#feed, extract, frame - first_floor STORE
first_floor_visit_frequency_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_frequency_weeks&zone_code=9591&time_start=2019-10-21&time_stop=2019-11-24&format=json"
first_floor_visit_frequency <- fromJSON(first_floor_visit_frequency_url)
first_floor_visit_frequency_df <- as.data.frame(first_floor_visit_frequency)
#renaming columns
first_floor_visit_frequency_cols <- c("date", "zone_code", "zone_name", "visit_frequency", "label", "count_macs", "first_floor_percentage")
colnames(first_floor_visit_frequency_df) <- first_floor_visit_frequency_cols
#feed, extract, frame - ground_floor STORE
ground_floor_visit_frequency_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_frequency_weeks&zone_code=9590&time_start=2019-10-21&time_stop=2019-11-24&format=json"
ground_floor_visit_frequency <- fromJSON(ground_floor_visit_frequency_url)
ground_floor_visit_frequency_df <- as.data.frame(ground_floor_visit_frequency)
#renaming columns
ground_floor_visit_frequency_cols <- c("date", "zone_code", "zone_name", "visit_frequency", "label", "count_macs", "ground_floor_percentage")
colnames(ground_floor_visit_frequency_df) <- ground_floor_visit_frequency_cols
#combining data for first_floor and ground_floor store (join on week + frequency bin)
visit_frequency <- inner_join(ground_floor_visit_frequency_df, first_floor_visit_frequency_df, by = c("date" = "date", "label" = "label"))
visit_frequency <- visit_frequency[,c("date", "label", "first_floor_percentage", "ground_floor_percentage")]
#exporting data to a csv
write.csv(visit_frequency, "D:/Sen Heng/Scout Data - October 21 - November 24/visit_frequency.csv",row.names = FALSE)
# PART 5 - Extracting Cell Phone Brands - Weekly
ground_floor_brands_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_brand_weeks&zone_code=9590&time_start=2019-10-21&time_stop=2019-11-24&format=json"
ground_floor_brands <- fromJSON(ground_floor_brands_url)
ground_floor_brands_df <- as.data.frame(ground_floor_brands)
#renaming columns
ground_floor_brands_col <- c("date", "zone_code", "zone_name", "brand_name", "count_macs", "percentage")
colnames(ground_floor_brands_df) <- ground_floor_brands_col
#exporting data to a csv
write.csv(ground_floor_brands_df, "D:/Sen Heng/Scout Data - October 21 - November 24/phone_brands_ground_floor.csv", row.names = FALSE)
#feed, extract, frame - first_floor STORE
first_floor_brands_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_brand_weeks&zone_code=9591&time_start=2019-10-21&time_stop=2019-11-24&format=json"
first_floor_brands <- fromJSON(first_floor_brands_url)
first_floor_brands_df <- as.data.frame(first_floor_brands)
#renaming columns (same layout as the ground-floor brands table)
colnames(first_floor_brands_df) <- ground_floor_brands_col
#exporting data to a csv
write.csv(first_floor_brands_df, "D:/Sen Heng/Scout Data - October 21 - November 24/phone_brands_first_floor.csv", row.names = FALSE)
#PART 6 - Extracting Duration per Week divided in Slots
#feed, extract, frame - ground_floor STORE
ground_floor_zone_duration_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_duration_weeks&zone_code=9590&time_start=2019-10-21&time_stop=2019-11-24&format=json"
ground_floor_zone_duration <- fromJSON(ground_floor_zone_duration_url)
ground_floor_zone_duration_df <- as.data.frame(ground_floor_zone_duration)
#renaming columns
ground_floor_zone_cols <- c("date", "zone_code", "zone_name", "duration_interval", "duration_label", "count_macs", "ground_floor_percentage")
colnames(ground_floor_zone_duration_df) <- ground_floor_zone_cols
#feed, extract, frame - first_floor STORE
first_floor_zone_duration_url <- "https://customer.needinsights.com/rest/?api_key=4d76114e78ed1db951cea3fdc6178016644c374b&metric=zone_duration_weeks&zone_code=9591&time_start=2019-10-21&time_stop=2019-11-24&format=json"
first_floor_zone_duration <- fromJSON(first_floor_zone_duration_url)
first_floor_zone_duration_df <- as.data.frame(first_floor_zone_duration)
#renaming columns
first_floor_zone_cols <- c("date", "zone_code", "zone_name", "duration_interval", "duration_label", "count_macs", "first_floor_percentage")
colnames(first_floor_zone_duration_df) <- first_floor_zone_cols
#combining first_floor and ground_floor store weekly zone duration (join on week + duration slot)
zone_duration_weekly <- inner_join(ground_floor_zone_duration_df, first_floor_zone_duration_df, by =c("date" = "date", "duration_label" = "duration_label"))
zone_duration_weekly <- zone_duration_weekly[, c("date", "duration_label", "ground_floor_percentage", "first_floor_percentage")]
#exporting data to a csv
write.csv(zone_duration_weekly, "D:/Sen Heng/Scout Data - October 21 - November 24/zone_duration_weekly.csv", row.names = FALSE)
|
10aef2a39b7170e0440883a8bd4a7c9906d4b549
|
e474719efb71b7621dcc632f7c0ce24c7d02f516
|
/FGSEA_Hallmark_mRNA.R
|
bc90702bafd7ad2846be9b9ded888c855c941ba8
|
[
"Apache-2.0"
] |
permissive
|
ssun1116/m6a_Mazter_Study
|
7c519627048be86590d8d2626cd40d07bac778b7
|
bbd755346f728d57a3ccf682c959151b29fee798
|
refs/heads/master
| 2023-09-05T07:27:15.021699
| 2021-11-15T06:58:01
| 2021-11-15T06:58:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,928
|
r
|
FGSEA_Hallmark_mRNA.R
|
# FGSEA against MSigDB Hallmark gene sets using per-gene cleavage-efficiency
# differences (KD - WT) from two MAZTER-seq cleavage-efficiency tables.
# Two runs: absolute differences and signed differences; results are written
# to tab-delimited and xlsx tables.
options(stringsAsFactors = F)
library(readxl)
library(tidyverse)
library(dplyr)
library(writexl)
library(AnnotationDbi)
library(org.Hs.eg.db)
library(fgsea)
# Hallmark gene sets (gene symbols)
pp <- fgsea::gmtPathways('Resources/h.all.v7.4.symbols.gmt')
## Load data; suffix columns so KD and WT tables can be merged side by side
d1 = read.delim('Data_distance_30/U2OS2-9_output_single.0622Aligned.out.Rdata_clvEffTable.txt') %>% rename_all( ~ paste0(.x, '_KD'))
d2 = read.delim('Data_distance_30/U2OSNT2_output_single.0622Aligned.out.Rdata_clvEffTable.txt') %>% rename_all( ~ paste0(.x, '_WT'))
## Filter no cleavage reported
d1 = d1 %>% filter(!is.na(clvEff_5_KD) & !is.na(clvEff_3_KD) & !is.na(avgClvEff_KD))
d2 = d2 %>% filter(!is.na(clvEff_5_WT) & !is.na(clvEff_3_WT) & !is.na(avgClvEff_WT))
## Merge data on site name, dropping bookkeeping columns
d = merge(d1 %>%
            dplyr::select(-c('X_KD', 'end_KD', 'score_KD', 'coorNames_KD', 'seqs_KD')),
          d2 %>%
            dplyr::select(-c('X_WT', 'end_WT', 'score_WT', 'coorNames_WT', 'seqs_WT')),
          by.x='name_KD', by.y ='name_WT')
## Calculate the difference: KD - WT
d$avgClvEff_diff = d$avgClvEff_KD - d$avgClvEff_WT
# Site names look like "<transcript_id>_..." — keep the transcript id part
d$transcript_id = do.call(rbind.data.frame, strsplit(d$name_KD, '_', fixed = T))[[1]]
## Load TX annotation (set option_tx = TRUE to rebuild the cache from GTF)
option_tx = F
if (option_tx){
  gm = rtracklayer::import('~/Dropbox/Resources/Gencode_hg38_v32/gencode.v32.primary_assembly.annotation.gtf.gz')
  gm1 = as.data.frame(gm) %>% filter(type=='transcript') %>% mutate(tss=ifelse(strand=='+', start, end)) %>% dplyr::select(transcript_id, gene_id, gene_name, tss, gene_type, transcript_type)
  gm1$transcript_id = do.call(rbind.data.frame, strsplit(gm1$transcript_id, '.', fixed = T))[[1]]
  gm1$gene_id = do.call(rbind.data.frame, strsplit(gm1$gene_id, '.', fixed = T))[[1]]
  saveRDS(gm1, 'genes.Rdata')
} else{
  gm1 = readRDS('Resources/genes.Rdata')
}
# Map transcripts to gene symbols; per gene keep the largest |KD - WT| site
gm1 = gm1 %>% dplyr::select(transcript_id, gene_name, transcript_type)
d = d %>% dplyr::select(transcript_id, avgClvEff_diff)
d_merged = merge(d, gm1, by = "transcript_id") %>% dplyr::select(avgClvEff_diff, gene_name) %>% unique
d_merged$abs_avgClvEff_diff <- abs(d_merged$avgClvEff_diff)
d_merged_abs = d_merged %>% group_by(gene_name) %>% filter(abs_avgClvEff_diff == max(abs_avgClvEff_diff))
##abs value — rank genes by |difference| (toupper matches Hallmark symbols)
gg = d_merged_abs %>% dplyr::select(gene_name, abs_avgClvEff_diff) %>% unique
gg = gg[order(gg$abs_avgClvEff_diff),]
gg1 = gg %>% pull(abs_avgClvEff_diff)
names(gg1) <- gg %>% pull(gene_name) %>% toupper()
df = fgsea(pathways = pp, stats = gg1, scoreType = "pos", eps = 0)
df = df[order(df$NES), ]
# leadingEdge is a list-column; flatten it so the table can be written out
df$leadingEdge <- vapply(df$leadingEdge, paste, collapse = ", ", character(1L))
df = df %>% arrange(padj)
# NOTE(review): paste(x, sep = "_") on a single string is a no-op — the plain
# file name would do.
write.table(df, paste("Tables/table_fgsea_gmt.h.all_mRNA_abs.0705.txt", sep = "_"), quote=F, sep='\t', row.names = F, col.names = T)
write_xlsx(df, paste("Tables/table_fgsea_gmt.h.all_mRNA_abs.0705.xlsx", sep = "_"), col_names = T)
plotEnrichment(pp[["HALLMARK_MITOTIC_SPINDLE"]], gg1) + labs(title = "HALLMARK_MITOTIC_SPINDLE")
# dfRes = df %>% dplyr::filter(padj <= 0.1 & size >= 10)
# dfRes = dfRes[order(dfRes$NES), ]
# dfRes$leadingEdge <- vapply(dfRes$leadingEdge, paste, collapse = ", ", character(1L))
# dfRes = dfRes %>% arrange(padj)
# write.table(dfRes, paste("Tables/table_fgsea_gmt.h.all_mRNA_abs.0705.txt", sep = "_"), quote=F, sep='\t', row.names = F, col.names = T)
## non-abs value — same analysis on the signed differences
gg = d_merged_abs %>% dplyr::select(gene_name, avgClvEff_diff) %>% unique
gg = gg[order(gg$avgClvEff_diff),]
gg1 = gg %>% pull(avgClvEff_diff)
names(gg1) <- gg %>% pull(gene_name) %>% toupper()
df = fgsea(pathways = pp, stats = gg1, scoreType = "pos", eps = 0)
df = df[order(df$NES), ]
df$leadingEdge <- vapply(df$leadingEdge, paste, collapse = ", ", character(1L))
df = df %>% arrange(padj)
write.table(df, paste("Tables/table_fgsea_gmt.h.all_mRNA_non.abs.0705.txt", sep = "_"), quote=F, sep='\t', row.names = F, col.names = T)
write_xlsx(df, paste("Tables/table_fgsea_gmt.h.all_mRNA_non.abs.0705.xlsx", sep = "_"), col_names = T)
|
363e8acd748d23e97503464f61825603512a02f6
|
85300fe0cbb0165b3be7a51186b60ba406cf574c
|
/functions.R
|
3af9926d18d1786efee1ecd16ce7fe6de95b15d4
|
[] |
no_license
|
PawinData/Prostheses
|
103b0b86262d77b754989deda218b19d2585b79e
|
81736292fae21f0b45933d3d7f26eb78249e4d43
|
refs/heads/master
| 2021-03-31T09:12:59.436124
| 2020-05-02T01:54:18
| 2020-05-02T01:54:18
| 248,095,157
| 0
| 3
| null | 2020-04-21T14:52:48
| 2020-03-17T23:36:38
|
R
|
UTF-8
|
R
| false
| false
| 571
|
r
|
functions.R
|
# Return the value at `location`, walking backwards past any NAs.
#
# vector    a vector possibly containing NAs
# location  1-based index to start from
#
# If vector[location] is NA, the nearest earlier non-NA value is returned.
# Returns NA when every value at or before `location` is NA (the original
# recursive version recursed to index 0 there, where is.na(vector[0]) is
# logical(0) and `if` raised "argument is of length zero").
pre_val <- function(vector, location) {
  while (location >= 1) {
    if (!is.na(vector[location])) {
      return(vector[location])
    }
    location <- location - 1
  }
  NA
}
# Return the value at `location`, preferring the value itself, then the
# nearest LATER non-NA value; if everything from `location` to the end of
# the vector is NA, fall back to searching backwards via pre_val().
#
# Rewritten iteratively (the original recursed once per NA). The fallback
# is equivalent to the original's pre_val(vector, length(vector) - 1) call
# because every position between `location` and the end is known to be NA.
post_val <- function(vector, location) {
  i <- location
  while (i <= length(vector)) {
    if (!is.na(vector[i])) {
      return(vector[i])
    }
    i <- i + 1
  }
  pre_val(vector, location - 1)
}
|
0d7012308381ce3bc39946266a601e0d419b3ad6
|
e6b9765b89662b694b4767d0ad87120d23386d75
|
/man/write_firebase_functions.Rd
|
b92359fd885b236b0ae2bc0a0561bd00e3db2cd3
|
[
"MIT"
] |
permissive
|
axionbio/polished
|
0ff04ffa5c17ff3f29a89ae9afcef6ce52383b2e
|
591365abd5da3d51f90a8c49a0e678db6a49aa58
|
refs/heads/master
| 2021-02-25T08:28:41.820694
| 2020-02-22T00:03:27
| 2020-02-22T00:03:27
| 245,451,607
| 0
| 0
|
MIT
| 2020-03-06T15:17:24
| 2020-03-06T15:17:23
| null |
UTF-8
|
R
| false
| true
| 985
|
rd
|
write_firebase_functions.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_firebase_functions.R
\name{write_firebase_functions}
\alias{write_firebase_functions}
\title{write js file for polished Firebase Functions}
\usage{
write_firebase_functions(path = "functions/index.js", overwrite = TRUE)
}
\arguments{
\item{path}{"functions/index.js" by default. The file path of the created file.}
\item{overwrite}{TRUE by default. Should the existing file be overwritten?}
}
\description{
write js file for polished Firebase Functions
}
\details{
By default this function will create a "functions/index.js" file which
contains the Polished Firebase Functions. If you are using custom Firebase functions,
then change the `path` argument to something like "functions/polished.js", and make sure
to add `require(./polished)` in your "functions/index.js" file.
}
\examples{
# must make functions folder
write_firebase_functions()
write_firebase_functions("functions/my_file.js")
}
|
de7bf3a71c46ab000722608c5321dfac2e744cc7
|
b2a9136e6858e4cfe806c264fb2827ae73264620
|
/man/isUTF8.Rd
|
11e216b3f8e2df413b6a9ce18c4469a15dd24964
|
[] |
no_license
|
hetong007/pullword
|
2f8e46f10451c4f6614d2eedb1a504b74d306a18
|
cfbdda4d25260dccf46f907c9b7a6335e1aaae53
|
refs/heads/master
| 2021-07-17T21:40:04.922246
| 2021-07-13T07:46:29
| 2021-07-13T07:46:29
| 30,846,213
| 22
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 475
|
rd
|
isUTF8.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/isUTF8.R
\name{isUTF8}
\alias{isUTF8}
\title{Indicate whether the encoding of input string is UTF-8.}
\usage{
isUTF8(string, combine = FALSE)
}
\arguments{
\item{string}{A character vector.}
\item{combine}{Whether to combine all the strings.}
}
\value{
Logical value.
}
\description{
Indicate whether the encoding of input string is UTF-8.
}
\author{
Jian Li <\email{rweibo@sina.com}>
}
|
f53537ae9e8e5be73b9aa9391e939ab9b04fda8e
|
ab3b943abf724c52167bfeaaf65a196abd6461be
|
/data-raw/abs.R
|
576d93cbddd3033e1bf37c93b4a53d8054576cf3
|
[] |
no_license
|
mdsumner/ozmaps
|
97d7caa571d282e047bdba76063ee1c41c9e3978
|
1617235f0d189a903acc374976d34d4e55df4cdd
|
refs/heads/master
| 2021-08-01T02:02:22.624507
| 2021-07-25T11:42:46
| 2021-07-25T11:42:46
| 219,730,319
| 17
| 3
| null | 2021-04-08T00:21:59
| 2019-11-05T11:36:41
|
R
|
UTF-8
|
R
| false
| false
| 1,624
|
r
|
abs.R
|
## ozmaps.data version 0.0.1
## these are simplified versions of a core set, see ozmaps.data for details
## Copies the CED/LGA/STE .rda layers from the sibling ozmaps.data package
## into data/, normalizes them, and re-saves them as package data.
f <- list.files("../ozmaps.data/data", pattern = "^abs.*\\.rda$", full.names = TRUE)
fs <- c(grep("ced",f, value = TRUE),
        grep("lga",f, value = TRUE),
        grep("ste",f,value = TRUE))
file.copy(fs,
          "data/")
library(ozmaps)
# Strip names from the geometry list and re-tag its sfc class attributes
fixup <- function(x) {
  sf::st_set_geometry(x, structure(unname(sf::st_geometry(x)), class = c("sfc_MULTIPOLYGON", "sfc", "list" )))
}
abs_ced <- fixup(abs_ced)
abs_lga <- fixup(abs_lga)
abs_ste <- fixup(abs_ste)
# Re-applies the object's own CRS — presumably to rebuild the crs
# representation under the current sf version. NOTE(review): confirm intent.
fixup2 <- function(x) {
  sf::st_set_crs(x, sf::st_crs(x))
}
abs_ced <- fixup2(abs_ced)
abs_lga <- fixup2(abs_lga)
abs_ste <- fixup2(abs_ste)
usethis::use_data(abs_ced, abs_lga, abs_ste, overwrite = TRUE, compress = "xz", version = 2)
# library(ozmaps)
# library(tibble)
# abs_ced <- sf::st_as_sf(as_tibble(abs_ced))
# abs_ced <- sf::st_as_sf(as_tibble(abs_ced))
# abs_gccsa <- sf::st_as_sf(as_tibble(abs_gccsa))
# abs_ireg <- sf::st_as_sf(as_tibble(abs_ireg))
# abs_lga <- sf::st_as_sf(as_tibble(abs_lga))
# abs_ra <- sf::st_as_sf(as_tibble(abs_ra))
# abs_sa2 <- sf::st_as_sf(as_tibble(abs_sa2))
# abs_sa3 <- sf::st_as_sf(as_tibble(abs_sa3))
# abs_sa4 <- sf::st_as_sf(as_tibble(abs_sa4))
# abs_sed <- sf::st_as_sf(as_tibble(abs_sed))
# abs_ste <- sf::st_as_sf(as_tibble(abs_ste))
# ozmap_country <- sf::st_as_sf(as_tibble(ozmap_country))
# ozmap_states <- sf::st_as_sf(as_tibble(ozmap_states))
#
# usethis::use_data(abs_ced, abs_gccsa, abs_ireg, abs_lga, abs_ra, abs_sa2, abs_sa3, abs_sa4, abs_sed, abs_ste, ozmap_country, ozmap_states, overwrite = TRUE)
|
0f055728a415f5bcad24dd0a1e7e1fd91a64e74a
|
6057615fbf3dae016ddf7e34b93b9a8b8ccfdfb2
|
/Data-project-2/ui.R
|
3362f8e9ac3a3570015288824b3a964ea8642da1
|
[] |
no_license
|
kerrmcintosh/acme_inc_shiny_app
|
572a8d37734a6bbe645b4b616e2d4e55b0eb2f74
|
2fbeff81ed1852fd68c625534edc38c0f8e61bd3
|
refs/heads/master
| 2022-12-23T21:57:16.867135
| 2020-09-22T20:49:51
| 2020-09-22T20:49:51
| 295,159,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,496
|
r
|
ui.R
|
# UI section
ui <- fluidPage( tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "style.css")
),
theme = shinytheme("simplex"),
navbarPage(
title = div(img(src="codeclanlogo.jpeg", id = "logo"), "An Analysis of Acme Inc's Website Traffic")),
tabsetPanel(
# Tab 1
tabPanel("About", div(class = "separator"),
fluidRow(
column(6, h4("Brief"),
div(class = "separator"),
tags$p("We were asked to define catchment areas for each of Acme Inc's regional sales outlets
and assign web traffic in Scotland to the correct catchment. We were then to create
visualisations of Acme Inc website performance,comparing the three catchment areas Edinburgh, Glasgow and Inverness."),
div(class = "separator"),
h4("Planning"),
div(class = "separator"),div(class = "separator"),
tags$p("To define the catchment area we decide to consolidate Scotlands counties
by distance from Acme Inc's 3 Scottish outlets. This split the country up well with the
North of Scotland assign to Inverness, the West to Glasgow and the East to Edinburgh."),
div(class = "separator"),
tags$p("We defined website performance by multiple aspects. Firstly the amount of
users and sessions the website receives. Secondly the websites traffic and the its
source. Finally the completion of goals 2, 9 and 11 which are course application submissions."),
div(class = "separator"),
h4("Execution"),
div(class = "separator"),
tags$p("This dashboard displays various visualisation of the different aspects of the website
performance of the three defined catchment areas and have a brief description of
what they show."),
div(class = "separator"),
tags$p("The map to the left shows the three defined catchment area with the total number of
users and sessions it has received. It also shows the distribution of the users and
sessions throughout Scotland. ")),
column(6, leafletOutput("scotland_leaflet", height = 800, width = 700)
)
)
# End Tab 1
),
# Tab2
tabPanel("Overall Site Traffic", div(class = "separator"),
fluidRow(column(9,
tags$p(class = "indent", "This page summarises total website traffic split in to Acme Inc's
3 Scottish Regions: Edinburgh, Glasgow and Inverness. The data was taken from
Google Analytics. Where the catchment is given as Scotland uncategorised, no locational
was data was provided by Google Analytics other than that the user was in Scotland.
Source Mediums (facebook, organic searches, blog posts, etc) have been grouped together
in to categories."),
tags$div(class="header_container",
tags$div(class ="div_in_topr",
sliderInput("date_tab2",
tags$b("Select time period to view"),
min = min(ai_ga_data_all$yearMonth), max = max(ai_ga_data_all$yearMonth),
value = c(min(ai_ga_data_all$yearMonth), max(ai_ga_data_all$yearMonth)),timeFormat="%Y-%m")),
tags$div(class ="div_in_topr",
radioButtons("usersesh",
tags$b("How would you like to view traffic?"),
choices = c("users", "sessions"))
)),
div(id = "separator"),
fluidRow(
tags$div(class="st_container",
tags$div(class="center",
tags$div(id = "separator"),
tags$div(class="div_in",
tags$b("Edinburgh"),
tableOutput("ed_users")),
tags$div(class="div_in",
tags$b("Glasgow"),
tableOutput("gl_users")),
tags$div(class="div_in",
tags$b("Inverness"),
tableOutput("iv_users")),
tags$br(class="clearBoth" )))),
div(class = "separator"),
plotOutput("total_plot")
)
)
# End Tab 2
),
# Tab 3
tabPanel("Site Traffic by Catchment and Source", div(class = "separator"),
fluidRow(column(11,
tags$p(class = "indent", "This page analyses how traffic came to the Acme Inc's website for Acme Inc's 3 Scottish Regions: Edinburgh, Glasgow and Inverness.
The data was taken from Google Analytics. Where the catchment is given as Scotland uncategorised, no locational data was provided by
Google Analytics other than that the user was in Scotland. Source Mediums (facebook, organic searches, blog posts, etc) have been
grouped together in to categories.")),
column(9,
tags$div(class="header_container",
tags$div(class ="div_in_topr",
sliderInput("date_tab3",
tags$b("Select time period to view"),
min = min(ai_ga_data_all$yearMonth), max = max(ai_ga_data_all$yearMonth),
value = c(min(ai_ga_data_all$yearMonth), max(ai_ga_data_all$yearMonth)),
timeFormat="%Y-%m")),
tags$div(class ="div_in_topr",
selectInput("medium",
tags$b("Select the medium by which user came to the Acme Inc website"),
choices = sort(unique(ai_source_regrouped$ai_source)),
selected = "Social Media")
)))),
fluidRow(
column(9,
tags$div(class ="plot_cont",
plotOutput("source_bar_plot")),
tags$div(class ="plot_cont",
plotOutput("source_plot")),
fluidRow(
tags$div(class ="container_tab3",
tags$div(class="center",
tags$div(class="div_in",
tags$b("Top 5 Performing GA Campaigns", tags$br(), "for Edinburgh"),
tableOutput("medium_campaign_ed")),
tags$div(class="div_in_topl",
tags$b("Top 5 Performing GA Campaigns", tags$br(), "for Glasgow"),
tableOutput("medium_campaign_gl")),
tags$div(class="div_in_topl",
tags$b("Top 5 Performing GA Campaigns", tags$br(), "for Inverness"),
tableOutput("medium_campaign_iv")))
))),
column(3,
tags$div(class = "side_container_blue",
tags$div(class = "side_center",
tags$b(textOutput("traf_med")),
tableOutput("grp_traf"))),
tags$div(class = "side_container_pink",
tags$div(class = "side_center",
tags$b(textOutput("ed_traf_med")),
tableOutput("medium_detail_ed"),
tags$b(textOutput("gl_traf_med")),
tableOutput("medium_detail_gl"),
tags$b(textOutput("iv_traf_med")),
tableOutput("medium_detail_iv"))
)
))
),
# Tab 4
tabPanel("Goal Completions by Catchment", div(class = "separator"),
fluidRow(column(11,
tags$p(class = "indent", "This page looks at the goal conversions defined in google analytics with
regard to website traffic from Scottish catchments")),
column(9,
tags$div(class="header_container",
tags$div(class ="div_in_topr",
sliderInput("date_tab4",
tags$b("Select time period to view"),
min = min(ai_ga_data_all$yearMonth), max = max(ai_ga_data_all$yearMonth),
value = c(min(ai_ga_data_all$yearMonth), max(ai_ga_data_all$yearMonth)),
timeFormat="%Y-%m")),
tags$div(class ="div_in_topr",
selectInput("goal",
tags$b("Select the goal to view"),
choices = c("Info Requested", "Appointment Booked", "Confirmed Sale", "All Goals"))),
))),
fluidRow(column(9,
tags$div(class ="container_tab4",
tags$div(class="center",
tags$div(class="div_in",
tags$b("GA Goal Conversions"),
tableOutput("table_conv")))),
tags$div(class ="plot_cont",
plotOutput("conv_plot"))
)
)
)
),
div(class = "separator"),
tags$footer(class = "footer_text", h5("Produced by CodeClan Group 2"),
tags$div(class = "separator"))
)
|
a1ff681b7d343854bf9944bdbca43e3e5344b489
|
53757063ca956fbdbfe922bfdb7d256d28d98d31
|
/shiny_app/server.R
|
60511ab9cc86639f532aa4f4054d86f18fb1b78c
|
[] |
no_license
|
rachai/shiny_dashboard_template_with_modules
|
f579d6bf9b665d77943fc6876745101a7d01d33f
|
6bb358a9a7eb3c57b4022b1d92a96793145bd27e
|
refs/heads/master
| 2022-11-29T19:03:25.299731
| 2020-07-28T00:26:59
| 2020-07-28T00:26:59
| 282,938,822
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,137
|
r
|
server.R
|
# Define server logic
server <- function(input, output, session) {
#Switch from panel 1 to panel 2 when button in panel 1 is clicked
observeEvent(input$panel1_to_panel2, {
updateTabsetPanel(session, "tabset1", selected = "Panel 2")
})
#Switch from panel 2 to panel 1 when button in panel 2 is clicked
observeEvent(input$panel2_to_panel1, {
updateTabsetPanel(session, "tabset1", selected = "Panel 1")
})
#Switch from panel 3 to panel 4 when button in panel 3 is clicked
observeEvent(input$panel3_to_panel4, {
updateTabsetPanel(session, "tabset2", selected = "Panel 4")
})
#Switch from panel 4 to panel 3 when button in panel 4 is clicked
observeEvent(input$panel4_to_panel3, {
updateTabsetPanel(session, "tabset2", selected = "Panel 3")
})
#Server code for panel 1 module
callModule(tab1_item1_server, "tab1_item1")
#Server code for panel 1 module
callModule(tab1_item2_server, "tab1_item2")
#Server code for panel 1 module
callModule(tab2_item1_server, "tab2_item2")
#Server code for panel 1 module
callModule(tab2_item2_server, "tab2_item2")
}
|
8e797dbbb60284f8fe017664a2a0e6f1c69dbc15
|
2934216aaa8f90053f7bb50963893799182bb7fb
|
/datastructure.R
|
57c215ce281f8fa6ca2dc6b385550f6075f120b6
|
[] |
no_license
|
vijaymv/analytics1
|
67917b9d460385e7f51d90e75c4cba1e37c9784f
|
a52caec8f80e6456ac599ee0376bbebd8121b47d
|
refs/heads/master
| 2020-04-02T16:32:45.957750
| 2018-10-29T16:32:54
| 2018-10-29T16:32:54
| 154,617,168
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,986
|
r
|
datastructure.R
|
#datastructure
#vectors
x = 1:10
x
x1 = 1:20
x1
(x1=1:30)
(x2=(c(1,4,5,5)))
x2
(x3=letters[1:10]) # to print a to j
class(x3)
LETTERS[1:26] # TO PRINT A TO Z using LETTERS FUNCTION
(x3b = c('a',"dhiraj","4")) # cannot mix data types in vectors. output will be all charactor
class(x3b)
(x4=c(T,FALSE,TRUE,T,F))
class(x4)
x2b = c(2L, 3L, 4L) #to get numeric vector not integer
x2b
LETTERS[seq(1,26,2)]
# access elements
(x6 = seq(0,100,by=3))
length(x6) #to find the number of elements in the variable
x6[3] # access the 3rd value
x6
x6[seq(1, length(x6),2)]
x6[-1] # access al but 1st element
rev(x6) # to print reverse
x6[c(2.4,3.54)] # real number truncation
x6[-c(1,5,20)]
x7 = c(x6,x2)
x7
(x6 = sample(1:20)) # to get random samples
sort(x6[-c(1,2)])
set.seed(12)
(x6 = sample(1:20))
sort(x6[-c(2,4)])
(x = -3:2)
x[2]= 10 # modify second element
x
x = 1:50
x< 5
x[x<4 | x>6] # to ge values less than 4 and greater than 6
x[x<4 | x>6] = 100
x
###################### matrix #######################
(m1 = matrix(100:111, nrow =4))
(m2 = matrix(100:111, ncol =3))
m3 = matrix(1:50, ncol=6)
class(m3)
attributes(m3)
m3 = matrix(x,ncol=6)
m1
m1[1,] # 1st row
m1[1,2:3]
m1[c(1,3)] # 1st and 3rd element of 1st column
paste("c","d",sep="-") # to concatinate two chars with -
(colnames(m1) = paste('c',1:3, sep='')) # to give name to the columns
m1
(rownames(m1) = paste('a',1:4,sep =' '))
m1
colSums(m1) # to ge the sum of columns
colMeans(m1) #to ge the mean of columns
colMeans(m1) ;rowMeans(m1) # to print both
attributes(m1)
t(m1) # transpose
m1
sweep(m1, MARGIN = 1, STATS = c(2,3,4,5), FUN="+")
sweep(m1, MARGIN = 2, STATS = c(2,3,4), FUN="+") # ROWWISE
m1
addmargins(m1,margin = 2,sum) #add column wise
m1
addmargins(m1,1,mean) # to get the value at centre
cbind(m1,rowSums(m1)) # to add columnwise
m1
round(addmargins(m1,1,sd),2) #colwise functn
addmargins(m1,c(1,2),mean) # row and col wise functn
addmargins(m1,c(1,2),list(list(mean,sum,max), list(var,sd)))
|
a3285917d830879ccd65d36f8461975a2c2c5c4f
|
8b91fbae2914335c30e7959e6fd76c614d53b73e
|
/commodityEDA-dashboard1/overview.R
|
7941611282a0fbfbb4cadf38a8441e39287b58dc
|
[] |
no_license
|
erichseamon/shiny-apps-agmesh
|
20126baf672eb365b83b2f0b559c9c99d0c7da61
|
9df635ddb47571df64bef428faadcfab632532c8
|
refs/heads/master
| 2021-03-22T04:49:22.412404
| 2020-01-16T22:32:40
| 2020-01-16T22:32:40
| 83,274,088
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 981
|
r
|
overview.R
|
function(){
conditionalPanel(condition="input.conditionedPanels==1",
tabPanel("Help", value = 2, id="conditionedPanels",
HTML('
<h3><p><strong>DMINE Agriculture Dashboard: Insurance Crop Claim State Frequency</strong><br/></h3>
</p>')),
HTML('
<p style="text-align:justify">The Regression and Modeling Analysis Dashboard gives a general overview of a dataset, with pairwise correlation results, regression analysis, as well as some other predictive modeling techniques (decision tree, neural networks). These analytics are operating on a pre-constructed dataset of insurance claim records, summarized by county and year, for the palouse region of Idaho, Washington, and Oregon - from 2007-2015. Climate data were associated with each summarized record, using a algorithm to match up previous climate data with each record. For more info on this methodology, please see our DMINE methods page. </p>'),
value="about"
)
}
|
44f04b2d8724d69364a7b012a55489644ed64dcd
|
ae11641f955bb54dbab5fe1607326d494e7c3058
|
/R/startup.R
|
890dd916cf6053bfe62536296d7141ec0a935a42
|
[] |
no_license
|
asrenninger/tinkering
|
b226bded8530a8bc540bfc567c40ead74a9b1d0b
|
4c216bc20ce7e64fb082aedde4d05bff2bbe35bc
|
refs/heads/master
| 2023-08-17T17:13:30.340732
| 2023-08-14T20:35:29
| 2023-08-14T20:35:29
| 199,517,783
| 6
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,175
|
r
|
startup.R
|
########################################
## Unicorns
########################################
## packages
library(tidyverse)
library(sf)
library(rvest)
library(ggmap)
library(ggshadow)
library(ggnewscale)
## unicorns
wiki <- read_html("https://en.wikipedia.org/wiki/List_of_unicorn_startup_companies")
elem <- html_nodes(wiki,"#mw-content-text > div.mw-parser-output > table:nth-child(17)")
unicorns <-
rvest::html_table(elem) %>%
magrittr::extract2(1) %>%
tibble::as_tibble() %>%
janitor::clean_names()
links <-
reduce(map(1:nrow(unicorns), function(x){
rvest::html_node(elem, glue("tbody > tr:nth-child({x+1}) > td > a")) %>%
rvest::html_attr(name = "href") }
),
c)
unicorns_linked <-
unicorns %>%
mutate(reference = links) %>%
mutate(link = case_when(str_sub(reference, 1, 1) == "/" ~ str_c("https://en.wikipedia.org", reference),
TRUE ~ reference)) %>%
mutate(link = case_when(str_detect(link, "redlink") ~ "none",
TRUE ~ link)) %>%
mutate(link = na_if(link, "none")) %>%
drop_na()
unicorns_filtered <- filter(unicorns_linked, str_detect(link, "wikipedia"))
unicorns_info <-
map_df(1:nrow(unicorns_filtered), possibly(function(x){
link <- unicorns_filtered$link[x]
wiki <- read_html(link)
elem <- rvest::html_nodes(wiki,"#mw-content-text > div.mw-parser-output > table.infobox.vcard")
tibl <-
elem %>%
rvest::html_table() %>%
magrittr::extract2(1) %>%
as_tibble() %>%
set_names(c("field", "value")) %>%
mutate(company = unicorns_filtered$company[x])
return(tibl)
},
tibble("field" = NA, "value" = NA, company = unicorns_filtered$company[x])
)
)
unicorns_geocoded <-
unicorns_info %>%
filter(field == "Headquarters") %>%
select(-field) %>%
rename(headquarters = value) %>%
mutate_geocode(location = headquarters, source = "google", output = "latlon")
unicorns_geocoded %>% drop_na(lon, lat) %>% st_as_sf(coords = c("lon", "lat"), crs = 4326) %>% mapview::mapview()
## decacorns
wiki <- read_html("https://en.wikipedia.org/wiki/List_of_unicorn_startup_companies")
elem <- rvest::html_nodes(wiki,"#mw-content-text > div.mw-parser-output > table:nth-child(20)")
decacorns <-
rvest::html_table(elem) %>%
magrittr::extract2(1) %>%
tibble::as_tibble() %>%
janitor::clean_names()
links <-
reduce(map(1:nrow(decacorns), function(x){
rvest::html_node(elem, glue("tbody > tr:nth-child({x+1}) > td > a")) %>%
rvest::html_attr(name = "href") }
),
c)
decacorns_linked <-
decacorns %>%
mutate(reference = links) %>%
mutate(link = case_when(str_sub(reference, 1, 1) == "/" ~ str_c("https://en.wikipedia.org", reference),
TRUE ~ reference)) %>%
mutate(link = case_when(str_detect(link, "redlink") ~ "none",
TRUE ~ link)) %>%
mutate(link = na_if(link, "none")) %>%
drop_na()
decacorns_filtered <- filter(decacorns_linked, str_detect(link, "wikipedia"))
decacorns_info <-
map_df(1:nrow(decacorns_filtered), possibly(function(x){
link <- decacorns_filtered$link[x]
wiki <- read_html(link)
elem <- rvest::html_nodes(wiki,"#mw-content-text > div.mw-parser-output > table.infobox.vcard")
tibl <-
elem %>%
rvest::html_table() %>%
magrittr::extract2(1) %>%
as_tibble() %>%
set_names(c("field", "value")) %>%
mutate(company = decacorns_filtered$company[x])
return(tibl)
},
tibble("field" = NA, "value" = NA, company = decacorns_filtered$company[x])
)
)
unicorns_geocoded <-
unicorns_info %>%
filter(field == "Headquarters") %>%
mutate(value = str_remove_all(value, "\\n")) %>%
mutate(value = str_remove_all(value, "\\[.*?\\]")) %>%
mutate(value = str_remove_all(value, "\\(.*")) %>%
mutate(value = str_remove_all(value, "and.*")) %>%
mutate(value = str_remove_all(value, ".mw.*")) %>%
select(-field) %>%
rename(city = value) %>%
mutate(headquarters = glue("{company}, {city}")) %>%
mutate_geocode(location = headquarters, source = "google", output = "latlon")
decacorns_geocoded <-
decacorns_info %>%
filter(field == "Headquarters") %>%
mutate(value = str_remove_all(value, "\\n")) %>%
mutate(value = str_remove_all(value, "\\[.*?\\]")) %>%
mutate(value = str_remove_all(value, "\\(.*")) %>%
mutate(value = str_remove_all(value, "and.*")) %>%
select(-field) %>%
rename(city = value) %>%
mutate(headquarters = glue("{company}, {city}")) %>%
mutate_geocode(location = headquarters, source = "google", output = "latlon")
bind_rows(decacorns_geocoded, unicorns_geocoded) %>% write_csv("startups.csv")
info <-
bind_rows(unicorns %>%
transmute(company, valuation = valuation_us_billion),
decacorns %>%
transmute(company, valuation = last_valuation_us_b))
## plot it
theme_bm_legend <- function () {
theme_void() +
theme(plot.background = element_rect(fill = 'black', colour = 'black'),
panel.grid.major.x = element_blank(),
panel.grid.major.y = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank(),
axis.line.x = element_blank(),
axis.line.y = element_blank(),
axis.ticks.x = element_blank(),
axis.ticks.y = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
legend.title = element_text(colour = 'grey50'),
legend.text = element_text(colour = 'white'),
plot.title = element_text(face = 'bold', colour = 'grey50'),
plot.subtitle = element_text(face = 'plain', colour = 'white', size = 15),
panel.grid.major = element_line(size = NA),
panel.grid.minor = element_line(size = NA),
legend.position = 'bottom',
plot.margin = margin(10, 10, 10, 10),
)
}
bind_rows(decacorns_geocoded, unicorns_geocoded) %>%
drop_na(lon, lat) %>%
left_join(info) %>%
st_as_sf(coords = c("lon", "lat"), crs = 4326) %>%
st_transform("+proj=robin +lon_0=0 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m no_defs") %>%
ggplot() +
geom_sf(data = coastline,
aes(),
colour = '#c7c7c7', size = 0.1, linetype = 3) +
geom_glowpoint(aes(geometry = geometry, size = parse_number(valuation)),
alpha = .8,
color = "#6bb857",
shadowcolour = "#0062ff",
shadowalpha = .1,
stat = "sf_coordinates",
show.legend = FALSE) +
scale_size(range = c(.1, 1.5)) +
new_scale("size") +
geom_glowpoint(aes(geometry = geometry, size = parse_number(valuation)),
alpha = .6,
shadowalpha = .05,
color = "#ffffff",
stat = "sf_coordinates",
show.legend = FALSE) +
scale_size(range = c(0.1, 0.7)) +
labs(title = 'Technology \"Unicorns\"',
subtitle = "Exited or valued above $1 billion") +
theme_bm_legend() +
ggsave(filename = "startups.png", height = 6, width = 10.37, dpi = 300)
|
c064bb5a92ea2e7ae0b16022a6be6684fed2a6ce
|
e3d8bbcc3424296f5d3e0cc3b516b31173776c2f
|
/R/Day 4 - Hazard rate actual vs expected & Exposure over time.R
|
e533301b1cce50a211d88917b7c38027127f3715
|
[] |
no_license
|
gbisschoff/grad-training
|
5ed911f5d8e14ff02caf34e57f688aa4b132ef07
|
586db2156ee787841c109b1668d750c981f11535
|
refs/heads/master
| 2021-05-14T11:43:35.395413
| 2018-01-24T13:10:56
| 2018-01-24T13:10:56
| 116,390,664
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,215
|
r
|
Day 4 - Hazard rate actual vs expected & Exposure over time.R
|
library(tidyverse)
library(sparklyr)
spark_home_set("C:/Spark/spark-2.2.1-bin-hadoop2.7")
sc<-spark_connect(master="local") # Create a connection to spark
data<-spark_read_csv(sc,"loans_data","Data/tranition_data.csv",memory = FALSE)
# Exposure over time
data%>%
group_by(age)%>%
summarise(
contractual_total=sum(balance_contractual),
actual_total=sum(balance_actual)
)%>%
collect()%>%
plotly::plot_ly(x=~age)%>%
plotly::add_lines(y=~contractual_total,name="Contractual")%>%
plotly::add_lines(y=~actual_total,name="Actual")
# Hazard Rate calculation
hazard<-data%>%
arrange(id,age)%>%
group_by(id)%>%
mutate(lead_flag=lead(made_payment,1))%>%
filter(!is.na(lead_flag) & made_payment==1)%>%
group_by(age,made_payment,lead_flag)%>%
summarise(
total=sum(balance_actual),
n=n()
)%>%
mutate(
prop=total/sum(total),
prop.n=n/sum(n)
)%>%
filter(lead_flag==0)%>%
collect()
#Actual vs Expected
PD<-dlnorm(seq(0.05,3,by = 0.05), meanlog = 0, sdlog = 1, log = FALSE)/6
hazard%>%
plotly::plot_ly()%>%
plotly::add_markers(
x=~age,
y=~prop,
name="Simulated"
)%>%
plotly::add_lines(
x=1:60,
y=~PD,
name="Actual"
)
|
07f1d4df1dab3d57562aaf607e4f982a305fd517
|
29477df2cfc3c0477b3d11e2ebb46c7930994565
|
/spark.R
|
f82ae499a82d545fb75f10ff2727a17bff597158
|
[] |
no_license
|
vikram-rawat/handsOnMachineLearning
|
5f9f0359e0b105d5680696389bf4a6542dc0373a
|
bf5772f896db28e1f904516445a64503d89469b7
|
refs/heads/master
| 2021-07-25T01:58:02.358255
| 2021-01-28T15:15:41
| 2021-01-28T15:15:41
| 240,430,538
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,925
|
r
|
spark.R
|
# load libraries ----------------------------------------------------------
library("sparklyr")
library("data.table")
library("magrittr")
library("ggplot2")
library("DBI")
library("dplyr")
library("arrow")
library("inspectdf")
library("plotluck")
library("skimr")
library("ggfortify")
library("dbplot")
library("modeldb")
library("corrr")
# set defaults ------------------------------------------------------------
setDTthreads(0L)
theme_set(theme_bw())
# use Spark ---------------------------------------------------------------
spark <- spark_connect(master = "local",
version = "2.4.5")
# sc <- spark_connect(
# master = "local",
# version = "2.4.4",
# config = list(sparklyr.gateway.address = "127.0.0.1")
# )
# getOption('timeout')
# options(timeout = 1e5L)
# options(download.file.method = "curl")
# options(download.file.method = "libcurl")
# options(download.file.mode = "a")
# spark_versions()
# spark_available_versions()
# spark_installed_versions()
# spark_uninstall(version = "2.4.4", hadoop_version = "2.7")
# spark_install(
# version = "2.4.5",
# hadoop_version = "2.7",
# verbose = TRUE,
# reset = TRUE,
# logging = TRUE
# )
# spark_uninstall(version = "3.0.0-preview",hadoop_version = "3.2")
# spark_install(version = "3.0.0-preview",hadoop_version = "3.2")
# spark_web(spark)
cars <- copy_to(spark, mtcars,overwrite = TRUE)
## use SQL Directly
spark %>%
dbGetQuery("select
gear,
am,
vs,
carb,
count(*)
from mtcars
group by
gear,
am,
vs,
carb")
## Use Dplyr Directly
cars %>%
select(hp, mpg) %>%
collect() %>%
plotluck(hp ~ mpg,
opts = plotluck.options(
verbose = TRUE
)
)
model <- ml_linear_regression(cars, mpg ~ hp)
model %>%
summary()
model %>%
ml_predict(
copy_to(spark,
data.frame(hp = 250 + 10 * 1:10)
)
) %>%
transmute(hp = hp, mpg = prediction) %>%
full_join(select(cars, hp, mpg)) %>%
collect() %>%
plotluck(hp ~ mpg)
# spark_write_csv(x = cars,
# path = "folder/cars.csv",
# header = TRUE,
# delimiter = ",")
# stream <- stream_read_csv(spark, "input/") %>%
# select(mpg, cyl, disp) %>%
# stream_write_csv("output/")
# stream_stop(stream)
spark_log(spark)
summarize_all(cars, mean) %>%
show_query()
cars %>%
mutate( transmition =
if_else(am == 0, "automatic", "manual")
) %>%
group_by(transmition) %>%
summarise_all(mean)
cars %>%
summarise(mpg_percentile = percentile(mpg, 0.25)) %>%
show_query()
cars %>%
summarise(mpg_percentile = sum(mpg)) %>%
show_query()
cars %>%
summarise(mpg_percentile =
percentile(mpg,
array(0.25, 0.5, 0.75)
)
) %>%
collect()
summarise(cars, mpg_percentile =
percentile(mpg,
array(0.25, 0.5, 0.75)
)
) %>%
mutate(mpg_percentile = explode(mpg_percentile))
ml_corr(cars)
correlate(cars,
use = "pairwise.complete.obs",
method = "pearson") %>%
shave() %>%
rplot()
ggplot(aes(as.factor(cyl), mpg), data = mtcars) + geom_col()
car_group <- cars %>%
group_by(cyl) %>%
summarise(mpg = sum(mpg, na.rm = TRUE)) %>%
collect() %>%
print()
cars %>%
dbplot_histogram(mpg, binwidth = 3) +
labs(title = "MPG Distribution",
subtitle = "Histogram over miles per gallon")
dbplot_raster(cars, mpg, wt, resolution = 16)
cached_cars <- cars %>%
mutate(cyl = paste0("cyl_", cyl)) %>%
compute("cached_cars")
# spark_disconnect(spark)
# spark_disconnect(sc)
# download.file(
# "https://github.com/r-spark/okcupid/raw/master/profiles.csv.zip",
# "okcupid.zip")
#
# unzip("okcupid.zip", exdir = "data")
# unlink("okcupid.zip")
|
4088802c4b73d348b4602e5cd09171292160f8ff
|
be9957269371581153afc5a6d6fe36e6e38bdddb
|
/Master_do_files/lmb_1.R
|
b44f09be24c8111eab8754687979624dfcdedb9c
|
[
"MIT"
] |
permissive
|
snowdj/causal-inference-class
|
a0250f94772e37da40f2e47c9e53284b5cbe84d2
|
14171fd74340d748db6426a2c8124f7d7ad3bc8b
|
refs/heads/master
| 2023-02-05T13:11:52.904853
| 2020-12-24T05:35:37
| 2020-12-24T05:35:37
| 279,981,060
| 0
| 0
|
MIT
| 2020-12-24T05:35:38
| 2020-07-15T21:17:16
| null |
UTF-8
|
R
| false
| false
| 618
|
r
|
lmb_1.R
|
library(tidyverse)
library(haven)
library(estimatr)
read_data <- function(df)
{
full_path <- paste("https://raw.github.com/scunning1975/mixtape/master/",
df, sep = "")
df <- read_dta(full_path)
return(df)
}
lmb_data <- read_data("lmb-data.dta")
lmb_subset <- lmb_data %>%
filter(lagdemvoteshare>.48 & lagdemvoteshare<.52)
lm_1 <- lm_robust(score ~ lagdemocrat, data = lmb_subset, clusters = id)
lm_2 <- lm_robust(score ~ democrat, data = lmb_subset, clusters = id)
lm_3 <- lm_robust(democrat ~ lagdemocrat, data = lmb_subset, clusters = id)
summary(lm_1)
summary(lm_2)
summary(lm_3)
|
a48840eb190a5d17981d607021fb559a824d6211
|
7e5dfa53a52b3aad29a22ebb12916a41a0e62e6b
|
/R/summary.ccc.R
|
a6a7dde376741f4bc45e6890425db74cf39d4192
|
[] |
no_license
|
cran/cccrm
|
3a12b3fd0f834c302006e691d37547d9d41b9f79
|
302e8dc1f85494f28ac3b39c52e5c2f3de9c0136
|
refs/heads/master
| 2022-09-14T18:01:05.004119
| 2022-08-31T16:30:02
| 2022-08-31T16:30:02
| 17,695,003
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 151
|
r
|
summary.ccc.R
|
summary.ccc<-
function(object,...){
print(object$model)
cat("\n")
cat("CCC estimated by variance compoments \n")
print(object$ccc[1:4])
}
|
2cbed670ed30770eddf9c5e3389ac4c3bed832a8
|
9695c4dedf245c782990038a6172296dd02499c4
|
/public/scripts/temp/my_Rscript.luo.R
|
d02e0fe7dee41302f9512772035922a7771de277
|
[] |
no_license
|
gauravp99/pathviewdev
|
e8f66a0e1a48843b7f9a8b610d7216e8160d3e97
|
9bd69a14e1af33b20a00c3fbf1dd2861c57610b5
|
refs/heads/master
| 2021-01-22T02:18:24.926244
| 2017-05-17T14:40:28
| 2017-05-17T14:40:28
| 92,350,733
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,247
|
r
|
my_Rscript.luo.R
|
#to get the arguments readable in R code
args <- commandArgs(TRUE)
# @arguments are split on comma as they are passed to Rscript with comma separated
library(pathview)
arg.v = strsplit(args[1],split=";|:")[[1]]
idx=seq(1, length(arg.v), by=2)
args1=arg.v[idx+1]
names(args1)=arg.v[idx]
logic.idx=c("kegg", "layer", "split", "expand", "multistate", "matchd", "gdisc", "cdisc")
num.idx=c("offset", "glmt", "gbins", "clmt", "cbins", "pathidx")
#num.idx=c("offset", "gbins", "cbins", "pathidx")
cn.idx=c("generef", "genesamp", "cpdref", "cpdsamp")
#args2=as.list(args1)
args2=strsplit(args1, ",")
args2[logic.idx]=lapply(args2[logic.idx],as.logical)
args2[num.idx]=lapply(args2[num.idx],as.numeric)
args2[cn.idx]=lapply(args2[cn.idx], function(x){
if(length(x)==0) return(NULL)
if(x[1]=="NULL") return(NULL)
else return(as.numeric(x))
})
#pvwdir = Sys.getenv("pvwdir")
pvwdir = paste0(getwd(), "/public/")
setwd(args2$targedir)
save.image("workenv.RData")
#path.ids = strsplit(args2$pathway,split=";")[[1]]
#args2$glmt = as.numeric(strsplit(args2$glmt,split=";")[[1]])
#args2$clmt = as.numeric(strsplit(args2$clmt,split=";")[[1]])
args2$cpdid=tolower(args2$cpdid)
#setwd(args2$targedir)
zz <- file("errorFile.Rout", open = "wt")
sink(zz,type = "message")
if(!is.null(args2$geneextension) && length(args2$geneextension) > 0){
if(args2$geneextension == "txt"){
a=read.delim(args2$filename, sep="\t")
} else if(args2$geneextension == "csv"){
a=read.delim(args2$filename, sep=",")
} else stop(paste(args2$geneextension, ": unsupported gene data file type!"), sep="")
if(ncol(a)>1){
gene.d=as.matrix(a[,-1])
if(!is.null(args2$generef[1])){
ngsamp=length(args2$genesamp)
ngref=length(args2$generef)
if(args2$genecompare=="paired" & ngsamp==ngref) gene.d=gene.d[,args2$genesamp]- gene.d[,args2$generef]
else if (ngref==1) gene.d=gene.d[,args2$genesamp]- gene.d[,args2$generef]
else gene.d=gene.d[,args2$genesamp]- rowMeans(gene.d[,args2$generef])
}
gene.d=cbind(gene.d)
rownames(gene.d)=make.unique(as.character(a[,1]))
} else if(ncol(a)==1) {
a=as.matrix(a)
gene.d=a[,1]
if(is.null(names(gene.d))) gene.d=as.character(gene.d)
} else stop("Empty gene data file!")
} else gene.d=NULL
if(!is.null(args2$cpdextension) && length(args2$cpdextension) > 0){
if(args2$cpdextension == "txt"){
a1=read.delim(args2$cfilename, sep="\t")
} else if(args2$cpdextension == "csv"){
a1=read.delim(args2$cfilename, sep=",")
} else stop(paste(args2$cpdextension, ": unsupported compound data file type!"), sep="")
if(ncol(a1)>1){
cpd.d=as.matrix(a1[,-1])
if(!is.null(args2$cpdref[1])){
ncsamp=length(args2$cpdsamp)
ncref=length(args2$cpdref)
if(args2$cpdcompare=="paired" & ncsamp==ncref) cpd.d=cpd.d[,args2$cpdsamp]- cpd.d[,args2$cpdref]
else if (ncref==1) cpd.d=cpd.d[,args2$cpdsamp]- cpd.d[,args2$cpdref]
else cpd.d=cpd.d[,args2$cpdsamp]- rowMeans(cpd.d[,args2$cpdref])
}
cpd.d=cbind(cpd.d)
rownames(cpd.d)=make.unique(as.character(a1[,1]))
} else if(ncol(a1)==1) {
a1=as.matrix(a1)
cpd.d=a1[,1]
if(is.null(names(cpd.d))) cpd.d=as.character(cpd.d)
} else stop("Empty compound data file!")
} else cpd.d=NULL
# code removed for static folder location 23 indicates the file name /public/a;;/uniq identification number
# --- KEGG setup -------------------------------------------------------------
# kegg.dir strips the last 23 characters of the working directory (the
# per-job subfolder, per the note above) and appends /Kegg/<species>.
# NOTE(review): the fixed 23-character assumption is fragile -- verify it
# against the server's directory layout before reuse.
kegg.dir=paste(substr(getwd(),1,nchar(getwd())-23),paste("/Kegg/", args2$species, sep=""),sep="")
#if (!dir.exists(kegg.dir)) dir.create(kegg.dir)
system(paste("mkdir -p", kegg.dir))
# snapshot of the whole workspace -- presumably for debugging failed jobs; confirm
save.image("workenv.RData")
# kg.map.R defines kg.map()/kg.cmap(); pvwdir/mmap.dir1 come from earlier code
source(paste(pvwdir,"scripts/kg.map.R",sep=""))
kg.map(args2$species)
kg.cmap()
gm.fname=paste0(mmap.dir1, args2$species, ".gene.RData")
cm.fname=paste0(mmap.dir1, "cpd", ".RData")
# loads the eg2symbs and cid2name lookup tables used below -- TODO confirm names
load(gm.fname)
load(cm.fname)
path.ids=args2$pathway
# --- Render each selected pathway -------------------------------------------
# For every pathway ID: run pathview(), then write per-node gene and compound
# mapping tables (with human-readable symbols/names) next to the image.
pv.run=sapply(path.ids, function(pid){
pv.out <- try(pathview(gene.data = gene.d,gene.idtype = args2$geneid,cpd.data = cpd.d,cpd.idtype=args2$cpdid, pathway.id = pid,species = args2$species,out.suffix = args2$suffix,kegg.native = args2$kegg, sign.pos =args2$pos,same.layer = args2$layer,keys.align = args2$align,split.group = args2$split,expand.node = args2$expand,multi.state=args2$multistate, match.data = args2$matchd ,node.sum=args2$nsum,key.pos = args2$kpos,cpd.lab.offset= args2$offset,limit = list(gene = args2$glmt, cpd = args2$clmt), bins = list(gene = args2$gbins, cpd= args2$cbins),low = list(gene = args2$glow, cpd = args2$clow),mid = list(gene = args2$gmid, cpd = args2$cmid), high = list(gene = args2$ghigh, cpd =args2$chigh),discrete = list(gene = args2$gdisc, cpd = args2$cdisc),kegg.dir =kegg.dir))
# NOTE(review): inherits(pv.out, "list") is more robust than comparing
# class() with == (class() can return a vector)
if(class(pv.out) =="list"){
if(!is.null(gene.d) & !is.null(pv.out$plot.data.gene)) {
# all.mapped holds comma-separated gene IDs per node; split them out,
# translate each ID to a symbol via eg2symbs, then re-collapse so the
# symbol strings stay aligned with the original rows
gids=pv.out$plot.data.gene$all.mapped
gids=strsplit(gids, ",")
lens=sapply(gids, length)
idx2=cumsum(lens)
ln=length(idx2)
idx1=c(0,idx2[-ln])+1
gids.v=unlist(gids)
gsymb.v=eg2symbs[gids.v]
gsymbs=sapply(1:ln, function(i) paste(gsymb.v[idx1[i]:idx2[i]], collapse=","))
# rows with no mapped IDs (idx1 > idx2) get an empty string
gsymbs[idx1>idx2]=""
ncg=ncol(pv.out$plot.data.gene)
pvg=cbind(pv.out$plot.data.gene[,1:3], all.mapped.symb=gsymbs, pv.out$plot.data.gene[,4:ncg])
write.table(pvg,file=paste(paste(paste("genedata.",args2$species,sep=""),pid,sep=""),".txt",sep=""),quote = FALSE, sep="\t")
}
if(!is.null(cpd.d) & !is.null(pv.out$plot.data.cpd)) {
# compound IDs map one-to-one, so a direct lookup (skipping empties) suffices
cids=pv.out$plot.data.cpd$all.mapped
cnames=cids
eidx=cnames>""
cnames[eidx]=cid2name[cnames[eidx]]
ncc=ncol(pv.out$plot.data.cpd)
pvc=cbind(pv.out$plot.data.cpd[,1:3], all.mapped.name=cnames, pv.out$plot.data.cpd[,4:ncc])
write.table(pvc,file=paste(paste(paste("cpddata.",args2$species,sep=""),pid,sep=""),".txt",sep=""),quote = FALSE, sep="\t")
}
# NOTE(review): "pawthway" is a typo in the runtime message (left as-is here)
} else print(paste("error using pawthway id",pid,sep=":"))
})
|
97c79b5e11a9939a415db84758b59177f9ff4f07
|
83f41fbb8203711b1e2d27f51ee0303665bfc5de
|
/create_SRA_deposition.R
|
a745eedfc5dc77800a188dda8a500bb2728417da
|
[] |
no_license
|
helixscript/Canine_hemophilia_AAV2
|
8919f177889582d7125f986311ba1810640287e6
|
b16bc574f22b1068703ab109bc21cead1a188e0a
|
refs/heads/master
| 2023-08-03T08:58:02.359461
| 2021-08-18T19:53:14
| 2021-08-18T19:53:14
| 274,838,629
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,749
|
r
|
create_SRA_deposition.R
|
# Build SRA-ready FASTQ files from AAVengeR's per-sample FASTA read archives.
library(dplyr)
library(openxlsx)
library(parallel)
# sample sheet: subject/sample pairs joined to per-sample metadata
d <- read.table('AAVengeR/configs/Sabatino.samples.config', sep = ',', header = TRUE)
d <- select(d, subject, sample)
a <- read.table('data/sampleDetails.tsv', sep = '\t', header = TRUE)
d <- left_join(d, a, by = 'sample')
# NOTE(review): the cluster is never stopCluster()-ed; workers persist until
# the session exits.
cluster <- makeCluster(30)
# row index so split() yields one single-row data frame per worker task
d$n <- 1:nrow(d)
# Create mock quality scores because AAVenger does not save this information.
# Raw reads and AAVenger software archived at Zenodo.
invisible(parLapply(cluster, split(d, d$n), function(x){
library(ShortRead)
setwd('/home/everett/canine_hemophilia_AAV')
sampleReads <- list.files('/home/everett/canine_hemophilia_AAV/AAVengeR/outputs/canFam3/sampleReads')
# NOTE(review): grepl(x$sample, ...) uses the sample name as a regex, so a
# sample name that is a prefix/substring of another could match extra files.
system(paste0('cp AAVengeR/outputs/canFam3/sampleReads/',
sampleReads[grepl(x$sample, sampleReads) & grepl('\\.breakReads\\.', sampleReads)], ' SRA/',
x$sample, '.R1.fasta'))
# rewrite R1 FASTA as FASTQ with constant 'I' (Q40) quality strings;
# the inner lambda's `x` (a width) shadows the outer row `x`
o <- readFasta(paste0('SRA/', x$sample, '.R1.fasta'))
write(paste0('@', as.character(o@id), '\n', as.character(o@sread), '\n+\n', unlist(lapply(width(o), function(x) paste0(rep('I', x), collapse = '')))),
file = paste0('SRA/', x$sample, '.R1.fastq'))
# same for R2 (virus-side reads)
system(paste0('cp AAVengeR/outputs/canFam3/sampleReads/',
sampleReads[grepl(x$sample, sampleReads) & grepl('\\.virusReads\\.', sampleReads)], ' SRA/',
x$sample, '.R2.fasta'))
o <- readFasta(paste0('SRA/', x$sample, '.R2.fasta'))
write(paste0('@', as.character(o@id), '\n', as.character(o@sread), '\n+\n', unlist(lapply(width(o), function(x) paste0(rep('I', x), collapse = '')))),
file = paste0('SRA/', x$sample, '.R2.fastq'))
}))
# compress deliverables, drop intermediates, and emit the sample sheet
system('gzip SRA/*.fastq')
system('rm SRA/*.fasta')
write.xlsx(d, file = 'SRA/sampleData.xlsx')
|
2e8319e9d2633c26ef611e63deb7a5787e9d46bb
|
64ca029adab7bdb67778f32a24ec3948fdd4581e
|
/lab4/plot.r
|
49c4ec2c43779030250fa47178ad0c5247cd288a
|
[] |
no_license
|
EasternSauce/mownit-homework
|
d12c26182588e6acdb7fb07fe7fca7633255591d
|
c3beb1484f7620510ba267160813830f203ce320
|
refs/heads/master
| 2021-06-03T08:12:59.862841
| 2016-09-14T20:50:01
| 2016-09-14T20:50:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
r
|
plot.r
|
# Overlay three interpolation results (GSL, Lagrange, Newton) in one PDF plot.
# Each input CSV is expected to have x and y columns -- TODO confirm headers.
results1 = read.csv("inter1.txt")
results2 = read.csv("inter2.txt")
results3 = read.csv("inter3.txt")
# suppress scientific notation on axis labels
options(scipen=999)
pdf(file="wykres.pdf", height=4, width=4, bg="white")
plot(results1$x, results1$y, type="l", xlab="x", ylab="y", col="red")
lines(results2$x, results2$y, col="green")
lines(results3$x, results3$y, col="blue")
legend("topleft", c("GSL","Lagrange","Newton"),lty=c(1,1,1,1),lwd=c(2.5,2.5,2.5,2.5),col=c("red","green","blue"), cex=0.5)
title("Interpolations of f(x) = x + 0.5 * sin (x)")
dev.off()
|
a848bc020d15180cd03ef0f1b4dc4818dbf55dc4
|
46dd13f8d3dfb0aea99af8e201fe6b8e0fc220e5
|
/Pattern recognition/Random Forest/Predicting wine quality using Random Forests.R
|
2a102536af8ba3862b5e373e54ef20205b38f6fd
|
[] |
no_license
|
arkada38/R-statistics
|
26d85036cbb64212c312dfe81225254938710b91
|
5e84263d15477538a3c2cc63a3f6da35ae566411
|
refs/heads/master
| 2021-06-14T12:18:19.560684
| 2017-03-22T03:36:01
| 2017-03-22T03:36:01
| 64,801,300
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,554
|
r
|
Predicting wine quality using Random Forests.R
|
# Predicting wine quality using Random Forests
# https://www.r-bloggers.com/predicting-wine-quality-using-random-forests/
library(randomForest)
# white-wine quality data from the UCI repository (semicolon-delimited)
url <- 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv'
# NOTE(review): header = T -- prefer the unabbreviated TRUE
wine <- read.table(url, sep = ";", dec = ".", header = T)
head(wine)
barplot(table(wine$quality))
# discretize the 0-10 quality score into a 3-level taste factor:
# <6 = bad, 6 = normal, >6 = good
wine$taste <- ifelse(wine$quality < 6, 'bad', 'good')
wine$taste[wine$quality == 6] <- 'normal'
wine$taste <- as.factor(wine$taste)
# Let’s look at the distribution
table(wine$taste)
# bad good normal
# 1640 1060 2198
# 60/40 train/test split, reproducible via the seed
set.seed(123)
samp <- sample(nrow(wine), 0.6 * nrow(wine))
train <- wine[samp, ]
test <- wine[-samp, ]
# We can use ntree and mtry to specify the total number of trees to build (default = 500),
# and the number of predictors to randomly sample at each split respectively.
# `- quality` excludes the raw score the target was derived from
model <- randomForest(taste ~ . - quality, data = train)
model
# Call:
# randomForest(formula = taste ~ . - quality, data = train)
# Type of random forest: classification
# Number of trees: 500
# No. of variables tried at each split: 3
#
# OOB estimate of error rate: 29.82%
# Confusion matrix:
# bad good normal class.error
# bad 671 18 284 0.3103803
# good 17 402 230 0.3805855
# normal 221 106 989 0.2484802
pred <- predict(model, newdata = test)
table(pred, test$taste)
# pred bad good normal
# bad 481 12 128
# good 13 247 81
# normal 173 152 673
# We can test the accuracy as follows:
# NOTE(review): the counts below are hard-coded from one run's confusion
# matrix; recompute as sum(diag(table(pred, test$taste))) / nrow(test)
# if the data or seed changes.
(481 + 247 + 673) / nrow(test)
# 0.7147959
|
c5e2da829066fcd279e85a9a9c0c9994913e0d0b
|
c9211f1533d949bfd0c2f5050cfbc6f2e7592c04
|
/01_earnshare_measures and sample.R
|
743a554c39ea38f1fc34b386bd34be5bc5da5180
|
[] |
no_license
|
jrpepin/ACS_Share-of-Earnings
|
7869a2f0fa2320114f06ddb37fde1f463382070d
|
ea04eac1d8218e02873650ebf9673ebcef3249c2
|
refs/heads/master
| 2020-06-01T14:32:03.271073
| 2019-06-11T16:40:00
| 2019-06-11T16:40:00
| 190,816,092
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,832
|
r
|
01_earnshare_measures and sample.R
|
#####################################################################################
# Set-up the environment
## Set-up the Directories
# NOTE(review): hard-coded user path + setwd() makes this machine-specific;
# consider here::here() or an environment variable for portability.
repoDir <- "C:/Users/Joanna/Dropbox/Repositories/ACS_Share-of-Earnings" # This should be your master project folder (Project GitRepository)
subDir1 <- "data" # This will be the name of the folder where data output goes
subDir2 <- "figures" # This will be the name of the folder where figures are saved
dataDir <- file.path(repoDir, subDir1)
figDir <- file.path(repoDir, subDir2)
## This will create sub-directory data folder in the master project directory if doesn't exist
if (!dir.exists(dataDir)){
dir.create(dataDir)
} else {
print("data directory already exists!")
}
## This will create sub-directory figures folder in the master project directory if doesn't exist
if (!dir.exists(figDir)){
dir.create(figDir)
} else {
print("figure directory already exists!")
}
setwd(file.path(repoDir)) # Set the working-directory to the master project folder
## Create a data extract using CPS
# NOTE(review): the extract file below (usa_00013.xml) is an IPUMS USA
# (Census/ACS) extract, not CPS -- confirm which system the comment means.
# Create a variable within the IPUMS data extract system that contains the income of a respondent's spouse by using
# the Attach Characteristics option. To do so, you should first select your samples and variables,
# which must contain INCTOT. Before submitting your extract, you will be given the option to choose "Attach characteristics" on the
# extract request screen. Check the box for "Spouse" on the INCTOT row. This will add a variable to your data extract request called
# INCTOT_SP. Now simply submit your extract. You should then add up inctot and inctot_sp for one spouse member.
# Samples: Respondents - 1960, 1970, 1980, 1990, 2000, 2001-2017
# Variables:
# "year" "datanum" "hhwt" "eldch" "sex" "age" "marst" "inctot"
# "sex_sp" "inctot_sp"
## Set up instructions for importing the data
# https://cran.r-project.org/web/packages/ipumsr/vignettes/ipums.html
# Updated ATUS Data
## Load libraries
library(ipumsr)
library(tidyverse, warn.conflicts = FALSE)
library(questionr)
library(ggplot2)
## Load ATUS Data into R
ddi <- read_ipums_ddi("usa_00013.xml") # This assumes your data extract was saved in the repoDir folder.
data <- read_ipums_micro(ddi)
## Make the variable names lowercase
data <- data %>% rename_all(tolower)
#####################################################################################
# Clean the data
## Change class from labelled
# ipumsr variables arrive as labelled vectors; convert to base R classes
lapply(data, class) # Preview which variables are labelled
data <- data %>% # Did this in multiple steps for computer memory purposes.
mutate( eldch = as.integer(lbl_clean(eldch)),
sex = as_factor(lbl_clean(sex)),
sex_sp = as_factor(lbl_clean(sex_sp)))
data <- data %>%
mutate( age = as.integer(lbl_clean(age)),
marst = as_factor(lbl_clean(marst)))
data <- data %>%
mutate( inctot = as.numeric(lbl_clean(inctot)))
data <- data %>%
mutate( inctot_sp = as.numeric(lbl_clean(inctot_sp)))
earndat <- data # Create a new dataset in case anything goes wrong
#####################################################################################
# Measures & Sample
## Age of Eldest child in household
# NOTE(review): case_when has no branch for eldch == 17 (or 17 < eldch < 18),
# so those rows become NA and are dropped by the filter below -- confirm
# whether the first condition should be eldch < 18.
earndat <- earndat %>%
mutate(
kidu18 = case_when(
eldch < 17 ~ 1L,
eldch >= 18 ~ 0L))
earndat <- earndat %>% ## Keep only households with kid u 18 in HH
filter(kidu18 == 1)
## Limit to 1 person in the household
earndat <- earndat %>%
filter(pernum == 1)
## Marital status
earndat <- earndat %>%
mutate(
marsolo = case_when(
marst == "Married, spouse present" | marst == "Married, spouse absent" ~ "Married",
marst == "Never married/single" | marst == "Separated" |
marst == "Divorced" | marst == "Widowed" ~ "Solo",
TRUE ~ NA_character_
))
## Breadwinner
# 9999998 Missing.
# 9999999 = N.I.U. (Not in Universe).
# NOTE(review): the first pair of lines sets values >= 9999998 (which
# includes 9999999) to NA, so the second pair compares against NA; base R
# errors on NA logical subscripts in assignment. The N.I.U.-to-0 lines
# likely need to run first -- verify the intended order.
earndat$inctot[earndat$inctot >= 9999998] <- NA
earndat$inctot_sp[earndat$inctot_sp >= 9999998] <- NA
earndat$inctot[earndat$inctot >= 9999999] <- 0
earndat$inctot_sp[earndat$inctot_sp >= 9999999] <- 0
### keep respondents with non-negative incomes & couples with positive total income
earndat <- earndat %>%
mutate(
nonneg = case_when(
(inctot + inctot_sp) >=0 ~ 1,
inctot >=0 ~ 1,
TRUE ~ 0))
earndat <- earndat %>%
filter(nonneg == 1)
## Create breadwinning categories (50% threshold)
# NOTE(review): inctot/inctot_sp is the ratio of respondent to SPOUSE income,
# not the respondent's share of couple income (inctot/(inctot+inctot_sp));
# with the stated 50% threshold, confirm which definition is intended.
earndat <- earndat %>%
mutate(
bwcat = case_when(
marsolo == "Solo" & sex == "Female" ~ "SoloFBW",
marsolo == "Married" & sex == "Female" & ((inctot/inctot_sp) > .5) ~ "MarFBW",
marsolo == "Married" & sex == "Male" & ((inctot/inctot_sp) < .5) ~ "MarFBW",
TRUE ~ "NotFBW"
))
## Descriptives
# weighted year-by-category counts (household weights), then within-year shares
freq <- data.frame(wtd.table(earndat$year, earndat$bwcat, weights = earndat$hhwt, digits = 2))
earnavg <- freq %>%
group_by(Var1, Var2) %>%
summarise(n = sum(Freq)) %>%
mutate(percentage = n / sum(n))
# Var1 (year) comes back as a factor; convert via character to avoid
# factor-level integer codes
earnavg$Var1 <- as.character(earnavg$Var1)
earnavg$Var1 <- as.numeric(earnavg$Var1)
## Figure
# stacked area of the two female-breadwinner categories, decennial years plus 2017
fig <- earnavg %>%
filter(Var2 != "NotFBW" & (Var1<= 2000 | Var1 == 2010 | Var1==2017)) %>%
ggplot((aes(x = Var1, y = percentage, fill = Var2))) +
geom_area(color = "black") +
geom_line(position="stack", linetype="dashed", size = 1.2, color = c("white")) +
scale_y_continuous(labels = scales::percent_format(accuracy = 1), limits = c(0, .45)) +
scale_x_continuous(limits = c(1960, 2017), breaks = c(1960,1970,1980,1990,2000,2010,2017)) +
scale_fill_manual(name="",
breaks=c("MarFBW", "SoloFBW"),
labels=c("Married-couple families",
"Mother-only families"),
values=c("#666767", "#CA5462")) +
labs(title = "Mothers as primary or sole earners, 1960-2017",
subtitle = "Percent of households with children under age 18 \nin which mothers are the primary or sole earner") +
labs(caption = "Data source: 1960-2000 Decennial Census \n2010-2017 American Community Surveys") +
theme_minimal() +
theme(panel.grid.major.x = element_blank(),
panel.grid.major.y = element_blank(),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
legend.justification = "top",
legend.text = element_text(size=16),
plot.title = element_text(size = 20, face = "bold"),
axis.text = element_text(size = 14))
fig
ggsave("figures/momearn.png", fig, width = 10, height = 6, dpi = 300)
|
649c5ca3dd376cf2cf5562adf1f78df0cb78b0c6
|
080f12375a8a6afb5c0e1638d7ad48309f9705e0
|
/man/covid_global.Rd
|
953625df1de8bdb7bc19313f50918de36416d4bc
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
paternogbc/covid19br
|
64b8b53201d7bb95128d474795c543336be66e0f
|
5b1b49d0d868245e27a3c3bece61e944e95dcac2
|
refs/heads/master
| 2021-03-27T02:49:55.897048
| 2021-03-17T11:26:44
| 2021-03-17T11:26:44
| 247,778,689
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 601
|
rd
|
covid_global.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/document_data.R
\docType{data}
\name{covid_global}
\alias{covid_global}
\title{Historical number of covid-19 cases for the world.}
\format{A data frame with four variables:
\itemize{
\item{\strong{state}} {The name of the state (country)}
\item{\strong{cases}} {Total number of confirmed covid-19 cases}
\item{\strong{deaths}} {Total number of confirmed covid-19 deaths}
\item{\strong{date}} {The date (yyyy-mm-dd)}
}}
\usage{
covid_global
}
\description{
Historical number of covid-19 cases for the world.
}
\keyword{datasets}
|
858efa789db21fba8daee08f4ebbdbb838bc84c8
|
76fbd560b0c3f3ae32dea6554685df336ebe0fac
|
/pga_statjoin_roughhhhh.R
|
930309b27b64cdb443eec3a2dcdb87e95e8e6db1
|
[] |
no_license
|
tn122609/dfs
|
1f43f2cc65bfa8ff5f33479bfff35adbab834920
|
6e4faf5f7238fa5bb184da32706b8a6a89485254
|
refs/heads/master
| 2021-10-09T14:35:10.118152
| 2021-10-07T21:12:49
| 2021-10-07T21:12:49
| 158,970,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,683
|
r
|
pga_statjoin_roughhhhh.R
|
# NOTE(review): the next two lines are shell commands pasted into the R file;
# sourcing this script as-is will fail on them. Move them to a shell script
# or delete them.
cd /host-rootfs/sdcard/Download
sudo R
library(dplyr)
library(xml2)
library(stringi)
library(stringr)
library(rvest)
library('lpSolve')
library(corrplot)
library(lpSolveAPI)
# FanDuel salary export; Name is normalized for joining against stat files
FDSal2 <- read.csv('8_12_21_FDpga_merge_roughh2.csv', stringsAsFactors=FALSE)
FDSal2$Name <- FDSal2$Nickname
#FDSal$Nickname <- paste(FDSal$First.Name, FDSal$Last.Name)
#FDSal2 <-subset(FDSal, select = c(Nickname, Salary, Team, Id))
#names(FDSal2) <- c("Name", "Salary", "Team", "ID")
# strip punctuation and generational suffixes so names match across sources
FDSal2$Name <- gsub("[',]", "", FDSal2$Name)
FDSal2$Name <- gsub("[.-]", "", FDSal2$Name)
FDSal2$Name <- gsub(" Jr| Sr| II| III| IV", "", FDSal2$Name)
FDSal2$Name <- as.character(FDSal2$Name)
# frequency table of cleaned names, written out to audit join keys by hand
uninamesx <- data.frame(table(FDSal2$Name))
uninamesx
write.csv(uninamesx, file='uninamesx.csv', row.names=FALSE)
# --- Per-source name normalization ------------------------------------------
# Each stat file below gets the identical cleaning recipe: strip position
# tags, newlines, punctuation, leading/trailing whitespace, and suffixes.
# (Candidate for a helper function, e.g. clean_names(df).)
plyr_sd <- read.csv('FD Top Golfers.csv')
plyr_sd$Name <- gsub("\\(PG\\)|\\(SG\\)|\\(SF\\)|\\(PF\\)|\\(C\\)", "", plyr_sd$Name)
plyr_sd$Name <- gsub("[\n]", "", plyr_sd$Name)
plyr_sd$Name <- gsub("[',]", "", plyr_sd$Name)
plyr_sd$Name <- gsub("[.-]", "", plyr_sd$Name)
plyr_sd$Name <- gsub("^\\s+|\\s+$", "", plyr_sd$Name)
plyr_sd$Name <- gsub(" Jr| Sr| II| III| IV", "", plyr_sd$Name)
# bogey-avoidance stats
rztar <- read.csv('pga2021bogeyavoidance (1).csv')
rztar$Name <- gsub("\\(PG\\)|\\(SG\\)|\\(SF\\)|\\(PF\\)|\\(C\\)", "", rztar$Name)
rztar$Name <- gsub("[\n]", "", rztar$Name)
rztar$Name <- gsub("[',]", "", rztar$Name)
rztar$Name <- gsub("[.-]", "", rztar$Name)
rztar$Name <- gsub("^\\s+|\\s+$", "", rztar$Name)
rztar$Name <- gsub(" Jr| Sr| II| III| IV", "", rztar$Name)
# driving-distance stats
rzrush <- read.csv('pga2021drivingdistance (1).csv')
rzrush$Name <- gsub("\\(PG\\)|\\(SG\\)|\\(SF\\)|\\(PF\\)|\\(C\\)", "", rzrush$Name)
rzrush$Name <- gsub("[\n]", "", rzrush$Name)
rzrush$Name <- gsub("[',]", "", rzrush$Name)
rzrush$Name <- gsub("[.-]", "", rzrush$Name)
rzrush$Name <- gsub("^\\s+|\\s+$", "", rzrush$Name)
rzrush$Name <- gsub(" Jr| Sr| II| III| IV", "", rzrush$Name)
# approach-from->200yd stats
nfjoin <- read.csv('pga2021appfromgt200yd (1).csv', stringsAsFactors=FALSE)
nfjoin$Name <- gsub("[\n]", "", nfjoin$Name)
nfjoin$Name <- gsub("[',]", "", nfjoin$Name)
nfjoin$Name <- gsub("[.-]", "", nfjoin$Name)
nfjoin$Name <- gsub("^\\s+|\\s+$", "", nfjoin$Name)
nfjoin$Name <- gsub(" Jr| Sr| II| III| IV", "", nfjoin$Name)
# datagolf trends
nfjoin2 <- read.csv('1623736280599_datagolf_trends.csv', stringsAsFactors=FALSE)
nfjoin2$Name <- gsub("[\n]", "", nfjoin2$Name)
nfjoin2$Name <- gsub("[',]", "", nfjoin2$Name)
nfjoin2$Name <- gsub("[.-]", "", nfjoin2$Name)
nfjoin2$Name <- gsub("^\\s+|\\s+$", "", nfjoin2$Name)
nfjoin2$Name <- gsub(" Jr| Sr| II| III| IV", "", nfjoin2$Name)
# course history (Torrey Pines / Farmers Insurance)
nfjoin3 <- read.csv('CHTorreyPines_FarmersInsurance (1).csv', stringsAsFactors=FALSE)
nfjoin3$Name <- gsub("[\n]", "", nfjoin3$Name)
nfjoin3$Name <- gsub("[',]", "", nfjoin3$Name)
nfjoin3$Name <- gsub("[.-]", "", nfjoin3$Name)
nfjoin3$Name <- gsub("^\\s+|\\s+$", "", nfjoin3$Name)
nfjoin3$Name <- gsub(" Jr| Sr| II| III| IV", "", nfjoin3$Name)
# US Open event history
nfjoin4 <- read.csv('EventHistoryUSOpen (1).csv', stringsAsFactors=FALSE)
nfjoin4$Name <- gsub("[\n]", "", nfjoin4$Name)
nfjoin4$Name <- gsub("[',]", "", nfjoin4$Name)
nfjoin4$Name <- gsub("[.-]", "", nfjoin4$Name)
nfjoin4$Name <- gsub("^\\s+|\\s+$", "", nfjoin4$Name)
nfjoin4$Name <- gsub(" Jr| Sr| II| III| IV", "", nfjoin4$Name)
# --- Join everything onto the salary table ----------------------------------
joinedFD <- left_join(FDSal2, plyr_sd, by = "Name")
joinedFD$Name <- as.character(joinedFD$Name)
joinedyFD <- left_join(joinedFD, rztar, by = "Name")
joinedzFD <- left_join(joinedyFD, rzrush, by = "Name")
joinedqFD <- left_join(joinedzFD, nfjoin, by = "Name")
# NOTE(review): joinedxFD is never created in this script (its read.csv is
# commented out below), so this line and the uninames line further down will
# error -- probably leftovers from an earlier revision; verify and remove or
# restore the commented read.
joinedxFD$Name <- as.character(joinedxFD$Name)
joined2FD <- left_join(joinedqFD, nfjoin2, by = "Name")
# collapse names to "<first initial> <last name>" to match nfjoin3's format
jz <- str_split_fixed(joined2FD$Name, " ", 2)
jz
nm <- substr(jz[,1], 0, 1)
joined2FD$Name <- paste(nm, jz[,2])
nfjoin3$Name <- as.character(nfjoin3$Name)
joined3FD <- left_join(joined2FD, nfjoin3, by = "Name")
joined4FD <- left_join(joined3FD, nfjoin4, by = "Name")
# see joinedxFD note above -- this references the same undefined object
uninames <- data.frame(table(joinedxFD$Name))
uninames
write.csv(uninames, file='nfluninames.csv', row.names=FALSE)
write.csv(joinedFD, file='joinedFD.csv', row.names=FALSE)
#joinedxFD <- read.csv('joinedxFD (1).csv', stringsAsFactors=FALSE)
#joined2FD <- left_join(joinedxFD, nfjoin2, by = "Name")
# a second, smaller join used only to inspect rows with any missing values
joinedFDc <- left_join(FDSal2, plyr_sd, by = "Name")
joined2FDc <- left_join(joinedFDc, nfjoin, by = "Name")
df <- joined2FDc
new_DF <- df[rowSums(is.na(df)) > 0,]
head(new_DF, n=1)
new_DF$Ceil <- as.numeric(new_DF$Ceil)
DF <- new_DF
DF
#write.csv(joined2FD, file='nfldatajoinwk16snf.csv', row.names=FALSE)
write.csv(DF, file='DFwk16snf.csv', row.names=FALSE)
#54232
|
5f7d02116f6ee80b7d062d7a0ddfa55f899c70d1
|
eed8936c230507f5e8efc18b6d63d139a335cab5
|
/R/print.R
|
2ed883a6abc63522d917786214ac8b3dc15362a1
|
[] |
no_license
|
manoj8385/arulesCBA
|
2564c31b9408e79e844d17b182010c8eb2e0a564
|
c2a76c421994ffa2114164721a3aa61a79e1a120
|
refs/heads/master
| 2020-06-26T20:27:46.152764
| 2016-10-19T21:17:24
| 2016-10-19T21:17:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 186
|
r
|
print.R
|
# Print a short summary of a CBA classifier: rule count, class labels, and
# the default (fallback) class.
#
# Args:
#   x:   a CBA object (list with elements rules, levels, default).
#   ...: ignored; kept for compatibility with the print() generic.
#
# Returns: x, invisibly -- the standard contract for print methods, so the
# object is not re-printed and can keep flowing through a pipeline.
print.CBA <- function(x, ...){
  cat("CBA Object\n")
  cat("Number of rules:", length(x$rules), "\n")
  cat("Class labels:", x$levels, "\n")
  cat("Default class:", x$default, "\n")
  invisible(x)
}
|
9dae1003f9a143eb305bb9ed0b652105fb503773
|
c5fe243e1c7f01c6217cc15f7f64a955f6561624
|
/R/x_math1.r
|
77259037ff6be7c31199537fa3a875890d12ba8e
|
[] |
no_license
|
cran/probhat
|
79c1ff28e9867565d80f2744e3a84f49aeba5dc6
|
ae21b43b0de331247713e4294acad3aa99c4fdb5
|
refs/heads/master
| 2021-06-09T16:27:06.236867
| 2021-05-12T08:40:02
| 2021-05-12T08:40:02
| 174,553,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,501
|
r
|
x_math1.r
|
#probhat: Multivariate Generalized Kernel Smoothing and Related Statistical Methods
#Copyright (C), Abby Spurdle, 2019 to 2021
#This program is distributed without any warranty.
#This program is free software.
#You can modify it and/or redistribute it, under the terms of:
#The GNU General Public License, version 2, or (at your option) any later version.
#You should have received a copy of this license, with R.
#Also, this license should be available at:
#https://cran.r-project.org/web/licenses/GPL-2
# number of distinct values in x
.n.unique = function (x)
{	length (unique (x) )
}
# TRUE when x contains at least one repeated value
.any.duplicates = function (x)
{	.n.unique (x) < length (x)
}
# midpoints of consecutive pairs: returns length (x) - 1 values
# fix: the previous body indexed x [2:(n + 1)], reading one element past the
# end and producing a trailing NA; this now matches the (correct)
# redefinition of .midpoints further down in this file
.midpoints = function (x)
{	(x [-length (x)] + x [-1]) / 2
}
# cumulative sums (forward, or backward when rev=TRUE), with the terminal
# element pinned to exactly 1 to absorb floating-point drift
.cumsum2 = function (x, rev=FALSE)
{	if (rev)
	{	cs = rev (cumsum (rev (x) ) )
		cs [1] = 1
	}
	else
	{	cs = cumsum (x)
		cs [length (cs)] = 1
	}
	cs
}
# recycle/validate a tail specification: every element must be (case
# insensitively) "lower" or "upper"
.val.tail = function (str, m=1)
{	tails = tolower (.val.params (m, str) )
	ok = (tails == "lower" | tails == "upper")
	if (! all (ok) )
		stop ("tail needs to be lower or upper")
	tails
}
# discrete bandwidth: the continuous bandwidth rounded to the nearest odd
# integer (discrete smoothing windows need odd widths)
auto.dbw = function (x, ..., bw.method="ph.default", smoothness=1)
{	dbw = as.integer (round (auto.cbw (x, ..., bw.method=bw.method, smoothness=smoothness) ) )
	if (dbw %% 2L == 0L)
		dbw = dbw + 1L
	dbw
}
# continuous bandwidth(s), one per column of x
# ph.default: width of the central 0.66^(1/m) interquantile interval per
# column; "scott"/"silverman" use the standard stats:: rules of thumb
auto.cbw = function (x, ..., bw.method="ph.default", smoothness=1)
{	bw.method = tolower (bw.method)
	if (! is.matrix (x) )
		x = cbind (x)
	if (bw.method == "scott")
		bw = apply (x, 2, bw.nrd)
	else if (bw.method == "silverman")
		bw = apply (x, 2, bw.nrd0)
	else if (bw.method == "ph.default")
	{	m = ncol (x)
		P = 0.66^(1 / m)
		probs = c ((1 - P) / 2, (1 + P) / 2)
		bw = vapply (seq_len (m),
			function (j) as.numeric (diff (quantile (x [,j], probs) ) ),
			numeric (1) )
	}
	else
		stop ("bw.method needs to be ph.default, Scott or Silverman")
	smoothness * bw
}
# midpoints of consecutive pairs of x (length (x) - 1 values);
# this definition supersedes the earlier one in this file
.midpoints = function (x)
{	(head (x, -1) + tail (x, -1) ) / 2
}
# coerce to integer storage while preserving the dim attribute
# (dimnames, like all other attributes, are deliberately dropped)
.as.integer.matrix = function (x)
{	out = as.integer (x)
	dim (out) = dim (x)
	out
}
# coerce to double storage while preserving the dim attribute
.as.numeric.matrix = function (x)
{	out = as.numeric (x)
	dim (out) = dim (x)
	out
}
# variable name for a univariate input: the column name of a single-column
# matrix, otherwise the default "x"
.varname = function (x)
{	if (! is.matrix (x) || ncol (x) != 1)
		"x"
	else
		colnames (x)
}
# variable names for each column of a matrix (via .varnames.ext);
# non-matrix input gets the single default name "x"
.varnames = function (x, prefix="x", is.cond=FALSE)
{	if (! is.matrix (x) )
		"x"
	else
		.varnames.ext (ncol (x), colnames (x), prefix, is.cond)
}
# resolve m variable names: NULL input yields the defaults prefix1..prefixm;
# otherwise names must be unique, and blank/NA entries are filled with the
# matching default; warnings flag defaulting only for conditional variables
.varnames.ext = function (m, variable.names, prefix="x", is.cond=FALSE)
{	fallback = paste0 (prefix, 1:m)
	if (is.null (variable.names) )
	{	if (is.cond)
			warning ("applying default variable names, to all variables")
		variable.names = fallback
	}
	else
	{	if (.n.unique (variable.names) != m)
			stop ("needs unique variable names")
		blank = is.na (variable.names) | variable.names == ""
		if (any (blank) )
		{	if (is.cond)
				warning ("applying default variable names, to some variables")
			variable.names [blank] = fallback [blank]
		}
	}
	variable.names
}
# observation labels: row names for matrices, plain names otherwise
.blabs = function (x)
{	if (! is.matrix (x) )
		names (x)
	else
		rownames (x)
}
# ensure k is a Kernel object, returning it unchanged
.val.k = function (k)
{	if (! is (k, "Kernel") )
		stop ("needs Kernel object")
	k
}
# recycle a length-1 parameter to length m; length-m input passes through;
# anything else is an error
.val.params = function (m, param)
{	np = length (param)
	if (np == 1)
		rep (param, m)
	else if (np == m)
		param
	else
		stop ("parameter needs to have length 1 or m")
}
# validate univariate evaluation points: strip all attributes (a matrix
# therefore degrades to a plain vector), coerce to numeric, and require
# every value finite; one.or.more additionally forbids zero-length input
.val.x.uv = function (x, one.or.more=FALSE)
{	attributes (x) = NULL
	if (! is.vector (x) )
		stop ("needs vector (or matrix)")
	x = as.numeric (x)
	if (one.or.more && length (x) == 0)
		stop ("x needs one or more values")
	if (! all (is.finite (x) ) )
		stop ("all x values need to be finite")
	x
}
# validate multivariate evaluation points: a finite numeric matrix with at
# least one row
.val.x.mv = function (x)
{	if (! is.matrix (x) )
		stop ("multivariate models need matrix")
	x = .as.numeric.matrix (x)
	if (nrow (x) == 0)
		stop ("x needs one or more rows")
	if (! all (is.finite (x) ) )
		stop ("all x values need to be finite")
	x
}
# dispatch on input form: matrices validated as-is, anything else validated
# as univariate and promoted to a one-column matrix
.val.x.uv.or.mv = function (x)
{	if (! is.matrix (x) )
		cbind (.val.x.uv (x) )
	else
		.val.x.mv (x)
}
# recycle/validate a bandwidth vector: final length n, all finite and >= 0
.val.hvec = function (n, h)
{	h = as.numeric (h)
	if (length (h) == 1)
		h = rep (h, n)
	else if (length (h) != n)
		stop ("length (h) != number of bins/observations")
	if (! all (is.finite (h) ) )
		stop ("all h values need to be finite")
	if (! all (h >= 0) )
		stop ("all h value need to be >= 0")
	h
}
# validate observation weights: length n, finite, strictly positive;
# scale=TRUE normalizes them to sum to 1; the unweighted case returns NA
.val.w = function (is.weighted, n, w, scale=TRUE)
{	if (! is.weighted)
		NA
	else
	{	w = as.numeric (w)
		if (length (w) != n)
			stop ("length (w) != number of observations")
		if (! all (is.finite (w) ) )
			stop ("all w values need to be finite")
		if (! all (w > 0) )
			stop ("all w value need to be >= 0")
		if (scale)
			w = w / sum (w)
		w
	}
}
# default plot label: an explicitly supplied lab wins; otherwise use the
# model's variable name, with "| ..." appended for conditional models
# NOTE(review): names (f) on the fitted model object is assumed to hold the
# variable name(s); is.dpdc/is.cpdc are package predicates -- confirm their
# definitions elsewhere in the package
.deflab = function (f, lab)
{	if (missing (lab) )
	{	vname = names (f)
		if (is.dpdc (f) || is.cpdc (f) )
			paste (vname, "| ...")
		else
			vname
	}
	else
		lab
}
# evaluate f (which is not assumed vectorized) at each scalar in u,
# returning one number per element; extra arguments are passed through
.iterate.uv = function (f, ..., u)
{	vapply (u, function (ui) f (..., ui), numeric (1), USE.NAMES = FALSE)
}
# evaluate f on each row of matrix u, returning one number per row;
# extra arguments are passed through to f
.iterate.mv = function (f, ..., u)
{	vapply (seq_len (nrow (u) ),
		function (i) f (..., u [i,]),
		numeric (1), USE.NAMES = FALSE)
}
# apply f to each row of y and stack the (vector-valued) results into a
# matrix, one output row per input row
# fix: the previous body preallocated x = numeric (n) and then assigned
# x [i,] = ..., which errors for any matrix-valued result (a plain vector
# cannot be indexed with two subscripts); results are now collected in a
# list and bound with rbind
.iterate.mv.2 = function (f, ..., y)
{	n = nrow (y)
	rows = vector ("list", n)
	for (i in seq_len (n) )
		rows [[i]] = f (..., y [i,])
	do.call (rbind, rows)
}
# assert that every probability in y lies inside [0, 1]
.test.y.ok = function (y)
{	bad = y < 0 | y > 1
	if (any (bad) )
		stop ("probabilities need to be between 0 and 1")
}
# convert a density/probability y to a frequency when freq=TRUE, scaling by
# n (or by the full sample size N when n is not supplied); otherwise y is
# returned untouched
.scale.freq = function (y, freq, N, n)
{	if (! freq)
		y
	else
	{	if (missing (n) )
			n = N
		n * y
	}
}
|
0821ef09edda4caa5323e4985e7a4416fb79f7a5
|
5f86b00794542041da6ce334d549cf54cee0302a
|
/acpca670_package/R/sim_apply_new_sample.R
|
9d8a7fef8750ac2ec429718a9a77f8e1986bfdb8
|
[] |
no_license
|
smweinst/acpca670
|
5853a8c4c05465e7c41f5e69943b6bd3a7171606
|
c91349770730a22f2f371ec7c4c879a38ef81eee
|
refs/heads/master
| 2022-06-13T01:47:17.780041
| 2020-05-05T22:02:10
| 2020-05-05T22:02:10
| 259,481,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,694
|
r
|
sim_apply_new_sample.R
|
#' @title Function to apply loadings from AC-PCA to new sample
#' @description User can specify alpha in the original sample and alpha in the new sample (i.e., samples differ in terms of strength of confounder)
#' @name sim_apply_new_sample
#' @param n the number of subjects; default is 5
#' @param b the number of brain regions; default is 10
#' @param p the number of features per brain region; default is 400
#' @param alpha_orig the constant that is multiplied by the confounder in the original simulated sample; default is 2.5
#' @param alpha_new the constant that is multiplied by the confounder in the new simulated sample; default is also 2.5
#' @param nsim number of simulations to run; default is 100
#' @export
sim_apply_new_sample = function(n=5,b=10,p=400,alpha_orig=2.5,alpha_new=2.5,nsim = 100){
  # per-simulation Pearson correlations of the projected scores with the
  # first two PCs of the new sample's shared component Omega
  scores.cor.omega.new = matrix(nrow=nsim,ncol=2)
  for (s in 1:nsim){
    # simulate original dataset from which AC-PCA loadings will be obtained
    sim_dat.s = sim_dat_fun(n=n,b=b,p=p,alpha=alpha_orig)
    X.mat.s = sim_dat.s$X.mat
    Y = sim_dat.s$Y
    # tune lambda
    acpca.s.tune = acPCA::acPCAtuneLambda(X = X.mat.s,
                                          Y = Y,
                                          nPC = 2,
                                          lambdas = seq(0,10,0.05),
                                          anov=T, kernel = "linear",quiet = T)
    # get acpca loadings (first two confounder-adjusted PCs)
    acpca.s.loadings = acPCA::acPCA(X = X.mat.s, Y = Y,
                                    lambda = acpca.s.tune$best_lambda,
                                    kernel = "linear", nPC = 2)$v[,1:2]
    # simulate a new dataset with alpha_new
    sim_dat.s.new = sim_dat_fun(n=n,b=b,p=p,alpha=alpha_new)
    X.mat.s.new = sim_dat.s.new$X.mat
    # apply AC-PCA loadings from original sample to new sample:
    Xv.newsamp = X.mat.s.new%*%acpca.s.loadings
    # replicate Omega across the n subjects so its rows align with X.mat rows
    omega.s.new = sim_dat.s.new$Omega
    omega.s.new.shared = do.call("rbind",replicate(n,omega.s.new,simplify = F))
    # pca on omega in new data (the PCs of Omega are what we want the AC-PCA projections in the new data to be highly correlated with):
    pca_omega.new.scores = prcomp(omega.s.new.shared, center = T)$x
    # NOTE(review): PC sign is arbitrary, so raw correlations may be negative;
    # the violin plot below takes abs() to account for this
    scores.cor.omega.new[s,] = sapply(1:2, FUN = function(t){
      cor(Xv.newsamp[,t], pca_omega.new.scores[,t],method = "pearson") # correlation between scores
    })
  }
  if (nsim > 1){ # how data will be visualized if multiple simulations are run
    par(mfrow=c(1,1))
    vioplot::vioplot(abs(scores.cor.omega.new[,1]),abs(scores.cor.omega.new[,2]),
                     ylim = c(0,1), ylab = c("Pearson correlation"),
                     col = "white", names = c("PC1","PC2"),
                     main = "Correlation with shared component when AC-PCA from another sample is used");mtext(
                       bquote(paste(alpha['original']," = ",.(alpha_orig), " ", alpha['new'], " = ", .(alpha_new))),side = 3
                     )
  }
  else{ # how data will be visualized if only one simulation is specified
    # uses pca_omega.new.scores / Xv.newsamp left over from the single loop pass
    # NOTE(review): par() is modified without restoring the previous settings
    par(mfrow=c(1,2))
    plot(pca_omega.new.scores[,1],pca_omega.new.scores[,2], main = "True Pattern",
         xlab = "PC1", ylab = "PC2",type = 'n');text(
           pca_omega.new.scores[,1],pca_omega.new.scores[,2],labels = sim_dat.s.new$labels
         )
    plot(Xv.newsamp[,1],Xv.newsamp[,2], type = 'n',xlab = "PC1",ylab = "PC2",
         main = "AC-PCA from different sample applied");text(
           labels = sim_dat.s.new$labels, col = sim_dat.s.new$group + 1,
           Xv.newsamp[,1],Xv.newsamp[,2],
         ); mtext(
           bquote(paste(alpha['original']," = ",.(alpha_orig), " ", alpha['new'], " = ", .(alpha_new))),side = 3
         )
  }
}
|
8fc554ad53384eef23921eb74868c802af20f442
|
b987a7c5953f699a4d8fba8498f98b8bc7dd62ac
|
/state/ma/licenses/docs/ma_scrape_licenses.R
|
5f9985932e6b6ba2447ca54942b77b0a4743ea83
|
[] |
no_license
|
irworkshop/accountability_datacleaning
|
67f7301cf216cec891f892d05d7c1abcdaf4a78b
|
ba160f21fdb44446166077754e04705b763abe46
|
refs/heads/master
| 2023-09-04T09:44:11.737083
| 2023-08-05T16:54:33
| 2023-08-05T16:54:33
| 183,467,430
| 12
| 4
| null | 2023-05-23T17:14:17
| 2019-04-25T16:02:32
|
Shell
|
UTF-8
|
R
| false
| false
| 3,696
|
r
|
ma_scrape_licenses.R
|
# Kiernan Nicholls
# Wed Jun 1 11:49:08 2022
# Scrape MA DPH license records: establish a session on the search page and
# enumerate the available license types from its drop-down menu.
library(tidyverse)
library(rvest)
library(httr)
library(fs)
get_home <- GET("https://madph.mylicense.com/verification/Search.aspx")
# capture the session cookie(s) so later requests share the same session
cook <- cookies(get_home)
sesh_id <- set_names(cook$value, cook$name)
home_html <- content(get_home)
# every <option> in the license-type select box
all_types <- home_html %>%
  html_elements("#t_web_lookup__profession_name option") %>%
  html_text()
# remove the "All" option
all_types <- all_types[-1]
# Pull the value of a hidden ASP.NET state input (id = "__<name>") out of a
# parsed page.
#
# Args:
#   html: a parsed HTML document (xml2/rvest).
#   name: suffix of the input's id, e.g. "EVENTVALIDATION".
#
# Returns: the input's `value` attribute (NA if the node is absent).
#
# Fix: the body previously ignored its `html` argument and referenced a
# global `home`, which is never defined in this script (the parsed landing
# page is stored as `home_html`), so calls either failed or read the wrong
# page's state.
find_attr <- function(html, name) {
  html_attr(html_element(html, sprintf("#__%s", name)), "value")
}
# index into all_types: which license type to search for
i <- 2
# Submit the search form for license type i. The hidden ASP.NET state fields
# are scraped from the parsed landing page and echoed back with the request.
# Fix: these find_attr() calls previously passed `home`, which is never
# defined in this script -- the parsed landing page is `home_html`.
# NOTE(review): no `__VIEWSTATE` field is sent; confirm the server accepts
# the post without it.
post_search <- POST(
  url = "https://madph.mylicense.com/verification/Search.aspx",
  set_cookies(sesh_id),
  body = list(
    `__EVENTTARGET` = find_attr(home_html, "EVENTTARGET"),
    `__EVENTARGUMENT` = find_attr(home_html, "EVENTARGUMENT"),
    `__LASTFOCUS` = find_attr(home_html, "LASTFOCUS"),
    `__VIEWSTATEGENERATOR` = find_attr(home_html, "VIEWSTATEGENERATOR"),
    `__EVENTVALIDATION` = find_attr(home_html, "EVENTVALIDATION"),
    t_web_lookup__profession_name = "",
    t_web_lookup__license_type_name = all_types[i],
    t_web_lookup__first_name = "",
    t_web_lookup__last_name = "",
    t_web_lookup__license_no = "",
    t_web_lookup__license_status_name = "",
    t_web_lookup__addr_city = "",
    t_web_lookup__addr_state = "",
    t_web_lookup__addr_zipcode = "",
    sch_button = "Search"
  )
)
# fetch the results page produced by the search post (same session cookies)
get_results <- GET(
  url = "https://madph.mylicense.com/verification/SearchResults.aspx",
  set_cookies(sesh_id)
)
results_html <- content(get_results)
# first page of results as a data frame (preview only; the bulk download
# below retrieves the full set)
result_head <- results_html %>%
  html_element("#datagrid_results") %>%
  html_table()
# --- Bulk download dance -----------------------------------------------------
# The site requires a fixed sequence of postbacks/redirects before it serves
# the file: request download -> confirmation -> login -> confirmation again
# -> preference page -> final download post. Each step re-sends the hidden
# ASP.NET state fields scraped from the previous page.
post_save <- POST(
  url = "https://madph.mylicense.com/verification/SearchResults.aspx",
  set_cookies(sesh_id),
  body = list(
    `__EVENTTARGET` = find_attr(results_html, "EVENTTARGET"),
    `__EVENTARGUMENT` = find_attr(results_html, "EVENTARGUMENT"),
    `__VIEWSTATE` = find_attr(results_html, "VIEWSTATE"),
    `__VIEWSTATEGENERATOR` = find_attr(results_html, "VIEWSTATEGENERATOR"),
    `__EVENTVALIDATION` = find_attr(results_html, "EVENTVALIDATION"),
    # click the download file button
    btnBulkDownLoad = "Download+File"
  )
)
# confirmation page reached from the results page
get_confirm <- GET(
  url = "https://madph.mylicense.com/verification/Confirmation.aspx",
  query = list(from_page = "SearchResults.aspx"),
  set_cookies(sesh_id)
)
# pass through the (anonymous) login page
get_login <- GET(
  url = "https://madph.mylicense.com/verification/Login.aspx",
  query = list(from_page = "Confirmation.aspx"),
  set_cookies(sesh_id)
)
# back to the confirmation page, now coming from the login page
get_verify <- GET(
  url = "https://madph.mylicense.com/verification/Confirmation.aspx",
  query = list(from_page = "Login.aspx"),
  set_cookies(sesh_id)
)
verify_html <- content(get_verify)
# confirm, continuing to the download-preferences page
post_verify <- POST(
  url = "https://madph.mylicense.com/verification/Confirmation.aspx",
  query = list(from_page = "Login.aspx"),
  set_cookies(sesh_id),
  body = list(
    `__VIEWSTATE` = find_attr(verify_html, "VIEWSTATE"),
    `__VIEWSTATEGENERATOR` = find_attr(verify_html, "VIEWSTATEGENERATOR"),
    `__EVENTVALIDATION` = find_attr(verify_html, "EVENTVALIDATION"),
    # click the download file button
    btnBulkDownLoad = "Continue"
  )
)
get_pref <- GET(
  url = "https://madph.mylicense.com/verification/PrefDetails.aspx",
  set_cookies(sesh_id)
)
pref_html <- content(get_pref)
# final post: request the delimited-text export
post_down <- POST(
  url = "https://madph.mylicense.com/verification/PrefDetails.aspx",
  set_cookies(sesh_id),
  body = list(
    `__VIEWSTATE` = find_attr(pref_html, "VIEWSTATE"),
    `__VIEWSTATEGENERATOR` = find_attr(pref_html, "VIEWSTATEGENERATOR"),
    `__EVENTVALIDATION` = find_attr(pref_html, "EVENTVALIDATION"),
    # click the download file button
    filetype = "delimitedtext",
    sch_button = "Download"
  )
)
# the response body is the delimited license file itself
content(post_down, as = "text")
|
23694f66b194b91ad64429dfd48e0c0d87e4fad0
|
13188691adffd02816283d8826e01e845817bb2d
|
/src/USA_ARHR_Graphics.R
|
29ba59112659f2637e50ba9de7f50775d6f547a1
|
[
"MIT"
] |
permissive
|
kw-lee/ARHR_USA
|
114a808b8c17f58006ff43e15268730b4198d379
|
569bccc3b5c5869b33438940b50054fabaf2a01e
|
refs/heads/master
| 2020-09-21T13:27:28.244027
| 2019-12-15T07:14:31
| 2019-12-15T07:14:31
| 224,801,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,063
|
r
|
USA_ARHR_Graphics.R
|
install.packages("ggtern")
library(ggtern)
load("C:/Users/user/Desktop/ARHR_error.Rdata")
dyn.load('C:/Users/user/Desktop/Add-Reg-Hilbert-Res/Dll files/CBS_continuous_simplex.dll') # path of the CBS_continuous_simplex.dll file
dyn.load('C:/Users/user/Desktop/Add-Reg-Hilbert-Res/Dll files/SBF_continuous_simplex.dll') # path of the CBS_continuous_simplex.dll file
summary(X)
mean_X <- apply(X,2,mean)
test <- as.data.frame(matrix(rep(mean_X, 100), nrow = 100, byrow = T))
colnames(test) <- colnames(X)
test_age <- test
test_age$med_age <- seq(0,1, length.out = 100)
Y_pred <- SBF_simplex(as.matrix(test_age), X_training, Y_training, h = optimal_h[k,])$Y_hat
colnames(Y_pred) <- c("Caucasian", "African_American", "Mongoloid")
df_age <- as.data.frame(cbind(test_age,Y_pred))
p_age <- ggtern(data = df_age, aes(x = Caucasian, y = African_American, z = Mongoloid, color = med_age)) +
geom_point(size=2) +
scale_color_gradientn(colours = rainbow(3))+
ggtitle("age") +
theme_showarrows() +
labs(fill = "age") +
theme(legend.position = c(0,1),
legend.justification = c(0,1)) +
tern_limits(T=0.2, L=1, R=0.2)
p_age
test_income <- test
test_income$income <- seq(0,1, length.out = 100)
Y_pred <- SBF_simplex(as.matrix(test_income), X_training, Y_training, h = optimal_h[k,])$Y_hat
colnames(Y_pred) <- c("Caucasian", "African_American", "Mongoloid")
df_income <- as.data.frame(cbind(test_income,Y_pred))
p_income <- ggtern(data = df_income, aes(x = Caucasian, y = African_American, z = Mongoloid, color = income)) +
geom_point(size=2) +
scale_color_gradientn(colours = rainbow(3))+
ggtitle("income") +
labs(fill = "income") +
theme_showarrows() +
theme(legend.position = c(0,1),
legend.justification = c(0,1)) +
tern_limits(T=0.4, L=1, R=0.4)
p_income
test_vcrime <- test
test_vcrime$vcrime <- seq(0,1, length.out = 100)
Y_pred <- SBF_simplex(as.matrix(test_vcrime), X_training, Y_training, h = optimal_h[k,])$Y_hat
colnames(Y_pred) <- c("Caucasian", "African_American", "Mongoloid")
df_vcrime <- as.data.frame(cbind(test_vcrime,Y_pred))
p_vcrime <- ggtern(data = df_vcrime, aes(x = Caucasian, y = African_American, z = Mongoloid, color = vcrime)) +
geom_point(size=2) +
scale_color_gradientn(colours = rainbow(3))+
ggtitle("vcrime") +
labs(fill = "vcrime") +
theme_showarrows() +
theme(legend.position = c(0,1),
legend.justification = c(0,1)) +
tern_limits(T=0.2, L=1, R=0.2)
p_vcrime
test_temperature <- test
test_temperature$temperature <- seq(0,1, length.out = 100)
Y_pred <- SBF_simplex(as.matrix(test_temperature), X_training, Y_training, h = optimal_h[k,])$Y_hat
colnames(Y_pred) <- c("Caucasian", "African_American", "Mongoloid")
df_temperature <- as.data.frame(cbind(test_temperature,Y_pred))
p_temperature <- ggtern(data = df_temperature, aes(x = Caucasian, y = African_American, z = Mongoloid, color = temperature)) +
geom_point(size=2) +
scale_color_gradientn(colours = rainbow(3))+
ggtitle("temperature") +
labs(fill = "temperature") +
theme_showarrows() +
theme(legend.position = c(0,1),
legend.justification = c(0,1)) +
tern_limits(T=0.25, L=1, R=0.25)
p_temperature
test_precipitation <- test
test_precipitation$precipitation <- seq(0,1, length.out = 100)
Y_pred <- SBF_simplex(as.matrix(test_precipitation), X_training, Y_training, h = optimal_h[k,])$Y_hat
colnames(Y_pred) <- c("Caucasian", "African_American", "Mongoloid")
df_precipitation <- as.data.frame(cbind(test_precipitation,Y_pred))
p_precipitation <- ggtern(data = df_precipitation, aes(x = Caucasian, y = African_American, z = Mongoloid, color = precipitation)) +
geom_point(size=2) +
scale_color_gradientn(colours = rainbow(3))+
ggtitle("precipitation") +
theme_showarrows() +
labs(fill = "precipitation") +
theme(legend.position = c(0,1),
legend.justification = c(0,1)) +
tern_limits(T=0.2, L=1, R=0.2)
p_precipitation
|
4e427ce991ee1f6ee4e168782183d1c8f1eb26cd
|
b133065419866205155d0cea5ff85a833dc32d64
|
/man/pollen.Rd
|
730de850946bdd2929fd580908c02c106debd2dc
|
[] |
no_license
|
mcomas/coda.base
|
985a7e99b66abea34cbdb04457c00c7a693ad778
|
ef9e03e345ff8c9e980f5bdc716999cf105ccffc
|
refs/heads/master
| 2023-03-05T07:16:35.090759
| 2023-03-01T17:47:03
| 2023-03-01T17:47:03
| 97,572,580
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 506
|
rd
|
pollen.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{pollen}
\alias{pollen}
\title{Pollen composition in fossils}
\format{
An object of class \code{data.frame} with 30 rows and 4 columns.
}
\usage{
pollen
}
\description{
The pollen data set is formed by 30 fossil pollen samples from three different
locations (recorded in the variable group). The samples were analysed and the 3-part
composition [pinus, abies, quercus] was measured.
}
\keyword{datasets}
|
08ca452f82e10a69936f198a2598b36f70d3915f
|
ba4af2c77581cc4a42959ba5307130cf06590532
|
/NFP/Scripts/Obsolete/computeConfidence.R
|
4cd7ddafa604c033597092ff544e0c67a3e9e124
|
[] |
no_license
|
gorilazz/BP
|
94261607ee5c70742bd7acf482c56039bd7547e8
|
ab126d810bca1909318d1986efcf5b32cfc55e18
|
refs/heads/master
| 2021-01-19T03:22:09.777201
| 2015-02-04T18:19:11
| 2015-02-04T18:19:11
| 25,895,720
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 370
|
r
|
computeConfidence.R
|
source('utility.R');
path_inPrediction = "../Model/201410/experiments_AR_Social_Model_13_median_bestmodels_bootstrapping.csv";
predictionsFull = read.csv(file=path_inPrediction, head=TRUE, sep=",");
predictions = predictionsFull$Prediction;
result_mean = ConfidenceInterval(predictions, 0.95, "mean");
result_median = ConfidenceInterval(predictions, 0.95, "median");
|
f6b7fb6a61e8a879e0b430c8257b5c8096756dff
|
8edf0521ebc0ca53ec618d6d220c47c851caaa71
|
/man/sim_add_CPUE.Rd
|
2677b9f0b25ec02369ffbd0648173d9f2a0329d0
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
amart/r4ss
|
9b730038ee4c4b6d38aaabe81b6ad9fddf0eb4f3
|
fbccbace9a70e846401d32577aeab9f25cb31ba5
|
refs/heads/master
| 2021-01-17T06:03:03.172272
| 2020-10-04T01:38:14
| 2020-10-04T01:38:14
| 24,735,775
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 865
|
rd
|
sim_add_CPUE.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sim_add_CPUE.r
\name{sim_add_CPUE}
\alias{sim_add_CPUE}
\title{this function adds a new line for a CPUE/survey observation in the CPUE data structure for the SS DAT file}
\usage{
sim_add_CPUE(
dat_struct = NULL,
CPUE_year = -1,
CPUE_seas = -1,
CPUE_fleet = -1,
CPUE_obs = -999,
CPUE_std_err = 999
)
}
\arguments{
\item{dat_struct}{- DAT structure to be edited}
\item{CPUE_year}{- year for the CPUE observation}
\item{CPUE_seas}{- season for the CPUE observation}
\item{CPUE_fleet}{- fleet for the CPUE observation}
\item{CPUE_obs}{- CPUE observation}
\item{CPUE_std_err}{- standard error for the CPUE observation}
}
\value{
edited DAT structure
}
\description{
this function adds a new line for a CPUE/survey observation in the CPUE data structure for the SS DAT file
}
|
5b3a9892635fb43c0e312626c18edba4254f4074
|
30d67c192ac6e9da0799e28fd4855b0537eb0e3e
|
/assignment2.R
|
a471e3f63c9e186ebcb85e6fa37f64a32b771038
|
[] |
no_license
|
awartany/datasciencecoursera
|
9f07fbc66f9fe70c9f19ff622e84452f315af182
|
7a1ac2081f4dbe17be859030e607e4abf4135518
|
refs/heads/master
| 2021-09-01T18:51:42.967567
| 2017-12-28T09:48:28
| 2017-12-28T09:48:28
| 115,052,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 898
|
r
|
assignment2.R
|
# Build a "cache matrix": a matrix `x` bundled with a lazily cached
# inverse `m`, exposed through four closures sharing this environment.
#   set/get           -- replace / read the stored matrix
#   setsolve/getsolve -- store / read the cached inverse
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    m <<- NULL  # a new matrix invalidates the cache
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    m <<- solve
  }
  getsolve <- function() {
    m
  }
  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
# Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
# object). The inverse is computed at most once: a cached copy is
# returned when available, otherwise solve() runs and the result is
# stored back into the cache. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setsolve(inv)
  inv
}
# Demo: exercise the cache. Note cacheSolve()/solve() needs a square,
# invertible matrix, so set() must be given one. (The original demo
# passed the vector 30:50, which makes solve() fail, and its comments
# were left over from the makeVector/mean example.)
aVector <- makeCacheMatrix(matrix(1:4, 2, 2))
aVector$get()       # retrieve the stored matrix x
aVector$getsolve()  # cached inverse m, still NULL at this point
aVector$set(matrix(c(3, 0, 0, 3), 2, 2))  # reset value with a new square matrix
cacheSolve(aVector) # computes and caches the inverse of the new matrix
aVector$getsolve()  # retrieve it directly, now that it has been cached
|
1f66e31354bc6c9325ac27a91c921a7a0e66f7d6
|
5b103a7eda733caffd7aee485d0b9577113ab108
|
/man/aggregate_duplicates.Rd
|
c5431be2305ced0d4eaa2c3de043babc3709b09f
|
[] |
no_license
|
jackieduckie/ttBulk
|
ddf6611fa90d91db861373742a0a207b0ddc3f85
|
de05cfd41bcb0b6ff8f7bd6604c2a8e5f905362f
|
refs/heads/master
| 2020-07-18T07:32:00.334355
| 2019-09-11T09:21:30
| 2019-09-11T09:21:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,189
|
rd
|
aggregate_duplicates.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{aggregate_duplicates}
\alias{aggregate_duplicates}
\title{Aggregates multiple read counts from the same samples (e.g., from isoforms)
This function aggregates read counts over samples, concatenates other character columns, and averages other numeric columns}
\usage{
aggregate_duplicates(input.df, aggregation_function = sum,
sample_column = NULL, transcript_column = NULL,
counts_column = NULL, keep_integer = T)
}
\arguments{
\item{input.df}{A tibble}
\item{aggregation_function}{A function for counts aggregation (e.g., sum)}
\item{sample_column}{A character name of the sample column}
\item{transcript_column}{A character name of the gene/transcript name column}
\item{counts_column}{A character name of the read count column}
\item{keep_integer}{A boolean. Whether to force the aggregate counts to integer}
}
\value{
A tibble with aggregated genes and annotation
}
\description{
Aggregates multiple read counts from the same samples (e.g., from isoforms)
This function aggregates read counts over samples, concatenates other character columns, and averages other numeric columns
}
|
79f9e0c13c66676eac80c10ed088c09fb7bf9cff
|
90dfda50ead37d0a876c75bbd62469076f904d4f
|
/data/norolling_speed/stats_script.r
|
bf3898547f3d4b5d30822c69221538c945f6b231
|
[
"Apache-2.0"
] |
permissive
|
fritzfrancisco/fish_abm
|
6091525cfdb4da982c0efe85f6c98f65e61083d4
|
1819a02babb021b0abbb218ddea1a96668eec387
|
refs/heads/master
| 2020-01-23T21:32:17.579076
| 2016-12-22T14:46:52
| 2016-12-22T14:46:52
| 74,699,690
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,347
|
r
|
stats_script.r
|
#setwd("~/norolling_speed")
get_rates <- function(data_set) {
  # Fit a simple linear trend (value ~ 0-based row index) to every column
  # of `data_set` and return a data frame with one row per column:
  #   rates -- fitted slope (rate of change per time step)
  #   start -- fitted intercept (fitted value at index 0)
  n_col <- ncol(data_set)
  # The predictor is identical for every column, so build it once
  # outside the loop.
  x <- seq(0, nrow(data_set) - 1, by = 1)
  rates <- numeric(n_col)  # preallocate instead of growing NULL vectors
  start <- numeric(n_col)
  for (i in seq_len(n_col)) {
    y <- data_set[, i]
    fit <- lm(y ~ x)
    cf <- coef(fit)       # clearer than indexing fit[[1]] directly
    start[i] <- cf[[1]]   # intercept
    rates[i] <- cf[[2]]   # slope
  }
  return(data.frame(rates, start))
}
nrspeed0 <- data.frame(t(read.csv("norolling_speed2.5.csv", header = FALSE)))
row.names(nrspeed0) <- NULL
nrspeed1 <- data.frame(t(read.csv("norolling_speed5.csv", header = FALSE)))
row.names(nrspeed1) <- NULL
nrspeed2 <- data.frame(t(read.csv("norolling_speed7.5.csv", header = FALSE)))
row.names(nrspeed2) <- NULL
nrspeed3 <- data.frame(t(read.csv("norolling_speed10.csv", header = FALSE)))
row.names(nrspeed3) <- NULL
nrspeed4 <- data.frame(t(read.csv("norolling_speed12.5.csv", header = FALSE)))
row.names(nrspeed4) <- NULL
nrspeed5 <- data.frame(t(read.csv("norolling_speed15.csv", header = FALSE)))
row.names(nrspeed5) <- NULL
nrspeed6 <- data.frame(t(read.csv("norolling_speed17.5.csv", header = FALSE)))
row.names(nrspeed6) <- NULL
nrspeed7 <- data.frame(t(read.csv("norolling_speed20.csv", header = FALSE)))
row.names(nrspeed7) <- NULL
matplot(nrspeed1,type="l",xlab = "iterations",ylab = "environmental quality",main="Depletion: Non-Rolling",ylim=c(1500,3700))
rates_025<- get_rates(nrspeed0)
rates_050<- get_rates(nrspeed1)
rates_075<- get_rates(nrspeed2)
rates_100<- get_rates(nrspeed3)
rates_125<- get_rates(nrspeed4)
rates_150<- get_rates(nrspeed5)
rates_175<- get_rates(nrspeed6)
rates_200<- get_rates(nrspeed7)
boxplot(rates_025$rates,rates_050$rates,rates_075$rates,rates_100$rates,rates_125$rates,rates_150$rates,rates_175$rates,rates_200$rates,xlab="speed",ylab="rate of depletion")
shapiro.test(rates_025$rates)
shapiro.test(rates_050$rates)
shapiro.test(rates_075$rates)
shapiro.test(rates_100$rates)
shapiro.test(rates_125$rates)
shapiro.test(rates_150$rates)
shapiro.test(rates_175$rates)
shapiro.test(rates_200$rates)
# Pool depletion rates across all speed conditions and label each with
# its speed (30 replicates per speed, speeds 2.5..20 in steps of 2.5).
nr_rate_list <- c(rates_025$rates,rates_050$rates,rates_075$rates,rates_100$rates,rates_125$rates,rates_150$rates,rates_175$rates,rates_200$rates)
nr_speed_list <- rep(seq(2.5,20,by=2.5),each=30)
boxplot(nr_rate_list ~ nr_speed_list,xlab="speed",ylab="depletion rate",ylim=c(-1,-0),main="Without Rolling Behaviour")
# Bug fix: the tests below referenced undefined `rate_list`/`speed_list`;
# the vectors defined above carry the `nr_` prefix.
bartlett.test(nr_rate_list ~ nr_speed_list)
kruskal.test(nr_rate_list ~ nr_speed_list)
|
2b7954c31159f8d23b828d751b578d9ecba91046
|
823576d24e5b373201edbb0f49a87f1c06150223
|
/reeffishmanagementv030/global.R
|
520150d2334e289402f122a31a37a73531ee076e
|
[] |
no_license
|
claire-roberts/Reef-Fish-Management-Areas
|
3192e2cd227dd14ab3f043527740e0596e5eece3
|
3a5624cfd46b64cb68994269afb149415a9d0b73
|
refs/heads/master
| 2021-01-19T09:03:20.420775
| 2017-02-16T18:28:02
| 2017-02-16T18:28:02
| 82,180,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,072
|
r
|
global.R
|
##global R
## Updates version 0.30 created github repo claire-roberts/Reef-Fish-Management-Areas
#setwd("X:/Data_John/shiny/reeffishmanagementv020")
## version 0.10
#setwd("X:/Data_John/shiny/reeffishmanagementv020")
#setwd("C:/reeffishmanagementareas")
## This script was used to import and convert the shapefiles to binary
## Obtained from NMFS SERO 12-4-2015
# library(rgdal)
# ## Madison Swanson, edges, steamboat lumps
# setwd("X:/Data_John/shiny/reeffishmanagementareas/madswan_steamboat_edges")
# polyMSESL<- readOGR("MadSwan_Steamboat_Edges_po.shp", layer="MadSwan_Steamboat_Edges_po")
# ## Alabama SMZ
# setwd("X:/Data_John/shiny/reeffishmanagementareas/al_smz")
# polyALSMZ<- readOGR("AL_SMZ_po.shp", layer="AL_SMZ_po")
# ## Shallow water grouper closure
# setwd("X:/Data_John/shiny/reeffishmanagementareas/swg")
# lineSWG <- readOGR("SWG_ln.shp", layer="SWG_ln")
# pointSWG <- readOGR("SWG_pt.shp", layer="SWG_pt")
#
# setwd("X:/Data_John/shiny/reeffishmanagementareas/gulf_reefll_seasonal")
# polyLongLine <- readOGR("Gulf_ReefLL_seasonal_po.shp", layer="Gulf_ReefLL_seasonal_po")
# pointLongLine <- readOGR("Gulf_ReefLL_seasonal_pt.shp", layer="Gulf_ReefLL_seasonal_pt")
#
# setwd("X:/Data_John/shiny/reeffishmanagementareas/longline_buoy")
# polyLongLineBuoy <- readOGR("longline_buoy_po.shp", layer="longline_buoy_po")
# pointLongLineBuoy <- readOGR("longline_buoy_pt.shp", layer="longline_buoy_pt")
#
# setwd("X:/Data_John/shiny/reeffishmanagementareas/reef_stressed")
# polyReefStressed <- readOGR("reef_stressed_po.shp", layer="reef_stressed_po")
# pointReefStresssed <- readOGR("reef_stressed_pt.shp", layer="reef_stressed_pt")
#
#
# setwd("X:/Data_John/shiny/reeffishmanagementareas/NorthernAndSouthern")
# SWGOpen <- readOGR("NorthernSGrouper.shp", layer="NorthernSGrouper")
# SWGClosed <- readOGR("SouthernSGrouper.shp", layer="SouthernSGrouper")
#
# library(taRifx.geo)
# polySWG <- rbind(SWGOpen, SWGClosed)
# setwd("X:/Data_John/shiny/reeffishmanagementareas")
# save.image("ReefFishManagement.RData")
############################### install libraries ##################
# This section is provided for convenience to install libraries that are
# often required in apps. May require some special set-up on your R install
# strongly recommend using RStudio
##use development version of leaflet to leverage new features 11.3.2015
# Note: this may be necessary: Rtools 3.1 from http://cran.r-project.org/bin/windows/Rtools/ and then run find_rtools()
# if (!require('devtools')) install.packages('devtools')
# if (!require('shinydashboard')) install.packages('shinydashboard')
# if (!require('rgdal')) install.packages('rgdal')
# if (!require('sp')) install.packages('sp')
# if (!require('DT')) install.packages('DT')
# library(devtools)
# if (!require('leaflet')) devtools::install_github('rstudio/leaflet')
############################### install libraries ##################
#### Set working directory: this needs to be run prior to using the
## 'Run App' button, but must be commented out prior to publishing
## to the web!
#setwd("X:/Data_John/shiny/reeffishmanagementareas")
########################## load libraries:
## standard R stuff here :)
## load required libraries
library(shiny)
library(shinydashboard)
library(leaflet)
library(rgdal)
# library(DT)
# library(sp)
#############################
## output version info to text file
## this is useful for debugging
# sink("sessionInfo.txt")
# sessionInfo()
# sink()
load("ReefFishManagement.RData")
##split into separate files
# Edges <- subset(polyMSESL, LABEL=="Edges")
# SteamboatLumps <- subset(polyMSESL, LABEL=="Steamboat Lumps")
# MadisonSwanson <- subset(polyMSESL, LABEL=="Madison and Swanson sites")
Date <- format(Sys.Date(), "%A %b %d %Y")
DateMonth <- as.numeric(format(Sys.Date(), "%m"))
# DateMonth <- 2
#
# content <- paste(sep = "","<b> <a test </a></b>",
# "Welcome to the Gulf Council Reef Fish Management Mapping tool. This map illustrates spatial management
# tools currently used to manage Gulf reef fisheries. Today is ", Date, " and the areas initially displayed on the map
# are subject to management closure for one or more species and/or gear types. Layers are clickable with links to a full description of the
# regulations and associated boundaries.")
content <- HTML(paste(sep = " ",
"<center><b><a href='http://www.gulfcouncil.org' target='_blank'>Gulf Council Reef Fish Management</a></b></center>", "<br/>",
"Welcome to the Gulf Council Reef Fish Management Mapping tool.", "<br/>", "This map illustrates spatial management
tools currently used to", "<br/>", "manage Gulf reef fisheries. <b>Today is</b> ", "<b>",Date,"</b>","<br/>", " and the areas initially displayed on the map
are subject to", "<br/>", "management closure for one or more species and/or gear types.", "<br/>", "Layers are clickable with links to a full description of the regulations", "<br/>", "and associated boundaries."))
|
939abc58e5c7a8a72ac0658cda7a0ab13b7b4cdc
|
4f217be84965dcdf28299a7ffea4724d2ef662e4
|
/R/gta get imf data.R
|
1b68156a6e4bd6beee701b5ae67f809e1f834de3
|
[] |
no_license
|
global-trade-alert/gtalibrary
|
694cbc2718954ca8737ab2d2e72c787da649df68
|
a8ad12b2792f5558dacde494adbd7c13faffff49
|
refs/heads/master
| 2023-08-17T09:21:23.631486
| 2023-08-08T09:45:05
| 2023-08-08T09:45:05
| 145,339,633
| 7
| 1
| null | 2023-07-17T17:01:39
| 2018-08-19T21:43:20
|
R
|
UTF-8
|
R
| false
| false
| 2,099
|
r
|
gta get imf data.R
|
# Roxygen documentation

#' A wrapper for the IMFData function
#'
#'
#' @param start.date Provide the period start date in R format ('YYYY-mm-dd').
#' @param end.date Provide the period end date in R format ('YYYY-mm-dd').
#' @param frequency Provide time series frequency e.g. ('D','M','Q','A')
#' @param series What time series do you want? Options: 'fx' for USD exchange rates.
#' @param countries What countries do you want? Permissible options are 'all' plus GTA names and ISO3 codes.
#'
#' @references www.globaltradealert.org
#' @author Global Trade Alert

gta_get_imf_data <- function(start.date=NULL,
                             end.date=NULL,
                             frequency=NULL,
                             series=NULL,
                             countries=NULL) {

  library(IMFData)

  # Lookup table mapping ISO currency codes to the area symbols used by
  # the IMF IFS database (e.g. "U2" is the euro area).
  imf.cur=data.frame(currency=c("GBP", "PLN", "EUR", "SEK", "DKK", "HUF", "BGN", "CZK", "NOK", "CHF", "HRK", "USD", "RON", "SKK", "MKD", "ISK", "JPY", "LTL", "LVL", "MTL"),
                     imf.symbol=c("GB", "PL", "U2", "SE", "DK", "HU", "BG","CZ", "NO", "CH", "HR", "US", "RO", "SK","MK", "IS","JP", "LT", "LV", "MT"),
                     stringsAsFactors = F)

  # Do not re-check the query structure on every request.
  checkquery = FALSE

  # All four core parameters are mandatory.
  if(any(is.null(start.date),is.null(end.date),is.null(frequency), is.null(series))){
    stop("Please specify all parameters.")
  }

  # NOTE(review): only series == "fx" is implemented; any other value
  # leaves query.series/database.id undefined and the call below errors.
  if(series=="fx"){
    query.series='ENDA_XDC_USD_RATE'
    database.id <- 'IFS'
  }

  # NOTE(review): despite the documentation, only countries == "all"
  # works as implemented — any other value leaves query.countries
  # undefined, so the list() below errors. TODO: map GTA names/ISO3
  # codes to IMF symbols.
  if(countries=="all"){
    query.countries=imf.cur$imf.symbol
  }

  # Assemble the IMF API filter: frequency, area (country) and indicator.
  queryfilter <- list(CL_FREQ=frequency,
                      CL_AREA_IFS=query.countries,
                      CL_INDICATOR_IFS =query.series)

  # Fetch the data from the IMF via the IMFData package.
  imf.data <- CompactDataMethod(database.id,
                                queryfilter,
                                start.date,
                                end.date,
                                checkquery, tidy = T)

  # For FX series: keep date/value/area columns, rename them, and attach
  # the human-readable currency code via the lookup table.
  if(series=="fx"){
    imf.data=imf.data[,c(1,2,4)]
    names(imf.data)=c("date","lcu.per.usd", "imf.symbol")
    imf.data=merge(imf.data, imf.cur, by="imf.symbol",all.x=T)
    imf.data$imf.symbol=NULL
  }

  return(imf.data)
}
|
5d1064ebd40b4ad07c0b60cb01f80dd827087b51
|
b6312d8298f60b08b040b51ad1d66f8f3b6627a5
|
/man/histWithDist.Rd
|
74c312f3447f777fe0f4db7aa3cc42a1e69e5918
|
[] |
no_license
|
hutchisonjtw/JNCCTools
|
36ced580cb7feb91cf310684220451843996bb16
|
48242eac43c37d16b50aa50504dd4ca7f02c4551
|
refs/heads/master
| 2021-01-10T10:13:43.298389
| 2017-03-22T14:25:45
| 2017-03-22T14:25:45
| 54,502,489
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,226
|
rd
|
histWithDist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/histWithDist_function.R
\name{histWithDist}
\alias{histWithDist}
\title{histWithDist}
\usage{
histWithDist(x, main = "Histogram with fitted distributions",
distr = c("nbinom", "pois", "norm", "lnorm"))
}
\arguments{
\item{x}{Numeric vector of data values to be plotted as a histogram.}
\item{main}{Title for the plot. Default value is \code{"Histogram with fitted distributions"}.}
\item{distr}{Character vector of distribution curves to overlay on the histogram. Note that this uses the standard R names for the distributions which differ from those used in emon. Should be one or more of "norm" (normal), "pois" (Poisson), "lnorm" (log normal) and "nbinom" (negative binomial). By default all four are plotted.}
}
\value{
Primarily used directly plotting, but also invisibly returns a \code{histWithDist} object that can be stored for later plotting if needed.
}
\description{
Plots histogram with fitted distribution curves
}
\details{
This function uses \code{MASS::fitdistr} to fit distribution curves to \code{x} for the distributions specified, then overlays these on a histogram of \code{x}.
}
\author{
James Hutchison
}
|
c92e9ca278d9a0c52a2a2d695056644188fc9f5e
|
a18cc51bd8def3537206608adacdfbfe7a0f61da
|
/ui.R
|
848a9830287a34be6da5b2d5dc03089b62c7c867
|
[] |
no_license
|
rbjork/Developing-Data-Products
|
eae6b5f3adf396a60b6203289989fde4102b0bd7
|
da42840e4c2e1d1fd6d342642c052a729af172b3
|
refs/heads/master
| 2016-08-06T04:54:07.560033
| 2014-10-24T18:48:56
| 2014-10-24T18:48:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,080
|
r
|
ui.R
|
# UI for ROC
shinyUI(pageWithSidebar(
headerPanel("ROC of SVM exercise prediction by roll and pitch belt"),
sidebarPanel(
wellPanel(
h5("Select outputs to group for prediction"),
checkboxInput("A","A", FALSE),
checkboxInput("B","B", FALSE),
checkboxInput("C","C", TRUE),
checkboxInput("D","D", TRUE),
checkboxInput("E","E", FALSE),
actionButton("goButton", "Apply"),
h5("HELP"),
p("Documentation:",a("ROC plot from SVM",href="helprocsvm.html"))
),
sliderInput('gammaset', label='Set the Gamma for SVM',value = 10, min = 0, max = 20, step = .5),
sliderInput('costset', label='Set the Cost for SVM',value = 10, min = 0, max = 20, step = .5),
verbatimTextOutput("overallAccuracy"),
verbatimTextOutput("myConfusion")
),
mainPanel(
tabsetPanel(
tabPanel("Plots",
plotOutput('myROC'),
plotOutput('myROC2')
),
tabPanel("Table of Data",
h5("Random sample of 2000 from 20,000"),
tableOutput('myTable')
)
)
)
))
|
4d386ddaf5b1af2c58f89c37e3c37bf4ca5ebd14
|
3ca4f0bb87f51eb60051665434a71de294915f3d
|
/Edx/Null_Distribution.R
|
0847fe99e70138fa878a3c2d823f7f63022b9e2c
|
[] |
no_license
|
Labimide/Data-analysis-for-life-sciences
|
6b3c283471b7d8dc13db9870d78570b8f8d12844
|
ae878cc72fbaa9e56c3d2d0ad3900aad63d7a105
|
refs/heads/main
| 2023-07-08T05:50:49.041266
| 2021-08-17T13:44:48
| 2021-08-17T13:44:48
| 397,268,914
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,060
|
r
|
Null_Distribution.R
|
# upload packages
library(dplyr)
# upload data
x <- unlist (read.csv("femaleControlsPopulation.csv"))
# Exercise
## Sampling for 1000 times------------------------------------------------------------------------
set.seed(1)
n = 1000
nulls <- vector("numeric", n)
for (i in 1:n) {
placebo <- sample( x, 5)
nulls[i] <- mean(placebo)
}
diff <- nulls - mean(x)
mean( abs( diff ) > 1 )
## sampling for 10,000 times------------------------------------------------------------------------------
set.seed(1)
n = 10000
nulls <- vector("numeric", n)
for (i in 1:n) {
placebo <- sample( x, 5)
nulls[i] <- mean(placebo)
}
diff <- nulls - mean(x)
mean( abs( diff ) > 1 )
## sampling 50 mice for 1000 times------------------------------------------------------------------------------
set.seed(1)
n = 1000
nulls <- vector("numeric", n)
for (i in 1:n) {
placebo <- sample( x, 50)
nulls[i] <- mean(placebo)
}
diff <- nulls - mean(x)
mean( abs( diff ) > 1 )
|
c84257582f58e6d0f462d2daa5d7739380612ed9
|
1848a46cc64114e3d1525191afeca645527f10f4
|
/cachematrix.R
|
5b94ecc70d0e85036a24d7a55af33d5c1320c69a
|
[] |
no_license
|
NathanKim/ProgrammingAssignment2
|
154c39df9846c80aa37e215b72fef99e473b8d75
|
3d3bd0d38c1f60fddac7f778a75b04af62bc6599
|
refs/heads/master
| 2020-12-03T05:33:22.313550
| 2015-10-25T16:58:09
| 2015-10-25T16:58:09
| 44,433,805
| 0
| 0
| null | 2015-10-17T11:00:39
| 2015-10-17T11:00:38
| null |
UTF-8
|
R
| false
| false
| 1,116
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
#Creates a class, like a list
#Contains four functions
#set stores matrix in cache and get recalls
#setinverse/getinverse is the same but for original matrix
makeCacheMatrix <- function(x = matrix()) {
z <- NULL
#z matrix value
set <- function(y) {
x <<- y
z <<- NULL
}
get <- function() x
setInverse <- function(inverse) z <<- inverse
getInverse <- function() z
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
# Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
# object). The inverse is solved at most once: when a cached copy
# exists it is returned directly, otherwise solve() runs and the
# result is written back into the cache for future calls.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    # Cache hit — skip the (potentially expensive) solve().
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get(), ...)
  x$setInverse(inv)
  inv
}
|
5c4aeed20e362f6e57e5aa7c4d1dabd556e8a4a7
|
5c6581472562e43646b2ce0097c1acabdf765e3a
|
/pkg/R/convhulln.R
|
48f8c3c1dc002d7af0214e9e11bc8c6169262153
|
[] |
no_license
|
fguilhaumon/geometry
|
14d1e16fd9bd0f7660935cd80ce9a80ea695a637
|
fa2c83d5e81bc347c15bd3841bb956024f48079e
|
refs/heads/master
| 2020-03-17T18:59:06.764615
| 2018-05-17T16:57:20
| 2018-05-17T16:57:20
| 133,841,587
| 1
| 0
| null | 2018-05-17T16:36:28
| 2018-05-17T16:36:27
| null |
UTF-8
|
R
| false
| false
| 5,486
|
r
|
convhulln.R
|
##' Compute smallest convex hull that encloses a set of points
##'
##' Returns an index matrix to the points of simplices
##' (\dQuote{triangles}) that form the smallest convex simplical
##' complex of a set of input points in N-dimensional space. This
##' function interfaces the Qhull library.
##'
##' For silent operation, specify the option \code{Pp}.
##'
##' @param p An \code{n}-by-\code{dim} matrix. The rows of \code{p}
##' represent \code{n} points in \code{dim}-dimensional space.
##'
##' @param options String containing extra options for the underlying
##' Qhull command; see details below and Qhull documentation at
##' \url{http://www.qhull.org/html/qconvex.htm#synopsis}.
##'
##' @param return.non.triangulated.facets logical defining whether the
##' output facets should be triangulated; \code{FALSE} by default.
##'
##' @return If \code{return.non.triangulated.facets} is \code{FALSE}
##' (default), an \code{m}-by-\code{dim} index matrix of which each
##' row defines a \code{dim}-dimensional \dQuote{triangle}. If
##' \code{return.non.triangulated.facets} is \code{TRUE} the number
##' of columns equals the maximum number of vertices in a facet, and
##' each row defines a polygon corresponding to a facet of the
##' convex hull with its vertices followed by \code{NA}s until the
##' end of the row. The indices refer to the rows in \code{p}. If
##' the option \code{FA} is provided, then the output is a
##' \code{list} with entries \code{hull} containing the matrix
##' mentioned above, and \code{area} and \code{vol} with the
##' generalised area and volume of the hull described by the matrix.
##' When applying convhulln to a 3D object, these have the
##' conventional meanings: \code{vol} is the volume of enclosed by
##' the hull and \code{area} is the total area of the facets
##' comprising the hull's surface. However, in 2D the facets of the
##' hull are the lines of the perimeter. Thus \code{area} is the
##' length of the perimeter and \code{vol} is the area enclosed. If
##' \code{n} is in the \code{options} string, then the output is a
##' list with with entries \code{hull} containing the matrix
##' mentioned above, and \code{normals} containing hyperplane
##' normals with offsets \url{../doc/html/qh-opto.html#n}.
##'
##' @note This is a port of the Octave's (\url{http://www.octave.org})
##' geometry library. The Octave source was written by Kai Habel.
##'
##' See further notes in \code{\link{delaunayn}}.
##'
##' @author Raoul Grasman, Robert B. Gramacy, Pavlo Mozharovskyi and David Sterratt
##' \email{david.c.sterratt@ed.ac.uk}
##' @seealso \code{\link[tripack]{convex.hull}}, \code{\link{delaunayn}},
##' \code{\link{surf.tri}}, \code{\link{distmesh2d}}
##' @references \cite{Barber, C.B., Dobkin, D.P., and Huhdanpaa, H.T.,
##' \dQuote{The Quickhull algorithm for convex hulls,} \emph{ACM Trans. on
##' Mathematical Software,} Dec 1996.}
##'
##' \url{http://www.qhull.org}
##' @keywords math dplot graphs
##' @examples
##' # example convhulln
##' # ==> see also surf.tri to avoid unwanted messages printed to the console by qhull
##' ps <- matrix(rnorm(3000), ncol=3) # generate points on a sphere
##' ps <- sqrt(3)*ps/drop(sqrt((ps^2) %*% rep(1, 3)))
##' ts.surf <- t(convhulln(ps)) # see the qhull documentations for the options
##' \dontrun{
##' rgl.triangles(ps[ts.surf,1],ps[ts.surf,2],ps[ts.surf,3],col="blue",alpha=.2)
##' for(i in 1:(8*360)) rgl.viewpoint(i/8)
##' }
##'
##' @export
##' @useDynLib geometry
convhulln <- function (p, options = "Tv", return.non.triangulated.facets = FALSE) {
  ## Create a unique scratch directory inside the working directory so
  ## that parallel workers do not collide on a shared tempdir.
  makeRandomString <- function(n = 1, len = 12) {
    ## Generate `n` random alphanumeric strings of `len` characters each.
    ## (Internal helper; parameter renamed from the misspelt `lenght`.)
    randomString <- character(n)
    for (i in seq_len(n)) {
      randomString[i] <- paste(sample(c(0:9, letters, LETTERS),
                                      len, replace = TRUE),
                               collapse = "")
    }
    return(randomString)
  }
  tmpdir <- file.path(getwd(), makeRandomString())
  dir.create(tmpdir)
  ## Clean the scratch directory up even if the C call below errors
  ## (the original only unlinked on the success path, leaking tmpdir).
  on.exit(unlink(tmpdir, recursive = TRUE, force = TRUE), add = TRUE)

  ## Check the directory is writable.
  ## R should guarantee the tmpdir is writable, but check in any case.
  if (file.access(tmpdir, 2) == -1) {
    stop(paste("Unable to write to R temporary directory", tmpdir, "\n",
               "This is a known issue in the geometry package\n",
               "See https://r-forge.r-project.org/tracker/index.php?func=detail&aid=5738&group_id=1149&atid=4552"))
  }

  ## Input sanitisation: qhull takes the options as a single string.
  options <- paste(options, collapse = " ")

  ## Coerce the input to be a matrix.
  if (is.data.frame(p)) {
    p <- as.matrix(p)
  }

  ## Make sure we have real-valued input.
  storage.mode(p) <- "double"

  ## We need to check for NAs in the input, as these will crash the C
  ## code.
  if (any(is.na(p))) {
    stop("The first argument should not contain any NAs")
  }

  if (!return.non.triangulated.facets) {
    ## It is essential that convhulln is called with either the QJ or Qt
    ## option. Otherwise it may return a non-triangulated structure, i.e
    ## one with more than dim+1 points per structure, where dim is the
    ## dimension in which the points p reside.
    ## (Scalar && replaces the vectorized & in this scalar condition.)
    if (!grepl("Qt", options) && !grepl("QJ", options)) {
      options <- paste(options, "Qt")
    }
  }
  .Call("C_convhulln", p, as.character(options),
        as.integer(return.non.triangulated.facets), tmpdir,
        PACKAGE = "geometry")
}
|
1a13d2b107b4030c539567411e869900a9a822c6
|
ed2409820e00b5dfaa89513f709dc3bac61bb743
|
/Rcode/data_merge.R
|
f81ebf1e3fb3848d89fbb4a5189dccc93a56ec43
|
[] |
no_license
|
KevinCayenne/PROSOCIAL
|
e771dd8d2b399f50ce5d2e09b216e13894b7c96f
|
039df21546c7b15fa506e2e1bf2a0fcf7927f746
|
refs/heads/master
| 2021-01-23T20:27:46.825943
| 2018-11-07T13:11:39
| 2018-11-07T13:11:39
| 102,862,344
| 1
| 0
| null | null | null | null |
BIG5
|
R
| false
| false
| 37,750
|
r
|
data_merge.R
|
# data_merge.R -- merge the per-subject behavioural CSVs exported by the fMRI
# pilot task, derive timing columns, and build group-level plots and tests.
# NOTE(review): setwd() to an absolute local path makes the script
# machine-specific; kept because every relative path below depends on it.
setwd("C:/Users/acer/Desktop/PROS/Data/fMRI_PilotData")
library(stringi)
library(tidyverse)
library(ggplot2)
library(ggpubr)
library(gtools)
library(magrittr)
library(tidyr)
library(dplyr)
library(gridExtra)
library(ggsignif)
library(lme4)
library(lmerTest)
# All behavioural files, in natural ("mixed") sort order so each subject's
# six session files stay contiguous.
File.list <- mixedsort(list.files("behaviorD"))
combined <- paste("./behaviorD/", File.list, sep = "")
leng <- length(combined)
# Every subject contributes 6 files.
Subject.number <- leng / 6
# Read each file once and bind all rows in a single pass; the original loop
# grew merge.data with rbind() on every iteration (quadratic copying) and
# assumed at least two files existed.
merge.data <- do.call(rbind, lapply(combined, read.csv, header = TRUE, sep = ","))
behavior.df <- data.frame(merge.data)
############################## Adding columns ########################################
# Group sizes: GroupN == 1 is young, 2 is old; each subject has 64 trials.
youngnum <- round(table(behavior.df$GroupN)[1] / 64)
oldnum <- round(table(behavior.df$GroupN)[2] / 64)
allnum <- youngnum + oldnum
ncolbehavior.df <- ncol(behavior.df)  # number of original columns
# Derived timing columns.  All are simple column arithmetic, so they are
# computed vectorised here instead of the original row-by-row loop.
# Column positions used (from the task export): 5 TriggerS, 11 MDFirstP,
# 12 MDRT, 14 EFirstP, 15 EmoRT, 19/20 jitter durations, 21 long-fixation end,
# 22 fixOnsettime, 23 MDOnsettime, 24 ISIstart, 25 EmoOnsettime,
# 26 EmoEndtime, 27 TrialEnd.
behavior.df[[ncolbehavior.df + 1]] <- behavior.df[[12]] - behavior.df[[11]]   # money-decision RT
behavior.df[[ncolbehavior.df + 2]] <- behavior.df[[15]] - behavior.df[[14]]   # emotion-rating RT
behavior.df[[ncolbehavior.df + 3]] <- behavior.df[[27]] - behavior.df[[22]]   # ITI duration
behavior.df[[ncolbehavior.df + 4]] <- behavior.df[[24]] - behavior.df[[23]]   # money-decision duration
behavior.df[[ncolbehavior.df + 5]] <- behavior.df[[25]] - behavior.df[[24]]   # ISI duration
behavior.df[[ncolbehavior.df + 6]] <- behavior.df[[26]] - behavior.df[[25]]   # emotion-choice duration
behavior.df[[ncolbehavior.df + 7]] <- behavior.df[[27]] - behavior.df[[5]]    # trigger -> trial end
# Per-trial duration: first differences of the cumulative trigger-onset
# column (row 1 gets NA here and is fixed below, exactly as the original
# loop left it until after the second pass).
behavior.df[[ncolbehavior.df + 8]] <- c(NA, diff(behavior.df[[ncolbehavior.df + 7]]))
behavior.df[[ncolbehavior.df + 9]] <- behavior.df[[21]] - behavior.df[[5]]    # long-fixation duration
behavior.df[[ncolbehavior.df + 10]] <- behavior.df[[19]] + behavior.df[[20]] + 24000  # default trial duration
# First trial: subtract the long-fixation period instead of a previous trial.
behavior.df[1, ncolbehavior.df + 8] <- behavior.df[1, ncolbehavior.df + 7] - behavior.df[1, ncolbehavior.df + 9]
colnames(behavior.df)[(ncolbehavior.df + 1):(ncolbehavior.df + 10)] <-
  c("MoneyD_RT", "EmoD_RT", "ITI_D", "MoneyD", "ISI_D", "EmoD",
    "DTriggerOnset", "TrialD", "LongD", "DefaultT")
# Export a MATLAB-friendly copy without the free-text columns.
behavior.con <- behavior.df
behavior.con$SIT <- NULL
behavior.con$EmoRESP <- NULL
write.csv(behavior.con, file = "behavior.CSV", row.names = FALSE)
# for (j in c(1:Subject.number)){
#
# tryy.1 <- behavior.df[(1+((j-1)*64)):(j*64),]
# MD.mm <- matrix(list(), 4, 6)
# ED.mm <- matrix(list(), 7, 6)
#
# for (i in c(1:6)) {
# for (k in c(1:4)) {
# MD.mm[[k, i]] <- tryy.1[tryy.1$SessionN ==i & tryy.1$SITtag==k,]$MDOnsettime
# }
# }
#
# for (i in c(1:6)) {
# for (k in c(1:7)) {
# ED.mm[[k, i]] <- tryy.1[tryy.1$SessionN ==i & tryy.1$RegMtag ==k,]$EmoOnsettime
# }
# }
# write.csv(MD.mm, file = sprintf("%d-MD.csv", j), row.names = FALSE)
# write.csv(ED.mm, file = sprintf("%d-ED.csv", j), row.names = FALSE)
# }
########################## loop preprocessing ########################################
# Convert the id/tag columns to factors so tapply/ggplot treat them as
# categories (1-4: subject/group/session/sex ids, 13: SITtag, 17-18: tag
# columns -- TODO confirm the exact column mapping against the task export).
for (i in c(1,2,3,4,13,17,18)){
  behavior.df[ ,i] <- as.factor(behavior.df[ ,i])
}
MG.plot.width = 600  # common pixel width for the money-giving bar plots
# Per-subject plots: each subject occupies a contiguous block of 64 rows, so
# rows (1+((i-1)*64)):(i*64) select subject i.
for (i in c(1:Subject.number)){
  # Mean money given per situation (1 prosocial, 2 purchase, 3 neutral, 4 uncommon).
  Money <- as.vector(tapply(behavior.df$giveM[(1+((i-1)*64)):(i*64)], behavior.df$SITtag[(1+((i-1)*64)):(i*64)], mean))
  Situation <- as.vector(levels(behavior.df$SITtag[(1+((i-1)*64)):(i*64)]))
  ########################## start plotting ##########################################
  if (behavior.df$GroupN[(1+((i-1)*64))] == 1) { sub.group <- "Young" } else { sub.group <- "Old" }
  if (behavior.df$SexN[(1+((i-1)*64))] == 1) { sub.gender <- "Male" } else { sub.gender <- "Female" }
  sub.number <- as.character(behavior.df$SubjectN[(1+((i-1)*64))])
  # Per-situation SD divided by 8 for the error bars -- presumably intended as
  # SD/sqrt(16 trials)*something; TODO confirm the denominator.
  money.sd <- as.vector(tapply(behavior.df$giveM[(1+((i-1)*64)):(i*64)], behavior.df$SITtag[(1+((i-1)*64)):(i*64)], sd)/8)
  title.name <- sprintf("Average of money giving pilot_%s_%s_%s.", sub.number, sub.group, sub.gender)
  title.name.emotion <- sprintf("Emotional degree_%s_%s_%s.", sub.number, sub.group, sub.gender)
  # Bar plot of mean money given per situation with error bars.
  png(sprintf("Average of money giving_%s.png", sub.number), width = MG.plot.width, height = 700)
  print(MD.plot <- ggplot() +
          geom_bar(mapping = aes(x = Situation, y = Money),
                   stat = 'identity', position = 'dodge', color="black") +
          labs(title = title.name, x = "Conditions", y = "Unit: dollars") +
          ylim(c(0, 300)) +
          theme(plot.title = element_text(hjust = 0.5),
                title = element_text(size=15),
                legend.text = element_text(size=15),
                legend.title = element_text(size=15),
                axis.text = element_text(size=13),
                axis.title = element_text(size=13,face="bold")) +
          geom_text(mapping = aes(x = Situation, y = Money),
                    size = 4, colour = 'black', vjust = -0.5, hjust = .5,
                    label=format(Money, digits=4),
                    position = position_dodge(.9)) +
          scale_x_discrete(labels=c("1" = "Prosocial", "2" = "Purchase",
                                    "3" = "Neutral", "4" = "Uncommon")) +
          geom_errorbar(aes(x = Situation, ymin = Money, ymax = Money+money.sd), width = .3,
                        position = position_dodge(.9))
  )
  dev.off()
  ##### emotion plotting ######
  # Mean emotion rating per (money-regulation type x situation) cell for this subject.
  Emo.mean.bySIT <- tapply(behavior.df$EmoTag[(1+((i-1)*64)):(i*64)], list(behavior.df$RegMtag[(1+((i-1)*64)):(i*64)], behavior.df$SITtag[(1+((i-1)*64)):(i*64)]), mean)
  moneyReg.type <- as.factor(rep(c("300", "+50", "+20", "same", "-20", "-50", "0"),4))
  SIT.type <- as.factor(c(rep("prosocial",7),rep("purchase",7),rep("neutral",7),rep("Uncommon",7)))
  levels(moneyReg.type) <- list(all_give = "300", fifty_more = "+50", twenty_more = "+20", same = "same", twenty_less = "-20", fifty_less = "-50", none_give = "0")
  levels(SIT.type) <- list(prosocial = "prosocial", purchase = "purchase",neutral = "neutral", Uncommon = "Uncommon")
  # Flatten the 7x4 matrix column-major to match the tag vectors above.
  Emo.mean <- c(Emo.mean.bySIT[1:28])
  Emo.dataframe <- data.frame(Emo.mean, SIT.type, moneyReg.type)
  Emo.dataframe$moneyReg.type = factor(Emo.dataframe$moneyReg.type, levels = c('none_give','fifty_less','twenty_less','same','twenty_more','fifty_more','all_give'), order = T)
  png(sprintf("Emotional degree_%s.png", sub.number), width = 1000, height = 700)
  print(Emo.plot <- ggplot(data = Emo.dataframe, aes(x = SIT.type, y = Emo.mean)) +
          geom_bar(aes(fill = moneyReg.type),
                   stat = 'identity', position = 'dodge', color="black") +
          labs(title = title.name.emotion, x = "Situations", y = "Mean emotion degree", fill = "money regulation type") +
          ylim(c(-4, 4)) +
          theme(plot.title = element_text(hjust = 0.5),
                title = element_text(size=15),
                legend.text = element_text(size=12),
                legend.title = element_text(size=15),
                axis.text = element_text(size=13),
                axis.title = element_text(size=13,face="bold")) +
          geom_text(mapping = aes(x = SIT.type, y = Emo.mean, group = moneyReg.type),
                    size = 4, colour = 'black', vjust = -0.5, hjust = .5,
                    label=format(Emo.mean, digits=2),
                    position = position_dodge(width= .9))
  )
  dev.off()
  # Side-by-side money + emotion panel for this subject.
  png(sprintf("Subject_%s_mergedplot.png", sub.number), width = 1200, height = 700)
  print(subj_plot <- ggarrange(MD.plot, Emo.plot,
                               ncol = 2, nrow = 1))
  dev.off()
}
############################## ALL plotting MD + Emo ##########################################
## Total MD plot #####
# Group x situation means of money given.  tapply returns a 4 (situation) x 2
# (group) matrix; as.vector() flattens it column-major as
# (PRO.Y, PUR.Y, NEU.Y, UNC.Y, PRO.O, PUR.O, NEU.O, UNC.O).  The replace()
# swaps below interleave it to (PRO.Y, PRO.O, PUR.Y, PUR.O, NEU.Y, NEU.O,
# UNC.Y, UNC.O) so it lines up with the x / Group factors built further down.
ALL_Money <- as.vector(tapply(behavior.df$giveM, list(behavior.df$SITtag, behavior.df$GroupN), mean))
ALL_Money <- replace(ALL_Money, c(2,3), ALL_Money[c(3,2)])
ALL_Money <- replace(ALL_Money, c(2,5), ALL_Money[c(5,2)])
ALL_Money <- replace(ALL_Money, c(4,6), ALL_Money[c(6,4)])
ALL_Money <- replace(ALL_Money, c(6,7), ALL_Money[c(7,6)])
# NOTE(review): ALL_money.sd is computed but the plot uses ALL_Money.se below.
ALL_money.sd <- as.vector(tapply(behavior.df$giveM, list(behavior.df$SITtag, behavior.df$GroupN), sd)/sqrt(Subject.number))
# Between-subject standard errors per situation, separately for each group,
# then interleaved with the same swap sequence as the means.
ALL_Money_Y.se <- (apply(tapply(behavior.df$giveM, list(behavior.df$SITtag, behavior.df$SubjectN, behavior.df$GroupN), mean)[,,1], 1, sd, na.rm = T))/sqrt(youngnum)
ALL_Money_O.se <- (apply(tapply(behavior.df$giveM, list(behavior.df$SITtag, behavior.df$SubjectN, behavior.df$GroupN), mean)[,,2], 1, sd, na.rm = T))/sqrt(oldnum)
ALL_Money.se <- c(ALL_Money_Y.se, ALL_Money_O.se)
ALL_Money.se <- replace(ALL_Money.se, c(2,3), ALL_Money.se[c(3,2)])
ALL_Money.se <- replace(ALL_Money.se, c(2,5), ALL_Money.se[c(5,2)])
ALL_Money.se <- replace(ALL_Money.se, c(4,6), ALL_Money.se[c(6,4)])
ALL_Money.se <- replace(ALL_Money.se, c(6,7), ALL_Money.se[c(7,6)])
# x / Group factors matching the interleaved order above.
x <- as.factor(c(rep("prosocial",2),rep("purchase",2),rep("neutral",2),rep("Uncommon",2)))
Group <- as.factor(rep(c('Young','Old'),times = 4))
x <- factor(x, levels = levels(x))
levels(x) <- list(prosocial = "prosocial", purchase = "purchase",neutral = "neutral", Uncommon = "Uncommon")
levels(Group) <- list(Young = "Young", Old = "Old")
Group <- factor(Group , levels = c('Old','Young'), order = T)
# BUGFIX: title previously read "money givilang".
title.name <- sprintf("Average of money giving pilot_ALL(Old: %d, Young: %d)", oldnum, youngnum)
png(sprintf("Average of money giving_pilot_ALL.png"), width = MG.plot.width, height = 700)
print(total.MD.plot <- ggplot() +
        geom_bar(mapping = aes(x = x, y = ALL_Money, fill = Group),
                 stat = 'identity', position = 'dodge', color="black") +
        labs(title = title.name, x = "Conditions", y = "Unit: dollars") +
        ylim(c(0,300)) +
        theme(plot.title = element_text(hjust = 0.5),
              title = element_text(size=15),
              legend.text = element_text(size=15),
              legend.title = element_text(size=15),
              axis.text = element_text(size=13),
              axis.title = element_text(size=13,face="bold")) +
        geom_text(mapping = aes(x = x, y = ALL_Money, group = Group),
                  size = 4, colour = 'black', vjust = -0.5, hjust = .5,
                  label=format(ALL_Money, digits=4),
                  position = position_dodge(.9)) +
        geom_errorbar(aes(x = x, ymin = ALL_Money, ymax = ALL_Money + ALL_Money.se, group = Group), width= .3,
                      position = position_dodge(.9))
)
dev.off()
## Total emoD plot ####
# Grand-mean emotion rating per (money-regulation type x situation), pooled
# over all subjects and both groups.
Emo.mean.bySIT <- tapply(behavior.df$EmoTag, list(behavior.df$RegMtag, behavior.df$SITtag), mean)
moneyReg.type <- as.factor(rep(c("300", "+50", "+20", "same", "-20", "-50", "0"),4))
SIT.type <- as.factor(c(rep("prosocial",7),rep("purchase",7),rep("neutral",7),rep("Uncommon",7)))
levels(moneyReg.type) <- list(all_give = "300", fifty_more = "+50", twenty_more = "+20", same = "same", twenty_less = "-20", fifty_less = "-50", none_give = "0")
levels(SIT.type) <- list(prosocial = "prosocial", purchase = "purchase",neutral = "neutral", Uncommon = "Uncommon")
# Flatten the 7x4 matrix column-major so it lines up with the tag vectors above.
Emo.means <- c(Emo.mean.bySIT[1:28])
Emo.dataframe <- data.frame(Emo.means, SIT.type, moneyReg.type)
Emo.dataframe$moneyReg.type = factor(Emo.dataframe$moneyReg.type, levels = c('none_give','fifty_less','twenty_less','same','twenty_more','fifty_more','all_give'), order = T)
png(sprintf("Emotional degree_All.png"), width = 1000, height = 700)
print(total.emo.plot <- ggplot(data = Emo.dataframe, aes(x = SIT.type, y = Emo.means)) +
        geom_bar(aes(fill = moneyReg.type, group = moneyReg.type),
                 stat = 'identity', position = 'dodge', color="black") +
        labs(title = sprintf("Emotional degree_All (Old: %d, Young: %d)", oldnum, youngnum), x = "Situations", y = "Mean emotion degree", fill = "money regulation type") +
        ylim(c(-4, 4)) +
        theme(plot.title = element_text(hjust = 0.5),
              title = element_text(size=15),
              legend.text = element_text(size=12),
              legend.title = element_text(size=15),
              axis.text = element_text(size=13),
              axis.title = element_text(size=13,face="bold")) +
        geom_text(mapping = aes(x = SIT.type, y = Emo.means, label = "labs", group = moneyReg.type),
                  size = 4, colour = 'black', vjust = -0.5, hjust = .5,
                  label=format(Emo.means, digits=2),
                  stat = 'identity',
                  position = position_dodge(width = 0.9))
)
dev.off()
#### Group emoD ploting ####
# Emotion means split by group: tapply yields a 7 x 4 x 2 array
# (RegMtag x SITtag x GroupN); the first 28 cells are group 1 (young),
# the next 28 are group 2 (old).
Emo.mean.byGroup <- tapply(behavior.df$EmoTag, list(behavior.df$RegMtag, behavior.df$SITtag, behavior.df$GroupN), mean)
Emo.young.means <- c(Emo.mean.byGroup[1:28])
Emo.old.means <- c(Emo.mean.byGroup[29:56])
# NOTE(review): Emo.group.dataframe is built but never used -- the two plots
# below take data = Emo.dataframe and pick Emo.young.means / Emo.old.means up
# from the global environment instead.  Works, but presumably unintended.
Emo.group.dataframe <- data.frame(Emo.young.means, Emo.old.means, SIT.type, moneyReg.type)
Emo.dataframe$moneyReg.type = factor(Emo.dataframe$moneyReg.type, levels = c('none_give','fifty_less','twenty_less','same','twenty_more','fifty_more','all_give'), order = T)
# Young-group emotion bar plot.
group.emo.y.plot <- ggplot(data = Emo.dataframe, aes(x = SIT.type, y = Emo.young.means)) +
  geom_bar(aes(fill = moneyReg.type, group = moneyReg.type),
           stat = 'identity', position = 'dodge', color="black") +
  theme(plot.title = element_text(hjust = 0.5),
        title = element_text(size=15),
        legend.text = element_text(size=12),
        legend.title = element_text(size=15),
        axis.text = element_text(size=13),
        axis.title = element_text(size=13,face="bold")) +
  geom_text(mapping = aes(x = SIT.type, y = Emo.young.means, label = "labs", group = moneyReg.type),
            size = 4, colour = 'black', vjust = -0.5, hjust = .5,
            label=format(Emo.young.means, digits=2),
            stat = 'identity',
            position = position_dodge(width = 0.9)) +
  ylim(c(-4, 3))
# Old-group emotion bar plot.
group.emo.o.plot <- ggplot(data = Emo.dataframe, aes(x = SIT.type, y = Emo.old.means)) +
  geom_bar(aes(fill = moneyReg.type, group = moneyReg.type),
           stat = 'identity', position = 'dodge', color="black") +
  theme(plot.title = element_text(hjust = 0.5),
        title = element_text(size=15),
        legend.text = element_text(size=12),
        legend.title = element_text(size=15),
        axis.text = element_text(size=13),
        axis.title = element_text(size=13,face="bold")) +
  geom_text(mapping = aes(x = SIT.type, y = Emo.old.means, label = "labs", group = moneyReg.type),
            size = 4, colour = 'black', vjust = -0.5, hjust = .5,
            label=format(Emo.old.means, digits=2),
            stat = 'identity',
            position = position_dodge(width = 0.9)) +
  ylim(c(-4, 3))
##### Total MD and Emo merge ploting ####
# Combined money + emotion panel for the whole sample.
png(sprintf("Total_merge.png"), width = 1200, height = 700)
print(final_plot <- ggarrange(total.MD.plot, total.emo.plot,
                              ncol = 2, nrow = 1))
dev.off()
#### Total group Emo ploting ####
# Young vs old emotion panels side by side.
png(sprintf("Total_groupEmo_merge.png"), width = 1400, height = 700)
print(final_plot <- ggarrange(group.emo.y.plot, group.emo.o.plot,
                              ncol = 2, nrow = 1))
dev.off()
## RT plot ##
# Money-decision RT across all trials and subjects: one point per trial,
# a per-subject regression line, and a segment from the first press
# (MDFirstP) to the final response (MDRT) for every trial.
count_trial <- c(1:length(behavior.df$MDRT))
png(sprintf("RTplot_ALL.png"), width = 1200, height = 700)
print(RTplot <- ggplot(behavior.df, aes(count_trial, MDRT, colour = SubjectN)) +
        geom_point(aes(shape = factor(GroupN))) + geom_smooth(method = "lm") +
        geom_linerange(aes(ymin = MDFirstP, ymax = MDRT)))
dev.off()
# BUGFIX: a second dev.off() followed here in the original; the png device is
# already closed, so it would close an unrelated device (or error if none is
# open).  Removed.
# Quick interactive check: money given by situation, one boxplot per subject.
for (i in c(1:Subject.number)){
  boxplot(behavior.df$giveM[(1+((i-1)*64)):(i*64)] ~ behavior.df$SITtag[(1+((i-1)*64)):(i*64)])
}
################################### T-test ###########################################
# Per-subject summary (mean or sd) of money given for one situation x group
# cell.  tapply over (SITtag, SubjectN, GroupN) yields a 4 x nSubj x 2 array
# with NA cells for subjects outside the requested group, hence na.omit().
#   sit: 1 = prosocial, 2 = purchase, 3 = neutral, 4 = uncommon
#   grp: 1 = young,     2 = old
# This helper replaces sixteen verbatim copies of the same expression.
subj.giveM.stat <- function(sit, grp, FUN) {
  cube <- tapply(behavior.df$giveM,
                 list(behavior.df$SITtag, behavior.df$SubjectN, behavior.df$GroupN),
                 FUN)
  as.vector(na.omit(as.vector(cube[sit, , grp])))
}
Y.PRO.mean <- subj.giveM.stat(1, 1, mean)
O.PRO.mean <- subj.giveM.stat(1, 2, mean)
Y.PUR.mean <- subj.giveM.stat(2, 1, mean)
O.PUR.mean <- subj.giveM.stat(2, 2, mean)
Y.NEU.mean <- subj.giveM.stat(3, 1, mean)
O.NEU.mean <- subj.giveM.stat(3, 2, mean)
Y.UNC.mean <- subj.giveM.stat(4, 1, mean)
O.UNC.mean <- subj.giveM.stat(4, 2, mean)
Y.PRO.sd <- subj.giveM.stat(1, 1, sd)
O.PRO.sd <- subj.giveM.stat(1, 2, sd)
Y.PUR.sd <- subj.giveM.stat(2, 1, sd)
O.PUR.sd <- subj.giveM.stat(2, 2, sd)
Y.NEU.sd <- subj.giveM.stat(3, 1, sd)
O.NEU.sd <- subj.giveM.stat(3, 2, sd)
Y.UNC.sd <- subj.giveM.stat(4, 1, sd)
O.UNC.sd <- subj.giveM.stat(4, 2, sd)
# Descriptives for the prosocial condition.
mean(O.PRO.mean)
sd(O.PRO.mean)
mean(Y.PRO.mean)
sd(Y.PRO.mean)
std <- function(x) sd(x)/sqrt(length(x))  # standard error of the mean
std(Y.PRO.mean)
# One-tailed test: old give more than young in the prosocial condition.
T.PRO.oneT <- t.test(O.PRO.mean,Y.PRO.mean, alternative = "greater")
# Two-tailed young-vs-old Welch tests, one per situation.
T.PRO <- t.test(Y.PRO.mean,O.PRO.mean)
T.PUR <- t.test(Y.PUR.mean,O.PUR.mean)
T.NEU <- t.test(Y.NEU.mean,O.NEU.mean)
T.UNC <- t.test(Y.UNC.mean,O.UNC.mean)
ALL_T_MD_Y_O_Tscore <- c(T.PRO$statistic, T.PUR$statistic, T.NEU$statistic, T.UNC$statistic)
ALL_T_MD_Y_O <- c(T.PRO$p.value, T.PUR$p.value, T.NEU$p.value, T.UNC$p.value)
names(ALL_T_MD_Y_O) <- c("T.PRO", "T.PUR", "T.NEU", "T.UNC")
# Pairwise situation contrasts within each group.
YT.PRO_PUR <- t.test(Y.PRO.mean, Y.PUR.mean)
YT.PRO_NEU <- t.test(Y.PRO.mean, Y.NEU.mean)
YT.PRO_UNC <- t.test(Y.PRO.mean, Y.UNC.mean)
YT.PUR_NEU <- t.test(Y.PUR.mean, Y.NEU.mean)
YT.PUR_UNC <- t.test(Y.PUR.mean, Y.UNC.mean)
YT.UNC_NEU <- t.test(Y.UNC.mean, Y.NEU.mean)
OT.PRO_PUR <- t.test(O.PRO.mean, O.PUR.mean)
OT.PRO_NEU <- t.test(O.PRO.mean, O.NEU.mean)
OT.PRO_UNC <- t.test(O.PRO.mean, O.UNC.mean)
OT.PUR_NEU <- t.test(O.PUR.mean, O.NEU.mean)
OT.PUR_UNC <- t.test(O.PUR.mean, O.UNC.mean)
OT.UNC_NEU <- t.test(O.UNC.mean, O.NEU.mean)
ALL_Young_T <- c(YT.PRO_PUR$p.value, YT.PRO_NEU$p.value, YT.PRO_UNC$p.value, YT.PUR_NEU$p.value, YT.PUR_UNC$p.value, YT.UNC_NEU$p.value)
names(ALL_Young_T) <- c("T.PRO_PUR", "T.PRO_NEU", "T.PRO_UNC", "T.PUR_NEU", "T.PUR_UNC", "T.UNC_NEU")
ALL_Old_T <- c(OT.PRO_PUR$p.value, OT.PRO_NEU$p.value, OT.PRO_UNC$p.value, OT.PUR_NEU$p.value, OT.PUR_UNC$p.value, OT.UNC_NEU$p.value)
names(ALL_Old_T) <- c("T.PRO_PUR", "T.PRO_NEU", "T.PRO_UNC", "T.PUR_NEU", "T.PUR_UNC", "T.UNC_NEU")
# p-value table: rows = group, columns = contrast.
rbind(ALL_Young_T, ALL_Old_T)
###################### ALL boxplot ###########################
# Long-format data frame of the per-subject situation means computed above:
# one row per subject x situation, with situation and group tags.
total.boxplot.mean_money.vector <- c(Y.PRO.mean, O.PRO.mean, Y.PUR.mean, O.PUR.mean, Y.NEU.mean, O.NEU.mean, Y.UNC.mean, O.UNC.mean)
total.boxplot.sit.vector <- as.factor(c(rep("PROS", Subject.number), rep("PUR", Subject.number), rep("NEU", Subject.number), rep("UNC", Subject.number)))
levels(total.boxplot.sit.vector) <- list(PRO = "PROS", PUR = "PUR", NEU = "NEU", UNC = "UNC")
total.boxplot.group.vector <- as.factor(c(rep(c(rep("Young", youngnum),rep("Old", oldnum)),4)))
total.boxplot <- data.frame(total.boxplot.mean_money.vector, total.boxplot.sit.vector, total.boxplot.group.vector)
levels(total.boxplot$total.boxplot.group.vector) <- list(Young = "Young", Old = "Old")
# Boxplot + dotplot of money given, by situation and group.
# NOTE(review): title.name still holds the last value assigned above (the
# group-level bar-plot title) -- confirm that is the intended title here.
png(sprintf("Average of money giving_pilot_boxplot_ALL.png"), width = MG.plot.width, height = 700)
print(total.MD.boxplot <- ggplot(total.boxplot,
                                 aes(x = total.boxplot.sit.vector,
                                     y = total.boxplot.mean_money.vector, fill = total.boxplot.group.vector)) +
        geom_boxplot(aes(fill = total.boxplot.group.vector),
                     position=position_dodge(.9)) +
        geom_dotplot(binaxis='y', stackdir='center', binwidth=3,
                     position=position_dodge(.9)) +
        stat_summary(fun.y=mean, geom="point", shape=18, size=3,
                     position=position_dodge(.9)) +
        labs(title = title.name, x = "Conditions", y = "Unit: dollars", fill = "Group") +
        theme(plot.title = element_text(hjust = 0.5),
              title = element_text(size=15),
              legend.text = element_text(size=15),
              legend.title = element_text(size=15),
              axis.text = element_text(size=13),
              axis.title = element_text(size=13,face="bold")) +
        ylim(c(0,300))
)
dev.off()
##### plot group RT boxplot ####
# Quick descriptives of decision / emotion RTs by group.
mean(behavior.df$MDFirstP)
mean(behavior.df$MDRT)
tapply(behavior.df$MDRT, behavior.df$GroupN, mean)
tapply(behavior.df$MDFirstP, behavior.df$GroupN, mean)
# Six boxplots: first press, final RT, and their difference, for the money
# decision (MD*) and the emotion rating (EM*).
group_MDrt_boxplot <- ggplot(behavior.df, aes(x=GroupN, y=MDRT, group = GroupN)) +
  geom_boxplot() +
  geom_dotplot(binaxis='y', stackdir='center', dotsize=.2) +
  ylim(0, 12000)
group_MDfirstP__boxplot <- ggplot(behavior.df, aes(x=GroupN, y=MDFirstP, group = GroupN)) +
  geom_boxplot() +
  geom_dotplot(binaxis='y', stackdir='center', dotsize=.2) +
  ylim(0, 12000)
group_MD_RTdur__boxplot <- ggplot(behavior.df, aes(x=GroupN, y=(MDRT-MDFirstP), group = GroupN)) +
  geom_boxplot() +
  geom_dotplot(binaxis='y', stackdir='center', dotsize=.2) +
  ylim(0, 12000)
group_EMrt_boxplot <- ggplot(behavior.df, aes(x=GroupN, y=EmoRT, group = GroupN)) +
  geom_boxplot() +
  geom_dotplot(binaxis='y', stackdir='center', dotsize=.2) +
  ylim(0, 12000)
group_EMfirstP__boxplot <- ggplot(behavior.df, aes(x=GroupN, y=EFirstP, group = GroupN)) +
  geom_boxplot() +
  geom_dotplot(binaxis='y', stackdir='center', dotsize=.2) +
  ylim(0, 12000)
group_EM_RTdur__boxplot <- ggplot(behavior.df, aes(x=GroupN, y=(EmoRT-EFirstP), group = GroupN)) +
  geom_boxplot() +
  geom_dotplot(binaxis='y', stackdir='center', dotsize=.2) +
  ylim(0, 12000)
png(sprintf("RT_boxplot_ALL.png"), width = 1000, height = 800)
print(grid.arrange(group_MDfirstP__boxplot, group_MDrt_boxplot, group_MD_RTdur__boxplot, group_EMfirstP__boxplot, group_EMrt_boxplot, group_EM_RTdur__boxplot, nrow=2, ncol=3))
dev.off()
# BUGFIX: the original called dev.off() twice here; the second call targeted
# an unrelated device (or errored with none open).  Removed.
# Base-graphics sanity checks on the interactive device; the final dev.off()
# closes that device to reset par(mfrow).
par(mfrow=c(1,4))
boxplot(behavior.df$MDRT, behavior.df$EmoD_RT)
boxplot(c(behavior.df$MDRT, behavior.df$EmoD_RT))
boxplot(behavior.df$MDFirstP, behavior.df$EFirstP)
boxplot(c(behavior.df$MDFirstP, behavior.df$EFirstP))
dev.off()
# Per-situation t-tests of the group difference in mean money given.
compare_means(total.boxplot.mean_money.vector ~ total.boxplot.group.vector, group.by = "total.boxplot.sit.vector", data = total.boxplot, method = "t.test")
#### ggline ####
# Line plot of group means per situation with significance stars.
png(sprintf("Mean money giving ggline by situations.png"), width = 800, height = 800)
print(total.ggplot.mmoney <- ggline(total.boxplot, x = "total.boxplot.sit.vector", y = "total.boxplot.mean_money.vector", add= c("mean_se"),
                                    color = "total.boxplot.group.vector", fill = "total.boxplot.group.vector",
                                    palette = "jco", size=3, add.params = list(group = "total.boxplot.group.vector"),
                                    position = position_dodge(0.8), order = c("PRO", "PUR", "NEU", "UNC")) +
        labs(x = "Situations", y = "Mean Money Given (NTD)", colour = "Groups") +
        stat_compare_means(aes(group = total.boxplot.group.vector),
                           label.y = 230, size = 20, label = "p.signif") +
        theme(plot.title = element_text(hjust = 0.5, face="bold")) +
        theme(plot.title = element_text(hjust = 0.5),
              title = element_text(size=30),
              legend.text = element_text(size=45),
              legend.title = element_text(size=45),
              axis.text = element_text(size=45),
              axis.title = element_text(size=45,face="bold")
        )
)
dev.off()
# p <- ggplot(data = total.boxplot, aes(x = total.boxplot.sit.vector, y = total.boxplot.mean_money.vector,
#                                       fill = total.boxplot.sit.vector)) +
#   geom_line(aes(group = total.boxplot.group.vector, colour = total.boxplot.group.vector), position = position_dodge(1))
# Violin version of the same group comparison.
v <- ggviolin(total.boxplot, x = "total.boxplot.sit.vector", y = "total.boxplot.mean_money.vector",
              color = "total.boxplot.group.vector", palette = "jco", width = 1.5) +
  labs(title = "Group difference in money giving for each situation", x = "Situations", y = "Money (NT dollars)", colour = "Groups", fill = "Fill") +
  stat_compare_means(aes(group = total.boxplot.group.vector), label = "p.signif",
                     label.y = 300) +
  theme(plot.title = element_text(hjust = 0.5, size= 15)) +
  ylim(0,300)
png(sprintf("Mean money giving ggline by situations_violin.png"), width = 600, height = 600)
ggadd(v, add = c("mean_se", "dotplot"), fill = "total.boxplot.group.vector", position = position_dodge(0.8), binwidth = 6)
dev.off()
# Two-way (group x situation) linear model on the per-subject means.
# NOTE(review): the level list below maps "PROS", but the factor's levels were
# already renamed to PRO/PUR/NEU/UNC earlier, so "PROS" matches nothing and
# the unmatched "PRO" level becomes NA -- verify this releveling is intended.
levels(total.boxplot$total.boxplot.group.vector) <- list(Young = "Young", Old = "Old")
levels(total.boxplot$total.boxplot.sit.vector) <- list(NEU = "NEU",PUR = "PUR",PROS = "PROS",UNC = "UNC")
TT <- lm(total.boxplot.mean_money.vector ~ total.boxplot.group.vector *total.boxplot.sit.vector , data = total.boxplot)
summary.TT <- summary(TT)
summary.TT
#### All ggline emotional section ####
# Per-subject emotion means for every (situation x regulation) cell, flattened
# column-major: situation varies fastest, then regulation type, then subject
# (young subjects first, then old).  The tag vectors below mirror that order.
all.emo.vector <- as.vector(na.omit(as.vector(tapply(behavior.df$EmoTag, list(behavior.df$SITtag, behavior.df$RegMtag, behavior.df$SubjectN, behavior.df$GroupN), mean))))
all.emo.group.tag <- as.factor(rep(c("Young","Old"),c((youngnum*28), (oldnum*28))))
all.emo.sit.tag <- as.factor(rep(c("PRO","PUR","NEU","UNC"), length(all.emo.vector)/4))
all.emo.tag <- as.factor(rep(rep(c("300", "+50", "+20", "same", "-20", "-50", "0"), c(4,4,4,4,4,4,4)), Subject.number))
all.subject.tag <- as.factor(rep(1:allnum, each = 28))
levels(all.emo.sit.tag) <- list(PRO = "PRO", PUR = "PUR", NEU = "NEU", UNC = "UNC")
levels(all.emo.group.tag) <- list(Young = "Young", Old = "Old")
levels(all.emo.tag) <- list("0" = "0", "-50" = "-50", "-20" = "-20", same = "same", "+20" = "+20", "+50" = "+50", "300" = "300")
all.emo.dataf.o <- data.frame(all.emo.vector, all.emo.group.tag, all.emo.sit.tag, all.emo.tag, all.subject.tag)
fs <- 40  # base font size for the faceted plot
# Emotion reaction by regulation type, one facet per situation, young vs old.
png(sprintf("Emo_ggline_by_situations.png"), width = 3000, height = 900)
ggline(all.emo.dataf.o, x = "all.emo.tag", y = "all.emo.vector", add = c("mean_se"),
       color = "all.emo.group.tag", palette = "jco",
       add.params = list(group = "all.emo.group.tag"),
       facet.by = "all.emo.sit.tag", size=3, point.size =3) +
  labs(x = "Money Regulation Type", y = "Emotion Reaction", colour = "Group") +
  theme(plot.title = element_text(hjust = 0.5, size= fs)) +
  stat_compare_means(aes(group = all.emo.group.tag), label = "p.signif",
                     label.y = 4.5, size = 15) +
  geom_hline(yintercept = 0) +
  facet_wrap( ~ all.emo.sit.tag, nrow=1, ncol=4) +
  theme(plot.title = element_text(hjust = 0.5),
        title = element_text(size=fs, face="bold"),
        legend.text = element_text(size=fs),
        legend.title = element_text(size=fs),
        axis.text = element_text(size=fs),
        axis.title = element_text(size=fs,face="bold"),
        text = element_text(size=fs)
  )
dev.off()
# Mixed models: regulation x situation (x group) with by-subject random effects.
anova(lmer(all.emo.vector ~ all.emo.tag*all.emo.sit.tag + (1|all.subject.tag) + (1|all.emo.tag:all.subject.tag) + (1|all.emo.sit.tag:all.subject.tag), data = all.emo.dataf.o))
aa <- lmer(all.emo.vector ~ all.emo.tag*all.emo.sit.tag*all.emo.group.tag +
             (1|all.subject.tag) +
             (1|all.emo.tag:all.subject.tag) + (1|all.emo.sit.tag:all.subject.tag) + (1|all.emo.group.tag:all.subject.tag) +
             (1|all.emo.sit.tag:all.emo.group.tag:all.subject.tag) +
             (1|all.emo.tag:all.emo.group.tag:all.subject.tag), data = all.emo.dataf.o)
# Fixed-effect counterparts.  BUGFIX: these referenced `all.emo.dataf`, which
# is never created (only all.emo.dataf.o exists), so both calls errored with
# "object not found".
emlm <- lm(all.emo.vector ~ all.emo.group.tag * all.emo.sit.tag * all.emo.tag , data = all.emo.dataf.o)
summary(emlm)
em.lmer <- lmer(all.emo.vector ~ all.emo.group.tag * all.emo.sit.tag * (1|all.emo.tag) , data = all.emo.dataf.o)
summary(em.lmer)
# Per-subject rows split out by group x situation for the interaction
# contrasts below (subject order is identical within a group, so the
# subtractions below are paired within subject).
Y.PRO.vec <- total.boxplot[total.boxplot$total.boxplot.group.vector=="Young" & total.boxplot$total.boxplot.sit.vector=="PRO",]
Y.PUR.vec <- total.boxplot[total.boxplot$total.boxplot.group.vector=="Young" & total.boxplot$total.boxplot.sit.vector=="PUR",]
Y.NEU.vec <- total.boxplot[total.boxplot$total.boxplot.group.vector=="Young" & total.boxplot$total.boxplot.sit.vector=="NEU",]
O.PRO.vec <- total.boxplot[total.boxplot$total.boxplot.group.vector=="Old" & total.boxplot$total.boxplot.sit.vector=="PRO",]
O.PUR.vec <- total.boxplot[total.boxplot$total.boxplot.group.vector=="Old" & total.boxplot$total.boxplot.sit.vector=="PUR",]
O.NEU.vec <- total.boxplot[total.boxplot$total.boxplot.group.vector=="Old" & total.boxplot$total.boxplot.sit.vector=="NEU",]
# Within-subject difference scores (prosocial minus control situations).
inter.Y.PRO.PUR <- Y.PRO.vec$total.boxplot.mean_money.vector - Y.PUR.vec$total.boxplot.mean_money.vector
inter.Y.PRO.NEU <- Y.PRO.vec$total.boxplot.mean_money.vector - Y.NEU.vec$total.boxplot.mean_money.vector
inter.O.PRO.PUR <- O.PRO.vec$total.boxplot.mean_money.vector - O.PUR.vec$total.boxplot.mean_money.vector
inter.O.PRO.NEU <- O.PRO.vec$total.boxplot.mean_money.vector - O.NEU.vec$total.boxplot.mean_money.vector
# Group comparison of the difference scores (the interaction test).
t.test(inter.O.PRO.PUR, inter.Y.PRO.PUR)
t.test(inter.O.PRO.NEU, inter.Y.PRO.NEU)
inter.Y.PRO.PUR.mean <- mean(inter.Y.PRO.PUR)
inter.Y.PRO.PUR.se <- sd(inter.Y.PRO.PUR)/sqrt(length(inter.Y.PRO.PUR))
inter.Y.PRO.NEU.mean <- mean(inter.Y.PRO.NEU)
inter.Y.PRO.NEU.se <- sd(inter.Y.PRO.NEU)/sqrt(length(inter.Y.PRO.NEU))
inter.O.PRO.PUR.mean <- mean(inter.O.PRO.PUR)
inter.O.PRO.PUR.se <- sd(inter.O.PRO.PUR)/sqrt(length(inter.O.PRO.PUR))
inter.O.PRO.NEU.mean <- mean(inter.O.PRO.NEU)
inter.O.PRO.NEU.se <- sd(inter.O.PRO.NEU)/sqrt(length(inter.O.PRO.NEU))
inter.mean <- c(inter.Y.PRO.PUR.mean, inter.O.PRO.PUR.mean, inter.Y.PRO.NEU.mean, inter.O.PRO.NEU.mean)
inter.se <- c(inter.Y.PRO.PUR.se, inter.O.PRO.PUR.se, inter.Y.PRO.NEU.se, inter.O.PRO.NEU.se)
inter.tag <- as.factor(c("PRO-PUR", "PRO-PUR", "PRO-NEU", "PRO-NEU"))
inter.group <- as.factor(c("Young","Old","Young","Old"))
inter.total.money <- data.frame(inter.mean, inter.se, inter.tag, inter.group)
# Bar plot of the interaction difference scores.
# NOTE(review): the "**" significance annotations are hard-coded, not derived
# from the t-tests above -- keep them in sync manually.
a <- ggplot(inter.total.money, aes(x=inter.tag, y=inter.mean, fill=inter.group)) +
  geom_bar(position=position_dodge(), stat="identity",
           colour="black", # Use black outlines,
           size=.3) + # Thinner lines
  geom_errorbar(aes(ymin=inter.mean, ymax=inter.mean+inter.se),
                size=.5, # Thinner lines
                width=.5,
                position=position_dodge(.9)) +
  geom_signif(y_position=c(125, 100), xmin=c(0.8, 1.8), xmax=c(1.2, 2.2),
              annotation=c("**", "**"), textsize=20, tip_length=0) +
  labs(y = "Mean Money Given (NTD)",
       x = "Interaction",
       colour = "Groups", fill = "Group") +
  theme(plot.title = element_text(hjust = 0.5),
        title = element_text(size=30, face="bold"),
        legend.text = element_text(size=45),
        legend.title = element_text(size=45),
        axis.text = element_text(size=45),
        axis.title = element_text(size=45,face="bold")
  ) +
  scale_fill_manual("Groups", values = c("Old" = "#E5BF21", "Young" = "#0075C9")) +
  ylim(c(0,150))
# Combined main-effect + interaction figure.
png(sprintf("Mean_money_giving_ggline_by_situations.png"), width = 1800, height = 800)
grid.arrange(total.ggplot.mmoney, a, ncol=2)
dev.off()
##### gender differences
# Per-subject mean money given, broken down by gender, situation and group
gender.diff <- aggregate(behavior.df$giveM, by = list(gender = behavior.df$SexN, sit = behavior.df$SITtag, id = behavior.df$SubjectN, group = behavior.df$GroupN), mean)
# Recode numeric codes into readable factor labels
levels(gender.diff$gender) <- list(male = "1", female = "2")
levels(gender.diff$sit) <- list(PRO = "1", PUR = "2", NEU = "3", UNC ="4")
levels(gender.diff$group) <- list(Young = "1", Old = "2")
# Line plot of money given by situation, split by gender, with
# per-situation gender comparisons (stat_compare_means) annotated
ggline(gender.diff, x = "sit", y = "x", add = c("mean_se", "jitter"),
color = "gender", palette = "jco", position = position_dodge(0.3)) +
labs(title = "Gender difference in money giving", x = "Situation", y = "Money (NTD)", colour = "Gender") +
theme(plot.title = element_text(hjust = 0.5, size= 15)) +
stat_compare_means(aes(group = gender), label = "p.signif",
label.y = 250)
# Same plot, faceted by age group
ggline(gender.diff, x = "sit", y = "x", add = c("mean_se", "point"),
color = "gender", palette = "jco", facet.by = "group",add.params = list(color = "gender"), position = position_dodge()) +
labs(title = "Gender difference in money giving by group", x = "Situation", y = "Money (NTD)", colour = "Gender") +
theme(plot.title = element_text(hjust = 0.5, size= 15)) +
stat_compare_means(aes(group = gender), label = "p.signif",
label.y = 250)
# Minimal version: mean +/- SE only
ggline(gender.diff, x = "sit", y = "x",
add = "mean_se", color = "gender", palette = "jco", add.params = list(group = "gender"),
position = position_dodge(10) # Adjust the space between bars
)
####
# Three-way ANOVA: gender x group x situation on the per-subject means
anova(lm(x ~ gender*group*sit, gender.diff))
# Subject-by-situation table of mean money given
tapply(behavior.df$giveM, list(behavior.df$SubjectN, behavior.df$SITtag), mean)
|
ce267e25d22961138fd6de4f40d3f2f2fe81bcad
|
3f869c3ddc067a08c929d5d326e50f4f5605534d
|
/R/autoflow.R
|
2e97b4fe5f547da9d0a1bf1d880e536306d17f65
|
[
"Apache-2.0"
] |
permissive
|
floesche/gpflowr
|
4a076ad4b415240584e1a12d481486f089e002bd
|
df76da315e28acc696e3193b744f4d6218c22713
|
refs/heads/master
| 2021-01-20T05:54:28.148853
| 2016-11-30T04:08:15
| 2016-11-30T04:08:15
| 89,821,231
| 0
| 0
| null | 2017-04-30T00:46:07
| 2017-04-30T00:46:07
| null |
UTF-8
|
R
| false
| false
| 2,385
|
r
|
autoflow.R
|
# autoflow trickery
# This function is designed for use on methods of the Parameterized class
# (below).
#
# The idea is that methods that compute relevant quantities (such as
# predictions) can define a tf graph which we automatically run when the
# (decorated) function is called. Not only is the syntax cleaner, but multiple
# calls to the method will result in the graph being constructed only once.
#
# the function `autoflow()` (below) should be used in the `initialize()` method,
# to overwrite a public method by wrapping it with this
# name is a string saying which method to overwrite
# dots enables the user to pass a list of placeholders corresponding to the arguments of the mehtod
# function to apply AutoFlow to an already defined method in an R6 generator's
# initialize() method. dots accepts dtype objects to create placeholders for the
# arguments of the method being overwritten
# Overwrite the public method `name` on the calling R6 object with a wrapper
# that lazily builds a TensorFlow graph + session the first time it is
# called, then reuses them on every subsequent call.
#
# Args:
#   name: string, name of the method on `self` to wrap.
#   ...:  tf placeholder objects, one per argument of the wrapped method;
#         they are used both to build the graph and as feed_dict keys.
#
# NOTE(review): must be called from inside an R6 initialize() method —
# it grabs `self` from the caller's frame via parent.frame().
# `has()` and `dictify()` are package-local helpers not visible here;
# presumably has() tests for a named element and dictify() builds a
# feed_dict from placeholder/value pairs — confirm in the package utils.
autoflow <- function(name, ...) {
# list of placeholder tensors
placeholder_list <- list(...)
# grab the R6 object and the function we're overwriting
self <- parent.frame()$self
tf_method <- self[[name]]
# create a storage name (one cache slot per wrapped method)
storage_name <- sprintf('_%s_AF_storage', name)
# define the replacement function
runnable <- function (...) {
# if it's already defined, grab the cached graph and session
if (has(self[['.tf_mode_storage']], storage_name)) {
storage <- self[['.tf_mode_storage']][[storage_name]]
} else {
# otherwise, build the graph and session once
storage <- list()
storage[['session']] <- tf$Session()
storage[['tf_args']] <- placeholder_list
# placeholder for the model's free (optimisable) parameter vector
storage[['free_vars']] <- tf$placeholder(tf$float64, shape(NULL))
self$make_tf_array(storage[['free_vars']])
# build the graph by calling the original method on the placeholders
storage[['tf_result']] <- do.call(tf_method, storage[['tf_args']])
# store the storage object for next time
self[['.tf_mode_storage']][[storage_name]] <- storage
}
# map the placeholders to the values passed by the caller
feed_dict <- dictify(placeholder_list, list(...))
# execute the graph with the newly created feed dict
storage[['session']]$run(storage[['tf_result']],
feed_dict = feed_dict)
}
# R6 public bindings are locked; temporarily unlock to swap in the wrapper
unlockBinding(name, self)
self[[name]] <- runnable
lockBinding(name, self)
}
|
bd5e82cd191ab159a76267ab9435950aa554ddb3
|
2e9bca3f8f367a943f1c729bff3ac2f29aa83a18
|
/man/transformation.Rd
|
2442650da8e19b2e8c5fd11661b03f633bf76e56
|
[] |
no_license
|
wenbostar/metaX
|
3bee68e13d2cf12c5279ea224ea2cd27ecdba93d
|
08c56fed31f2144f00afeff75f1c5f10781a3dc4
|
refs/heads/master
| 2023-06-26T15:14:56.327264
| 2023-06-16T06:11:56
| 2023-06-16T06:11:56
| 36,590,811
| 19
| 12
| null | null | null | null |
UTF-8
|
R
| false
| true
| 928
|
rd
|
transformation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prep.R
\name{transformation}
\alias{transformation}
\title{Data transformation}
\usage{
transformation(para, method = 1, valueID = "valueNorm", ...)
}
\arguments{
\item{para}{An metaX object}
\item{method}{The method for transformation, 0=none, 1=log, 2=Cube root, 3=glog}
\item{valueID}{The name of column used for transformation}
\item{...}{Additional parameter}
}
\value{
An new metaX object
}
\description{
Data transformation
}
\examples{
para <- new("metaXpara")
pfile <- system.file("extdata/MTBLS79.txt",package = "metaX")
sfile <- system.file("extdata/MTBLS79_sampleList.txt",package = "metaX")
rawPeaks(para) <- read.delim(pfile,check.names = FALSE)
sampleListFile(para) <- sfile
para <- reSetPeaksData(para)
para <- missingValueImpute(para)
para <- transformation(para,valueID = "value")
}
\author{
Bo Wen \email{wenbostar@gmail.com}
}
|
e2207127fac32c5884d608b01534526f62caa1b7
|
d9c2fb53d478dbecee1a7eb76465e2b1e7193a3f
|
/first-post.R
|
77128c13a029bc845d5b90e4b5098285dc21f00d
|
[] |
no_license
|
nibedeeta/blog-posts
|
e2aedce9574d5958e5000e7775fafa80a22fa505
|
28128790393d3a7dbd3d9c7d2633df6f8ea76ca8
|
refs/heads/master
| 2020-03-15T21:24:23.803431
| 2018-05-06T16:06:03
| 2018-05-06T16:06:03
| 132,354,367
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,606
|
r
|
first-post.R
|
library(readr)
library(tidyr)
library(R2jags)
# Import data -------------------------------------------------------------
# Champions League 2017 results; one row per game.
cleague2017 <- read_csv("first-post-data.csv")
# variable names have spaces (i.e. <Home Team>) and this bothers me IMMENSLY, so:
names(cleague2017) <- c("Round", "Date", "Location", "HomeTeam",
"AwayTeam", "Group", "Result")
# split result column into goals for the home team <HomeGoals> and for the away
# team <AwayGoals>
cleague2017$HomeTeam <- factor(cleague2017$`HomeTeam`)
# NOTE: the separate() call below must run before the factor conversions in
# the original ordering — kept exactly as written.
cleague2017 <- separate(cleague2017, col = "Result", into = c("HomeGoals", "AwayGoals"), sep = " - ", convert = T)
# convert everything we can into factor:
## this we are going to use in jags
# Away teams share the home-team level order so indices match in the model
cleague2017$AwayTeam <- factor(cleague2017$`AwayTeam`, levels = levels(cleague2017$`HomeTeam`))
## this we may use in jags
cleague2017$Round <- factor(cleague2017$Round)
# levels are a mess, there is one different value for each round ARGH!
# collapse the per-matchday labels into four competition phases
levels(cleague2017$Round) <- c( "Girone", "Girone", "Girone", "Girone","Girone", "Girone",
"Qtr Finals", "Qtr Finals", "Round of 16", "Round of 16",
"Semi Finals", "Semi Finals")
cleague2017$Group <- factor(cleague2017$Group)
#there are 32 teams & 124 games:
K <- length(unique(cleague2017$HomeTeam))
n <- nrow(cleague2017)
R <- length(unique(cleague2017$Round))
# data list handed to JAGS
cleague.data <- list(n = n, K = K, R = R,
HomeTeam = cleague2017$HomeTeam,
AwayTeam = cleague2017$AwayTeam,
HomeGoals = cleague2017$HomeGoals,
AwayGoals = cleague2017$AwayGoals,
Round = cleague2017$Round)
# Model 1 -----------------------------------------------------------------
# Let us start from the easiest model: no mean effect, no round effect, no time
# effect... nothing basically
# Poisson goals model: each team's log scoring rate is home advantage (home
# side only) + attack of the scoring team + defence of the conceding team,
# with sum-to-zero ("STZ") constraints on the attack/defence vectors.
model1 = "model{
for (i in 1:n){
# stochastic component
HomeGoals[i]~dpois(lambdaH[i])
AwayGoals[i]~dpois(lambdaA[i])
# link and linear predictor
log(lambdaH[i])<- home + a[ HomeTeam[i] ] + d[ AwayTeam[i] ]
log(lambdaA[i])<- a[ AwayTeam[i] ] + d[ HomeTeam[i] ]
}
# STZ constraints
a[1]<- -sum( a[2:K] )
d[1]<- -sum( d[2:K] )
# prior distributions
home~dnorm(0,0.001)
for (i in 2:K){
a[i]~dnorm(0,0.01)
d[i]~dnorm(0,0.01)
}
}"
# Initialize
# Four chains, all starting from zero attack/defence effects (first entry is
# determined by the STZ constraint, hence NA).
soccer.init = list(list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) )),
list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) )),
list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) )),
list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) )))
# parameters that we whish to retrieve
soccer.param = c("a", "d", "home", "HomeGoals", "AwayGoals")
# <a> and <d> are just for interpretation
# <HomeGoals> and <AwayGoals> is for prediction
# Run the model
soccer.jags = jags(textConnection(model1), data = cleague.data, inits = soccer.init,
parameters.to.save = soccer.param,
n.chains = 4,
n.iter = 100000
)
print(soccer.jags)
Hteams = levels(cleague2017$HomeTeam)
Ateams = levels(cleague2017$AwayTeam)
# semifinals scores prediction:
# games 121-124 are presumably the semifinal fixtures, predicted by JAGS
# because their scores are missing in the CSV — TODO confirm in the data file
pred <- cbind("H-Team" = Hteams[cleague.data$HomeTeam[121:124]],
"A-Team" = Ateams[cleague.data$AwayTeam[121:124]],
"H-Goal" = soccer.jags$BUGSoutput$mean$HomeGoals[121:124],
"A-Goal" = soccer.jags$BUGSoutput$mean$AwayGoals[121:124])
pred
# Posterior mean attack (a) / defence (d) per team, pulled out of the summary
# table by row position (249:280 and 281:312) — fragile; depends on the
# monitored-parameter ordering.
res = cbind(soccer.jags$BUGSoutput$summary[249:280,1],
soccer.jags$BUGSoutput$summary[281:312,1])
rownames(res) = Hteams
colnames(res) = c("attack", "defence")
res
save(res, file = "latent.RData")
# the final prediction ----------------------------------------------------
# Same Poisson model, plus one extra (n+1)-th game — the final — with no
# home-advantage term because it is played on neutral ground (Kiev).
thewinner = "model{
for (i in 1:n){
# stochastic component
HomeGoals[i]~dpois(lambdaH[i])
AwayGoals[i]~dpois(lambdaA[i])
# link and linear predictor
log(lambdaH[i])<- home + a[ HomeTeam[i] ] + d[ AwayTeam[i] ]
log(lambdaA[i])<- a[ AwayTeam[i] ] + d[ HomeTeam[i] ]
}
# change the model for the finals (there is no home effect, both teams are playing in Kiev)
GoalsF1~dpois(lambdaF1)
GoalsF2~dpois(lambdaF2)
log(lambdaF1)<- a[ HomeTeam[n+1] ] + d[ AwayTeam[n+1] ]
log(lambdaF2)<- a[ AwayTeam[n+1] ] + d[ HomeTeam[n+1] ]
# STZ constraints
a[1]<- -sum( a[2:K] )
d[1]<- -sum( d[2:K] )
# prior distributions
mu~dnorm(0,0.001)
home~dnorm(0,0.001)
for (i in 2:K){
a[i]~dnorm(0,0.01)
d[i]~dnorm(0,0.01)
}
}"
# parameters we wish to retrieve
soccer.param.Final = c("a", "d", "home", "GoalsF1", "GoalsF2")
# Fit the final-prediction model for a hypothetical final between team1 and
# team2; returns the jags fit, the predicted final score (pred) and a
# win-probability summary (probW).
# NOTE(review): the `data` argument is never used inside the function —
# everything reads the global cleague2017/n/K/soccer.init.
# NOTE(review): the hard-coded 5-2, 1-2, 2-2, 4-2 scores for games 121-124
# look like the observed semifinal results patched into the data — confirm.
whoisthewinner = function(team1, team2, data = cleague2017){
Hteams = levels(cleague2017$HomeTeam)
Ateams = levels(cleague2017$AwayTeam)
idx1 = which(Hteams == team1)
idx2 = which(Hteams == team2)
cleague.data.Final = list(n = n, K = K,
HomeTeam = c(cleague2017$HomeTeam, idx1),
AwayTeam = c(cleague2017$AwayTeam, idx2),
HomeGoals = c(cleague2017$HomeGoals[1:120], 5, 1, 2, 4, NA),
AwayGoals = c(cleague2017$AwayGoals[1:120], 2, 2, 2, 2, NA)
)
soccer.jags.Final = jags(textConnection(thewinner), data = cleague.data.Final, inits = soccer.init,
parameters.to.save = soccer.param.Final,
n.chains = 4,
n.iter = 100000)
# posterior mean predicted score of the final
predF <- c(soccer.jags.Final$BUGSoutput$mean$GoalsF1,
soccer.jags.Final$BUGSoutput$mean$GoalsF2)
names(predF) = c(team1, team2)
# probability of winning: posterior distribution of the goal difference
m = apply( (soccer.jags.Final$BUGSoutput$sims.list$GoalsF1 - soccer.jags.Final$BUGSoutput$sims.list$GoalsF2), 2, mean)
p = apply( (soccer.jags.Final$BUGSoutput$sims.list$GoalsF1 - soccer.jags.Final$BUGSoutput$sims.list$GoalsF2)>0, 2, mean)
p2 = apply( (soccer.jags.Final$BUGSoutput$sims.list$GoalsF1 - soccer.jags.Final$BUGSoutput$sims.list$GoalsF2)<0, 2, mean)
probW = cbind(team1, team2,
soccer.jags.Final$BUGSoutput$mean$GoalsF1, soccer.jags.Final$BUGSoutput$mean$GoalsF2,
m,p, p2)
colnames(probW) = c("Team1", "Team2", "Goal Team1", "Goal Team2",
"Mean Difference", "p > 0", "p < 0")
return(list(jags.out = soccer.jags.Final, pred = predF, probW = probW ))
}
# Fit all four possible finals
# Liverpool - Bayern Munich
winnerLB = whoisthewinner( "Liverpool","Bayern Munich")
# Liverpool - Real Madrid
winnerLR = whoisthewinner( "Liverpool","Real Madrid")
# Roma - Real Madrid
winnerRR = whoisthewinner( "Roma","Real Madrid")
# Roma - Bayern Munich
winnerRB = whoisthewinner( "Roma","Bayern Munich")
winner.mat = rbind(winnerLB$pred, winnerLR$pred, winnerRB$pred, winnerRR$pred)
winner.prob = rbind(winnerLB$probW, winnerLR$probW, winnerRB$probW, winnerRR$probW)
save(winner.mat, winner.prob, file = "winnermat.RData")
traceplot(winnerLB$jags.out)
print(winnerLR$jags.out)
# NOTE(review): `winner` is not defined anywhere above — this line will
# error at runtime; probably a leftover from an earlier draft.
print(winner$jags.out)
# Is there a phase effect ------------------------------------------------
# We know that teams that get closer to the final are better, but do they play
# extra-better because of the pressure? in other words, is there a phase effect
# that makes the scoring intensity higher when the competition becomes more and
# more real?
# Extends model1 with a phase effect r[Round] on both scoring rates,
# also sum-to-zero constrained.
model.phase = "model{
for (i in 1:n){
# stochastic component
HomeGoals[i]~dpois(lambdaH[i])
AwayGoals[i]~dpois(lambdaA[i])
# link and linear predictor
log(lambdaH[i])<- home + a[ HomeTeam[i] ] + d[ AwayTeam[i] ] + r[ Round[i] ]
log(lambdaA[i])<- a[ AwayTeam[i] ] + d[ HomeTeam[i] ] + r[ Round[i] ]
}
# STZ constraints
a[1]<- -sum( a[2:K] )
d[1]<- -sum( d[2:K] )
# prior distributions
mu~dnorm(0,0.001)
home~dnorm(0,0.001)
for (i in 2:K){
a[i]~dnorm(0,0.01)
d[i]~dnorm(0,0.01)
}
r[1] <- -sum(r[2:R])
for(i in 2:R){
r[i]~dnorm(0,0.01)
}
}"
# in this case however we also need the scores for the first round of the
# semifinals, otherwise we cannot estimate the phase effect
cleague.data.phase = list(n = n, K = K, R = R,
HomeTeam = c(cleague2017$HomeTeam),
AwayTeam = c(cleague2017$AwayTeam),
HomeGoals = c(cleague2017$HomeGoals[1:120], 5, 1, NA, NA),
AwayGoals = c(cleague2017$AwayGoals[1:120], 2, 2, NA, NA),
Round = cleague2017$Round
)
soccer.param = c("a", "d", "home", "HomeGoals", "AwayGoals", "r")
# <a> and <d> are just for interpretation
# <HomeGoals> and <AwayGoals> is for prediction
# <r> is for the "phase" effect
# initialize the phase effect to be 0 as well
soccer.init.phase = list(list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) ), r = c(NA, rep(0, R-1))),
list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) ), r = c(NA, rep(0, R-1))),
list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) ), r = c(NA, rep(0, R-1))),
list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) ), r = c(NA, rep(0, R-1))))
# Run the model
soccer.jags.phase = jags(textConnection(model.phase), data = cleague.data.phase, inits = soccer.init.phase,
parameters.to.save = soccer.param,
n.chains = 4,
n.iter = 100000 )
Hteams = levels(cleague2017$HomeTeam)
Ateams = levels(cleague2017$AwayTeam)
# semifinals scores prediction:
pred.phase <- cbind("H-Team" = Hteams[cleague.data$HomeTeam[121:124]],
"A-Team" = Ateams[cleague.data$AwayTeam[121:124]],
"H-Goal" = soccer.jags.phase$BUGSoutput$mean$HomeGoals[121:124],
"A-Goal" = soccer.jags.phase$BUGSoutput$mean$AwayGoals[121:124])
pred.phase
# posterior summary and 95% credible interval for the phase effect r
summary(soccer.jags.phase$BUGSoutput$sims.list["r"]$r)
apply(soccer.jags.phase$BUGSoutput$sims.list["r"]$r, 2, quantile, probs = c(0.025, 0.975))
traceplot(soccer.jags.phase)
# well, the results are rather inconclusive, aren't they...
# With semi-final data ----------------------------------------------------
# Refit the baseline model (model1) with the first-leg semifinal scores
# (5-2 and 1-2) patched in; the second legs stay NA and are predicted.
cleague.data.phase = list(n = n, K = K, R = R,
HomeTeam = c(cleague2017$HomeTeam),
AwayTeam = c(cleague2017$AwayTeam),
HomeGoals = c(cleague2017$HomeGoals[1:120], 5, 1, NA, NA),
AwayGoals = c(cleague2017$AwayGoals[1:120], 2, 2, NA, NA),
Round = cleague2017$Round
)
# Same zero-effect starting values as before (entry 1 fixed by STZ, hence NA)
soccer.init = list(list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) )),
list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) )),
list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) )),
list( "home"=0.5, "a"=c(NA, rep(0, K-1)) ,
"d"=c(NA, rep(0, K-1) )))
# parameters that we whish to retrieve
soccer.param = c("a", "d", "home", "HomeGoals", "AwayGoals")
# <a> and <d> are just for interpretation
# <HomeGoals> and <AwayGoals> is for prediction
# Run the model
soccer.jags2 = jags(textConnection(model1), data = cleague.data.phase, inits = soccer.init,
parameters.to.save = soccer.param,
n.chains = 4,
n.iter = 100000
)
print(soccer.jags2)
Hteams = levels(cleague2017$HomeTeam)
Ateams = levels(cleague2017$AwayTeam)
# Winning probs and prob --------------------------------------------------
# Summarise the posterior predictive distribution of the goal difference
# (home - away) for a set of games, together with posterior summaries of
# the predicted scores and win probabilities for both sides.
#
# Args:
#   jags.output: an R2jags fit whose BUGSoutput holds sims.list entries
#     HomeGoals/AwayGoals (iterations x games) plus mean/median/sd
#     summaries for those nodes.
#   game: integer vector of game indices (columns) to summarise.
#
# Returns: a character matrix, one row per game, with team names, score
#   summaries, goal-difference summaries and win probabilities with
#   normal-approximation 95% intervals.
#
# NOTE: relies on the globals Hteams, Ateams and cleague.data defined
# earlier in this script.
winning.prob <- function(jags.output, game){
  sims <- jags.output$BUGSoutput$sims.list
  # Posterior draws of the goal difference, one column per requested game.
  # drop = FALSE keeps a matrix even for a single game index, so the
  # apply() calls below no longer break when length(game) == 1.
  diffs <- sims$HomeGoals[, game, drop = FALSE] - sims$AwayGoals[, game, drop = FALSE]
  # Same divisor as the original code (total number of sampled values).
  n.sims <- length(sims$HomeGoals[, game])
  m   <- apply(diffs, 2, mean)
  med <- apply(diffs, 2, median)
  l <- apply(diffs, 2, quantile, p = 0.025)
  u <- apply(diffs, 2, quantile, p = 0.975)
  # P(home wins) with a normal-approximation 95% interval
  p   <- apply(diffs > 0, 2, mean)
  mep <- apply(diffs > 0, 2, median)
  lp <- p - 1.96*sqrt(p*(1-p)/n.sims)
  up <- p + 1.96*sqrt(p*(1-p)/n.sims)
  # P(away wins) with a normal-approximation 95% interval
  p2   <- apply(diffs < 0, 2, mean)
  mep2 <- apply(diffs < 0, 2, median)
  lp2 <- p2 - 1.96*sqrt(p2*(1-p2)/n.sims)
  up2 <- p2 + 1.96*sqrt(p2*(1-p2)/n.sims)
  out <- cbind("H-Team" = Hteams[cleague.data$HomeTeam[game]],
               "A-Team" = Ateams[cleague.data$AwayTeam[game]],
               "H-Goal - Mean" = jags.output$BUGSoutput$mean$HomeGoals[game],
               "H-Goal - Median" = jags.output$BUGSoutput$median$HomeGoals[game],
               "H-Goal - Sd" = jags.output$BUGSoutput$sd$HomeGoals[game],
               "A-Goal - Mean" = jags.output$BUGSoutput$mean$AwayGoals[game],
               "A-Goal - Median" = jags.output$BUGSoutput$median$AwayGoals[game],
               "A-Goal - Sd" = jags.output$BUGSoutput$sd$AwayGoals[game],
               "Mean Difference" = m,
               "Median Difference" = med,
               "0.025 quantile for the Difference" = l,
               "0.975 quantile for the Difference" = u,
               "p > 0" = p,
               "p > 0 - Median" = mep,
               "0.025 quantile for p > 0" = lp,
               "0.975 quantile for p > 0" = up,
               "p < 0" = p2,
               "p < 0 - Median" = mep2,
               "0.025 quantile for p < 0" = lp2,
               "0.975 quantile for p < 0" = up2
  )
  out
}
# semifinals scores prediction:
# games 121-124 under the three fitted models, saved for the blog post
semi.fin = winning.prob(soccer.jags, 121:124)
semi.fin2 = winning.prob(soccer.jags2, 121:124)
semi.fin.phase = winning.prob(soccer.jags.phase, 121:124)
save(semi.fin, semi.fin2, semi.fin.phase, file = "probmat.RData")
# Summarise the posterior predictive distribution of the final's goal
# difference (team1 - team2) for a fit of the `thewinner` model.
#
# Args:
#   jags.output: an R2jags fit whose BUGSoutput holds sims.list entries
#     GoalsF1/GoalsF2 (draws for the final's scores) plus mean/median/sd
#     summaries for those nodes.
#   team1, team2: team names used only to label the output.
#
# Returns: a 1-row character matrix with score summaries, goal-difference
#   summaries and win probabilities with normal-approximation 95% intervals.
winning.prob2 <- function(jags.output, team1, team2){
  sims <- jags.output$BUGSoutput$sims.list
  # posterior draws of the goal difference (computed once, reused below)
  diffs <- sims$GoalsF1 - sims$GoalsF2
  n.sims <- length(sims$GoalsF1)   # same divisor as the original code
  m   <- apply(diffs, 2, mean)
  med <- apply(diffs, 2, median)
  l <- apply(diffs, 2, quantile, p = 0.025)
  u <- apply(diffs, 2, quantile, p = 0.975)
  # P(team1 wins) with a normal-approximation 95% interval
  p  <- apply(diffs > 0, 2, mean)
  lp <- p - 1.96*sqrt(p*(1-p)/n.sims)
  up <- p + 1.96*sqrt(p*(1-p)/n.sims)
  # P(team2 wins) with a normal-approximation 95% interval
  p2  <- apply(diffs < 0, 2, mean)
  lp2 <- p2 - 1.96*sqrt(p2*(1-p2)/n.sims)
  up2 <- p2 + 1.96*sqrt(p2*(1-p2)/n.sims)
  out <- cbind("Team 1" = team1,
               "Team 2" = team2,
               "Goal 1 - Mean" = jags.output$BUGSoutput$mean$GoalsF1,
               "Goal 1 - Median" = jags.output$BUGSoutput$median$GoalsF1,
               "Goal 1 - Sd" = jags.output$BUGSoutput$sd$GoalsF1,
               "Goal 2 - Mean" = jags.output$BUGSoutput$mean$GoalsF2,
               "Goal 2 - Median" = jags.output$BUGSoutput$median$GoalsF2,
               "Goal 2 - Sd" = jags.output$BUGSoutput$sd$GoalsF2,
               "Mean Difference" = m,
               "Median Difference" = med,
               "0.025 quantile for the Difference" = l,
               "0.975 quantile for the Difference" = u,
               "p > 0" = p,
               "0.025 quantile for p > 0" = lp,
               "0.975 quantile for p > 0" = up,
               "p < 0" = p2,
               "0.025 quantile for p < 0" = lp2,
               "0.975 quantile for p < 0" = up2
  )
  out
}
# Full win-probability table for the four possible finals, saved to disk
# (overwrites the earlier winnermat.RData save)
winner.prob = rbind(winning.prob2(winnerLB$jags.out, "Liverpool", "Bayern Munich"),
winning.prob2(winnerLR$jags.out, "Liverpool", "Real Madrid"),
winning.prob2(winnerRR$jags.out, "Roma", "Real Madrid"),
winning.prob2(winnerRB$jags.out, "Roma", "Bayern Munich"))
save(winner.prob, file = "winnermat.RData")
|
05ef1d27e218759e4e94aad1349d2fa9a82562e4
|
838b2ec1bd23ac02712cade84f0bf1fc3e3165bb
|
/GO_E03_NDVI.r
|
d39e8309cc84873b20f9bb0295b4461987a130c0
|
[] |
no_license
|
mlcastellan/GO
|
65a542336d94be93384bf26d88bc480b4f453aef
|
e5559cb6e7c1faa17483e0d83187efc1514e4bbc
|
refs/heads/master
| 2020-04-13T16:31:09.135428
| 2019-01-02T18:28:35
| 2019-01-02T18:28:35
| 163,322,793
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,590
|
r
|
GO_E03_NDVI.r
|
####################################
## Funcion NDVI para sentinel 2 ####
## tal como sale de sen2cor ####
## ####
####################################
# Compute NDVI = (NIR - RED) / (NIR + RED) for every Sentinel-2 Level-2A
# scene (sen2cor output) found under `sentinel_2_folder`, writing one
# GeoTIFF per scene into NDVI/Full/ (relative to the working directory).
#
# Args:
#   sentinel_2_folder: root folder searched recursively for the sen2cor
#     band files (B04 = red, B08 = NIR) in JPEG 2000 format.
#   res: spatial resolution in metres used in the band file names
#     (10, 20 or 60 for sen2cor output).
#
# Side effects: creates NDVI/Full/ if needed, writes NDVI_<scene>.tif per
# scene, and removes the temporary GeoTIFF band conversions.
GO_E3_NDVI_SEN2COR=function(sentinel_2_folder="/home/martin-r/05_Rasters/SENTINEL_HDE_invierno",res=10){
######
library(raster)
library(rgdal)
library(gdalUtils)
library(stringr)
#### STEP 1: locate the red (B04) and NIR (B08) bands at the requested resolution
pattern_r=paste("B04_",res,"m.jp2$",sep="")
pattern_nir=paste("B08_",res,"m.jp2$",sep="")
R_band_list=list.files(path=sentinel_2_folder,pattern=pattern_r,recursive=TRUE,ignore.case=TRUE,full.names=TRUE)
NIR_band_list=list.files(path=sentinel_2_folder,pattern=pattern_nir,recursive=TRUE,ignore.case=TRUE,full.names=TRUE)
### build the output file name for each scene
### BUG FIX: the band suffix stripped from the file name now follows `res`
### instead of being hard-coded to "_B04_10m.jp2", so res = 20/60 also
### produces correct names.
band_suffix=paste("_B04_",res,"m.jp2",sep="")
ndvi_filename_list=list()
for(j in seq_along(R_band_list)){
ndvi_filename_list[[j]]=paste("NDVI_",str_replace(basename(R_band_list[j]),pattern=band_suffix,replacement=".tif"),sep="")
}
### STEP 2: compute and export the NDVI for each scene
### make sure the output folder exists before writing into it
dir.create("NDVI/Full",recursive=TRUE,showWarnings=FALSE)
for(i in seq_along(R_band_list)){
rband_name=R_band_list[[i]]
nirband_name=NIR_band_list[[i]]
ndvi_filename=ndvi_filename_list[[i]]
dir=paste("NDVI/Full/",ndvi_filename,sep="")
### Convert the JPEG 2000 bands to temporary GeoTIFFs; gdal_translate is
### called for its side effect only (its return value was previously
### assigned and immediately overwritten).
gdal_translate(rband_name,"r_band.tif")
gdal_translate(nirband_name,"nir_band.tif")
### Import the converted GeoTIFFs as rasters
r_band<- raster("r_band.tif")
nir_band<- raster("nir_band.tif")
### NDVI = (NIR - RED) / (NIR + RED)
ndvi=(nir_band-r_band)/(nir_band+r_band)
### Export the NDVI raster to the path built above
writeRaster(ndvi,filename=dir,format="GTiff",overwrite=TRUE)
### Remove the temporary GeoTIFF conversions
file.remove("r_band.tif")
file.remove("nir_band.tif")
}
########
}
######## FIN DE LA FUNCION ######
#filename=file.choose()
#r1=stack(filename)
#r1_ndvi=(r1[[4]]-r1[[1]])/(r1[[4]]+r1[[1]])
#outname=basename(filename)
#outname=tools::file_path_sans_ext(outname)
#outname=paste("ndvi_",outname,".tif",sep="")
#writeRaster(r1_ndvi,filename=outname,format="GTiff",overwrite=TRUE)
|
4b4b0da4240469f11639ad0cccd67698f8456f5c
|
8a25fc2ad84f215f147ce1190a90daeec67eba99
|
/cachematrix.R
|
d7e090e024bcfe01a01b961a6d73d12ca695f26d
|
[] |
no_license
|
ahujarv/ProgrammingAssignment2
|
6456ab991c673c40d5973154b22597e1ec268ae5
|
01ba73030bd2ae77ca52603704ad49aae6947a6f
|
refs/heads/master
| 2021-01-22T18:28:37.092560
| 2014-04-25T04:30:08
| 2014-04-25T04:30:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,678
|
r
|
cachematrix.R
|
## Set of two functions that
## 1. create a matrix that can cache inverse of the square matrix provided
## 2. check in cache if the inverse of provided square matrix already exists
## 3. return the inverse matrix from cache if found in cache
## 3. calculate the inverse matrix, if not found in cache then cache and return it.
## This function creates a special square matrix that can hold the inverse
## matrix of the square matrix provided as input.
## makeCacheMatrix wraps a matrix in a list of accessor closures together
## with a cached copy of its inverse.
##
## Args:
##   x: the matrix to wrap (assumed invertible by cacheSolve).
## Returns: a list with setMtrx/getMtrx to replace/read the matrix and
##   setInvMtrx/getInvMtrx to store/read the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    setMtrx <- function(mtrx) {
        ## BUG FIX: the original assigned to `X` (capital), so getMtrx()
        ## kept returning the stale matrix after setMtrx(); assign to `x`.
        x <<- mtrx
        inv <<- NULL  # invalidate the cached inverse for the new matrix
    }
    getMtrx <- function() {
        x
    }
    setInvMtrx <- function(invmtrx) {
        inv <<- invmtrx
    }
    getInvMtrx <- function() {
        inv
    }
    list(setMtrx = setMtrx, getMtrx = getMtrx, getInvMtrx = getInvMtrx,
         setInvMtrx = setInvMtrx)
}
## This function calculates the inverse of the square matrix provided.
## If the incoming squre matrix is the same as one provided before,
## then this function returns the inverse from cache instead of calculating it
## else it calculates the inverse,
## stores the calcualted inverse in cache and
## returns the calculated inverse
## Return the inverse of the special "matrix" produced by makeCacheMatrix.
## The inverse is computed at most once: later calls return the cached
## value (announced with a message) instead of recomputing it.
##
## Args:
##   x: a makeCacheMatrix-style list of accessors.
##   ...: extra arguments forwarded to solve().
cacheSolve <- function(x, ...) {
        cached <- x$getInvMtrx()
        if (is.null(cached)) {
                ## cache miss: compute, store, and fall through to return
                cached <- solve(x$getMtrx(), ...)
                x$setInvMtrx(cached)
        } else {
                message("getting cached Inverse Matrix")
        }
        cached
}
|
943a92fd7d0ffab96b5182359945d3a8c523700d
|
6dd7f5baec15db0b45477659c6f268848ee6c5b7
|
/start_analysis.R
|
c7eb2ccaf118cb430b4b332b30467ccfac9b6a6c
|
[] |
no_license
|
andy400400/PTTCrawler
|
c29ffec47175a68970c4fc23961c7c3c7d25e7d7
|
c826f60849d5fd7b4456157523fd7a0a30ecd887
|
refs/heads/master
| 2021-01-01T03:35:01.249765
| 2016-04-17T12:04:04
| 2016-04-17T12:04:04
| 56,434,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,925
|
r
|
start_analysis.R
|
# Load the data: Yahoo movie rankings and scraped PTT movie-board posts
{
x1_path<-paste("D:/000/yahoo_movie.csv")
yahoo_movie<-read.csv(x1_path,header = TRUE,stringsAsFactors = FALSE)
x2_path<-paste("D:/000/3607_3912.csv")
ptt_movie<-read.csv(x2_path,header = TRUE,stringsAsFactors = FALSE)
}
# Find spoiler posts: keep row indices of titles whose bracketed tag
# (the part before "]") contains the spoiler marker character "雷"
{
class_ptt_title<-NULL
for (x in 1:nrow(ptt_movie)) {
# first check whether the title contains a "]" at all
ptt_title_check<-unlist(strsplit(ptt_movie$title[x],split="",fixed=T))
if (sum(ptt_title_check == "]")>0) {
# split at "]" and scan the tag part for the spoiler marker
ptt_title_split<-unlist(strsplit(ptt_movie$title[x],split="]",fixed=T))
ptt_title_one_split<-unlist(strsplit(ptt_title_split[1],split="",fixed=T))
if (sum(ptt_title_one_split == "雷")>0) {
class_ptt_title<-c(class_ptt_title,x)
}
}
}
}
# Collect the spoiler posts found above into one data frame
{
ptt_movie_ray<-NULL
for (y in 1:length(class_ptt_title)) {
ptt_movie_ray<-rbind(ptt_movie_ray,ptt_movie[class_ptt_title[y],])
}
}
# Classify spoiler posts by ranked movie: for each Yahoo-ranked title,
# match PTT post titles by counting shared characters and write the
# matching posts to D:/111/<rank>.CSV
{
# Rank 1 is handled by hand, with an extra list of English characters
{
w<-1
yahoo_movie_title_split<-unlist(strsplit(yahoo_movie$cn_name[w],split="",fixed=T))
yahoo_movie_title_split_en<-c("b","B","v","V","s","S")
# branch used when the movie title is longer than 3 characters
if (nchar(yahoo_movie$cn_name[w])>3) {
ptt_by_yahoo<-NULL
for (z in 1:nrow(ptt_movie_ray)) {
ptt_title_ray_split_first<-unlist(strsplit(ptt_movie_ray$title[z],split="]",fixed=T))
ptt_title_ray_split<-unlist(strsplit(ptt_title_ray_split_first[2],split="",fixed=T))
# compare characters: Yahoo title vs PTT post title
b<-0
c<-1
d<-0
# Chinese-character matches
for (a in 1:length(yahoo_movie_title_split)) {
if (sum(grepl(yahoo_movie_title_split[a],ptt_title_ray_split))>0) {
b<-b+c
}
}
# extra English-character matches
for (a in 1:length(yahoo_movie_title_split_en)) {
if (sum(grepl(yahoo_movie_title_split_en[a],ptt_title_ray_split))>0) {
d<-d+c
}
}
# keep the post if more than 2 Chinese or more than 2 English chars match
if (b>2 | d>2) {
ptt_by_yahoo<-rbind(ptt_by_yahoo,ptt_movie_ray[z,])
}
}
new_path<-paste("D:/111/",w,".CSV",sep = "")
write.csv(ptt_by_yahoo, file = new_path)
}
}
# ranks 2-20
for (w in 2:nrow(yahoo_movie)) {
# split the title into single characters
yahoo_movie_title_split<-unlist(strsplit(yahoo_movie$cn_name[w],split="",fixed=T))
# branch used when the movie title has at most 3 characters
if (nchar(yahoo_movie$cn_name[w])<=3) {
ptt_by_yahoo<-NULL
for (z in 1:nrow(ptt_movie_ray)) {
ptt_title_ray_split_first<-unlist(strsplit(ptt_movie_ray$title[z],split="]",fixed=T))
ptt_title_ray_split<-unlist(strsplit(ptt_title_ray_split_first[2],split="",fixed=T))
# compare characters: Yahoo title vs PTT post title
b<-0
for (a in 1:length(yahoo_movie_title_split)) {
if (sum(grepl(yahoo_movie_title_split[a],ptt_title_ray_split))>0) {
c<-1
b<-b+c
}
}
# keep the post if at least 2 characters match
# NOTE(review): the original comment said ">2", but the code checks b>1
if (b>1) {
ptt_by_yahoo<-rbind(ptt_by_yahoo,ptt_movie_ray[z,])
}
}
}
# branch used when the movie title is longer than 3 characters
if (nchar(yahoo_movie$cn_name[w])>3) {
ptt_by_yahoo<-NULL
for (z in 1:nrow(ptt_movie_ray)) {
ptt_title_ray_split_first<-unlist(strsplit(ptt_movie_ray$title[z],split="]",fixed=T))
ptt_title_ray_split<-unlist(strsplit(ptt_title_ray_split_first[2],split="",fixed=T))
# compare characters: Yahoo title vs PTT post title
b<-0
for (a in 1:length(yahoo_movie_title_split)) {
if (sum(grepl(yahoo_movie_title_split[a],ptt_title_ray_split))>0) {
c<-1
b<-b+c
}
}
# keep the post if more than 2 characters match
if (b>2) {
ptt_by_yahoo<-rbind(ptt_by_yahoo,ptt_movie_ray[z,])
}
}
}
new_path<-paste("D:/111/",w,".CSV",sep = "")
write.csv(ptt_by_yahoo, file = new_path)
}
}
|
598c611c1ca99054362f70584716b1a4a9009257
|
91a45e4f5b58561dcacddbf0ca91f9f0649e641c
|
/man/scrape_countries.Rd
|
b21fd75666d25c5ea4eebf7c5d3410708acd0c62
|
[
"MIT"
] |
permissive
|
jebyrnes/wikiISO31662
|
b044cd9ed5cbca5d10d260ca5b7c988aeb47edd1
|
d4fb6f6bf44d6dd65a3158ca2b7b1f796a830321
|
refs/heads/master
| 2022-10-16T11:41:21.919779
| 2020-06-01T21:20:49
| 2020-06-01T21:20:49
| 267,416,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 520
|
rd
|
scrape_countries.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scrape_countries.R
\name{scrape_countries}
\alias{scrape_countries}
\title{Scrape ISO 3166-2 Country Codes from Wikipedia}
\usage{
scrape_countries()
}
\value{
A tibble of country codes and country names
}
\description{
Scrape ISO 3166-2 Country Codes from Wikipedia
}
\examples{
\dontrun{
iso_countries <- scrape_countries()
head(iso_countries)
}
}
\references{
Wikipedia ISO-3166-2 Entry: \url{https://en.wikipedia.org/wiki/ISO_3166-2}
}
|
f491ba5159a196cd4493bed010a4107972707f8f
|
b82e5927759426b6f457b10f710f04d4fe70e485
|
/logistic_reg.r
|
391553a780cb06beebc56d6d2aafbbe8475d7ef4
|
[] |
no_license
|
anuj-dimri25/islr
|
e80572a2dcf821de01f645c939cfce22d335eaea
|
de224533b58756e040285ede6e5b65fa32a2f9c2
|
refs/heads/master
| 2020-03-19T00:14:34.102245
| 2018-06-12T21:49:17
| 2018-06-12T21:49:17
| 135,466,075
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,421
|
r
|
logistic_reg.r
|
## Logistic regression on the Smarket data (ISLR):
## predict market Direction (Up/Down) from lagged returns and volume.
# library() errors if the package is missing (require() just returns FALSE)
library(ISLR)
names(Smarket)
# predicting direction -- binary response
# plotting all variables against each other, coloured by Direction
pairs(Smarket, col = Smarket$Direction)
# logistic regression on the full data set
model <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
             data = Smarket, family = binomial)
summary(model)
prediction <- predict(model, type = "response")
prediction[1:10]
# BUG FIX: the original line ended with a stray backslash ("Down")\),
# which is a syntax error in R.
final_predictions <- ifelse(prediction > 0.5, "Up", "Down")
# NOTE: attach() is discouraged; kept so the unqualified `Direction` and
# `Year` below keep resolving as before.
attach(Smarket)
# confusion matrix and accuracy on the training data
table(final_predictions, Direction)
mean(final_predictions == Direction)
###### training and test set
train <- Year < 2005
model2 <- glm(Direction ~ Lag1 + Lag2 + Lag3 + Lag4 + Lag5 + Volume,
              data = Smarket, family = binomial, subset = train)
summary(model2)
prediction2 <- predict(model2, newdata = Smarket[!train, ], type = "response")
prediction2[1:10]
final_predictions2 <- ifelse(prediction2 > 0.5, "Up", "Down")
Direction.2005 <- Smarket$Direction[!train]
# confusion matrix and accuracy on the 2005 hold-out
table(final_predictions2, Direction.2005)
mean(final_predictions2 == Direction.2005)
## output shows we are overfitting
# lets use a smaller model with only the two strongest lags
model3 <- glm(Direction ~ Lag1 + Lag2,
              data = Smarket, family = binomial, subset = train)
summary(model3)
prediction3 <- predict(model3, newdata = Smarket[!train, ], type = "response")
prediction3[1:10]
final_predictions3 <- ifelse(prediction3 > 0.5, "Up", "Down")
# confusion matrix and accuracy on the 2005 hold-out
table(final_predictions3, Direction.2005)
mean(final_predictions3 == Direction.2005)
|
33b8e9a33cf07be7a0f111041474ebdd5cfa96fa
|
57834fe94033c7ca485e91976ae6efc579578b51
|
/Rprog/cachematrix.R
|
b09213e779c3d7dc12e6b1974b63e35e76b6dc11
|
[] |
no_license
|
cbeltis/datasciencecoursera
|
d94e7ac1aa36e8b490d98ffd3560a03bef1c00bc
|
d5ef7ea9dbabe03da8710effa1c4fcb0b01b2c12
|
refs/heads/master
| 2020-05-17T05:40:28.393982
| 2015-07-16T19:04:21
| 2015-07-16T19:04:21
| 30,840,258
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 670
|
r
|
cachematrix.R
|
makeCacheMatrix <- function(x = matrix())
# Build a "matrix" object that can cache its inverse: a list of four closures
# sharing the matrix 'x' and the cached inverse through a common environment.
{
  cached_inv <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse
    x <<- y
    cached_inv <<- NULL
  }
  get <- function() x
  setInv <- function(inv) cached_inv <<- inv
  getInv <- function() cached_inv
  # Field order preserved for positional-access compatibility
  list(setInv = setInv,
       getInv = getInv,
       set = set,
       get = get)
}
cacheSolve <- function(x, ...)
# Return the inverse of the matrix stored in a makeCacheMatrix object 'x',
# reusing the cached inverse when one has already been computed.
# '...' is forwarded to solve() (e.g. the 'tol' argument).
{
  inv <- x$getInv()
  if (!is.null(inv)) {
    # Cache hit: nothing is computed on this path. The original message said
    # "calculating...", which was misleading.
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)  # original ignored '...' despite accepting it
  x$setInv(inv)
  inv
}
|
babbff419d3182d15e74f2cfe5d5f75e3c064de2
|
602980a2b335336d9bac17d1a924ddc690449691
|
/R/EpivizBpData-class.R
|
7b577a85e88d16a05a36cccf467d24533e6550ae
|
[] |
no_license
|
epiviz/epivizr-release
|
55211407cb8bf781ce8c5706479299d81ad3a5f8
|
798b350442b74334fdf3ac834c65a878e0d436e0
|
refs/heads/master
| 2021-01-13T01:40:53.888146
| 2015-06-09T17:08:33
| 2015-06-09T17:08:33
| 18,971,179
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,468
|
r
|
EpivizBpData-class.R
|
# Reference class for base-pair-resolution track data in epiviz. Methods
# below reference fields inherited from EpivizTrackData -- 'object' (the
# data container whose measurements live in mcols(object), presumably a
# GRanges; verify against the parent class), 'columns' (tracked measurement
# names), 'ylim' (plot limits), and 'mgr' (the session manager).
EpivizBpData <- setRefClass("EpivizBpData",
  contains="EpivizTrackData",
  methods=list(
    # TRUE when every requested column exists among the metadata columns
    .checkColumns=function(columns) {
      all(columns %in% names(mcols(object)))
    },
    # All available metadata column names
    .getColumns=function() {
      names(mcols(object))
    },
    # Indices of rows having an NA in any of the tracked 'columns'
    .getNAs=function() {
      if (length(columns) == 0) {
        return(integer())
      }
      naMat <- is.na(mcols(object)[,columns])
      # A single column yields a plain vector; coerce to a one-column
      # matrix so rowSums() applies uniformly
      if (!is.matrix(naMat))
        naMat <- cbind(naMat)
      which(rowSums(naMat)>0)
    },
    # TRUE when 'ylim' is a valid 2 x length(columns) limits matrix
    # (row 1 = minima, row 2 = maxima -- see getMeasurements below)
    .checkLimits=function(ylim) {
      if (!is.matrix(ylim))
        return(FALSE)
      if (nrow(ylim) != 2)
        return(FALSE)
      if (ncol(ylim) != length(columns))
        return(FALSE)
      TRUE
    },
    # Default plot limits: pretty-rounded data range of each tracked column;
    # warnings (e.g. all-NA columns) are suppressed
    .getLimits=function() {
      colIndex <- match(columns, colnames(mcols(object)))
      suppressWarnings(unname(sapply(colIndex, function(i) range(pretty(range(mcols(object)[,i], na.rm=TRUE))))))
    },
    # Render this data source as a line chart via the session manager
    plot=function(...) {
      mgr$lineChart(ms=getMeasurements(), ...)
    }
  )
)
.valid.EpivizBpData.ylim <- function(x) {
  # Validity check for the 'ylim' field: must be a 2 x length(columns) matrix
  # whose columns hold the (min, max) plot limits of each measurement.
  # Returns NULL when valid, otherwise a character error message.
  lim <- x$ylim
  if (!is(lim, "matrix")) return("'ylim' must be a matrix")
  if (nrow(lim) != 2) return("'ylim' must have two rows")
  if (ncol(lim) != length(x$columns)) return("'ylim' must have 'length(columns)' columns")
  NULL
}
# Aggregate all validity messages for EpivizBpData (currently only the
# 'ylim' checks). NULL/empty result means the object is valid.
.valid.EpivizBpData <- function(x) {
  c(.valid.EpivizBpData.ylim(x))
}
# Register the validity function with the class definition. setValidity2()
# is not base R -- presumably from S4Vectors/Bioconductor, attached by the
# package's dependencies; verify.
setValidity2("EpivizBpData", .valid.EpivizBpData)
# Additional methods attached to the EpivizBpData reference class.
EpivizBpData$methods(
  # Build one measurement descriptor (epiviz list format) per tracked column.
  # min/max come from the 'ylim' field, whose column m holds the limits of
  # the m-th tracked column (row 1 = min, row 2 = max).
  getMeasurements=function() {
    out <- lapply(columns, function(curCol) {
      m <- match(curCol, columns)
      list(id=curCol,
           name=curCol,
           type="feature",
           datasourceId=id,
           datasourceGroup=id,
           defaultChartType="Line Track",
           annotation=NULL,
           minValue=ylim[1,m],
           maxValue=ylim[2,m],
           metadata=NULL)
    })
    #out <- paste(name, columns, sep="$")
    #nms <- paste(id, columns, sep="__")
    #names(out) <- nms
    out
  },
  # Base-pair data carries no per-row metadata
  .getMetadata=function(curHits, metadata) {
    return(NULL)
  },
  # Values of one measurement at the queried rows ('curHits'), optionally
  # rounded to 3 decimal places
  .getValues=function(curHits, measurement, round=FALSE) {
    if(!measurement %in% columns) {
      stop("could not find measurement", measurement)
    }
    vals <- unname(mcols(object)[curHits,measurement])
    if (round) {
      vals <- round(vals, 3)
    }
    vals
  },
  # Extract the column name from a measurement id of the form
  # "<datasourceId>__<column>", validating it against the tracked columns
  parseMeasurement=function(msId) {
    column <- strsplit(msId, split="__")[[1]][2]
    if(!.checkColumns(column)) {
      stop("invalid parsed measurement")
    }
    column
  }
)
|
24240c56c6613663010fdd2bbd6eaaf9e5aaef74
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/spsann/R/optimCLHS.R
|
ba8344d27a58dd640fcd747afeb02b3b2caf911d
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,208
|
r
|
optimCLHS.R
|
#' Optimization of sample configurations for spatial trend identification and estimation (IV)
#'
#' Optimize a sample configuration for spatial trend identification and estimation using the method proposed
#' by Minasny and McBratney (2006), known as the conditioned Latin hypercube sampling. An utility function
#' _U_ is defined so that the sample reproduces the marginal distribution and correlation matrix of the
#' numeric covariates, and the class proportions of the factor covariates (__CLHS__). The utility function
#' is obtained aggregating three objective functions: __O1__, __O2__, and __O3__.
#'
# @inheritParams spJitter
#' @template spSANN_doc
#' @inheritParams optimACDC
#' @template spJitter_doc
#'
#' @param clhs.version (Optional) Character value setting the CLHS version that should be used. Available
#' options are: `"paper"`, for the formulations of __O1__, __O2__, and __O3__ as presented in the original
#' paper by Minasny and McBratney (2006); `"fortran"`, for the formulations of __O1__ and __O3__ that include
#' a scaling factor as implemented in the late Fortran code by Budiman Minasny (ca. 2015); and `"update"`, for
#' formulations of __O1__, __O2__, and __O3__ that include the modifications proposed the authors of this
#' package in 2018 (see below). Defaults to `clhs.version = "paper"`.
#'
#' @details
#' \subsection{Marginal sampling strata}{
#' Reproducing the marginal distribution of the numeric covariates depends upon the definition of marginal
#' sampling strata. _Equal-area_ marginal sampling strata are defined using the sample quantiles estimated
#' with \code{\link[stats]{quantile}} using a continuous function (`type = 7`), that is, a function that
#' interpolates between existing covariate values to estimate the sample quantiles. This is the procedure
#' implemented in the original method of Minasny and McBratney (2006), which creates breakpoints that do not
#' occur in the population of existing covariate values. Depending on the level of discretization of the
#' covariate values, that is, how many significant digits they have, this can create repeated breakpoints,
#' resulting in empty marginal sampling strata. The number of empty marginal sampling strata will ultimately
#' depend on the frequency distribution of the covariate and on the number of sampling points. The effect of
#' these features on the spatial modelling outcome still is poorly understood.
#' }
#' \subsection{Correlation between numeric covariates}{
#' The _correlation_ between two numeric covariates is measured using the sample Pearson's _r_, a descriptive
#' statistic that ranges from -1 to +1. This statistic is also known as the sample linear correlation
#' coefficient. The effect of ignoring the correlation among factor covariates and between factor and numeric
#' covariates on the spatial modelling outcome still is poorly understood.
#' }
#' \subsection{Multi-objective combinatorial optimization}{
#' A method of solving a multi-objective combinatorial optimization problem (MOCOP) is to aggregate the
#' objective functions into a single utility function _U_. In the __spsann__ package, as in the original
#' implementation of the CLHS by Minasny and McBratney (2006), the aggregation is performed using the
#' __weighted sum method__, which uses weights to incorporate the __a priori__ preferences of the user about
#' the relative importance of each objective function. When the user has no preference, the objective functions
#' receive equal weights.
#'
#' The weighted sum method is affected by the relative magnitude of the different objective function values.
#' The objective functions implemented in `optimCLHS` have different units and orders of magnitude. The
#' consequence is that the objective function with the largest values, generally __O1__, may have a numerical
#' dominance during the optimization. In other words, the weights may not express the true preferences of the
#' user, resulting that the meaning of the utility function becomes unclear because the optimization will
#' likely favour the objective function which is numerically dominant.
#'
#' An efficient solution to avoid numerical dominance is to scale the objective functions so that they are
#' constrained to the same approximate range of values, at least in the end of the optimization. In the
#' original implementation of the CLHS by Minasny and McBratney (2006), `clhs.version = "paper"`, `optimCLHS`
#' uses the naive aggregation method, which ignores that the three objective functions have different units
#' and orders of magnitude. In a 2015 Fortran implementation of the CLHS, `clhs.version = "fortran"`, scaling
#' factors were included to make the values of the three objective function more comparable. The effect of
#' ignoring the need to scale the objective functions, or using arbitrary scaling factors, on the spatial
#' modelling outcome still is poorly understood. Thus, an updated version of __O1__, __O2__, and __O3__ has
#' been implemented in the __spsann__ package. The need formulation aim at making the values returned by the
#' objective functions more comparable among themselves without having to resort to arbitrary scaling factors.
#' The effect of using these new formulations have not been tested yet.
#' }
#'
#' @return
#' `optimCLHS` returns an object of class `OptimizedSampleConfiguration`: the optimized sample configuration
#' with details about the optimization.
#'
#' `objCLHS` returns a numeric value: the energy state of the sample configuration -- the objective function
#' value.
#'
#' @references
#' Minasny, B.; McBratney, A. B. A conditioned Latin hypercube method for sampling in the presence of
#' ancillary information. _Computers & Geosciences_, v. 32, p. 1378-1388, 2006.
#'
#' Minasny, B.; McBratney, A. B. Conditioned Latin Hypercube Sampling for calibrating soil sensor data to
#' soil properties. Chapter 9. Viscarra Rossel, R. A.; McBratney, A. B.; Minasny, B. (Eds.) _Proximal Soil
#' Sensing_. Amsterdam: Springer, p. 111-119, 2010.
#'
#' Roudier, P.; Beaudette, D.; Hewitt, A. A conditioned Latin hypercube sampling algorithm incorporating
#' operational constraints. _5th Global Workshop on Digital Soil Mapping_. Sydney, p. 227-231, 2012.
#'
#' @note
#' The (only?) difference of `optimCLHS` to the original Fortran implementation of Minasny and McBratney
#' (2006), and to the `clhs` function implemented in the former
#' __[clhs](https://CRAN.R-project.org/package=clhs)__ package by Pierre Roudier, is
#' the annealing schedule.
#'
#' @author Alessandro Samuel-Rosa \email{alessandrosamuelrosa@@gmail.com}
#' @seealso \code{\link[spsann]{optimACDC}}
#' @concept spatial trend
#' @aliases optimCLHS objCLHS CLHS
#' @export
#' @examples
#' data(meuse.grid, package = "sp")
#' candi <- meuse.grid[1:1000, 1:2]
#' covars <- meuse.grid[1:1000, 5]
#' schedule <- scheduleSPSANN(
#' chains = 1, initial.temperature = 20, x.max = 1540, y.max = 2060,
#' x.min = 0, y.min = 0, cellsize = 40)
#' set.seed(2001)
#' res <- optimCLHS(
#' points = 10, candi = candi, covars = covars, use.coords = TRUE,
#' clhs.version = "fortran", weights = list(O1 = 0.5, O3 = 0.5), schedule = schedule)
#' objSPSANN(res) - objCLHS(
#' points = res, candi = candi, covars = covars, use.coords = TRUE,
#' clhs.version = "fortran", weights = list(O1 = 0.5, O3 = 0.5))
# MAIN FUNCTION ###############################################################################################
# Simulated-annealing optimization of a conditioned Latin hypercube sample.
# NOTE(review): the eval(.helper()) calls below evaluate spsann-internal
# expressions in this function's frame; they create/modify locals used later
# (e.g. 'new_conf'/'old_conf', 'n_pts', 'n_fixed_pts', 'sm', 'energies',
# 'cellsize', 'x_max0'/'y_max0', 'no_change') -- confirm against the
# package's internal definitions.
optimCLHS <-
  function (points, candi,
            # O1, O2, and O3
            covars, use.coords = FALSE, clhs.version = c("paper", "fortran", "update"),
            # SPSANN
            schedule = scheduleSPSANN(), plotit = FALSE, track = FALSE,
            boundary, progress = "txt", verbose = FALSE,
            # MOOP
            weights) {
    # weights = list(O1 = 1/3, O2 = 1/3, O3 = 1/3)) {

    # Objective function name
    objective <- "CLHS"

    # Check spsann arguments
    eval(.check_spsann_arguments())

    # Check other arguments
    check <- .optimCLHScheck(candi = candi, covars = covars, use.coords = use.coords)
    if (!is.null(check)) { stop (check, call. = FALSE) }

    # Set plotting options
    eval(.plotting_options())

    # Prepare points and candi
    eval(.prepare_points())

    # Prepare for jittering
    eval(.prepare_jittering())

    # Prepare 'covars' and base data
    eval(.prepare_clhs_covars())

    # Identify CLHS version
    clhs.version <- match.arg(clhs.version)

    # Compute initial energy state; fixed points count towards the sample size
    energy0 <- .objCLHS(
      sm = sm, breaks = breaks, id_num = id_num, pcm = pcm, id_fac = id_fac, n_pts = n_pts + n_fixed_pts,
      pop_count = pop_count, n_candi = n_candi, weights = weights, covars_type = covars_type,
      clhs.version = clhs.version)

    # Other settings for the simulated annealing algorithm
    old_sm <- sm
    new_sm <- sm
    best_sm <- sm
    old_energy <- energy0
    best_energy <- .bestEnergyCLHS(covars_type = covars_type)
    actual_temp <- schedule$initial.temperature
    k <- 0 # count the number of jitters

    # Set progress bar
    eval(.set_progress())

    # Initiate the annealing schedule
    for (i in 1:schedule$chains) {
      n_accept <- 0

      for (j in 1:schedule$chain.length) { # Initiate one chain

        for (wp in 1:n_pts) { # Initiate loop through points
          k <- k + 1

          # Plotting and jittering
          eval(.plot_and_jitter())

          # Update sample matrix and compute the new energy state
          new_sm[wp, ] <- covars[new_conf[wp, 1], ]
          new_energy <- .objCLHS(
            sm = new_sm, breaks = breaks, id_num = id_num, pcm = pcm, id_fac = id_fac,
            n_pts = n_pts + n_fixed_pts, pop_count = pop_count, n_candi = n_candi, weights = weights,
            covars_type = covars_type, clhs.version = clhs.version)

          # Evaluate the new system configuration (Metropolis-style acceptance)
          accept <- .acceptSPSANN(old_energy[[1]], new_energy[[1]], actual_temp)
          if (accept) {
            old_conf <- new_conf
            old_energy <- new_energy
            old_sm <- new_sm
            n_accept <- n_accept + 1
          } else {
            new_energy <- old_energy
            new_conf <- old_conf
            new_sm <- old_sm
          }
          if (track) energies[k, ] <- new_energy

          # Record best energy state; the 1.0000001 divisor demands a strict,
          # tolerance-sized improvement before updating the best state
          if (new_energy[[1]] < best_energy[[1]] / 1.0000001) {
            best_k <- k
            best_conf <- new_conf
            best_energy <- new_energy
            best_old_energy <- old_energy
            old_conf <- old_conf
            best_sm <- new_sm
            best_old_sm <- old_sm
          }

          # Update progress bar
          eval(.update_progress())

        } # End loop through points

      } # End the chain

      # Check the proportion of accepted jitters in the first chain
      eval(.check_first_chain())

      # Count the number of chains without any change in the objective function.
      # Restart with the previously best configuration if it exists.
      # (The restart itself is currently commented out: the loop just breaks.)
      if (n_accept == 0) {
        no_change <- no_change + 1
        if (no_change > schedule$stopping) {
          # if (new_energy[[1]] > best_energy[[1]] * 1.000001) {
          #   old_conf <- old_conf
          #   new_conf <- best_conf
          #   old_energy <- best_old_energy
          #   new_energy <- best_energy
          #   new_sm <- best_sm
          #   old_sm <- best_old_sm
          #   no_change <- 0
          #   cat("\nrestarting with previously best configuration\n")
          # } else {
          break
          # }
        }
        if (verbose) {
          cat("\n", no_change, "chain(s) with no improvement... stops at", schedule$stopping, "\n")
        }
      } else {
        no_change <- 0
      }

      # Update control parameters: cool the temperature and linearly shrink
      # the jittering window towards the cell size
      actual_temp <- actual_temp * schedule$temperature.decrease
      x.max <- x_max0 - (i / schedule$chains) * (x_max0 - x.min) + cellsize[1]
      y.max <- y_max0 - (i / schedule$chains) * (y_max0 - y.min) + cellsize[2]

    } # End the annealing schedule

    # Prepare output
    eval(.prepare_output())
  }
# INTERNAL FUNCTION - CHECK ARGUMENTS #########################################################################
# candi: candidate locations
# covars: covariates
# use.coords: should the coordinates be used
.optimCLHScheck <-
  function (candi, covars, use.coords) {
    # Validate the candidate locations against the covariates.
    # Returns NULL when valid, otherwise a character error message.
    if (is.vector(covars)) {
      # A single covariate is only allowed when coordinates are also used
      if (!use.coords) {
        return ("'covars' must have two or more columns")
      }
      n_covars <- length(covars)
    } else {
      n_covars <- nrow(covars)
    }
    if (nrow(candi) != n_covars) {
      return ("'candi' and 'covars' must have the same number of rows")
    }
    NULL
  }
# INTERNAL FUNCTION - CALCULATE THE CRITERION VALUE ###########################################################
# This function is used to calculate the criterion value of CLHS.
# Aggregation is done using the weighted sum method.
.objCLHS <-
  function (sm, breaks, id_num, pcm, id_fac, n_pts, pop_count, n_candi, weights, covars_type,
            clhs.version) {
    # CLHS criterion: aggregate O1, O2, and O3 with the weighted sum method.
    # Only the objective functions applicable to the covariate types present
    # ('numeric', 'factor', or 'both') are evaluated.
    if (covars_type %in% c("numeric", "both")) {
      o1 <- weights$O1 * .objO1(sm = sm, breaks = breaks, id_num = id_num,
                                clhs.version = clhs.version)
      o3 <- weights$O3 * .objO3(sm = sm, id_num = id_num, pcm = pcm,
                                clhs.version = clhs.version)
    }
    if (covars_type %in% c("factor", "both")) {
      o2 <- weights$O2 * .objO2(sm = sm, id_fac = id_fac, n_pts = n_pts,
                                pop_count = pop_count, n_candi = n_candi,
                                clhs.version = clhs.version)
    }
    # Output: data.frame with the weighted sum in the first column followed by
    # the constituent (weighted) objective functions in alphabetical order
    switch(covars_type,
           both = data.frame(obj = o1 + o2 + o3, O1 = o1, O2 = o2, O3 = o3),
           numeric = data.frame(obj = o1 + o3, O1 = o1, O3 = o3),
           data.frame(obj = o2))
  }
# CALCULATE OBJECTIVE FUNCTION VALUE ##########################################################################
#' @rdname optimCLHS
#' @export
# Compute the CLHS energy state (objective function value) of an existing
# sample configuration, without optimizing it.
# NOTE(review): unlike optimCLHS(), the n_pts forwarded to .objCLHS() here
# does not add 'n_fixed_pts' -- confirm whether fixed points should be
# counted in this code path.
objCLHS <-
  function (points, candi, covars, use.coords = FALSE,
            clhs.version = c("paper", "fortran", "update"),
            weights) {
    # weights = list(O1 = 1/3, O2 = 1/3, O3 = 1/3)) {

    # Check arguments
    check <- .optimCLHScheck(candi = candi, covars = covars, use.coords = use.coords)
    if (!is.null(check)) stop (check, call. = FALSE)

    # Prepare points and candi. These eval() calls evaluate spsann-internal
    # expressions in this frame, creating the locals used below (sm, breaks,
    # id_num, pcm, id_fac, n_pts, n_candi, pop_count, covars_type).
    eval(.prepare_points())

    # Prepare 'covars' and base data
    eval(.prepare_clhs_covars())

    # Identify CLHS version
    clhs.version <- match.arg(clhs.version)

    # Output energy state
    out <- .objCLHS(
      sm = sm, breaks = breaks, id_num = id_num, pcm = pcm, id_fac = id_fac, n_pts = n_pts, n_candi = n_candi,
      pop_count = pop_count, weights = weights, covars_type = covars_type, clhs.version = clhs.version)
    return(out)
  }
# INTERNAL FUNCTION - CALCULATE THE CRITERION VALUE (O1) ######################################################
# sm: sample matrix
# breaks: break points of the marginal sampling strata
# id_num: number of the column containing numeric covariates
# clhs.version: CLHS version
.objO1 <-
  function (sm, breaks, id_num, clhs.version) {
    # O1: how well the sample reproduces the marginal distribution of each
    # numeric covariate. Ideal = one sample point per marginal stratum.
    # Sample size in every marginal sampling stratum of every numeric covariate
    strata_count <- sapply(seq_along(id_num), function (i)
      graphics::hist(sm[id_num][, i], breaks[[i]], plot = FALSE)$counts)
    # Absolute deviation from the target of one point per stratum
    dev <- abs(strata_count - 1)
    switch(clhs.version,
           # Minasny and McBratney (2006): plain sum of deviations
           paper = sum(dev),
           # Budiman Minasny's FORTRAN code (ca. 2015): scaled by the total
           # number of marginal strata (n samples x n continuous covariates)
           fortran = sum(dev) / (nrow(sm) * length(id_num)),
           # Dick Brus (Jul 2018): mean absolute deviation, comparable with
           # the FORTRAN scaling
           update = mean(dev))
  }
# INTERNAL FUNCTION - CALCULATE THE CRITERION VALUE (O2) ######################################################
# sm: sample matrix
# n_pts: number of points
# id_fac: columns of sm containing factor covariates
# pop_prop: population class proportions (DEPRECATED)
# pop_count: population class counts
# n_candi: number of candidate locations (population)
# clhs.version: CLHS version
.objO2 <-
  # O2: how well the sample reproduces the class proportions of the factor
  # covariates.
  # sm: sample matrix (data.frame); id_fac: factor columns of sm;
  # n_pts: sample size; pop_count: list of population class counts (tables),
  # one per factor; n_candi: population size; clhs.version: formulation.
  function (sm, id_fac, n_pts, pop_count, n_candi, clhs.version) {
    # Count the number of sample points per class of every factor covariate.
    # 'drop = FALSE' keeps the data.frame when there is a single factor column;
    # without it 'sm[, id_fac]' collapses to a vector and lapply() would
    # tabulate each sample point separately instead of each covariate.
    sm_count <- lapply(sm[, id_fac, drop = FALSE], table)
    out <- switch (clhs.version,
      paper = ,
      fortran = {
        # Minasny and McBratney (2006): sum of the absolute differences between
        # sample and population class proportions ('paper' and 'fortran' use
        # the same formulation for O2)
        sm_prop <- lapply(sm_count, function (x) x / n_pts)
        pop_prop <- lapply(pop_count, function (x) x / n_candi)
        sum(sapply(seq_along(id_fac), function (i) sum(abs(sm_prop[[i]] - pop_prop[[i]]))))
      },
      update = {
        # Dick Brus (Jul 2018): mean of the absolute deviations between the
        # realized and the expected (proportional-allocation) class sample
        # sizes, making O2 comparable with the 'update' form of O1.
        # The original code computed abs(pop_count[[i]] - pop_count[[i]]),
        # which is identically zero.
        expected <- lapply(pop_count, function (x) x * n_pts / n_candi)
        mean(unlist(lapply(seq_along(id_fac), function (i)
          abs(sm_count[[i]] - expected[[i]]))))
      })
    return(out)
  }
# INTERNAL FUNCTION - CALCULATE THE CRITERION VALUE (O3) ######################################################
# sm: sample matrix
# id_num: columns of sm containing numeric covariates
# pcm: population correlation matrix
# clhs.version: CLHS version
.objO3 <-
  function (sm, id_num, pcm, clhs.version) {
    # O3: how well the sample reproduces the linear correlation structure of
    # the numeric covariates. 'pcm' is the population correlation matrix.
    # Sample Pearson correlation matrix of the numeric covariates
    scm <- stats::cor(x = sm[, id_num], use = "complete.obs")
    abs_diff <- abs(pcm - scm)
    switch(clhs.version,
           # Minasny and McBratney (2006): plain sum of absolute differences
           paper = sum(abs_diff),
           fortran = {
             # Budiman Minasny's FORTRAN code (ca. 2015) divides by
             # n * n / 2 + n, n = number of numeric covariates; the rationale
             # for this scaling factor is unclear
             p <- length(id_num)
             sum(abs_diff) / (p * p / 2 + p)
           },
           update = {
             # Dick Brus (Jul 2018): mean of the off-diagonal absolute
             # differences, making O3 comparable with O1 and O2
             mean(abs_diff[row(abs_diff) != col(abs_diff)])
           })
  }
# INTERNAL FUNCTION - PREPARE OBJECT TO STORE THE BEST ENERGY STATE ###########################################
.bestEnergyCLHS <-
  function (covars_type) {
    # Placeholder for the best energy state: Inf so any real state improves
    # on it. Columns mirror the data.frame returned by .objCLHS() for the
    # given covariate type.
    switch(covars_type,
           both = data.frame(obj = Inf, O1 = Inf, O2 = Inf, O3 = Inf),
           numeric = data.frame(obj = Inf, O1 = Inf, O3 = Inf),
           data.frame(obj = Inf))
  }
|
c47a70e715798ba610dd0da9556c676b18a33077
|
ccd06ad4d52aec1366f03d2d73a95f4bfaed328e
|
/F0101-BvZINB4.R
|
0d4b49fd66ebbecb7e80d0a3bc09dd6e60c47d0c
|
[] |
no_license
|
Hunyong/SCdep
|
7e2fd15fec411108f811f6ef39cf9dd5678736d5
|
fa1184fc9291aa0b5113122cb2d2760d9de28945
|
refs/heads/master
| 2021-01-23T00:48:40.483227
| 2019-01-04T02:33:34
| 2019-01-04T02:33:34
| 92,848,572
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37,968
|
r
|
F0101-BvZINB4.R
|
# BvZINB4: BvZINB3 + varying zero inflation parameters
library(rootSolve)
source("F0101-BvNB3.R")
source("F0101-BvZINB4-supp.R")
dBvZINB4 <- function(x, y, a0, a1, a2, b1, b2, p1, p2, p3, p4, log=FALSE) {
  # Density of the bivariate zero-inflated negative binomial (BvZINB4) at a
  # single observation (x, y): a four-component mixture with weights p1..p4 --
  # p1 * joint BvNB3 density, p2/p3 * marginal NB densities pinned at
  # y == 0 / x == 0, and p4 * a point mass at (0, 0).
  dxy <- dBvNB3(x=x, y=y, a0=a0, a1=a1, a2=a2, b1=b1, b2=b2, log=FALSE)
  dx <- dnbinom(x=x, a0+a1, 1/(1+b1))   # marginal NB density of x
  dy <- dnbinom(x=y, a0+a2, 1/(1+b2))   # marginal NB density of y
  result <- dxy * p1 + dx * ifelse(y==0,p2,0) + dy * ifelse(x==0,p3,0) + ifelse(x+y==0,p4,0)
  # Scalar branch on the 'log' flag: if()/else instead of the original
  # ifelse(log, log(result), result) -- ifelse() on a scalar test is an
  # anti-pattern and would truncate a vector 'result' to length 1.
  if (log) log(result) else result
}
# Dead interactive checks: guarded by if (FALSE) so they never run when the
# file is source()'d. Kept as a manual sanity test that the density over a
# 0:50 x 0:50 grid sums to approximately 1.
if (FALSE) {
  dBvZINB4(1,1,1,1,1,1,.5,.25,.25,.25,.25)
  tmp <- sapply(0:50, function(r) sapply (0:50, function(s) dBvZINB4(s,r,1,1,1,1,.5,.25,.25,.25,.25)))
  sum(tmp)
}
# Elementwise wrapper: lets dBvZINB4() accept vectors of (x, y) observations.
dBvZINB4.vec <- Vectorize(dBvZINB4)
lik.BvZINB4 <- function(x, y, param) {
  # Log-likelihood of the observations (x, y) under the BvZINB4 model, with
  # param = c(a0, a1, a2, b1, b2, p1, p2, p3, p4).
  dens <- dBvZINB4.vec(x, y,
                       param[1], param[2], param[3], param[4], param[5],
                       param[6], param[7], param[8], param[9])
  sum(log(dens))
}
rBvZINB4 <- function(n, a0, a1, a2, b1, b2, p1, p2, p3, p4, param=NULL) {
  # Draw n pairs (x, y) from the BvZINB4 model. When 'param' is supplied it
  # overrides the individual parameters:
  # param = c(a0, a1, a2, b1, b2, p1, p2, p3, p4).
  if (!is.null(param)) {
    a0 <- param[1]; a1 <- param[2]; a2 <- param[3]; b1 <- param[4]; b2 <- param[5]
    p1 <- param[6]; p2 <- param[7]; p3 <- param[8]; p4 <- param[9]
  }
  # Latent gamma components (g0, g1, g2), one row per draw; 'shape' recycles
  # c(a0, a1, a2) across columns because the matrix is filled by row
  gam <- matrix(rgamma(n*3, shape = c(a0, a1, a2), rate = 1/b1), n, 3, byrow=TRUE)
  # Poisson means: (g0 + g1) on scale b1 for u; (g0 + g2) rescaled to b2 for v
  mu <- cbind(gam[, 1] + gam[, 2], (gam[, 1] + gam[, 3]) * b2 / b1)
  uv <- matrix(rpois(n*2, mu), n, 2)
  # Zero inflation: one multinomial component per draw decides which margins
  # survive (component 1 keeps both, 2 keeps x only, 3 keeps y only, 4 none)
  E <- t(rmultinom(n, 1, c(p1, p2, p3, p4)))
  keep <- cbind(E[, 1] + E[, 2], E[, 1] + E[, 3])
  xy <- uv * keep
  colnames(xy) <- c("x", "y")
  xy
}
### 2.EM
### nonzero cells: (1-pp) was not multiplied by!!! this caused decreasing likelihood in EM
dBvZINB4.Expt <- function(x, y, a0, a1, a2, b1, b2, p1, p2, p3, p4, debug = FALSE) {
# Base density
t1 = (b1 + b2 + 1) /(b1 + 1); t2 = (b1 + b2 + 1) /(b2 + 1)
adj.A <- adj.B1 <- adj.C <- adj.sum <- 0
l1 <- function(k, m, adjj=0) exp(lgamma(a1 + k) - lgamma(k+1) - lgamma(a1) + lgamma(x + y + a0 -m -k) - lgamma(x -k +1) - lgamma(a0 + y - m)
+ lgamma(m + a2) - lgamma(m+1) - lgamma(a2) + lgamma(y +a0 -m) - lgamma(y -m +1) - lgamma(a0) - adjj)
l1.C <- function(k, m, adjj=0) exp(k *log(t1) + m *log(t2) - adjj)
l1.B <- - (+x+y+a0)*log(1 + b1 + b2) + x * log(b1) + y * log(b2) - a1 * log(1 + b1) - a2 * log(1 + b2)
# l1.B to be updated several lines later depending on l2.B ~ l4.B
l2.B <- exp(- (x + a0 + a1)*log(1 + b1) + x * log(b1) + adj.B1) * p2 * ifelse(y==0, 1, 0)
l3.B <- exp(- (y + a0 + a2)*log(1 + b2) + y * log(b2) + adj.B1) * p3 * ifelse(x==0, 1, 0)
l4.B <- p4 * ifelse(x + y == 0, 1, 0) * exp(adj.B1)
#l2.A, l3.A added.
l2.A <- function(k, adjj=0) exp( lgamma(x +a0 -k) + lgamma(k + a1) - lgamma(a0) - lgamma(x-k+1) - lgamma(a1) - lgamma(k+1) - adjj)
l3.A <- function(m, adjj=0) exp( lgamma(y +a0 -m) + lgamma(m + a2) - lgamma(a0) - lgamma(y-m+1) - lgamma(a2) - lgamma(m+1) - adjj)
# l1.AC For numerical stability use only.
l1.AC <- function(k, m, adjj=0) exp(lgamma(a1 + k) - lgamma(k+1) - lgamma(a1) + lgamma(x + y + a0 -m -k) - lgamma(x -k +1) - lgamma(a0 + y - m)
+ lgamma(m + a2) - lgamma(m+1) - lgamma(a2) + lgamma(y +a0 -m) - lgamma(y -m +1) - lgamma(a0) + k *log(t1) + m *log(t2) - adjj)
# cat("l1.B ", l1.B,"\n")
if (l1.B < - 200 & log(l2.B + l3.B + l4.B) < 0) {
if (debug) cat("adjustment activated for l1.B\n")
adj.B1 = ((-l1.B - 200) %/% 100) * 100 # prevent exp(l1.B) from being 0
l1.B = l1.B + adj.B1
}
l1.B <- exp(l1.B) * p1
if (debug) cat("l1.B ", l1.B,"\n")
l.A.mat <- sapply(0:x, function(k) sapply(0:y, l1, k = k, adjj = adj.A)) # %>% print
l2.A.mat <- sapply(0:x, l2.A, adjj = adj.A) # %>% print
l3.A.mat <- sapply(0:y, l3.A, adjj = adj.A) # %>% print
l.C.mat <- sapply(0:x, function(k) sapply(0:y, l1.C, k = k, adjj = adj.C)) # %>% print
while (log(sum( l.A.mat)) > 250) {
### may have to be updated for l2.A.mat and l3.A.mat ###
if (debug) cat("adjustment activated for A.mat\n")
adj.A = adj.A + 200
l.A.mat <- sapply(0:x, function(k) sapply(0:y, l1, k = k, adjj = adj.A)) # %>% print
}
while (log(sum( l.C.mat)) > 250) {
if (debug) cat("adjustment activated for C.mat\n")
adj.C = adj.C + 200
l.C.mat <- sapply(0:x, function(k) sapply(0:y, l1.C, k = k, adjj = adj.C)) # %>% print
}
# print(l.C.mat)
# if (is.infinite(sum( l.A.mat))) {
# cat("activated once")
# adj.A = 200
# l.A.mat <- sapply(0:x, function(k) sapply(0:y, function(m) {l1(k =k, m = m) *exp(-adj.A)}))
# if (is.infinite(sum( l.A.mat))) { ## added for further adjustment
# cat("activated twice")
# adj.A = 500
# l.A.mat <- sapply(0:x, function(k) sapply(0:y, function(m) {l1(k =k, m = m) *exp(-adj.A)}))
# }
# } #%>%print
#adjustment is cancelled out for each Expectation, so can be ignored. But for the final likelihood it should be adjusted at the end.
sum.AC <- sum(l.A.mat * l.C.mat)
if (is.infinite(sum.AC)| log(sum.AC) > 200) {
if (debug) cat("adjustment activated for AC.mat (too large)\n")
adj.A = adj.A + 100
adj.C = adj.C + 100
l.A.mat <- sapply(0:x, function(k) sapply(0:y, l1, k = k, adjj = adj.A)) # %>% print
l.C.mat <- sapply(0:x, function(k) sapply(0:y, l1.C, k = k, adjj = adj.C)) # %>% print
sum.AC <- sum(l.A.mat * l.C.mat)
} else if (log(sum.AC) < - 100) {
if (debug) cat("adjustment activated for AC.mat (too small)\n")
adj.A = adj.A - 200 # floor(log(sum(l.A.mat)/x/y)*2/3)
adj.C = adj.C - 200
l.A.mat <- sapply(0:x, function(k) sapply(0:y, l1, k = k, adjj = adj.A)) # %>% print
l.C.mat <- sapply(0:x, function(k) sapply(0:y, l1.C, k = k, adjj = adj.C)) # %>% print
l.AC.mat <- sapply(0:x, function(k) sapply(0:y, l1.AC, k = k, adjj = adj.C + adj.A))
sum.AC <- sum(l.AC.mat)
# abcde.1 <<- l.A.mat
# abcde.2 <<- l.C.mat
# abcde.3 <<- l.AC.mat
}
sum.A <- sum(l.A.mat)
l.sum <- sum.AC * l1.B + sum.A * sum (l2.B + l3.B + l4.B) * exp(-adj.C)
if (l.sum == 0) {
adj.sum = -floor(log(sum.AC)*2/3 + log(l1.B)*2/3)
if (debug) cat("adjustment activated for l.sum (adj = ", adj.sum, ")\n")
l.sum <- sum.AC * exp(adj.sum) * l1.B + sum.A * (exp(adj.sum) * sum (l2.B + l3.B + l4.B)) * exp(-adj.C)
# abcde.4 <<- c(l.sum = l.sum, sum.AC = sum.AC, l1.B = l1.B, sum.A = sum.A, l2.B = l2.B, l3.B = l3.B, l4.B = l4.B, adj.C = adj.C)
## paranthesis matters. sum.A = some number, exp(adj.sum) = almost inf, sum(l2.B + l3.B + l4.B) = 0, ...
# Then without paranthesis, Inf * 0 = NaN,
# But with paranthesis, c * (large number * 0) = c * 0 = 0
}
if (debug) {
cat("sum.AC", sum.AC,"\n\n")
cat("sum.A", sum.A,"\n\n")
cat("sum(l.C.mat)", sum(l.C.mat),"\n\n")
cat("l1.B", l1.B,"\n\n")
cat("l2.B", l2.B,"\n\n")
cat("l3.B", l3.B,"\n\n")
cat("l4.B", l4.B,"\n\n")
cat("l.sum ", l.sum, "\n")
}
# print(c(l.sum, log(l.sum))); print(l.A.mat); print(l.C.mat); print(c(l1.B, l2.B, l3.B, l4.B, adj.A)) #####
# expectation components
R0.E1 <- function(k, m) {x - k + y - m + a0}
log.R0.E1 <- function(k, m) {digamma(x - k + y - m + a0)}
log.R0.E2 <- function(k) {digamma(x - k + a0)}
log.R0.E3 <- function(m) {digamma(y - m + a0)}
R0.E1.B <- b1/(1 + b1 + b2)
R0.E2.B <- b1/(1 + b1)
R0.E3.B <- b1/(1 + b2)
R0.E4.B <- b1
R1.E1 <- function(k) {k + a1}
log.R1.E1 <- function(k) {digamma(k + a1)}
log.R1.E2 <- function(k) {digamma(k + a1)}
R1.E1.B <- b1/(1 + b1)
R1.E2.B <- b1/(1 + b1)
R1.E3.B <- b1
R1.E4.B <- b1
R2.E1 <- function(m) {m + a2}
log.R2.E1 <- function(m) {digamma(m + a2)}
log.R2.E3 <- function(m) {digamma(m + a2)}
R2.E1.B <- b1/(1 + b2)
R2.E2.B <- b1
R2.E3.B <- b1/(1 + b2)
R2.E4.B <- b1
R0.mat <- sapply (0:x, function(k) sapply(0:y, R0.E1, k=k))
R0.mat <- R0.mat * l.A.mat
R0.E <- sum(R0.mat * l.C.mat * exp(adj.sum) * l1.B * R0.E1.B) / l.sum +
sum(R0.mat*( l2.B * R0.E2.B
+ l3.B * R0.E3.B
+ l4.B * R0.E4.B)*exp(-adj.C + adj.sum)) / l.sum
# cat("R0.E ", R0.E, "\n")
R1.mat <- t(matrix(sapply(0:x, R1.E1), x+1, y+1))
R1.mat <- R1.mat * l.A.mat
R1.E <- sum(R1.mat * l.C.mat * exp(adj.sum) * l1.B * R1.E1.B) / l.sum +
sum(R1.mat*( l2.B * R1.E2.B
+ l3.B * R1.E3.B
+ l4.B * R1.E4.B)*exp(-adj.C + adj.sum)) / l.sum
# cat("R1.E ", R1.E, "\n")
R2.mat <- matrix(sapply(0:y, R2.E1), y+1, x+1) #%>% print
R2.mat <- R2.mat * l.A.mat
R2.E <- sum(R2.mat * l.C.mat * exp(adj.sum) * l1.B * R2.E1.B) / l.sum +
sum(R2.mat*( l2.B * R2.E2.B
+ l3.B * R2.E3.B
+ l4.B * R2.E4.B)*exp(-adj.C + adj.sum)) / l.sum
# cat("R2.E ", R2.E, "\n")
log.R0.mat <- sapply(0:x, function(k) sapply(0:y, log.R0.E1, k=k))
log.R0.mat <- l.A.mat * (log.R0.mat + log (R0.E1.B))
log.R0.mat2 <- sapply(0:x, log.R0.E2)
log.R0.mat2 <- l2.A.mat * (log.R0.mat2 + log (R0.E2.B))
log.R0.mat3 <- sapply(0:y, log.R0.E3)
log.R0.mat3 <- l3.A.mat * (log.R0.mat3 + log (R0.E3.B))
log.R0.E <-
sum(log.R0.mat * l.C.mat) * exp(adj.sum - adj.C) * l1.B +
sum(log.R0.mat2 * l2.B) * exp(adj.sum) +
sum(log.R0.mat3 * l3.B) * exp(adj.sum) +
(digamma(a0) + log(b1)) * exp(adj.sum) * l4.B
log.R0.E <- log.R0.E / l.sum
log.R1.mat <- sapply(0:x, log.R1.E1)
log.R1.mat2 <- log.R1.mat # saving a vector form
log.R1.mat2 <- l2.A.mat * (log.R1.mat2 + log (R1.E2.B))
log.R1.mat <- t(matrix(log.R1.mat, x+1, y+1))
log.R1.mat <- l.A.mat * (log.R1.mat + log (R1.E1.B))
log.R1.mat3 <- l3.A.mat * (digamma(a1) + log(R1.E3.B))
log.R1.E <-
sum(log.R1.mat * l.C.mat) * exp(adj.sum - adj.C) * l1.B +
sum(log.R1.mat2 * l2.B) * exp(adj.sum) +
sum(log.R1.mat3 * l3.B) * exp(adj.sum) +
(digamma(a1) + log(b1)) * exp(adj.sum) * l4.B
log.R1.E <- log.R1.E / l.sum
log.R2.mat <- sapply(0:y, log.R2.E1)
log.R2.mat3 <- log.R2.mat # saving a vector form
log.R2.mat3 <- l3.A.mat * (log.R2.mat3 + log (R2.E3.B))
log.R2.mat <- matrix(log.R2.mat, y+1, x+1)
log.R2.mat <- l.A.mat * (log.R2.mat + log (R2.E1.B))
log.R2.mat2 <- l2.A.mat * (digamma(a2) + log(R2.E2.B))
log.R2.E <-
sum(log.R2.mat * l.C.mat) * exp(adj.sum - adj.C) * l1.B +
sum(log.R2.mat2 * l2.B) * exp(adj.sum) +
sum(log.R2.mat3 * l3.B) * exp(adj.sum) +
(digamma(a2) + log(b1)) * exp(adj.sum) * l4.B
log.R2.E <- log.R2.E / l.sum
# cat("log.R2.E ", log.R2.E, "\n")
E.E <- c(sum.AC * exp(adj.sum) * l1.B, sum.A * c(l2.B, l3.B, l4.B)*exp(-adj.C + adj.sum))
E.E <- E.E/sum(E.E)
# cat("E.E ", E.E, "\n")
v.E <- ifelse(y == 0, 0, y) + (a0 + a2) * b2 * sum(E.E[c(2,4)])
# v.E <- (sum.AC * exp(adj.sum) * l1.B * y +
# sum.A * l2.B * a2 * b2*exp(-adj.C + adj.sum) +
# dnbinom(x, a0 + a1 + 1, b1/(1+b1)) * exp(-adj.A - adj.C + adj.sum) * a0 * b2 * p2 * ifelse(y==0, 1, 0) +
# sum.A * l3.B * y *exp(-adj.C + adj.sum) +
# sum.A * l4.B * (a0 + a2) * b2 *exp(-adj.C + adj.sum)) / l.sum
result <- c(log(l.sum) + adj.A -adj.B1 + adj.C - adj.sum, R0.E, R1.E, R2.E, log.R0.E, log.R1.E, log.R2.E, E.E, v.E) #%>%print
names(result) <- c("logdensity", paste0("R", 0:2, ".E"), paste0("log.R", 0:2, ".E"), paste0("E",1:4,".E"), "v.E")
return(result)
}
### nonzero cells: (1-pp) was not multiplied by!!! this caused decreasing likelihood in EM
# DEPRECATED / KNOWN-INCORRECT variant of dBvZINB4.Expt, kept for reference only.
# Per the note above: for nonzero cells the (1 - pp) factor was not multiplied
# in, which caused the likelihood to decrease during EM. Do not use for fitting.
#
# Computes, for a single observed cell (x, y) of the bivariate zero-inflated
# negative binomial (BvZINB4) model, the log-density and the E-step
# expectations needed by the EM fitters below.
#
# Arguments:
#   x, y        observed counts (scalars)
#   a0, a1, a2  gamma shape parameters (shared, x-specific, y-specific)
#   b1, b2      scale parameters for the x and y margins
#   p1..p4      mixing probabilities of the four zero-inflation components
#   debug       if TRUE, print intermediate quantities
# Returns a named vector: logdensity, R0.E..R2.E, log.R0.E..log.R2.E,
# E1.E..E4.E (posterior component probabilities), and v.E.
dBvZINB4.Expt.wrong <- function(x, y, a0, a1, a2, b1, b2, p1, p2, p3, p4, debug = FALSE) {
  # Base density
  # t1, t2: ratios reused inside the double-series terms below.
  t1 = (b1 + b2 + 1) /(b1 + 1); t2 = (b1 + b2 + 1) /(b2 + 1)
  # adj.* are log-scale offsets for numerical stability only; they cancel in
  # each expectation and are re-added when the final log-density is assembled.
  adj.A <- adj.B1 <- adj.C <- adj.sum <- 0
  # l1: gamma-function part of the series term (k, m); l1.C: geometric part.
  l1 <- function(k, m, adjj=0) exp(lgamma(a1 + k) - lgamma(k+1) - lgamma(a1) + lgamma(x + y + a0 -m -k) - lgamma(x -k +1) - lgamma(a0 + y - m)
                                   + lgamma(m + a2) - lgamma(m+1) - lgamma(a2) + lgamma(y +a0 -m) - lgamma(y -m +1) - lgamma(a0) - adjj)
  l1.C <- function(k, m, adjj=0) exp(k *log(t1) + m *log(t2) - adjj)
  # l1.B..l4.B: the four zero-inflation component weights for this cell.
  l1.B <- - (+x+y+a0)*log(1 + b1 + b2) + x * log(b1) + y * log(b2) - a1 * log(1 + b1) - a2 * log(1 + b2)
  # l1.B to be updated several lines later depending on l2.B ~ l4.B
  l2.B <- exp(- (x + a0 + a1)*log(1 + b1) + x * log(b1) + adj.B1) * p2 * ifelse(y==0, 1, 0)
  l3.B <- exp(- (y + a0 + a2)*log(1 + b2) + y * log(b2) + adj.B1) * p3 * ifelse(x==0, 1, 0)
  l4.B <- p4 * ifelse(x + y == 0, 1, 0) * exp(adj.B1)
  # l1.AC For numerical stability use only.
  l1.AC <- function(k, m, adjj=0) exp(lgamma(a1 + k) - lgamma(k+1) - lgamma(a1) + lgamma(x + y + a0 -m -k) - lgamma(x -k +1) - lgamma(a0 + y - m)
                                      + lgamma(m + a2) - lgamma(m+1) - lgamma(a2) + lgamma(y +a0 -m) - lgamma(y -m +1) - lgamma(a0) + k *log(t1) + m *log(t2) - adjj)
  # cat("l1.B ", l1.B,"\n")
  # Rescale l1.B on the log scale when exp(l1.B) would underflow to 0.
  if (l1.B < - 200 & log(l2.B + l3.B + l4.B) < 0) {
    if (debug) cat("adjustment activated for l1.B\n")
    adj.B1 = ((-l1.B - 200) %/% 100) * 100 # prevent exp(l1.B) from being 0
    l1.B = l1.B + adj.B1
  }
  l1.B <- exp(l1.B) * p1
  if (debug) cat("l1.B ", l1.B,"\n")
  # (y+1) x (x+1) matrices of series terms over all (k, m) pairs.
  l.A.mat <- sapply(0:x, function(k) sapply(0:y, l1, k = k, adjj = adj.A)) # %>% print
  l.C.mat <- sapply(0:x, function(k) sapply(0:y, l1.C, k = k, adjj = adj.C)) # %>% print
  # Shrink each matrix until its total no longer risks overflow in exp-space.
  while (log(sum( l.A.mat)) > 250) {
    if (debug) cat("adjustment activated for A.mat\n")
    adj.A = adj.A + 200
    l.A.mat <- sapply(0:x, function(k) sapply(0:y, l1, k = k, adjj = adj.A)) # %>% print
  }
  while (log(sum( l.C.mat)) > 250) {
    if (debug) cat("adjustment activated for C.mat\n")
    adj.C = adj.C + 200
    l.C.mat <- sapply(0:x, function(k) sapply(0:y, l1.C, k = k, adjj = adj.C)) # %>% print
  }
  # print(l.C.mat)
  # if (is.infinite(sum( l.A.mat))) {
  #   cat("activated once")
  #   adj.A = 200
  #   l.A.mat <- sapply(0:x, function(k) sapply(0:y, function(m) {l1(k =k, m = m) *exp(-adj.A)}))
  #   if (is.infinite(sum( l.A.mat))) { ## added for further adjustment
  #     cat("activated twice")
  #     adj.A = 500
  #     l.A.mat <- sapply(0:x, function(k) sapply(0:y, function(m) {l1(k =k, m = m) *exp(-adj.A)}))
  #   }
  # } #%>%print
  #adjustment is cancelled out for each Expectation, so can be ignored. But for the final likelihood it should be adjusted at the end.
  sum.AC <- sum(l.A.mat * l.C.mat)
  # Re-balance the two matrices if their elementwise product over- or
  # underflowed; the "too small" branch recomputes via the combined l1.AC term.
  if (is.infinite(sum.AC)| log(sum.AC) > 200) {
    if (debug) cat("adjustment activated for AC.mat (too large)\n")
    adj.A = adj.A + 100
    adj.C = adj.C + 100
    l.A.mat <- sapply(0:x, function(k) sapply(0:y, l1, k = k, adjj = adj.A)) # %>% print
    l.C.mat <- sapply(0:x, function(k) sapply(0:y, l1.C, k = k, adjj = adj.C)) # %>% print
    sum.AC <- sum(l.A.mat * l.C.mat)
  } else if (log(sum.AC) < - 100) {
    if (debug) cat("adjustment activated for AC.mat (too small)\n")
    adj.A = adj.A - 200 # floor(log(sum(l.A.mat)/x/y)*2/3)
    adj.C = adj.C - 200
    l.A.mat <- sapply(0:x, function(k) sapply(0:y, l1, k = k, adjj = adj.A)) # %>% print
    l.C.mat <- sapply(0:x, function(k) sapply(0:y, l1.C, k = k, adjj = adj.C)) # %>% print
    l.AC.mat <- sapply(0:x, function(k) sapply(0:y, l1.AC, k = k, adjj = adj.C + adj.A))
    sum.AC <- sum(l.AC.mat)
    # abcde.1 <<- l.A.mat
    # abcde.2 <<- l.C.mat
    # abcde.3 <<- l.AC.mat
  }
  sum.A <- sum(l.A.mat)
  # Total (adjusted) density for this cell: series part + degenerate parts.
  l.sum <- sum.AC * l1.B + sum.A * sum (l2.B + l3.B + l4.B) * exp(-adj.C)
  if (l.sum == 0) {
    adj.sum = -floor(log(sum.AC)*2/3 + log(l1.B)*2/3)
    if (debug) cat("adjustment activated for l.sum (adj = ", adj.sum, ")\n")
    l.sum <- sum.AC * exp(adj.sum) * l1.B + sum.A * (exp(adj.sum) * sum (l2.B + l3.B + l4.B)) * exp(-adj.C)
    # abcde.4 <<- c(l.sum = l.sum, sum.AC = sum.AC, l1.B = l1.B, sum.A = sum.A, l2.B = l2.B, l3.B = l3.B, l4.B = l4.B, adj.C = adj.C)
    ## paranthesis matters. sum.A = some number, exp(adj.sum) = almost inf, sum(l2.B + l3.B + l4.B) = 0, ...
    # Then without paranthesis, Inf * 0 = NaN,
    # But with paranthesis, c * (large number * 0) = c * 0 = 0
  }
  if (debug) {
    cat("sum.AC", sum.AC,"\n\n")
    cat("sum.A", sum.A,"\n\n")
    cat("sum(l.C.mat)", sum(l.C.mat),"\n\n")
    cat("l1.B", l1.B,"\n\n")
    cat("l2.B", l2.B,"\n\n")
    cat("l3.B", l3.B,"\n\n")
    cat("l4.B", l4.B,"\n\n")
    cat("l.sum ", l.sum, "\n")
  }
  # print(c(l.sum, log(l.sum))); print(l.A.mat); print(l.C.mat); print(c(l1.B, l2.B, l3.B, l4.B, adj.A)) #####
  # expectation components
  # R*.E1: posterior expectation kernels; R*.E?.B: per-component scale factors.
  R0.E1 <- function(k, m) {x - k + y - m + a0}
  log.R0.E1 <- function(k, m) {digamma(x - k + y - m + a0)}
  R0.E1.B <- b1/(1 + b1 + b2)
  R0.E2.B <- b1/(1 + b1)
  R0.E3.B <- b1/(1 + b2)
  R0.E4.B <- b1
  R1.E1 <- function(k) {k + a1}
  log.R1.E1 <- function(k) {digamma(k + a1)}
  R1.E1.B <- b1/(1 + b1)
  R1.E2.B <- b1/(1 + b1)
  R1.E3.B <- b1
  R1.E4.B <- b1
  R2.E1 <- function(m) {m + a2}
  log.R2.E1 <- function(m) {digamma(m + a2)}
  R2.E1.B <- b1/(1 + b2)
  R2.E2.B <- b1
  R2.E3.B <- b1/(1 + b2)
  R2.E4.B <- b1
  # Posterior means of the three latent gamma components R0, R1, R2.
  R0.mat <- sapply (0:x, function(k) sapply(0:y, R0.E1, k=k))
  R0.mat <- R0.mat * l.A.mat
  R0.E <- sum(R0.mat * l.C.mat * exp(adj.sum) * l1.B * R0.E1.B) / l.sum +
    sum(R0.mat*( l2.B * R0.E2.B
                 + l3.B * R0.E3.B
                 + l4.B * R0.E4.B)*exp(-adj.C + adj.sum)) / l.sum
  # cat("R0.E ", R0.E, "\n")
  R1.mat <- t(matrix(sapply(0:x, R1.E1), x+1, y+1))
  R1.mat <- R1.mat * l.A.mat
  R1.E <- sum(R1.mat * l.C.mat * exp(adj.sum) * l1.B * R1.E1.B) / l.sum +
    sum(R1.mat*( l2.B * R1.E2.B
                 + l3.B * R1.E3.B
                 + l4.B * R1.E4.B)*exp(-adj.C + adj.sum)) / l.sum
  # cat("R1.E ", R1.E, "\n")
  R2.mat <- matrix(sapply(0:y, R2.E1), y+1, x+1) #%>% print
  R2.mat <- R2.mat * l.A.mat
  R2.E <- sum(R2.mat * l.C.mat * exp(adj.sum) * l1.B * R2.E1.B) / l.sum +
    sum(R2.mat*( l2.B * R2.E2.B
                 + l3.B * R2.E3.B
                 + l4.B * R2.E4.B)*exp(-adj.C + adj.sum)) / l.sum
  # cat("R2.E ", R2.E, "\n")
  # Posterior means of log R0, log R1, log R2 (via digamma kernels).
  log.R0.mat <- sapply(0:x, function(k) sapply(0:y, log.R0.E1, k=k))
  log.R0.mat <- log.R0.mat * l.A.mat
  log.R0.E <- sum(log.R0.mat * l.C.mat) * exp(adj.sum) * l1.B + sum(log.R0.mat) * c(l2.B + l3.B + l4.B)*exp(-adj.C + adj.sum)
  log.R0.E <- log.R0.E +
    sum.AC * exp(adj.sum) * l1.B * log (R0.E1.B) +
    sum.A * c(l2.B, l3.B, l4.B) %*% log (c(R0.E2.B, R0.E3.B, R0.E4.B)) *exp(-adj.C + adj.sum)
  log.R0.E <- log.R0.E / l.sum
  # cat("log.R0.E ", log.R0.E, "\n")
  log.R1.mat <- t(matrix(sapply(0:x, log.R1.E1), x+1, y+1))
  log.R1.mat <- log.R1.mat * l.A.mat
  log.R1.E <- sum(log.R1.mat * l.C.mat) * exp(adj.sum) * l1.B + sum(log.R1.mat) * c(l2.B + l3.B + l4.B)*exp(-adj.C + adj.sum)
  log.R1.E <- log.R1.E +
    sum.AC * exp(adj.sum) * l1.B * log (R1.E1.B) +
    sum.A * c(l2.B, l3.B, l4.B) %*% log (c(R1.E2.B, R1.E3.B, R1.E4.B))*exp(-adj.C + adj.sum)
  log.R1.E <- log.R1.E / l.sum
  # cat("log.R1.E ", log.R1.E, "\n")
  log.R2.mat <- matrix(sapply(0:y, log.R2.E1), y+1, x+1)
  log.R2.mat <- log.R2.mat * l.A.mat
  log.R2.E <- sum(log.R2.mat * l.C.mat) * exp(adj.sum) * l1.B + sum(log.R2.mat) * c(l2.B + l3.B + l4.B)*exp(-adj.C + adj.sum)
  log.R2.E <- log.R2.E +
    sum.AC * exp(adj.sum) * l1.B * log (R2.E1.B) +
    sum.A * c(l2.B, l3.B, l4.B) %*% log (c(R2.E2.B, R2.E3.B, R2.E4.B))*exp(-adj.C + adj.sum)
  log.R2.E <- log.R2.E / l.sum
  # cat("log.R2.E ", log.R2.E, "\n")
  # E.E: posterior probability of each of the four zero-inflation components.
  E.E <- c(sum.AC * exp(adj.sum) * l1.B, sum.A * c(l2.B, l3.B, l4.B)*exp(-adj.C + adj.sum))
  E.E <- E.E/sum(E.E)
  # cat("E.E ", E.E, "\n")
  #v.E <- ifelse(y == 0, 0, y) + (a0 + a2) * b2 * sum(E.E[c(2,4)])
  # v.E: expected latent y-count, mixing the observed y with model-implied
  # means for the components in which y is structurally zero.
  v.E <- (sum.AC * exp(adj.sum) * l1.B * y +
          sum.A * l2.B * a2 * b2*exp(-adj.C + adj.sum) +
          dnbinom(x, a0 + a1 + 1, b1/(1+b1)) * exp(-adj.A - adj.C + adj.sum) * a0 * b2 * p2 * ifelse(y==0, 1, 0) +
          sum.A * l3.B * y *exp(-adj.C + adj.sum) +
          sum.A * l4.B * (a0 + a2) * b2 *exp(-adj.C + adj.sum)) / l.sum
  # Undo all stability offsets when reporting the log-density.
  result <- c(log(l.sum) + adj.A -adj.B1 + adj.C - adj.sum, R0.E, R1.E, R2.E, log.R0.E, log.R1.E, log.R2.E, E.E, v.E) #%>%print
  names(result) <- c("logdensity", paste0("R", 0:2, ".E"), paste0("log.R", 0:2, ".E"), paste0("E",1:4,".E"), "v.E")
  return(result)
}
dBvZINB4.Expt.vec <- Vectorize(dBvZINB4.Expt)
# Ad-hoc interactive sanity checks for dBvZINB4.Expt.vec; wrapped in
# if (FALSE) so they never run when the file is sourced.
if (FALSE) {
  tmp <- dBvZINB4.Expt.vec(c(1,1,1),c(0,1,2),1,1,1,1,2,.25,.25,.25,.25)
  tmp <- dBvZINB4.Expt.vec(c(0,1,1),c(0,1,2),1,1,1,1,2,.25,.25,.25,.25)
  tmp <- dBvZINB4.Expt.vec(extractor(1),extractor(2),1,1,1,1,2,.25,.25,.25,.25)
  t(tmp)[21:40,]
  dBvZINB4.Expt.vec(c(10,1,2),c(10,1,1), 1.193013282, 0.003336139, 0.002745513, 3.618842924, 3.341625901, .25,.25,.25,.25)
}
# maxiter control added, output =param + lik + #iter
# EM maximum-likelihood fitter for the BvZINB4 model (plain version, no
# boosting).  Iterates an E-step (dBvZINB4.Expt.vec) and an M-step that solves
# the shape/scale score equations with rootSolve::multiroot.
#
# Arguments:
#   xvec, yvec  paired count vectors
#   initial     optional starting values (a0, a1, a2, b1, b2, p1..p4);
#               a moment-based guess is built when NULL
#   tol         convergence tolerance on the max absolute parameter change
#   maxiter     iteration cap
#   showFlag    print parameters each iteration
#   showPlot    plot the pureCor trace every 20 iterations
# Returns a named vector: a0..a2, b1, b2, p1..p4, lik, iter, pureCor.
# Side effect: writes the data frame `cor.trace` to the global environment.
# NOTE(review): depends on bin.profile, stat.BvZINB4, lik.BvZINB4 and
# rootSolve::multiroot, all defined/attached elsewhere in this project.
ML.BvZINB4 <- function (xvec, yvec, initial = NULL, tol=1e-8, maxiter = 200, showFlag=FALSE, showPlot = FALSE) {
  # Collapse the data to unique (x, y) cells with frequencies; the E-step cost
  # then scales with the number of distinct cells, not the sample size.
  xy.reduced <- as.data.frame(table(xvec,yvec))
  names(xy.reduced) <- c("x", "y","freq")
  xy.reduced <- xy.reduced[xy.reduced$freq != 0,]
  xy.reduced$x <- as.numeric(as.character(xy.reduced$x))
  xy.reduced$y <- as.numeric(as.character(xy.reduced$y))
  xy.reduced$freq <- as.numeric(as.character(xy.reduced$freq))
  n <- sum(xy.reduced$freq)
  # Degenerate all-zero data: return a fixed boundary solution immediately.
  if (max(xvec)==0 & max(yvec)==0) {return(c(rep(1e-10,5),1,0,0,0, 0, 1, 0))} # 9 params, lik, iter, pureCor
  #print(xy.reduced)
  # initial guess
  # Method-of-moments starting values; correlation feeds the shared shape a0.
  if (is.null(initial)) {
    xbar <- mean(xvec); ybar <- mean(yvec); xybar <- mean(c(xbar, ybar))
    s2.x <- var(xvec); s2.y <- var(yvec); if(is.na(s2.x)) {s2.x <- s2.y <- 1}
    cor.xy <- cor(xvec,yvec); if (is.na(cor.xy)) {cor.xy <- 0}
    zero <- sum(xvec == 0 & yvec == 0) / n
    initial <- rep(NA,9)
    initial[4] <- s2.x /xbar
    initial[5] <- s2.y /ybar
    initial[2:3] <- c(xbar,ybar)/initial[4:5]
    initial[1] <- min(initial[2:3]) * abs(cor.xy)
    initial[2:3] <- initial[2:3] - initial[1]
    initial[6:9] <- bin.profile(xvec, yvec) # freq of each zero-nonzero profile
    initial[6:9] <- initial[6:9]/sum(initial[6:9]) # relative freq
    initial <- pmax(initial, 1e-5)
    #print(initial) ###
  }
  # Global trace of the pure-correlation statistic (used for plotting).
  cor.trace <<- data.frame(iter=1, pureCor=1)
  iter = 0
  param = initial
  if (showFlag) {print(c("iter", "a0", "a1", "a2", "b1", "b2", paste0("p",1:4), "lik", "pureCor"))}
  if (showPlot) {
    par(mfrow=c(2,1))
    par(mar=c(2,4,1,4))
  }
  repeat {
    iter = iter + 1
    # print(c(param))
    # print(lik(vec, pp=param[1], m0=param[2], m1=param[3], m2=param[4])) # debug
    param.old <- param # saving old parameters
    # updating
    # E-step: per-cell expectations, then frequency-weighted averages.
    expt <- dBvZINB4.Expt.vec(xy.reduced$x, xy.reduced$y,
                              a0 = param[1], a1 = param[2], a2 = param[3], b1 = param[4], b2 = param[5], p1 = param[6], p2 = param[7], p3 = param[8], p4 = param[9])
    expt <- as.vector(expt %*% xy.reduced$freq / n) #%>% print
    # loglik = expt[1] * n
    delta <- expt[12] / (expt[2] + expt[4]) # delta = E(V) / (E(xi0 + xi2))
    param[6:9] = expt[8:11] # pi = E(Z)
    # M-step score equations for (a0, a1, a2, b1), solved on the log scale so
    # the root-finder cannot step outside the positive domain.
    opt.vec <- function(par.ab) {
      par.ab <- exp(par.ab)
      r1 <- sum(expt[2:4]) - sum(par.ab[1:3]) * par.ab[4]
      r2 <- expt[5:7] - digamma(par.ab[1:3]) - log(par.ab[4])
      # print(c(r1,r2)) ###
      return(c(r1,r2))
    }
    param.l <- log(param)
    result <- try(multiroot(opt.vec, start=param.l[1:4])$root, silent=TRUE)
    # Fallback: restart the root search from a neutral point if it failed.
    if (class(result)=="try-error") {
      initial = rep(1,4)
      result <- multiroot(opt.vec, start = initial[1:4], rtol=1e-20)$root
    }
    param[1:4] <- exp(result)
    param[5] <- param[4] * delta # b2
    pureCor <- stat.BvZINB4(param = param, measure = "pureCor")
    cor.trace[iter,] <<- c(iter,pureCor)
    if (showPlot & (iter %% 20 == 0)) {
      span <- min(max(iter-200+1,1),101):iter
      span2 <- max(iter-100+1,1):iter
      yspan <- c(min(0.2, min(cor.trace[span,2]-0.05)),max (max(cor.trace[span,2])+0.05,0.4))
      yspan2 <- c(min(max(cor.trace[span2,2]) - 0.001, min(cor.trace[span2,2]-0.001)),max (max(cor.trace[span2,2])+0.001,0.4))
      plot(cor.trace[span,"iter"], cor.trace[span,"pureCor"], xlab="iteration", ylab="pureCorrelation", pch=".", col="blue", ylim = yspan)
      plot(cor.trace[span2,"iter"], cor.trace[span2,"pureCor"], xlab="iteration", ylab="pureCorrelation", pch=20, col="red")
    }
    #print (expt) #####
    if (showFlag) {print(c(iter, round(param,4), expt[1] * n, pureCor))} #lik: lik of previous iteration
    # Termination: iteration cap, or parameter change below tolerance.
    if (maxiter <= iter) {
      lik <- lik.BvZINB4(xvec, yvec, param = param)
      result <- c(param, lik, iter, pureCor)
      names(result) <- c("a0", "a1", "a2", "b1", "b2", paste0("p",1:4), "lik","iter", "pureCor")
      return(result)
    }
    if (max(abs(param - param.old)) <= tol) {
      lik <- lik.BvZINB4(xvec, yvec, param = param)
      result <- c(param, lik, iter, pureCor)
      names(result) <- c("a0", "a1", "a2", "b1", "b2", paste0("p",1:4), "lik","iter", "pureCor")
      return(result)
    }
  }
  #result <- data.frame(a0 = param[1], a1 = param[2], a2 = param[3], b1 = param[4], b2 = param[5], pi = param[6])
  #return(result)
}
# simple tests
# Interactive scratch pad: small fits, likelihood spot checks, and timing notes
# for ML.BvZINB4 and related fitters; never runs when the file is sourced.
if (FALSE) {
  ML.BvZINB4(c(10,1,1),c(10,1,2), showFlag=TRUE) # c(1.193014333 0.002745514 0.003336045 3.341621165 3.618839217 0.000000000 )
  # lik.BvZINB4(c(10,1,1),c(10,1,2),c(0.7186211, 0.4954254, 0.5652637, 2.9788157, 3.0834235, 1,0,0,0))
  # [1] -13.82585
  # lik.BvZINB4(c(10,1,1),c(10,1,2),c(1.193014333, 0.002745514, 0.003336045, 3.341621165, 3.618839217, 1,0,0,0))
  # [1] -12.90997
  ML.BvZINB4(c(0,1,1),c(0,1,5), showFlag=TRUE)
  tt(1)
  ML.BvZINB4(extractor(1), extractor(4), showFlag=TRUE)
  ML.BvZINB4(extractor(5), extractor(6), showFlag=TRUE)
  tt(2)
  ML.BvZINB4(extractor(1), extractor(3), showFlag=TRUE)
  # 0.000799916 0.015057420 0.006208375 67.414607790 9.180617081 0.361266622
  lik.BvZINB3(extractor(1), extractor(4),c(0.0004349035, 0.009488825, 0.003788559, 68.25597, 9.835188, .95)) # -391.5657
  lik.BvNB3(extractor(1), extractor(4),c(0.0004349035, 0.009488825, 0.003788559, 68.25597, 9.835188)) # -308.7
  #1,8 -1522.5424 -483.66650 2077.7510
  #7,17 BvNB2 2352 -> 860
  # 1 13 1008.3853 -> not much
  # 8 53 1720.4281 -> not much
  # 8 36 2733.8509 -> not much < 2670?
  # 6 38 -1632.6652 -544.85949 2175.6113
  # 4 44 3977.0302 -> 1200
  # 3 23 -1581.2832 -481.11177 2200.3428
  # 2 58 3660.0371 -> 1200
  # 5 36 1475.9486 -> not much
  # 5 38 2399.4086 -> not much
  # 9 28 3055.5890 -> 1060
  # 10 16 -808.4026 -226.31388 1164.1775 not much
  # 11 18 -1859.2748 -588.25860 2542.0324 not much
  # 17 18 -1729.3567 -546.02464 2366.6640 not much
  #3.404230e-05 9.740676e-03 5.435834e-03 7.059027e+01 6.627206e+00 9.500000e-01
  tt(2) # 31secs
  tt(1)
  ML.BvZINB4(extractor(1), extractor(3),showFlag=TRUE)
  ML.BvZINB3(extractor(1), extractor(3),initial=c(1.733055e-05, 0.009879464, 0.05864169, 69.22358, 134.6264,0),showFlag=TRUE)
  ML.BvNB3(extractor(1), extractor(3),showFlag=TRUE)
  lik.BvZINB3(extractor(1), extractor(3),c(1.733055e-05, 0.009879464, 0.05864169, 69.22358, 134.6264,0)) #1485.486
  tt(2) #8sec
  tt(1)
  ML.BvNB3(extractor(1), extractor(38), method="BFGS", showFlag=TRUE)
  ML.BvNB3(extractor(1), extractor(38), method="Nelder-Mead", showFlag=TRUE)
  tt(2) #31sec
  #lik.BvNB3(extractor(1), extractor(38), c(5.790158e-03, 4.300688e-03, 7.836757e-02, 7.586956e+01, 1.015767e+02))
  #lik.BvNB3(extractor(1), extractor(38), c())
}
# EM with booster
# maxiter control added, output =param + lik + #iter
# Mar 15, 2018: Print pureCor instead of cor
# EM maximum-likelihood fitter for the BvZINB4 model, with an optional
# "boosting" accelerator: every few iterations it extrapolates along the
# recent parameter trajectory (on log / logit scales), evaluates candidate
# jumps by exact likelihood, and takes the best one.
#
# Arguments (beyond those of ML.BvZINB4):
#   cor.conv  if TRUE, also stop when the pureCor statistic converges
#   boosting  enable the extrapolation accelerator
#   debug     verbose mode; recomputes the likelihood each iteration and warns
#             if it decreased
# Returns a named vector: a0..a2, b1, b2, p1..p4, lik, iter, pureCor.
# NOTE(review): writes `cor.trace`, `abc`, `abcd`, `abcd.old`, `tmp.bbbb` to
# the global environment via <<- — apparently debugging leftovers; confirm
# before removing.  Depends on bin.profile, stat.BvZINB4, lik.BvZINB4 and
# rootSolve::multiroot defined/attached elsewhere.
ML.BvZINB4.2 <- function (xvec, yvec, initial = NULL, tol=1e-8, maxiter=200, showFlag=FALSE, showPlot=FALSE, cor.conv = FALSE, boosting=TRUE, debug = FALSE) {
  if (debug) {showFlag=TRUE}
  # Collapse the data to unique (x, y) cells with frequencies.
  xy.reduced <- as.data.frame(table(xvec,yvec))
  names(xy.reduced) <- c("x", "y","freq")
  xy.reduced <- xy.reduced[xy.reduced$freq != 0,]
  xy.reduced$x <- as.numeric(as.character(xy.reduced$x))
  xy.reduced$y <- as.numeric(as.character(xy.reduced$y))
  xy.reduced$freq <- as.numeric(as.character(xy.reduced$freq))
  n <- sum(xy.reduced$freq)
  # Degenerate all-zero data: return a fixed boundary solution immediately.
  if (max(xvec)==0 & max(yvec)==0) {return(c(rep(1e-10,5),1,0,0,0, 0, 1, 0))} # 9 params, lik, iter, pureCor
  #print(xy.reduced)
  # initial guess
  # Moment-based starting values, guarded against zero means/variances.
  if (is.null(initial)) {
    xbar <- mean(xvec); ybar <- mean(yvec); xybar <- mean(c(xbar, ybar))
    s2.x <- var(xvec); s2.y <- var(yvec); if(is.na(s2.x)|is.na(s2.y)) {s2.x <- s2.y <- 1}
    cor.xy <- cor(xvec,yvec); if (is.na(cor.xy)) {cor.xy <- 0}
    zero <- sum(xvec == 0 & yvec == 0) / n
    initial <- rep(NA,9)
    initial[4] <- s2.x /ifelse(xbar==0,1e-4, xbar) #%>% print
    initial[5] <- s2.y /ifelse(ybar==0,1e-4, ybar) #%>% print
    initial[2:3] <- c(xbar,ybar)/pmax(initial[4:5], c(0.1,0.1)) #%>% print
    initial[1] <- min(initial[2:3]) * abs(cor.xy) #%>% print
    initial[2:3] <- initial[2:3] - initial[1] #%>% print
    initial[6:9] <- bin.profile(xvec, yvec) # freq of each zero-nonzero profile
    initial[6:9] <- initial[6:9]/sum(initial[6:9]) # relative freq
    initial <- pmax(initial, 1e-5)
    if(is.na(sum(initial))) { initial[is.na(initial)] <- 1}
    # print(initial) ###
  }
  # print(initial)
  # booster: given the last 5 parameter vectors (rows), extrapolate from the
  # first to the fifth by factors 3^(s-1) on transformed scales, score each
  # candidate by exact likelihood, and return candidates with likelihoods.
  booster <- function (param.matrix, xvec, yvec, n.cand = 10) {
    param.matrix[,6:9] <- qlogis(param.matrix[,6:9]) # logit transformation for probs
    param.matrix[,1:5] <- log(param.matrix[,1:5]) # log transformation for positives
    a <- param.matrix[1,]
    b <- param.matrix[5,]
    candidate <- matrix(b, byrow=TRUE, ncol=9, nrow = n.cand)
    index <- which((abs(b-a) > 1e-5) & is.finite(b) & is.finite(a)) # target param for grid search
    for (s in 1:n.cand) {
      candidate[s,index] <- b[index] + (b[index] - a[index]) * 3^(s-1)
    }
    candidate[,6:9] <- plogis(candidate[,6:9]) # back-transformation
    candidate[,6:9] <- candidate[,6:9]/ apply(candidate[,6:9],1,sum) # normalize
    candidate[,1:5] <- exp(candidate[,1:5]) # back-transformation for probs
    #print(candidate[,1:4]) #debug
    lik <- sapply(1:n.cand, function(s) {lik.BvZINB4(xvec, yvec, candidate[s,])})
    lik <- ifelse(is.infinite(lik), -Inf, lik) # sometimes likelihood is inf which is nonsense. force it to -Inf
    # Drop candidates at and beyond the first non-finite likelihood.
    if (sum(!is.finite(lik)) > 0) {
      return(cbind(candidate,lik)[1:max(min(which(!is.finite(lik)))-1,1),])
    } else {return(cbind(candidate,lik))}
  }
  cor.trace <<- data.frame(iter=1, pureCor=1)
  iter = 0
  param = initial
  lik = Inf
  pureCor = 0
  boost = 0
  index = 1 # previous boosting index
  if (showPlot) {
    par(mfrow=c(2,1))
    par(mar=c(2,4,1,4))
  }
  # cat(442)
  repeat {
    iter = iter + 1
    param.old <- param # saving old parameters
    abcd.old <<- param.old
    if (debug) {lik.old <- lik} #debugging
    pureCor.old <- pureCor
    # updating
    # cat(449)
    # E-step: per-cell expectations, then frequency-weighted averages.
    expt <- dBvZINB4.Expt.vec(xy.reduced$x, xy.reduced$y,
                              a0 = param[1], a1 = param[2], a2 = param[3], b1 = param[4], b2 = param[5], p1 = param[6], p2 = param[7], p3 = param[8], p4 = param[9])
    abc <<- expt
    expt <- as.vector(expt %*% xy.reduced$freq / n)
    # cat(453)
    # loglik = expt[1] * n
    delta <- expt[12] / (expt[2] + expt[4]) # delta = E(V) / (E(xi0 + xi2))
    param[6:9] = expt[8:11] # pi = E(Z)
    # cat(457)
    abcd <<- param
    # M-step score equations for (a0, a1, a2, b1) on the log scale.
    opt.vec <- function(par.ab) {
      par.ab <- exp(par.ab)
      r1 <- sum(expt[2:4]) - sum(par.ab[1:3]) * par.ab[4]
      r2 <- expt[5:7] - digamma(par.ab[1:3]) - log(par.ab[4])
      # print(c(r1,r2)) ###
      return(c(r1,r2))
    }
    param.l <- log(param)
    #expt %>% print
    #param.l %>% print
    result <- try(multiroot(opt.vec, start=param.l[1:4])$root, silent=TRUE)
    # Fallback: restart the root search from a neutral point if it failed.
    if (class(result)=="try-error") {
      initial = rep(1,4)
      result <- multiroot(opt.vec, start = initial[1:4], rtol=1e-20)$root
    }
    param[1:4] <- exp(result)
    param[5] <- param[4] * delta # b2
    pureCor <- stat.BvZINB4(param = param, measure = "pureCor")
    if (debug) {
      lik <- lik.BvZINB4(xvec, yvec, param = param) #debugging
      if (lik < lik.old) warnings("likelihood decreased!")
    }
    cor.trace[iter,] <<- c(iter,pureCor)
    if (showPlot & (iter %% 20 == 0)) {
      span <- min(max(iter-200+1,1),101):iter
      span2 <- max(iter-100+1,1):iter
      yspan <- c(min(0.2, min(cor.trace[span,2]-0.05)),max (max(cor.trace[span,2])+0.05,0.4))
      yspan2 <- c(min(max(cor.trace[span2,2]) - 0.001, min(cor.trace[span2,2]-0.001)),max (max(cor.trace[span2,2])+0.001,0.4))
      plot(cor.trace[span,"iter"], cor.trace[span,"pureCor"], xlab="iteration", ylab="pureCorrelation", pch=".", col="blue", ylim = yspan)
      plot(cor.trace[span2,"iter"], cor.trace[span2,"pureCor"], xlab="iteration", ylab="pureCorrelation", pch=20, col="red")
    }
    # boosting
    # Collect the parameters of iterations (6..10) + 5*boost, then attempt an
    # extrapolated jump at the end of each window.
    if (boosting) {
      if (iter == 6 + boost*5) { # Creating an empty matrix
        param.boost <- matrix(NA, nrow = 5, ncol = 9)
      }
      if (iter >= 6 + boost*5 & iter <= 10 + boost*5 ) { # Storing last ten params
        param.boost[iter - (5 + boost*5),] <- param
      }
      if (iter == 10 + boost*5) {
        param.boost <- booster(param.boost, xvec, yvec, n.cand = min(max(5, index * 2),20))
        tmp.bbbb <<-param.boost
        # print(dim(param.boost)); print(length(param.boost))
        if (showFlag) {print(param.boost)}
        if (is.null (dim(param.boost))) {
          param <- param.boost[1:9]
        } else {
          index <- which.max(param.boost[,10])
          param <- param.boost[index,1:9]
          if (showFlag) {print(paste0("Jump to the ",index, "th parameter"))}
        }
        boost <- boost + 1
      }
    }
    #print (expt) #####
    if (showFlag) {cat("iter ", iter, "parm:", round(param,4), if (debug) {c("D.lik=", round(lik - lik.old, 2))},
                       "lik=", expt[1] * n, "p.Cor=", pureCor, "\n")} #lik: lik of previous iteration
    # Termination: iteration cap, parameter convergence, or (optionally)
    # convergence of the pureCor statistic.
    if (maxiter <= iter) {
      lik <- lik.BvZINB4(xvec, yvec, param = param)
      result <- c(param, lik, iter, pureCor)
      names(result) <- c("a0", "a1", "a2", "b1", "b2", paste0("p",1:4), "lik","iter", "pureCor")
      return(result)
    }
    if (max(abs(param - param.old)) <= tol) {
      lik <- lik.BvZINB4(xvec, yvec, param = param)
      result <- c(param, lik, iter, pureCor)
      names(result) <- c("a0", "a1", "a2", "b1", "b2", paste0("p",1:4), "lik","iter", "pureCor")
      return(result)
    }
    if (cor.conv & abs(pureCor - pureCor.old) <= tol) { # if pureCor is converged, then done!
      lik <- lik.BvZINB4(xvec, yvec, param = param)
      result <- c(param, lik, iter, pureCor)
      names(result) <- c("a0", "a1", "a2", "b1", "b2", paste0("p",1:4), "lik","iter", "pureCor")
      return(result)
    }
  }
  #result <- data.frame(a0 = param[1], a1 = param[2], a2 = param[3], b1 = param[4], b2 = param[5], pi = param[6])
  #return(result)
}
# Convenience wrapper around ML.BvZINB4.2 that seeds the EM with the BvNB3
# (no zero-inflation) maximum-likelihood estimates plus a near-degenerate
# mixing vector (p1 = .94, p2 = p3 = p4 = .02).  Extra arguments in `...` are
# forwarded to ML.BvZINB4.2.
#
# On failure, returns a placeholder with the SAME layout as a successful fit
# (a0..a2, b1, b2, p1..p4, lik, iter, pureCor): all NA except iter = 0.
# (The original fallback returned only 11 unnamed values, silently dropping
# "pureCor", which broke callers that rbind results together.)
ML.BvZINB4.2b <- function(xvec, yvec, ...) {
  result <- try(ML.BvZINB4.2(xvec, yvec, initial = c(as.numeric(ML.BvNB3(xvec, yvec)), .94, .02, .02, .02), ...))
  # inherits() is the robust way to test for a try() failure.
  if (inherits(result, "try-error")) {
    result <- c(rep(NA_real_, 9), NA_real_, 0, NA_real_)
    names(result) <- c("a0", "a1", "a2", "b1", "b2", paste0("p", 1:4), "lik", "iter", "pureCor")
  }
  return(result)
}
# Ad-hoc interactive checks for ML.BvZINB4.2 / ML.BvZINB4.2b; never run when
# the file is sourced.
if (FALSE) {
  ML.BvZINB4.2(extractor(11), extractor(16),showFlag=TRUE, boosting=FALSE, debug=TRUE) #>-804.44 >10mins
  ML.BvZINB4.2(extractor(1), extractor(5),showFlag=TRUE, boosting=FALSE, debug=TRUE) #>-804.44 >10mins
  ML.BvZINB4.2(extractor(1), extractor(5),showFlag=TRUE)
  ML.BvZINB4.2b(extractor(11), extractor(16),showFlag=TRUE)
}
# Interactive batch-fitting script: fills the MLE.BvZINB4 result table pair by
# pair, checkpointing it to disk whenever a new first-index gene is reached;
# never runs when the file is sourced.
if (FALSE) {
  # making an empty shell
  MLE.BvZINB4 <- as.data.frame(matrix(NA,1,11))
  names(MLE.BvZINB4) <- c("a0", "a1", "a2", "b1", "b2", paste0("p",1:4), "lik","iter")
  MLE.BvZINB4 <- cbind(MLE.Geneset1$BP[,1:3], MLE.BvZINB4)
  # fill in MLE's
  tt(1)
  for (i in 1:dim(MLE.BvZINB4)[1]) {
    if (is.na(MLE.BvZINB4[i,14])) {
      if (a1 < as.numeric(MLE.BvZINB4[i,1])) {
        saveRDS(MLE.BvZINB4, "result-BvZINB4.rds")
      }
      a1 <- as.numeric(MLE.BvZINB4[i,1]); a2 <- as.numeric(MLE.BvZINB4[i,2])
      MLE.BvZINB4[i,4:14] <- ML.BvZINB4.2b(extractor(a1), extractor(a2),maxiter=200)
      print(MLE.BvZINB4[i,])
    }
  }
  saveRDS(MLE.BvZINB4, "result-BvZINB4.rds")
  tt(2)
}
#3. Simulation
# Simulate n draws from the BvZINB4 model.
#
# Latent structure: a multinomial indicator over the four zero-inflation
# components (probabilities p1..p4), three gamma variables with shapes
# a0/a1/a2 and rate 1/b1, and conditional Poisson counts with means
# (g0 + g1) for x and (g0 + g2) * b2 / b1 for y.  Components 3 and 4 zero
# out x; components 2 and 4 zero out y.
#
# Parameters may be given individually or packed into `param` as
# (a0, a1, a2, b1, b2, p1, p2, p3, p4); `param` takes precedence.
# Returns a data.frame with integer-valued columns x and y.
rBZINB4 <- function(n, a0, a1, a2, b1, b2, p1, p2, p3, p4, param = NULL) {
  # Unpack the packed parameter vector, if one was supplied.
  if (is.null(param)) {
    pp <- c(p1, p2, p3, p4)
  } else {
    a0 <- param[1]
    a1 <- param[2]
    a2 <- param[3]
    b1 <- param[4]
    b2 <- param[5]
    pp <- param[6:9]
  }
  # One-hot component indicator per draw (4 x n matrix).
  comp <- rmultinom(n, 1, pp)
  keep.x <- comp[1, ] + comp[2, ]  # x is nonzero only in components 1 and 2
  keep.y <- comp[1, ] + comp[3, ]  # y is nonzero only in components 1 and 3
  # Latent gamma intensities (shape, rate = 1/b1, i.e. scale b1).
  g0 <- rgamma(n, a0, 1/b1)
  g1 <- rgamma(n, a1, 1/b1)
  g2 <- rgamma(n, a2, 1/b1)
  # Conditional Poisson counts, then apply the structural zeros.
  xs <- rpois(n, g0 + g1) * keep.x
  ys <- rpois(n, (g0 + g2) * b2/b1) * keep.y
  data.frame(x = xs, y = ys)
}
# rBZINB4.vec <- Vectorize(rBZINB4)
# Interactive simulation study: simulate from fitted parameters, refit, and
# compare with the real-data fit (timings noted inline); never runs when the
# file is sourced.
if (FALSE) {
  # param for pair 1 and 2
  param <- c(4.375187e-04, 1.012747e-02, 1.821521e-03, 6.016255e+01, 3.122548e+01, 9.486775e-01, 1.893068e-02, 1.847954e-02, 1.391224e-02)
  set.seed(1)
  tmp <- rBZINB4 (800, param=param)
  table(tmp$x, tmp$y)
  table(extractor(1),extractor(2))
  tt(1)
  ML.BvZINB4.2b(tmp$x, tmp$y, maxiter=500, showFlag=TRUE)
  tt(2)
  # 500 iterations 1.09 mins
  # 5.159358e-04 1.092834e-02 3.847828e-03 5.408111e+01 9.785946e+00 9.539740e-01 1.986086e-02
  # 1.855661e-02 7.608509e-03 -3.202578e+02 5.000000e+02
  # real data: 500 iterations 1.16 mins
  tt(1)
  ML.BvZINB4.2b(extractor(1), extractor(2), maxiter=500, showFlag=TRUE)
  tt(2)
  param <- c(1,1,1,1,1, .95, .02, .02, .01)
  set.seed(2)
  tmp <- rBZINB4 (800, param=param)
  table(tmp)
  tt(1)
  ML.BvZINB4.2b(tmp$x, tmp$y, maxiter=500, showFlag=TRUE)
  tt(2) # 1.86 mins
}
|
6e8e37adcad0a2cc0953b689a4a3f7b67ef2405e
|
0df4238793e23d933f27172f6381789d00a81bce
|
/01/01.R
|
3f4079e34571f60e537a8922cbdce4965061c384
|
[] |
no_license
|
honzikom/AoC2020
|
1ae94c501b9dc6cbc00f10f79711eb32accca782
|
61864b794ba7f3630cbe8b6b5af31c0d55b84e0f
|
refs/heads/main
| 2023-02-03T06:38:27.654276
| 2020-12-22T14:42:33
| 2020-12-22T14:42:33
| 317,928,055
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 504
|
r
|
01.R
|
# Advent of Code 2020, day 1: print products of report entries summing to 2020.
setwd("~/AoC/2020/01")

# Expense report: one number per line, read as a plain numeric vector.
entries <- as.numeric(unlist(c(read.table("./input01.txt"))))
n <- length(entries)

# Star 1: product of every pair of entries summing to 2020.
# combn enumerates index pairs in the same lexicographic order as the
# original nested loops, so output order is unchanged.
invisible(apply(combn(n, 2), 2, function(ix) {
  if (sum(entries[ix]) == 2020) {
    print(prod(entries[ix]))
  }
}))

# Star 2: same, for every triple of entries.
invisible(apply(combn(n, 3), 2, function(ix) {
  if (sum(entries[ix]) == 2020) {
    print(prod(entries[ix]))
  }
}))
|
7b65da10a8e2451065de941cea2e9dcfeb034941
|
688185e8e8df9b6e3c4a31fc2d43064f460665f1
|
/man/demo.vowels.f0.Rd
|
c3486c11170b3852bd22d304d7527a6f2e69027c
|
[] |
no_license
|
IPS-LMU/emuR
|
4b084971c56e4fed9032e40999eeeacfeb4896e8
|
eb703f23c8295c76952aa786d149c67a7b2df9b2
|
refs/heads/master
| 2023-06-09T03:51:37.328416
| 2023-05-26T11:17:13
| 2023-05-26T11:17:13
| 21,941,175
| 17
| 22
| null | 2023-05-29T12:35:55
| 2014-07-17T12:32:58
|
R
|
UTF-8
|
R
| false
| true
| 806
|
rd
|
demo.vowels.f0.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/emuR-dataDocs.R
\name{demo.vowels.f0}
\alias{demo.vowels.f0}
\title{F0 track data for segment list demo.vowels}
\format{
An object with $index, $ftime and $data
index: a two-columned matrix with the range of the $data rows that belong
to the segment; ftime: a two-columned matrix with the time marks of the
segment; data: a one-columned matrix with the F0 values
}
\description{
A track list of the demo database that is part of the Emu system. It is the
result of get F0 data for the segment list demo.vowels (see
data(demo.vowels)).
}
\details{
A track list is created via the \code{\link{get_trackdata}} function.
}
\seealso{
\code{\link{demo.all.rms}} \code{\link{segmentlist}}
\code{\link{trackdata}}
}
\keyword{datasets}
|
2ec69012cf9efafc46bdeba269e3822c481e48ad
|
77f4e1f47de39a5f6a324d13821c53c78063fc3f
|
/man/param.Rd
|
844b6cf89101bf36c1b2a72fa2b45f7f78156dec
|
[] |
no_license
|
bashlee/rcloud.params
|
38d60ba34b94f9e15507a80dbaf5d49c554f7594
|
be017d1e6ace14af5dbfc06d7dab73588702a303
|
refs/heads/master
| 2021-01-24T12:52:17.698233
| 2018-11-07T15:26:02
| 2018-11-07T15:26:02
| 123,157,268
| 0
| 1
| null | 2018-02-27T16:33:20
| 2018-02-27T16:33:20
| null |
UTF-8
|
R
| false
| true
| 474
|
rd
|
param.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rcloud.params.R
\name{param}
\alias{param}
\title{Pass parameters to javascript}
\usage{
param(inputTag, name, varClass, inputVal = NA, label = "")
}
\arguments{
\item{inputTag}{HTML string to create widget}
\item{name}{variable name}
\item{varClass}{class of variable}
}
\description{
Takes an HTML string and passes it to JavaScript. Variables can be updated through the widget and then passed back to R.
}
|
2ae72f5e97dc571edb3ddbac8f74191038f74ba9
|
11a34b0073a682ffe3de8fc2e80e32d4a69d93a5
|
/F0_otherRaw/snp_freebays_filter.R
|
4eef88e3200ead65f72890308f625027a7c70be6
|
[] |
no_license
|
xinwenzhg/yeastAse_stats
|
60ec644f5604da44f36613be9298acc90582b2a6
|
cc25a702c713c8038f6ded46032ae8ba35ee6e8d
|
refs/heads/master
| 2022-04-16T16:14:31.110385
| 2020-03-17T22:52:29
| 2020-03-17T22:52:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,554
|
r
|
snp_freebays_filter.R
|
# Filter freebayes SNP positions using mummer position maps: keep only
# positions that map consistently in both directions (YPS128 -> RM11 and
# RM11 -> YPS128), assign them to distance-based blocks, and write the
# filtered SNP lists.
#
# NOTE(review): mydisc() and tf2groupName() are project helpers defined
# elsewhere -- mydisc() appears to return a distance between two
# chromosome/position pairs; confirm against their definitions.
library(dplyr)  # %>% and arrange() were previously used without any attach

setwd('~/cloud/project/otherRaw')  # NOTE(review): hard-coded path

# Remove mummer-unmatched SNPs: load both directional position maps.
yr_pos <- read.table(header=TRUE, file='yr5B.posmap')
names(yr_pos)[1:6] <- c('ypsChrom','ypsPosit','ypsN','rmChrom','rmPosit','rmN')
ry_pos <- read.table(header=TRUE, file='ryB5.posmap')
names(ry_pos)[1:6] <- c('rmChrom','rmPosit','rmN','ypsChrom','ypsPosit','ypsN')

# Put the reverse map in the same column order as the forward map, merge on
# both coordinate pairs, and keep rows whose direction flags agree.
ry_pos_exchg <- ry_pos[, c(4,5,6,1,2,3,7)]
res <- merge.data.frame(yr_pos, ry_pos_exchg,
                        by.x = c(1,2,4,5), by.y = c(1,2,4,5),
                        all = TRUE, sort = FALSE)
res_good <- res[which(res$drct.x == res$drct.y), ] # 86351 rows at last run

yps_block <- res_good[, 1:4] %>% arrange(ypsChrom, ypsPosit)

# Mark the first SNP of each block: a new block starts wherever consecutive
# sorted positions are more than 700 kb apart (per mydisc()).
yps_block_tf <- vector(length = nrow(yps_block))  # logical, all FALSE
yps_block_tf[1] <- TRUE
for (i in seq_len(nrow(yps_block))[-1]) {  # safe for < 2 rows, unlike 2:n
  yps_block_tf[i] <-
    mydisc(yps_block[i-1, "ypsChrom"], yps_block[i-1, "ypsPosit"],
           yps_block[i, "ypsChrom"], yps_block[i, "ypsPosit"]) > 700000
}
yps_rm_86351_group <- cbind(yps_block, yps_block_tf, gN = tf2groupName(yps_block_tf))

# Write the unfiltered SNP lists, with and without the block-group column.
write.table(unique(yps_rm_86351_group[,c(1,2)]),file="~/cloud/project/snpfilter/1_5Bsnp/yps128_5_snpls_nofilter",row.names = FALSE,quote=FALSE,col.names = FALSE)
write.table(unique(yps_rm_86351_group[,c(3,4)]),file="~/cloud/project/snpfilter/1_5Bsnp/rm11_B_snpls_nofilter",row.names = FALSE,quote=FALSE,col.names = FALSE)
write.table(unique(yps_rm_86351_group[,c(1,2,6)]),file="~/cloud/project/snpfilter/1_5Bsnp/yps128_5_snpls_nofilter_group",row.names = FALSE,quote=FALSE,col.names = FALSE)
write.table(unique(yps_rm_86351_group[,c(3,4,6)]),file="~/cloud/project/snpfilter/1_5Bsnp/rm11_B_snpls_nofilter_group",row.names = FALSE,quote=FALSE,col.names = FALSE)
|
ddd4bfbc4016440a8b265ab0e45d6b8e4a80eabf
|
fce53c0e4a1d45c9c4d3470684f6b0fde6c939eb
|
/Chapter_4/Gapminder_enhanced/ui.R
|
e57152085d6b76ac1013f2d447f24155c4e867ae
|
[] |
no_license
|
himynameismarcel/Web-Application-Development-with-R-Using-Shiny
|
579ff710e977519cf1b33256f8165f586aa68d35
|
7dff0ddcb992725c892c65c44edafd94c4458faa
|
refs/heads/master
| 2020-06-02T03:52:12.919588
| 2019-06-16T12:03:07
| 2019-06-16T12:03:07
| 191,026,674
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,494
|
r
|
ui.R
|
## Marcel Kropp
## 08.06.2019
## Shiny Application, Gapminder
## Following the book: Web Application with Shiny R (Breeley, 2018)

## UI for the Gapminder app: a sidebar with a year-range slider, a
## conditionally shown trend-line toggle, a server-rendered year selector,
## and a modal launcher; the main panel has four tabs
## (Summary / Trend / Map / Table).

library(leaflet)
library(DT)

fluidPage(
  titlePanel("Gapminder"),
  sidebarLayout(
    sidebarPanel(
      # Inclusive range of years to analyze; 5-year steps match the
      # gapminder survey interval. sep = "" suppresses thousands separators.
      sliderInput(inputId = "year",
                  label = "Years included",
                  min = 1952,
                  max = 2007,
                  value = c(1952, 2007),
                  sep = "",
                  step = 5
      ),
      # checkboxInput("linear", label = "Add trend line?", value = FALSE),
      # Show the trend-line checkbox only while the Trend tab is active;
      # the condition string is evaluated client-side in JavaScript against
      # input.theTabs (the tabsetPanel id below).
      conditionalPanel(
        condition = "input.theTabs == 'trend'",
        checkboxInput("linear", label = "Add trend line?",
                      value = FALSE)
      ),
      # Placeholder filled by the server (renderUI) -- presumably a year
      # selector; confirm against server.R.
      uiOutput("yearSelectorUI"),
      # Modal (elements from Bootstrap, pop-up messages)
      actionButton("showModal", "Launch loyalty test")
    ),
    mainPanel(
      # Each tabPanel's `value` gives the tab a stable id so the
      # conditionalPanel() above and the server can react to input.theTabs.
      tabsetPanel(id = "theTabs",
                  tabPanel("Summary", textOutput("summary"),
                           value = "summary"),
                  tabPanel("Trend", plotOutput("trend"),
                           value = "trend"),
                  tabPanel("Map", leafletOutput("map"),
                           p("Map data is from the most recent year in the selected range;
              radius of circles is scaled to life expectancy"),
                           value = "map"),
                  tabPanel("Table", dataTableOutput("countryTable"),
                           value = "table")
      )
    )
  )
)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.