blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cd3c38a11b0803f6f2a3fe18b205faf59d449530
|
9ecec80b4649a01517c11061b1fdb2a48dbc20a2
|
/Data_Masterfile.R
|
9955a75ec7506b824a515e24e923b1714de45676
|
[] |
no_license
|
evanczopek/Madness-Analytics
|
b7262f71b509b6f531d3683869a64f8477aa5c0e
|
f81e5fc2fec7c59b20171a597165ba30e0790974
|
refs/heads/master
| 2021-01-23T04:45:08.665857
| 2017-03-15T23:08:40
| 2017-03-15T23:08:40
| 80,380,332
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,431
|
r
|
Data_Masterfile.R
|
# Evan Czopek - 1/29/2017 6:00pm
# March Madness bracket simulator: scrapes 2017 NCAA season stats from
# Sports Reference, scores tournament teams on weighted categories, and
# simulates the bracket round by round.
# Input for desired analyzed categories and weights.
# cat_choose entries must match the renamed stat columns assigned below;
# cat_weight[i] multiplies the score of cat_choose[i].
cat_choose = c('wlp', 'sos', 'points_against', 'trb', 'tov')
cat_weight = c(3, 5, 2, 1, 1)
# Query of Sports Reference to get relevant table on all stats for NCAA teams.
library(XML)
url_one <- "http://www.sports-reference.com/cbb/seasons/2017-school-stats.html"
# Column classes for readHTMLTable; order must match the scraped table's
# column layout exactly.
cc = c(rep('character',2), rep('integer',3), rep('numeric',3), rep('integer',8), 'character', rep('integer',3),
'numeric', rep('integer',2), 'numeric', rep('integer',2), 'numeric', rep('integer',7))
all_data_df <- readHTMLTable(url_one, which = 1, colClasses = cc )
# Clears all rows that hold column headers (the site repeats the header row
# periodically inside the table body).
del_rows = c(which(all_data_df$Rk == ''),which(all_data_df$Rk == 'Rk'))
all_data_df = all_data_df[-del_rows,]
# Clear redundant first column 'Rk'
all_data_df$Rk = NULL
# Drop the blank spacer column whose scraped name is a mis-encoded
# non-breaking space.
all_data_df$`Â ` = NULL
# Renaming columns to more clear entries for manipulation. See README for definitions.
colnames(all_data_df) = c('school', 'gp', 'wins', 'loss', 'wlp', 'srs', 'sos', 'conf_wins',
'conf_loss', 'home_wins', 'home_loss', 'away_wins', 'away_loss',
'points_for', 'points_against', 'min_played', 'fg', 'fga', 'fgp',
'thrp', 'thrpa', 'thrpp', 'ft', 'fta', 'ftp', 'orb', 'trb', 'ast', 'stl',
'blk', 'tov', 'pf')
# Adding new columns. Two pointers made, attempted and percentage.
library(dplyr)
all_data_df = mutate(all_data_df, twop = fg - thrp, twopa = fga - thrpa, twopp = twop / twopa)
# Manual data patch: row 109, column 13 — presumably corrects a bad scraped
# value for that team. TODO confirm against the source table.
all_data_df[109,13] = 1
# Generate the names of the teams in each bracket. In order 1-16 seed.
midwest = c('Kansas', 'Louisville', 'Oregon', 'Purdue', 'Iowa State', "Creighton",
            'Michigan', 'Miami (FL)', 'Michigan State', 'Oklahoma State',
            'Rhode Island', 'Nevada', 'Vermont', 'Iona', 'Jacksonville State',
            'UC-Davis')
east = c('Villanova', 'Duke', 'Baylor', 'Florida', 'Virginia', 'Southern Methodist',
         'South Carolina', 'Wisconsin', 'Virginia Tech', 'Marquette', 'Southern California',
         'North Carolina-Wilmington', 'East Tennessee State', 'New Mexico State', 'Troy', "Mount St. Mary's")
west = c('Gonzaga', 'Arizona', 'Florida State', 'West Virginia', 'Notre Dame', 'Maryland', "Saint Mary's (CA)",
         'Northwestern', 'Vanderbilt', 'Virginia Commonwealth', 'Xavier', 'Princeton',
         'Bucknell', 'Florida Gulf Coast', 'North Dakota', 'South Dakota State')
south = c('North Carolina', 'Kentucky', 'UCLA', 'Butler', 'Minnesota', 'Cincinnati',
          'Dayton', 'Arkansas', 'Seton Hall', 'Wichita State', 'Kansas State', 'Middle Tennessee',
          'Winthrop', 'Kent State', 'Northern Kentucky', 'Texas Southern')
# Combine bracket teams in order, 1-4 seeds as 1 seeds.
teams = c(east, midwest, west, south)
# Locate each tournament team's row in the full stats table.  The table marks
# tournament teams with a trailing ' *' suffix on the school name (paste()
# inserts the separating space).  match() is vectorized and returns NA for a
# team whose name is not found — unlike the old element-wise which() loop,
# which aborted with "replacement has length zero" on the first mismatch.
save_row = match(paste(teams, '*'), all_data_df$school)
if (anyNA(save_row))
  warning("Team names not found in stats table: ",
          paste(teams[is.na(save_row)], collapse = ", "))
# Keep data from only desired teams/rows.
t_data = all_data_df[save_row,]
# Generate scoring criteria for categories where larger numbers are better. All
# values are generated as ratios compared to the best scoring team in that
# category i.e. the team with most wins will score a 1 in the wins category, a
# team with half as many wins will score a .5.
# Note: the matrix becomes character as soon as the school names are written
# into column 1; the numeric columns are converted back below.
p_data = matrix(NA, nrow = 64, ncol = length(t_data))
p_data[, 1] = as.character(t_data$school)
# Column indices (in t_data) where a larger value is better.
big_good = c(2, 3, 5, 6, 7, 8, 10, 12, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
             33, 34, 35)
for (j in big_good) {
  # Vectorized ratio against the category leader (replaces the old
  # row-by-row loop, which also shadowed the builtin max()).
  p_data[, j] = t_data[, j] / max(t_data[, j])
}
# Does the same as above for remaining columns where smaller numbers are better.
small_good = c(4, 9, 11, 13, 15, 31, 32)
for (j in small_good) {
  # The +1 in the denominator guards against division by zero when a team's
  # value is 0 (matches the original formula).
  p_data[, j] = min(t_data[, j]) / (t_data[, j] + 1)
}
# Converts points data to data frame. The matrix is character (column 1 holds
# school names), so convert the stat columns back to numeric.
p_data = as.data.frame(p_data, stringsAsFactors = FALSE)
for (j in 2:length(p_data)) {
  p_data[, j] = as.numeric(p_data[, j])
}
# Gives appropriate column names to data frame.
colnames(p_data) = c('school', 'gp', 'wins', 'loss', 'wlp', 'srs', 'sos', 'conf_wins',
                     'conf_loss', 'home_wins', 'home_loss', 'away_wins', 'away_loss',
                     'points_for', 'points_against', 'min_played', 'fg', 'fga', 'fgp',
                     'thrp', 'thrpa', 'thrpp', 'ft', 'fta', 'ftp', 'orb', 'trb', 'ast', 'stl',
                     'blk', 'tov', 'pf', 'twop', 'twopa', 'twopp')
# Find the index of the categories to be saved in category choices.  Reuse
# the names just assigned instead of maintaining a duplicate hand-typed
# vector that could drift out of sync.
cnames = colnames(p_data)
save_cols = c(which(cnames == 'school'), match(cat_choose, cnames))
# Creates final data frame with only the chosen columns.  Subsetting the
# data frame directly avoids the old NA-matrix copy loop and the lossy
# numeric -> character -> numeric round-trip it caused.
fin_data = p_data[, save_cols]
# Apply the desired weights as specified in the beginning (column 1 is the
# school name; weight i applies to chosen category i).
for (x in 2:length(fin_data)) {
  fin_data[, x] = as.numeric(fin_data[, x]) * cat_weight[x - 1]
}
# Sums desired columns for total score and gives names to the data frame
# columns.
fin_data = transform(fin_data, totalscore = rowSums(fin_data[, 2:length(fin_data)]))
colnames(fin_data) = c('school', cat_choose, 'totalscore')
# Bracket matrix: column 1 holds all 64 teams, columns 2-7 hold the winners
# of each successive round through the champion.
bracket = matrix(NA, nrow = 64, ncol = 7)
# Simulates a single game between two named teams.  The team with the higher
# value in the LAST column of `scores` (the total score) wins; ties go to
# team2, matching the original behaviour.
# `scores` defaults to the global fin_data (lazy evaluation keeps existing
# two-argument calls unchanged) but a different score table may be supplied.
game_sim = function(team1, team2, scores = fin_data) {
  last_col = length(scores)
  score1 = scores[which(scores$school == team1), last_col]
  score2 = scores[which(scores$school == team2), last_col]
  if (score1 > score2) {
    winner = team1
  }
  else {
    winner = team2
  }
  return(winner)
}
# Column 1 of the bracket holds all 64 teams in seed order; columns 2-7 hold
# the winners of each successive round through the champion.
bracket[,1] = fin_data[,1]
# Round 1 simulation.
# z offsets select each 16-team region; within a region, seed x plays
# seed 17 - x and the winner is written into slot x of the next round.
nums = c(0, 16, 32, 48)
for (y in 1:4) {
z = nums[y]
for (x in 1:8) {
bracket[x + (z / 2), 2] = game_sim(bracket[x + z,1], bracket[(17 + z) - x, 1])
}
}
# Round 2 simulation.
# Same pattern over the 8 surviving teams per region: x plays 9 - x.
nums = c(0, 8, 16, 24)
for (y in 1:4) {
z = nums[y]
for (x in 1:4) {
bracket[x + (z / 2), 3] = game_sim(bracket[x + z, 2], bracket[(9 + z) - x, 2])
}
}
# Sweet 16: 4 survivors per region, x plays 5 - x.
nums = c(0, 4, 8, 12)
for (y in 1:4) {
z = nums[y]
for (x in 1:2) {
bracket[x + (z / 2), 4] = game_sim(bracket[x + z, 3], bracket[(5 + z) - x, 3])
}
}
# Elite 8: one game per region decides the regional champion.
bracket[1, 5] = game_sim(bracket[1, 4], bracket[2, 4])
bracket[2, 5] = game_sim(bracket[3, 4], bracket[4, 4])
bracket[3, 5] = game_sim(bracket[5, 4], bracket[6, 4])
bracket[4, 5] = game_sim(bracket[7, 4], bracket[8, 4])
# Final 4
bracket[1, 6] = game_sim(bracket[1, 5], bracket[3, 5])
bracket[2, 6] = game_sim(bracket[2, 5], bracket[4, 5])
# Championship
bracket[1, 7] = game_sim(bracket[1, 6], bracket[2, 6])
|
22cce926998410b35f71b06069e2f3a35f765766
|
ac771259d6e3469b75e0fdac251839ab1d070767
|
/R/VTLSession.R
|
4a156ab7e7f5d1802b921362da062e092d23a7a3
|
[] |
no_license
|
amattioc/RVTL
|
7a4e0259e21d52e8df1efe9a663ca20a7d130b15
|
630a41f27d0f5530d7c3df7266ecfaf25fe4803a
|
refs/heads/main
| 2023-04-27T17:52:39.093386
| 2021-05-14T09:22:24
| 2021-05-14T09:22:24
| 304,639,834
| 0
| 1
| null | 2020-10-19T19:19:41
| 2020-10-16T13:46:02
|
JavaScript
|
UTF-8
|
R
| false
| false
| 9,789
|
r
|
VTLSession.R
|
#
# Copyright © 2020 Banca D'Italia
#
# Licensed under the EUPL, Version 1.2 (the "License");
# You may not use this work except in compliance with the
# License.
# You may obtain a copy of the License at:
#
# https://joinup.ec.europa.eu/sites/default/files/custom-page/attachment/2020-03/EUPL-1.2%20EN.txt
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the License is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
#
# See the License for the specific language governing
# permissions and limitations under the License.
#
#' Manage VTL sessions
#'
#' @description
#' VTL Session
#'
#' @details
#' R6 Class for interacting with a VTL session instance.  The underlying Java
#' session object is created lazily by the private checkInstance() helper on
#' first use.
#'
#' @export
VTLSession <- R6Class("VTLSession",
  public = list(
    #' @field name The name of this VTL session.
    name = character(0),
    #' @field text The temporary code buffer of this VTL session used by the editor.
    text = "",
    #' @description
    #' Creates a new VTL session with a given name.
    #' @details
    #' This method should not be called by the application.
    #' @param name
    #' The name to identify this session
    initialize = function(name = character(0)) {
      if (!is.character(name) || length(name) != 1 || nchar(name) == 0)
        stop("name must be a non-empty character vector with 1 element")
      self$name <- name
    },
    #' @description
    #' Terminates this VTL session.
    #' @details
    #' This method should not be called by the application.
    finalize = function() {
      # Fix: the original assigned to a LOCAL variable (`finalized <- T`),
      # which never set the private flag that checkInstance() consults, so a
      # finalized session could silently be re-created.
      private$finalized <- TRUE
      private$instance <- NULL
      return(invisible())
    },
    #' @description
    #' Check if this session was compiled.
    #' @details
    #' Returns \code{TRUE} if this VTL session has already been compiled.
    isCompiled = function() { !is.null(private$instance) },
    #' @description
    #' Overrides the default print behaviour.
    print = function() { print(self$name); return(invisible(self)) },
    #' @description
    #' Changes the editor text in the session buffer.
    #' @param code
    #' The editor code to associate this session
    setText = function(code) { self$text <- code; return(invisible(self)) },
    #' @description
    #' Replace or add new VTL statements to this session and updates the session code buffer.
    #' @param statements
    #' The code to add to this session
    #' @param restart
    #' TRUE if old code must be discarded before adding the new
    addStatements = function(statements, restart = TRUE) {
      if (restart) {
        private$instance <- NULL
        self$text <- ''
      }
      self$text <- paste0(self$text, statements)
      private$checkInstance()$addStatements(statements)
      return(self)
    },
    #' @description
    #' Compiles the VTL statements submitted for this session.
    compile = function() { private$checkInstance()$compile() },
    #' @description
    #' Obtains a named list of all the VTL statements submitted for this session.
    getStatements = function() { private$checkInstance()$getStatements() },
    #' @description
    #' Obtains a named list of all rules and values submitted for this session.
    getNodes = function() {
      if (is.null(private$instance))
        return(list())
      return(jdx::convertToR(private$checkInstance()$getNodes()))
    },
    #' @description
    #' Returns a list of data frames containing the values of the named nodes defined in this session.
    #' @param nodes
    #' a list of names of nodes to compute from this session
    getValues = function(nodes) {
      jnodes <- sapply(X = nodes, private$checkInstance()$resolve)
      # Fix: the original passed an undefined `jstructs` argument through
      # lapply, which raised "object 'jstructs' not found" at runtime.
      nodesdf <- lapply(names(jnodes), FUN = function(nodeName, jnodes) {
        jnode <- jnodes[[nodeName]]
        if (jnode %instanceof% "it.bancaditalia.oss.vtl.model.data.ScalarValue") {
          df <- as.data.frame(list(Scalar = jnode$get()))
        }
        else if (jnode %instanceof% "it.bancaditalia.oss.vtl.model.data.DataSet") {
          pager <- .jnew("it.bancaditalia.oss.vtl.util.Paginator",
                         .jcast(jnode, "it.bancaditalia.oss.vtl.model.data.DataSet"))
          df <- convertToDF(tryCatch({ pager$more(-1L) }, finally = { pager$close() }))
          role <- J("it.bancaditalia.oss.vtl.model.data.ComponentRole")
          # handle the explicit conversions for data types that are not well supported by jri
          if (pager$isToBeCast()) {
            casting <- pager$getToBeCast()
            if (!is.null(casting)) {
              casting <- jdx::convertToR(casting)
              for (colName in names(casting)) {
                toType <- casting[[colName]]
                if (toType == 'boolean') {
                  df[, colName] <- as.logical(df[, colName])
                }
                else if (toType == 'date') {
                  df[, colName] <- as.Date(df[, colName])
                }
              }
            }
          }
          attr(df, 'measures') <- sapply(jnode$getComponents(attr(role$Measure, 'jobj')), function(x) { x$getName() })
          attr(df, 'identifiers') <- sapply(jnode$getComponents(attr(role$Identifier, 'jobj')), function(x) { x$getName() })
        }
        else
          stop(paste0("Unsupported result class: ", jnode$getClass()$getName()))
        return(df)
      }, jnodes)
      names(nodesdf) <- names(jnodes)
      return(nodesdf)
    },
    #' @description
    #' Returns a lineage for the value of the named node defined in this session.
    #' @param alias
    #' a name of a node to compute from this session
    getLineage = function(alias) {
      instance <- private$checkInstance()
      jds <- instance$resolve(alias)
      viewer <- new(J("it.bancaditalia.oss.vtl.util.LineageViewer"), jds)
      matrix <- viewer$generateAdiacenceMatrix(instance)
      df <- data.frame(source = matrix$getFirst(),
                       target = matrix$getSecond(),
                       value = sapply(matrix$getThird(), function(x) { x$longValue() }),
                       stringsAsFactors = FALSE)
      return(df)
    },
    #' @description
    #' Creates a force network representation of all nodes defined in this VTL session.
    #' @param distance
    #' The distance between dots
    #' @param charge
    #' The repelling force between dots
    #' @importFrom igraph make_graph
    getTopology = function(distance = 100, charge = -100) {
      if (is.null(private$instance))
        return(NULL)
      jedges <- private$checkInstance()$getTopology()
      edges <- .jcall(jedges, "[Ljava/lang/Object;", "toArray")
      inNodes <- sapply(edges[[1]], .jstrVal)
      outNodes <- sapply(edges[[2]], .jstrVal)
      allNodes <- unique(c(inNodes, outNodes))
      statements <- sapply(private$checkInstance()$getStatements()$entrySet(),
                           function(x) setNames(list(x$getValue()), x$getKey()))
      # Nodes with no defining statement are external/primitive inputs.
      primitiveNodes <- allNodes[which(!allNodes %in% names(statements))]
      primitives <- rep('PRIMITIVE NODE', times = length(primitiveNodes))
      names(primitives) <- primitiveNodes
      statements <- append(statements, primitives)
      net <- networkD3::igraph_to_networkD3(make_graph(c(rbind(outNodes, inNodes))))
      net$links$value <- rep(3, length(inNodes))
      net$nodes$statement <- as.character(statements[as.character(net$nodes$name)])
      return(networkD3::forceNetwork(Links = net$links,
                                     Nodes = net$nodes,
                                     Source = 'source',
                                     Target = 'target',
                                     NodeID = 'name',
                                     Group = 'statement',
                                     Value = 'value',
                                     linkDistance = distance,
                                     charge = charge,
                                     fontSize = 20,
                                     opacity = 1,
                                     zoom = TRUE,
                                     arrows = TRUE,
                                     opacityNoHover = 1,
                                     clickAction = 'alert(d.group);',
                                     bounded = TRUE
      ))
    }
  ),
  private = list(
    # Java VTLSessionImpl instance; NULL until first needed.
    instance = NULL,
    # Set by finalize(); guards against use after finalization.
    finalized = FALSE,
    # Lazily creates (if needed) and returns the Java session instance.
    checkInstance = function() {
      if (private$finalized)
        stop('Session ', self$name, ' was finalized')
      else if (is.null(private$instance)) {
        private$instance <- .jnew("it.bancaditalia.oss.vtl.impl.session.VTLSessionImpl")
      }
      return(invisible(private$instance))
    }
  )
)
as.character.VTLSession <- function(x, ...) { return(x$name) }
|
b75b17c0066e5612ce3408f0946324b4a8691113
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ggstatsplot/examples/subtitle_anova_parametric.Rd.R
|
9b314eb2a9a9cb841703595a40831d9d9219895e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 487
|
r
|
subtitle_anova_parametric.Rd.R
|
# Auto-extracted example script for ggstatsplot::subtitle_anova_parametric
# (builds the text subtitle for between-subject ANOVA designs).
library(ggstatsplot)
### Name: subtitle_anova_parametric
### Title: Making text subtitle for the between-subject anova designs.
### Aliases: subtitle_anova_parametric
### ** Examples
# with defaults
# (k presumably sets the number of decimal places in the subtitle — see the
# package documentation)
subtitle_anova_parametric(
data = ggplot2::msleep,
x = vore,
y = sleep_rem,
k = 3
)
# modifying the defaults
subtitle_anova_parametric(
data = ggplot2::msleep,
x = vore,
y = sleep_rem,
effsize.type = "biased",
partial = FALSE,
var.equal = TRUE,
nboot = 10
)
|
552b22890485a4aaad0d3772e53ac17fccdeb108
|
04cbd24eb8b30234998386e79198875da960c541
|
/man/scale_max.Rd
|
8c55f9aedfaea5bb45676142a6bbd85c04ef942a
|
[] |
no_license
|
neuroimaginador/dl4ni
|
3ce88244575d1496a0575b01558affb6ce89c2ce
|
d0646921cdb6fb23f6b3b11e13fe345ebe12baba
|
refs/heads/master
| 2020-04-04T09:59:47.966496
| 2018-11-02T15:15:59
| 2018-11-02T15:15:59
| 155,838,762
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 303
|
rd
|
scale_max.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scales.R
\name{scale_max}
\alias{scale_max}
\title{FUNCTION_TITLE}
\usage{
scale_max(V)
}
\arguments{
\item{V}{(name) PARAM_DESCRIPTION}
}
\value{
OUTPUT_DESCRIPTION
}
\description{
FUNCTION_DESCRIPTION
}
\details{
DETAILS
}
|
86247e5d351fab952c3f79c75eb7ac5ecbc19351
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/DAKS/R/state2imp.r
|
22ab7840ef6559e91a090f585f124592a7623522
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 921
|
r
|
state2imp.r
|
####################################################
# This function transforms a set of knowledge      #
# states to the corresponding set of implications. #
####################################################
# P: binary (0/1) matrix, one row per knowledge state, one column per item.
# Returns a set of tuples (i, j) meaning "mastery of j implies mastery of i".
state2imp <- function(P) {
  # Base: for each item i, the set of items contained in every state that
  # contains i.
  base_list <- list()
  for (i in seq_len(ncol(P))) {
    base_list[[i]] <- set(i)
    # drop = FALSE keeps tmp a matrix even when exactly one state contains
    # item i (the original needed two duplicated branches to handle the
    # vector collapse).
    tmp <- P[which(P[, i] == 1), , drop = FALSE]
    if (nrow(tmp) > 0) {
      for (j in seq_len(ncol(P))[-i]) {
        # Item j belongs to the base of i iff every selected state has j.
        if (sum(tmp[, j]) == nrow(tmp)) {
          base_list[[i]] <- set_union(base_list[[i]], set(j))
        }
      }
    }
  }
  # An implication (i, j) holds when base(i) is contained in base(j).
  imp <- set()
  for (i in seq_len(ncol(P))) {
    for (j in seq_len(ncol(P))) {
      if (i != j && set_is_subset(base_list[[i]], base_list[[j]])) {
        imp <- set_union(imp, set(tuple(i, j)))
      }
    }
  }
  return(imp)
}
|
d67017cf35b1658da1bb0c1d73d420d5dde28f78
|
7a236c31e12686fb79fda5e4e5d1c1c4688e8068
|
/man/tumors.Rd
|
ba79c46a79276e72ef12b97e9ce988cf98c00612
|
[] |
no_license
|
cran/denoiseR
|
abbe62dc2b658d3d57216fd1447d237de63a6284
|
496a05fc8db5b7628d2432add4c60028bcb6a733
|
refs/heads/master
| 2021-01-09T20:38:00.611851
| 2020-02-26T06:10:09
| 2020-02-26T06:10:09
| 64,213,349
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 919
|
rd
|
tumors.Rd
|
\name{tumors}
\alias{tumors}
\docType{data}
\title{
Brain tumors data.
}
\description{
43 brain tumors and 356 continuous variables corresponding to the
expression data and 1 categorical variable corresponding to the type of tumors (4 types).
}
\usage{data(tumors)}
\format{
A data frame with 43 rows and 357 columns. Rows represent the tumors,
columns represent the expression and the type of tumor.
}
\details{
A genetic data frame.
}
\source{
M. de Tayrac, S. Le, M. Aubry, J. Mosser, and F. Husson. Simultaneous analysis of distinct omics data sets with integration of biological knowledge: Multiple factor analysis approach. BMC Genomics, 10(1):32, 2009.
}
\examples{
data(tumors)
\dontrun{
res.ada <- adashrink(tumors[, -ncol(tumors)], method = "SURE")
res.hcpc <- HCPC(as.data.frame(res.ada$mu.hat), graph=F, consol = FALSE)
plot.HCPC(res.hcpc, choice = "map", draw.tree = "FALSE")
}
}
\keyword{datasets}
|
f63868d88928ca2be34f6c67ddd607eb3880fa5d
|
f6a0dd0987b0286c22411eb4a57173838f714141
|
/metadata/bialystokMetadata.R
|
825a1c61d17dc47ea056651c8641d41deab94712
|
[] |
no_license
|
kontrabanda/mgr-project
|
735442f0360856dbd61e7d464d207532af2e98d4
|
f0bdeebcc7ab3f8493142b147627fbdfe54d670b
|
refs/heads/master
| 2021-09-03T02:26:36.676601
| 2018-01-04T22:31:16
| 2018-01-04T22:31:16
| 106,453,365
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,236
|
r
|
bialystokMetadata.R
|
# Exploratory metadata analysis of crime incidents in Bialystok:
# loads the city shapefile and a national incident CSV, computes basic
# statistics, and renders point and settlement-level choropleth maps.
library(raster)
library(sp)
library(latticeExtra)
library(RColorBrewer)
library(ggplot2)
# City boundary, reprojected to WGS84 (EPSG:4326).
bialystok <- shapefile("../../data/bialystok/bialystok.shp")
bialystok <- spTransform(bialystok, CRS("+init=epsg:4326"))
# Pipe-separated national incident file; drop records without coordinates.
crime <- read.csv("../../data/Polska/zdarzenia_rsow.csv", sep = "|")
crime <- crime[!is.na(crime$LAT)&!is.na(crime$LNG), ]
# Change decimal ',' to '.' in the data (otherwise values are treated as
# strings); the inner gsub first removes any '.' already present.
crime$LAT <- as.numeric(gsub(",", ".", gsub("\\.", "", crime[["LAT"]])))
crime$LNG <- as.numeric(gsub(",", ".", gsub("\\.", "", crime[["LNG"]])))
# Promote to spatial points in the same projection as the city boundary.
coordinates(crime) =~ LNG+LAT
projection(crime) = projection(bialystok)
## Keep only the incidents located inside Bialystok.
crimeBialystok <- intersect(crime, bialystok)
#### Basic statistics
nrow(crimeBialystok@data)
ncol(crimeBialystok@data)
####
#### Basic statistics of the incident categories (KAT column)
length(unique(crimeBialystok$KAT))
unique(crimeBialystok$KAT)
####
#### Earliest and latest incident date
# NOTE(review): which.max/which.min on DATA assume the column orders
# chronologically (e.g. ISO-formatted date strings) — confirm the format.
crimeBialystokDF <- data.frame(crimeBialystok)
crimeBialystokDF[which.max(crimeBialystok@data$DATA), ]
crimeBialystokDF[which.min(crimeBialystok@data$DATA), ]
####
#### Map of Bialystok with incidents plotted as points
png('../plot/metadata/bialystok_przestepstwa.png', width=2500, height=3000, res=300)
plot(bialystok, col='light green')
points(crimeBialystok, col='red', cex=1, pch='+')
dev.off()
####
#### Map aggregated to settlement (district) level
settlements <- aggregate(bialystok, by='jpt_nazwa_')
# Assign each incident the settlement polygon it falls in.
crimeWithSettlements <- over(crimeBialystok, settlements)
crimeWithSettlements <- cbind(data.frame(crimeBialystok), crimeWithSettlements)
# Count incidents recorded for the settlement named x.
sumBySettlements <- function(x) {
dat <- crimeWithSettlements$jpt_nazwa_ == x
sum(dat)
}
settlementsnames <- unique(settlements$jpt_nazwa_)
sumOfCrimesBySettlementName <- sapply(settlementsnames, sumBySettlements)
# Dot chart of the 10 settlements with the most incidents.
png('../plot/metadata/bialystok_osiedla_dotchart.png', width=2500, height=3000, res=300)
dotchart(tail(sort(sumOfCrimesBySettlementName), 10), cex=0.65)
dev.off()
# Join the per-settlement counts back onto the polygons for a choropleth.
sumOfCrimesBySettlementName1 <- data.frame(jpt_nazwa_=names(sumOfCrimesBySettlementName), sumOfCrimesBySettlementName)
settlementsWithSumOfCrimes <- merge(settlements, sumOfCrimesBySettlementName1, by='jpt_nazwa_')
my.palette <- brewer.pal(n = 9, name = "YlOrBr")
png('../plot/metadata/bialystok_osiedla.png', width=2500, height=3000, res=300)
spplot(settlementsWithSumOfCrimes, 'sumOfCrimesBySettlementName', col.regions = my.palette, cuts = 8)
dev.off()
####
#### Number of incidents per category
categoryNames <- unique(crimeBialystokDF$KAT)
# Count incidents in category x.
sumByCategory <- function(x) {
sum(crimeBialystokDF$KAT == x)
}
crimeInCategories <- sapply(categoryNames, sumByCategory)
crimeInCategories <- data.frame(NAME=categoryNames, crimeInCategories)
####
#### Duplicates: reload the raw file, drop the ID column, and count rows
#### that are exact copies of an earlier row.
crime <- read.csv("../../data/Polska/zdarzenia_rsow.csv", sep = "|")
crime <- crime[!is.na(crime$LAT)&!is.na(crime$LNG), ]
# Change decimal ',' to '.' in the data (otherwise values are treated as strings).
crime$LAT <- as.numeric(gsub(",", ".", gsub("\\.", "", crime[["LAT"]])))
crime$LNG <- as.numeric(gsub(",", ".", gsub("\\.", "", crime[["LNG"]])))
drops <- c("ID")
test <- crime[ , !(names(crime) %in% drops)]
test1 <- test[duplicated(test), ]
sum(duplicated(test))
|
ae84ea1411e3b11042d7ed5f76b8ba0449ede652
|
d4a2668077fe1c2561e4fac54a1f3b36523fec3d
|
/R/IdentifiedORFswithSAPsAltINDELsIsoform.R
|
8802ad6bfc5e797bbc74e975eee94f4c8582c08b
|
[] |
no_license
|
saha-shyamasree/Proteomics
|
23c58cc00b812140e85638911f603b1737599151
|
3c07a069c87dcc1c09f2665da0ac29e055e40da2
|
refs/heads/master
| 2020-12-06T20:36:11.158420
| 2016-05-27T12:53:56
| 2016-05-27T12:53:56
| 24,229,701
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,508
|
r
|
IdentifiedORFswithSAPsAltINDELsIsoform.R
|
##This code takes list of known proteins, known proteins with SAPs and possible isoforms produced by 'IdentifyProteinIsoformSAP.py' and
##identify how many of these are found from peptide and protein identification.
source("D:/Code/Proteomics/R/RLib.R")
# Read a headered CSV file and return its contents as a matrix.
readList <- function(filepath)
{
  parsed <- read.csv(file = filepath, header = TRUE)
  as.matrix(parsed)
}
# Writes to outFile the identified ORFs that match NONE of: known proteins,
# known proteins with SAPs, isoforms, isoforms with SAPs.  Prints dim(Mat)
# after each filtering step, as before.
# Fix: the original used `Mat[-(which(...)), ]`; when which() returned an
# empty index, `-integer(0)` selected ZERO rows and silently discarded all
# remaining data.  Logical `!(%in%)` subsetting is safe for the empty case,
# and drop = FALSE keeps Mat a matrix when only one row survives.
identifiedORFsClassUnknown<-function(identifiedORFs, knownProteinList, knownProteinSAPList, isoformList, isoformSAPList, outFile)
{
  Mat = identifiedORFs[!(identifiedORFs[,'description'] %in% knownProteinList[,'ORF.Id']), , drop = FALSE]
  print(dim(Mat))
  Mat = Mat[!(Mat[,'description'] %in% knownProteinSAPList[,'ORF.Id']), , drop = FALSE]
  print(dim(Mat))
  Mat = Mat[!(Mat[,'description'] %in% isoformList[,'ORF.Id']), , drop = FALSE]
  print(dim(Mat))
  Mat = Mat[!(Mat[,'description'] %in% isoformSAPList[,'ORF.Id']), , drop = FALSE]
  print(dim(Mat))
  write.table(Mat,file=outFile,sep='\t',quote = FALSE,row.names = FALSE, col.names=TRUE)
}
# Subset the identified ORFs (filtered, with accessions already replaced by
# Uniprot ids) to those whose 'description' exactly matches a known Uniprot
# protein in knownProteinList, and write the hits to outFile as a TSV.
identifiedORFsClassKnownProtein <- function(identifiedORFs, knownProteinList, outFile) {
  is_known <- identifiedORFs[, "description"] %in% knownProteinList[, "ORF.Id"]
  hits <- identifiedORFs[which(is_known), ]
  write.table(hits, file = outFile, sep = "\t",
              quote = FALSE, row.names = FALSE, col.names = TRUE)
}
# Subset the identified ORFs to those matching a known protein carrying SAPs
# (single amino-acid polymorphisms) and write the matches to outFile as a
# tab-separated table.
identifiedORFsClassKnownProteinSAP <- function(identifiedORFs, knownProteinSAPList, outFile) {
  sap_rows <- which(identifiedORFs[, "description"] %in% knownProteinSAPList[, "ORF.Id"])
  write.table(identifiedORFs[sap_rows, ], file = outFile, sep = "\t",
              quote = FALSE, row.names = FALSE, col.names = TRUE)
}
# Subset identified ORFs that match a candidate isoform, annotate each hit
# with the isoform 'Type' from isoformList (rows aligned via match()), and
# write the result to a tab-separated file.
# NOTE(review): cbind() with an unnamed subscript expression leaves the
# appended column without a stable name (empty for matrices, a deparsed
# expression for data frames), so the last header field in the written file
# is not predictable -- confirm downstream consumers key on position.
identifiedORFsClassIsoform<-function(identifiedORFs, isoformList, outFile)
{
# rows whose description appears in the isoform list
Mat=identifiedORFs[which(identifiedORFs[,'description'] %in% isoformList[,'ORF.Id']),]
# first matching isoformList row for each retained ORF
index=match(Mat[,'description'],isoformList[,'ORF.Id'])
# append the isoform type as an extra column
Mat=cbind(Mat,isoformList[index,'Type'])
write.table(Mat,file=outFile,sep='\t',quote = FALSE,row.names = FALSE, col.names=TRUE)
}
################################# 20-05-2015 ##############################################
#### PASA assembly for human adenovirus
## File paths and other thresholding values.
## NOTE: this whole block is superseded by the 24-08-2015 block below, which
## reassigns every one of these variables before any analysis runs; kept for
## provenance only.
identifiedORFsDir <- "D:/data/Results/Human-Adeno/Identification/PASA/sORF/"
identifiedORFsFilename <- "pasa_assemblyV1+fdr+th+grouping+prt.csv"
knownProteinPath <- "D:/data/blast/blastCSV/human_adeno_mydb_pasa.assemblies_ORFs_knownProteinsV2.csv"
knownProteinSAPPath <- "D:/data/blast/blastCSV/human_adeno_mydb_pasa.assemblies_ORFs_knownProteinsSAPsV2.csv"
isoformPath <- "D:/data/blast/blastCSV/human_adeno_mydb_pasa.assemblies_ORFs_IsoformsV2.csv"
isoformSAPsPath <- "D:/data/blast/blastCSV/human_adeno_mydb_pasa.assemblies_ORFs_IsoformsSAPsV2.csv"
## FIX: paste() defaults to sep = " ", so the original produced paths with a
## space between the directory and the file name; paste0() concatenates
## directly (the directory string already ends in "/").
knownIdentifiedProteinsPath <- paste0(identifiedORFsDir, "pasa_assemblyV1_knownProteinsV2.tsv")
knownSAPIdentifiedProteinsPath <- paste0(identifiedORFsDir, "pasa_assemblyV1_knownSAPProteinsV2.tsv")
isoformIdentifiedProteinsPath <- paste0(identifiedORFsDir, "pasa_assemblyV1_isoformProteinsV2.tsv")
isoformSAPsIdentifiedProteinsPath <- paste0(identifiedORFsDir, "pasa_assemblyV1_isoformSAPsProteinsV2.tsv")
################################# 24-08-2015 ##############################################
#### PASA assembly for human adenovirus
## File paths and other thresholding values -- these are the values actually
## used by the analysis below.
identifiedORFsDir <- "D:/data/Results/Human-Adeno/Identification/PASA/sORF/"
identifiedORFsFilename <- "pasa_assemblyV1+fdr+th+grouping+prt.csv"
knownProteinPath <- "D:/data/blast/blastCSV/PASA/Human-Adeno/human_adeno_mydb_pasa.assemblies_ORFs_knownProteinsV7.csv"
knownProteinSAPPath <- "D:/data/blast/blastCSV/PASA/Human-Adeno/human_adeno_mydb_pasa.assemblies_ORFs_knownProteinsSAPsV7.csv"
isoformPath <- "D:/data/blast/blastCSV/PASA/Human-Adeno/human_adeno_mydb_pasa.assemblies_ORFs_IsoformsV7.csv"
isoformSAPsPath <- "D:/data/blast/blastCSV/PASA/Human-Adeno/human_adeno_mydb_pasa.assemblies_ORFs_IsoformsSAPsV7.csv"
knownIdentifiedProteinsPath <- paste0(identifiedORFsDir, "pasa_assemblyV1_knownProteinsV7.tsv")
knownSAPIdentifiedProteinsPath <- paste0(identifiedORFsDir, "pasa_assemblyV1_knownSAPProteinsV7.tsv")
isoformIdentifiedProteinsPath <- paste0(identifiedORFsDir, "pasa_assemblyV1_isoformProteinsV7.tsv")
isoformSAPsIdentifiedProteinsPath <- paste0(identifiedORFsDir, "pasa_assemblyV1_isoformSAPsProteinsV7.tsv")
novelIdentifiedProteinsPath <- paste0(identifiedORFsDir, "pasa_assemblyV1_novelProteinsV7.tsv")
blastFile <- "human_adeno_mydb_pasa.assemblies_ORFsV1.csv"
blastDir <- "D:/data/blast/blastCSV/PASA/Human-Adeno/"
# threshold forwarded to proteinGrpFilterReplaceIds(); semantics defined in
# RLib.R -- presumably a BLAST e-value upper bound, TODO confirm
upper <- 0.000000000000000000000000000001
###################################### Reading matrices ###############################################################
# proteinGrpFilterReplaceIds() comes from RLib.R (sourced at the top of this
# file); presumably filters protein groups and maps accessions to Uniprot
# ids -- confirm against RLib.R.
identifiedORFs <- proteinGrpFilterReplaceIds(identifiedORFsFilename, identifiedORFsDir, blastFile, blastDir, 1, 1, 1, upper)
knownProteinList <- readList(knownProteinPath)
knownProteinSAPList <- readList(knownProteinSAPPath)
isoformList <- readList(isoformPath)
isoformSAPsList <- readList(isoformSAPsPath)
# Partition the identified ORFs into the four known classes plus novel.
identifiedORFsClassUnknown(identifiedORFs, knownProteinList, knownProteinSAPList, isoformList, isoformSAPsList, novelIdentifiedProteinsPath)
identifiedORFsClassKnownProtein(identifiedORFs, knownProteinList, knownIdentifiedProteinsPath)
identifiedORFsClassKnownProteinSAP(identifiedORFs, knownProteinSAPList, knownSAPIdentifiedProteinsPath)
identifiedORFsClassIsoform(identifiedORFs, isoformList, isoformIdentifiedProteinsPath)
identifiedORFsClassIsoform(identifiedORFs, isoformSAPsList, isoformSAPsIdentifiedProteinsPath)
|
008cb6a21dd93477fcd63e47cf9910131ee90581
|
3b0bc265b1c2ebed261cb7d93a3bf778e7e286f7
|
/wine_ratings.R
|
8d22256d9fd7b0379881cc79ef7af4ceb7afec7c
|
[] |
no_license
|
tknoch8/ml_projects
|
942285f015410059468a3a407cbc8e22c45d618e
|
d7a0a2551c560668cc0d024f6dcbff71041d169e
|
refs/heads/master
| 2023-01-24T19:53:51.498566
| 2020-12-04T22:17:44
| 2020-12-04T22:17:44
| 276,287,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 171
|
r
|
wine_ratings.R
|
# Load the tidyverse and pull the TidyTuesday 2019-05-28 wine ratings data
# (winemag 130k reviews) directly from GitHub.
# library() errors loudly when the package is missing; require() only warns
# and returns FALSE, deferring the failure to a confusing later error.
library(tidyverse)
wine_ratings <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-05-28/winemag-data-130k-v2.csv")
|
12bd4ce0fbb25242595f86f1460fe28732e90d55
|
450cf51141602b88597d17dc8daa0170f3f1dba2
|
/data-raw/HCP_overview.R
|
e03d866fe3da93439b615761143c57d04707fbf8
|
[] |
no_license
|
jacob-ogre/ecosscraper
|
6b16b133738076cb75e1336c28dfc932e1823496
|
63bafcc0213c52a2d2620cc1d17ef290d150d13b
|
refs/heads/master
| 2021-04-30T23:24:42.375356
| 2018-01-23T16:07:03
| 2018-01-23T16:07:03
| 61,709,830
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,656
|
r
|
HCP_overview.R
|
library(ecosscraper)
library(ggplot2)
library(ggthemes)
library(stringr)
library(viridis)
data(HCP_SHA)
data(CCAA)
data(CCA)
data(state)
dim(HCP_SHA)
names(HCP_SHA)
names(HCP_SHA)[12] <- "FR_Date"
names(HCP_SHA)[13] <- "Date_Agmt_Permit"
names(HCP_SHA)[15] <- "Date_Agmt_Permit_Expired"
names(HCP_SHA)[21] <- "Non_Listed_Species"
names(HCP_SHA)[24] <- "Area_Covered"
names(HCP_SHA)[25] <- "Area_Enrolled"
names(HCP_SHA)[30] <- "FR_Documents"
names(CCAA)
names(CCAA)[10] <- "FR_Date"
names(CCAA)[11] <- "Date_Agmt_Permit"
names(CCAA)[13] <- "Date_Agmt_Permit_Expired"
names(CCAA)[21] <- "Area_Covered"
names(CCAA)[22] <- "Area_Enrolled"
names(CCAA)[27] <- "FR_Documents"
names(CCA)
names(CCA)[9] <- "Date_Agmt_Permit"
names(CCA)[11] <- "Date_Agmt_Permit_Expired"
names(CCA)[16] <- "Area_Covered"
names(CCA)[17] <- "Area_Enrolled"
names(CCA)[22] <- "FR_Documents"
shared <- intersect(names(HCP_SHA), intersect(names(CCAA), names(CCA)))
shared
HCP_sub <- HCP_SHA[shared]
CCAA_sub <- CCAA[shared]
CCA_sub <- CCA[shared]
cons_agmt <- rbind(HCP_sub, CCAA_sub, CCA_sub)
dim(cons_agmt)
names(cons_agmt)
summary(as.Date(cons_agmt$Date_Agmt_Permit, format = "%m/%d/%Y"))
summary(as.Date(cons_agmt$Date_Agmt_Permit_Expired, format = "%m/%d/%Y"))
devtools::use_data(cons_agmt)
################################################
# Now to get to the overview
# how many docs are linked?
outs <- strsplit(x = cons_agmt$Outlinks, split = "|", fixed = TRUE)
n_outs <- lapply(outs, length)
hist(unlist(n_outs))
sum(unlist(n_outs > 0))
length(unlist(outs))
# Summary by agreement type
table(cons_agmt$Agreement_Type)
# Summary by FWS_region
table(cons_agmt$USFWS_Regions)
region <- strsplit(x = cons_agmt$USFWS_Regions, split = " , ")
cons_agmt$Region_ls <- region
table(unlist(region))
table(cons_agmt$USFWS_Regions, cons_agmt$Agreement_Type)
# I need to expand the agreement type and region list to a df to table...
# By ES/field office
tab_ESO <- table(cons_agmt$USFWS_Field_Offices)
head(sort(tab_ESO, decreasing = TRUE))
# HCPs and SHAs by NEPA process
table(HCP_SHA$NEPA_Process)
##################################
# now some date conversions and quick glances
tmp <- as.Date(HCP_SHA$FR_Date, format="%m/%d/%Y")
t2 <- as.numeric(tmp)
t3 <- data.frame(t2)
ggplot(t3, aes(x = t2)) +
geom_histogram() +
theme_bw()# Need to add date xlabs
HCP_SHA$FR_Date <- tmp
tmp <- as.Date(cons_agmt$Date_Agmt_Permit, format="%m/%d/%Y")
summary(tmp)
t2 <- as.numeric(tmp)
t3 <- data.frame(Numeric_Date = t2, Agreement_Type = cons_agmt$Agreement_Type)
# A quick plot
ggplot(t3, aes(x = Numeric_Date, fill = Agreement_Type)) +
geom_histogram() +
theme_bw() +
theme(legend.position = "bottom") +
scale_fill_viridis(discrete = TRUE)
# Need to add date xlabs and clean up, e.g., legend
cons_agmt$Date_Agmt_Permit <- tmp
summary(cons_agmt$Date_Agmt_Permit)
tmp <- as.Date(HCP_SHA$Date_Application_Withdrawn, format="%m/%d/%Y")
unique(tmp)
HCP_SHA$Date_Application_Withdrawn <- tmp
tmp <- as.Date(cons_agmt$Date_Agmt_Permit_Expired, format="%m/%d/%Y")
summary(tmp)
cons_agmt$Date_Agmt_Permit_Expired <- tmp
t2 <- as.numeric(tmp)
hist(t2)
tmp <- as.Date(HCP_SHA$Date_Permit_Denied, format="%m/%d/%Y")
unique(tmp)
HCP_SHA$Date_Permit_Denied <- tmp
tmp <- as.Date(HCP_SHA$Date_Permit_Suspended, format="%m/%d/%Y")
unique(tmp)
HCP_SHA$Date_Permit_Suspended <- tmp
tmp <- as.Date(HCP_SHA$Date_Permit_Revoked, format="%m/%d/%Y")
unique(tmp)
HCP_SHA$Date_Permit_Revoked <- tmp
tmp <- HCP_SHA[!is.na(HCP_SHA$Date_Permit_Revoked), ]
glimpse(tmp) # The HCP that was tossed by a court last year
###########################################################################
# Analysis of species: HCP/SHA
tmp <- strsplit(HCP_SHA$Listed_Species,
split = "|",
fixed = TRUE)
t2 <- lapply(tmp, FUN = str_trim)
head(t2)
HCP_SHA$Listed_Species_ls <- t2
t3 <- lapply(HCP_SHA$Listed_Species_ls, FUN = length)
hist(unlist(t3), breaks = 20)
spp_covered <- unlist(HCP_SHA$Listed_Species_ls)
spp_cov_tab <- table(spp_covered)
head(sort(spp_cov_tab, decreasing = TRUE), 10)
tmp <- strsplit(HCP_SHA$Non_Listed_Species,
split = "|",
fixed = TRUE)
t2 <- lapply(tmp, FUN = str_trim)
head(t2)
HCP_SHA$Non_Listed_Species_ls <- t2
spp_covered <- unlist(HCP_SHA$Non_Listed_Species_ls)
spp_cov_tab <- table(spp_covered)
head(sort(spp_cov_tab, decreasing = TRUE), 10)
###########################################################################
# Habitat
length(unique(cons_agmt$Habitat))
sum(cons_agmt$Habitat != "", na.rm = TRUE)
hab_tab <- table(cons_agmt$Habitat)
head(sort(hab_tab, decreasing = TRUE), 8)
# There's something here I think, but probably need to do some NLP on these...
###########################################################################
# States
st_un <- unique(cons_agmt$States)
res <- list()
for(i in 1:length(cons_agmt$States)) {
st <- cons_agmt$States[i]
if(any(grepl(pattern = st, x = state.name))) {
res[[i]] <- st
} else {
sts <- c()
for(j in state.name) {
if(grepl(x = st, pattern = j)) {
sts <- c(sts, j)
}
}
if(length(sts) > 0) {
res[[i]] <- sts
} else {
res[[i]] <- st[i]
}
}
}
table(unlist(res))
cons_agmt$States_ls <- res
###########################################################################
# Location
locs <- cons_agmt$Location
loc_patt <- str_match_all(locs, pattern = "[A-Za-z\\ ]+\\ (Co\\.|County)")
res <- list()
for(i in 1:length(loc_patt)) {
res[[i]] <- str_trim(loc_patt[[i]][,1])
}
res2 <- lapply(res, gsub, pattern = "Co\\.", replacement = "County")
head(res2)
res3 <- str_match_all(res2, pattern = "(\\w+ \\w+|\\w+) County")
head(res3)
res4 <- list()
for(i in 1:length(res3)) {
res4[[i]] <- str_trim(res3[[i]][,1])
}
head(res4)
# res5 <- c()
# for(i in 1:length(res4)) {
# res5 <- c(res5, str_match_all(res4[[i]], pattern = "^\\w+"))
# }
# head(res5)
# unique(unlist(res5))
# head(sort(table(unlist(res5)), decreasing = TRUE), 50)
clean_loc <- lapply(res4, gsub,
pattern = "^in |^Subdivision |^and |^of |^CA |^III |^northern |^southwestern |^City |^within |^adn |^central |^northeast |^northen |^s |^SC |^the |^through |^Eat",
replacement = "")
head(clean_loc, 50)
loc2 <- unlist(res2)
length(unique(loc2))
loc_tab <- table(loc2)
head(sort(loc_tab, decreasing = TRUE))
loc_tab2 <- table(unlist(clean_loc))
head(sort(loc_tab2, decreasing = TRUE))
cons_agmt$Location_ls <- clean_loc
###########################################################################
# Area covered
# unique(cons_agmt$Area_Covered)
ac <- cons_agmt$Area_Covered
head(ac)
ac_patt <- str_match_all(cons_agmt$Area_Covered, pattern = "[0-9\\.]+ acres")
head(ac_patt)
ac_pat2 <- lapply(ac_patt, gsub, pattern = "\\ acres", replacement = "")
head(ac_pat2)
ac_num <- lapply(ac_pat2, as.numeric)
ac_len <- lapply(ac_num, length)
cons_agmt$Acres_Covered_num <- ac_num
sum(unlist(ac_num), na.rm = TRUE) #??? 1.2B acres! How to account for overlap?
hist(unlist(ac_num))
summary(unlist(ac_num))
sub_ac <- unlist(ac_num)
sub_ac <- sub_ac[sub_ac < 100000]
sub_ac <- sub_ac[sub_ac < 100]
hist(sub_ac, breaks = 50)
# huge <- cons_agmt[grep(x = cons_agmt$Area_Covered, pattern = "1611"), ]
# huge$Plan_Name
###########################################################################
# Area enrolled
length(unique(cons_agmt$Area_Enrolled))
enroll_un <- unique(cons_agmt$Area_Enrolled)
enroll_un[!grepl(pattern = "^Data not avail", x = enroll_un)]
# nothing to see
###########################################################################
# Land use
LU <- cons_agmt$Land_Use
unique(LU)
# probably amenable to a bag-of-words analysis, but unstructured here...
###########################################################################
# Durations of agreements
unique(cons_agmt$Duration)
yr_mo <- str_match_all(string = cons_agmt$Duration,
pattern = "[0-9]+\\ years,\\ [0-9]+ months")
yr_mo <- unlist(yr_mo)
head(yr_mo)
table(yr_mo)
dur_yr <- unlist(str_match_all(string = yr_mo, pattern = "^[0-9]+"))
head(dur_yr)
dur_yr <- as.numeric(dur_yr)
hist(dur_yr[dur_yr < 999], breaks = 50)
cons_agmt$Duration_Years <- dur_yr
dur_mo <- unlist(gsub(pattern = ", ", replacement = "",
x = str_match_all(string = yr_mo, pattern = ", [0-9]+")))
head(dur_mo)
dur_mo <- as.numeric(dur_mo)
cons_agmt$Duration_Months <- dur_mo
###########################################################################
# Re-write the data
head(data.frame(cons_agmt))
summary(cons_agmt$Date_Agmt_Permit)
devtools::use_data(cons_agmt, overwrite = TRUE)
|
50a3d56a0fc80f818ab4ff37d4099a9b8a17cf8f
|
c71bb83278a21d2be8ce99ee7560783b762d1d8f
|
/man/taxaaccum.Rd
|
e1ac6dedb68093702f1a041f4eff1f75c62c67ed
|
[] |
no_license
|
hhsieh/biotaxa_Rpackage
|
bc6b6768263984855c372357abe1337e2f99d0e6
|
fbfdae55a1e92a754dfe9120e4132a80728407f3
|
refs/heads/master
| 2018-09-18T21:39:55.095438
| 2018-06-06T06:59:38
| 2018-06-06T06:59:38
| 103,230,516
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 494
|
rd
|
taxaaccum.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxaaccum.R
\name{taxaaccum}
\alias{taxaaccum}
\title{Report the cumulative number of a rank within a given taxon over time}
\usage{
taxaaccum(taxa, rank)
}
\arguments{
\item{taxa}{A string.}
\item{rank}{A string.}
}
\value{
accumulation curve of \code{rank} of a \code{taxa}
}
\description{
Report the cumulative number of a rank within a given taxon over time
}
\examples{
\dontrun{
taxaaccum("Animalia", "Phylum")
}
}
|
5f9f074161831af1bd46671556addd40ee0b1437
|
2c59b1e35becfa9a24751ca29eb120ef38e8a6a2
|
/man/grid_one_raster.Rd
|
28c321f143115f877aa7e7c8e28c22a56ad3b999
|
[] |
no_license
|
land-4-bees/beecoSp
|
c81af0c1b64eff9923e32e5fdc5a5d72da72deb2
|
91fdd28b56fa23b0d1fc17df7adc52aed7e5b720
|
refs/heads/master
| 2023-04-15T21:36:23.236685
| 2023-02-23T21:07:08
| 2023-02-23T21:07:08
| 120,662,758
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,339
|
rd
|
grid_one_raster.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grid_one_raster.R
\name{grid_one_raster}
\alias{grid_one_raster}
\title{Split regional or national raster into gridded tiles}
\usage{
grid_one_raster(
rasterpath,
rasterID,
regionalextent = NA,
div,
buffercells = c(0, 0),
NAvalue,
writetiles = T,
tiledir
)
}
\arguments{
\item{rasterpath}{file path of input raster file}
\item{rasterID}{text string to identify output tiles (e.g. CDL for NASS Cropland Data Layer)}
\item{regionalextent}{optional, vector of state names to crop national raster to region of interest}
\item{div}{division factor specifying the number of tiles in x and y dimensions}
\item{buffercells}{number of cells overlapping between adjacent tiles}
\item{NAvalue}{No data or background value of input raster}
\item{writetiles}{logical, write tiles as directory of .tif files?}
\item{tiledir}{path to directory where output tiles should be saved}
}
\value{
A list of raster tiles.
}
\description{
Split regional or national raster into gridded tiles
}
\details{
NAvalue parameter is used to identify raster tiles that are all background (e.g. areas of open water). These background tiles are excluded from list of output tiles and tile directory.
}
\keyword{bees}
\keyword{ecology}
\keyword{landscape}
\keyword{spatial}
|
46193012fa1ef4a2d5223386f5cbc7aa03ffc08d
|
58f3548b037a642c3f771d0e19838339a47b5d77
|
/quora/R/caret.R
|
0000de5ff91b4c88d9923ba64a9f9bb8c67634de
|
[] |
no_license
|
mkearney/competitions_kaggle
|
c99e8333d4193e3fa1edb10b5b3286294a4f2b7c
|
bc968ed349e5d60ebd92927c1709b27cfeb5d180
|
refs/heads/master
| 2021-06-16T06:58:07.959313
| 2017-05-18T01:28:07
| 2017-05-18T01:28:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 457
|
r
|
caret.R
|
# Exploratory caret modeling (Quora duplicate-question task).
# NOTE(review): `e` is not defined in this file -- presumably a data frame
# whose first column is is_duplicate and remaining columns are features;
# confirm against the script that sources this one.
library(caret)
traindata <- na.omit(e)[, -1]
trainclass <- na.omit(e)[, 1]
trainclass$is_duplicate <- factor(trainclass$is_duplicate)
# Binary discriminant analysis ("binda") with log pre-processing on the
# complete-case rows of e.
mod_caret <- train(is_duplicate ~ .,
data = data.frame(na.omit(e)),
method = "binda",
lambda.freqs = .01,
preProcess = c("log"))
summary(mod_caret)
# Interactive help lookup; has no effect when the file is sourced.
?getModelInfo()
library(randomForest)
# NOTE(review): `taste`, `quality` and `train` belong to a different (wine
# quality) exercise -- this line looks pasted from another script; verify
# intent before relying on `model`.
model <- randomForest(taste ~ . - quality, data = train)
|
5723d0b3a48cb8d3dccc145d3fb1ea84e54f329d
|
6c76ee43bf182f66205a03a24d15e628d6a9c277
|
/StatQuest Lasso Ridge ElastN.R
|
c5e7fddca47a336c31e9be3849fbd5c492086fe0
|
[] |
no_license
|
Ismail17stats/Machine-Learning-
|
74159901ea93233f503a4a6aafd1e2246e6d1802
|
5a7f7c8f0c66ab7ab63255c1610dd930f74f7f1a
|
refs/heads/main
| 2023-06-27T16:58:06.548379
| 2021-08-02T13:13:33
| 2021-08-02T13:13:33
| 380,471,272
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,108
|
r
|
StatQuest Lasso Ridge ElastN.R
|
#StatQuest !!!!
library(glmnet)
set.seed(12345)
### Setting up our data
n <- 1000 # made up data set will have 1000 samples
p <- 5000 # parameters to estimate
real_p <- 15 # only 15 parameters will help us predict the outcome
x <- matrix(rnorm(n*p), nrow = n,ncol = p)
# Now we will create a vector y that we will try to predict with the data in x
y <- apply(x[,1:real_p],1,sum)+rnorm(n) # call to apply will return a vector of values that depends on the first 15 columns in x
# To summarize this first part:
### x is a matrix of data that we will use Ridge, Lasso and Elastic Net to predict the values in y
### Partitioning
index = sample(1:n, 0.7*n)
x.train = x[index,] # Create the training data
x.test = x[-index,] # Create the test data
y.train = y[index]
y.test = y[-index]
### Ridge regression
# The first thing we need to do is fit a model to the training data
# cv: using cross validation to find the optimal values for lambda
# we want to use x.train to predict y.train
alpha0.fit <- cv.glmnet(x.train, y.train, type.measure = "mse", alpha = 0
, family ="gaussian")
# now we will use the predict() to apply alpha0.fit to the testing data
# s: is the size of the penalty which is set to one of the optimal values for lambda stored in alpha0.fit
alpha0.predicted <- predict(alpha0.fit, s = alpha0.fit$lambda.1se, newx = x.test)
# NOTE: we are setting s to lambda.1se which is the value of lambda that resulted in
# the simplest model(the model with fewest non zero parameters) and was withtin 1 standard error of the lambda
# that had the smallest sum.
# NOTE2: Alternatively we could set s to be lambda.min, which would be the lambda that resulted in the smallest sum
# However, in this example we will use lambda.1se because, in a statistical sense, it is indistinguishable from lambda.min, but it results in a model with
# fewer parameters
# Okay let's move on
# Now we calculate the mean squared error of the difference between the true values stored in y.test
# and the predicted values stored in alpha0.predicted
mean((y.test - alpha0.predicted)^2)# MSE
### Lasso regression
# just like before we call cv.glmnet() to fit a linear regression (10 fold cv by default) to determine the optimal value of lambda
alpha1.fit <- cv.glmnet(x.train, y.train, type.measure = "mse", alpha = 1
, family ="gaussian")
# call the predict function just like before using alpha1.fit
alpha1.predicted <- predict(alpha1.fit, s = alpha1.fit$lambda.1se, newx = x.test)
# MSE
mean((y.test - alpha1.predicted)^2)
# We got a much smaller value than in Ridge, so Lasso regression is much better with this data than Ridge regression
### Elastic Net regression (which combines both Ridge and Lasso penalties)
alpha0.5.fit <- cv.glmnet(x.train, y.train, type.measure = "mse", alpha = 0.5
, family ="gaussian")
alpha0.5.predicted <- predict(alpha0.5.fit, s = alpha0.5.fit$lambda.1se, newx = x.test)
# MSE
mean((y.test - alpha0.5.predicted)^2)
# slightly larger than the value we got from Lasso regression, so in this case Lasso wins
# HOWEVER
# we need to try a bunch of different values from alpha
# I am going to create a list that will store a bunch of Elastic Net regression fits
list.of.fits <- list()
# create a for-loop to try different values for alpha and fit those values
for(i in 0:10){
fit.name <- paste0("alpha", i/10)
list.of.fits[[fit.name]]<-
cv.glmnet(x.train, y.train, type.measure = "mse", alpha = i/10, family = "gaussian")
}
# we will also create an empty data frame to store the MSE and other things
results <- data.frame()
# Then another for-loop to predict values using the testing dataset in order to find the MSE
for(i in 0:10){
fit.name <- paste0("alpha", i/10)
predicted <-
predict(list.of.fits[[fit.name]], s = list.of.fits[[fit.name]]$lambda.1se, newx = x.test)
mse <- mean((y.test - predicted)^2)
temp <- data.frame(alpha = i/10, mse = mse, fit.name = fit.name)
results <- rbind(results, temp)
}
results
## seems like Lasso still wins
|
4b9a9b857a9ad9caaa2fc9d88f49176ba538e627
|
7670edeaa1ae7fec20d66b174d5b6d7c8fe50318
|
/create_mask.R
|
7af48bad2cf1f68253f3ec0290e5e605e7814b04
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
PalEON-Project/composition
|
ed4174da595df34d4c75e73b2179111fe8436a09
|
0deef92068a1cee59f965bb29bbebad8bab5a2fe
|
refs/heads/master
| 2021-01-19T22:33:29.118627
| 2016-02-05T03:12:50
| 2016-02-05T03:12:50
| 17,234,057
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,795
|
r
|
create_mask.R
|
#!/usr/bin/Rscript
# Create a netCDF file (paleonMask.nc) holding the PalEON mask layers --
# water fraction, subregion index, and in-domain indicator -- on the PalEON
# Albers grid (Great Lakes St Lawrence Albers, Proj4 +init=epsg:3175).
source('config')  # defines dataDir / codeDir used below
# library() fails fast when a dependency is missing; require() only warns
# and returns FALSE, letting the script crash later with a confusing error.
library(raster)
library(ncdf4)
# Read the rasters and transpose so the matrices are indexed x-by-y.
region = t(as.matrix(raster(file.path(dataDir, 'paleonDomain.tif'))))
water = t(as.matrix(raster(file.path(dataDir, 'water.tif'))))
region[is.na(region)] <- 0
domain <- region > 0  # in-domain wherever a subregion index is assigned
source(file.path(codeDir, 'set_domain.R'))  # provides xGrid / yGrid
# image.plot(1:296, 1:180, region[,180:1])
x_dim <- ncdim_def("x", "meters_east", xGrid, longname = 'x coordinate of grid cell centroid in Albers projection (Great Lakes St Lawrence Albers [Proj4 +init=epsg:3175])')
y_dim <- ncdim_def("y", "meters_north", yGrid, longname = 'y coordinate of grid cell centroid in Albers projection (Great Lakes St Lawrence Albers [Proj4 +init=epsg:3175])')
vars <- list()
vars[[1]] <- ncvar_def(name = 'water', dim = list(x_dim, y_dim),
                       units = 'unitless (proportion from 0 to 1)',
                       longname = 'percentage of water in grid cell',
                       prec = 'integer')
vars[[2]] <- ncvar_def(name = 'subregion', dim = list(x_dim, y_dim),
                       units = 'unitless (region index)',
                       longname = 'PalEON subregions',
                       prec = 'integer')
vars[[3]] <- ncvar_def(name = 'domain', dim = list(x_dim, y_dim),
                       units = 'unitless (0/1 indicator)',
                       longname = 'indicator of whether grid cell is in PalEON domain',
                       prec = 'integer')
ptr <- nc_create(file.path(dataDir, 'paleonMask.nc'), vars)
# Reverse the y index order before writing -- presumably so the raster's
# north-up row order matches the netCDF y-coordinate direction; confirm
# against set_domain.R's yGrid ordering.
ncvar_put(ptr, 'water', water[ , dim(water)[2]:1], c(1, 1), c(-1, -1))
ncvar_put(ptr, 'subregion', region[ , dim(region)[2]:1], c(1, 1), c(-1, -1))
ncvar_put(ptr, 'domain', domain[ , dim(domain)[2]:1], c(1, 1), c(-1, -1))
nc_close(ptr)
|
b12bb6e43928e647defee3c8de8536e192ebd72d
|
dd2da0383712af02a76300c92656124160848aee
|
/R/assesmentstringprocessing.R
|
c018c73da8c651f2f6c9b6378c194098ee07c66a
|
[] |
no_license
|
mvillamea/DataScience-Exercises
|
1f429cf839f09db2a98442f1ed0c383dd1f71aad
|
38d73468cdd6a49333782fd15973aff0a15fa82d
|
refs/heads/master
| 2023-02-21T15:15:24.630373
| 2021-01-21T20:21:40
| 2021-01-21T20:21:40
| 279,113,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,246
|
r
|
assesmentstringprocessing.R
|
s <- c("5'10", "6'1\"", "5'8inches", "5'7.5")
tab <- data.frame(x = s)
#allow you to put the decimals in a third column called “decimal”. In this code, you extract three groups: one digit for “feet”, one or two digits for “inches”, and an optional decimal point followed by at least one digit for “decimal”.
extract(data = tab, col = x, into = c("feet", "inches", "decimal"),
regex = "(\\d)'(\\d{1,2})(\\.\\d+)?")
#pruebas para hacer tablas
schedule<- matrix(c("day", "staff", "Monday", "Mandy, Chris and Laura", "Tuesday", "Steve, Ruth and Frank"),ncol=2,byrow=TRUE)
schedule<-as.table(schedule)
# You have the following table, schedule. (The pasted console output below
# is commented out: the ">" prompt and the bare table body are not valid R
# and made the file unparseable.)
#   > schedule
#       day                  staff
#    Monday Mandy, Chris and Laura
#   Tuesday  Steve, Ruth and Frank
schedule<-data.frame(day=c("Monday", "Tuesday"), staff=c("Mandy, Chris and Laura", "Steve, Ruth and Frank"))
#2 commands would properly split the text in the “staff” column into each individual name
#This regex will correctly split each “staff” string into three names by properly accounting for the space after the comma as well as the spaces before and after the “and”, but it’s not the only one.
str_split(schedule$staff, ", | and ")
#This regex command is the same as the one above, except that the spaces are written as \\s, but it’s not the only one.
str_split(schedule$staff, ",\\s|\\sand\\s")
#code that would successfully turn your “Schedule” table into the following tidy table
# Expected tidy result (pasted console output, commented out so the file
# parses as R):
#   > tidy
#   day     staff
#   <chr>   <chr>
#   Monday  Mandy
#   Monday  Chris
#   Monday  Laura
#   Tuesday Steve
#   Tuesday Ruth
#   Tuesday Frank
tidy <- schedule %>%
mutate(staff = str_split(staff, ", | and ")) %>%
unnest()
#Using the gapminder data, you want to recode countries longer than 12 letters in the region “Middle Africa” to their abbreviations in a new column, “country_short”.
dat <- gapminder %>% filter(region == "Middle Africa") %>%
mutate(country_short = recode(country,
"Central African Republic" = "CAR",
"Congo, Dem. Rep." = "DRC",
"Equatorial Guinea" = "Eq. Guinea"))
#otro ej
#Import raw Brexit referendum polling data from Wikipedia:
library(rvest)
library(tidyverse)
library(stringr)
url <- "https://en.wikipedia.org/w/index.php?title=Opinion_polling_for_the_United_Kingdom_European_Union_membership_referendum&oldid=896735054"
tab <- read_html(url) %>% html_nodes("table")
polls <- tab[[5]] %>% html_table(fill = TRUE)
#Update polls by changing the column names to c("dates", "remain", "leave", "undecided", "lead", "samplesize", "pollster", "poll_type", "notes") and only keeping rows that have a percent sign (%) in the remain column.
polls<-polls%>% rename(
"dates"="Date(s) conducted",
"remain"="Remain" ,
"leave"="Leave" ,
"undecided"="Undecided",
"lead"="Lead" ,
"samplesize"="Sample" ,
"pollster"="Conducted by",
"poll_type"="Polling type",
"notes"="Notes"
) %>%filter(str_detect(remain, "^\\d{1,2}?\\.?\\d?\\%$"))
#The remain and leave columns are both given in the format "48.1%": percentages out of 100% with a percent symbol.
#These commands converts the remain vector to a proportion between 0 and 1
as.numeric(str_replace(polls$remain, "%", ""))/100 #cambia el % por un espacio y al resultado lo divide por 100
parse_number(polls$remain)/100 #drops any non-numeric characters before or after the first number and then divides by 100.
#convert "N/A" in the undecided column to 0
str_replace(polls$undecided, "N/A", "0")
#extracts the end day and month when inserted into the blank in the code above
temp <- str_extract_all(polls$dates, _____)
end_date <- sapply(temp, function(x) x[length(x)]) # take last element (handles polls that cross month boundaries)
temp <- str_extract_all(polls$dates, "\\d+\\s[a-zA-Z]+")
end_date <- sapply(temp, function(x) x[length(x)])
temp <- str_extract_all(polls$dates, "[0-9]+\\s[a-zA-Z]+")
end_date <- sapply(temp, function(x) x[length(x)])
temp <- str_extract_all(polls$dates, "\\d{1,2}\\s[a-zA-Z]+")
end_date <- sapply(temp, function(x) x[length(x)])
temp <- str_extract_all(polls$dates, "\\d+\\s[a-zA-Z]{3,5}")
end_date <- sapply(temp, function(x) x[length(x)])
|
e6dfe711c0af41fd92113e4a7c90d6e8d491376b
|
f839b94d8de824a4a2ff7d84f4daecb1540abd17
|
/scripts/main-data-cleaning.R
|
c5d833c46aea917c45d638c9bb2ba034507eaf3b
|
[] |
no_license
|
dxre-v3/college-scorecard
|
de6117f23ec47378f4ebf640882ffb06f2446e42
|
e9dcd88a3b3221f177998e9cfbe0ed9c4c8847dc
|
refs/heads/master
| 2022-10-09T18:51:06.469948
| 2020-06-12T16:44:51
| 2020-06-12T16:44:51
| 262,709,849
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,627
|
r
|
main-data-cleaning.R
|
library(tidyverse)
library(janitor)

# Clean the College Scorecard "Most Recent Cohorts" extract:
#   1. read once with basic NA strings to harvest the parsing problems
#      (these are mostly "PrivacySuppressed" cells),
#   2. re-read with "PrivacySuppressed" treated as NA and a larger guess_max,
#   3. hand-pick the variables of interest and save the cleaned data.

# Read in main data set
initial_read <- read_csv('data/unprocessed/Most-Recent-Cohorts-All-Data-Elements.csv',
                         na = c("", "NA", "NULL"))
# Parsing problems from the first read, joined to the data dictionary so each
# offending column carries its human-readable name and declared type.
privsupress <- problems(initial_read)
terms <- read_rds('data/processed/institutions.rds')
privsupress <- privsupress %>%
  left_join(terms, by = c("col" = "variable_name")) %>%
  mutate(recorded = "NA") %>%
  select(name_of_data_element, variable_name = col, api_data_type, recorded, actual)
# Keeping track of how things are saved
# ============== Only re run if necessary ======================
# write_rds(privsupress, 'data/processed/privacy_supressed.rds')
#================================================================

# Try with several custom NA values ---------------------------------------
second_read <- read_csv('data/unprocessed/Most-Recent-Cohorts-All-Data-Elements.csv',
                        na = c("", "NA", "NULL", "PrivacySuppressed"))
# New Problems
problems(second_read) %>%
  left_join(terms, by = c("col" = "variable_name")) %>%
  select(name_of_data_element, variable_name = col, api_data_type, expected, actual) %>%
  view()

# Change guess max size ---------------------------------------------------
# guess_max = 6000 makes read_csv inspect more rows before guessing column
# types, which resolves the remaining parsing problems.
colleges <- read_csv('data/unprocessed/Most-Recent-Cohorts-All-Data-Elements.csv',
                     na = c("", "NA", "NULL", "PrivacySuppressed"),
                     guess_max = 6000)
# Our data is perfectly fine now, but we need to get the categories we actual want to work with
# To do that we are actually going to spend a bit of time with our terms dataset

# Variable choices --------------------------------------------------------
terms %>% count(dev_category)
# Why do we have a bunch of NA values?
terms %>%
  filter(is.na(dev_category)) %>%
  view
# It looks like they are the levels of certain variables. I will fill them later
# if need be. For now, they are fine.

# Root variables ----------------------------------------------------------
terms %>%
  filter(dev_category == "root") %>%
  view
# Useful = UNITID, LATITUDE, LONGITUDE

# Repayment ---------------------------------------------------------------
terms %>%
  filter(dev_category == "repayment") %>%
  view
term_small <- terms %>%
  select(name_of_data_element, dev_category, variable_name, label)

# consider all the variables to select the most interesting ones -----------------
term_small %>% filter(dev_category == "academics") %>% view
term_small %>% filter(dev_category == "admissions") %>% view
term_small %>% filter(dev_category == "aid") %>% view
term_small %>% filter(dev_category == "completion") %>% view
term_small %>% filter(dev_category == "cost") %>% view
term_small %>% filter(dev_category == "earnings") %>% view
term_small %>% filter(dev_category == "repayment") %>% view
term_small %>% filter(dev_category == "root") %>% view
term_small %>% filter(dev_category == "school") %>% view
term_small %>% filter(dev_category == "student") %>% view

# create sets of strings to save the interesting columns
academics <- str_c("PCIP", c(13, 14, 23, 24, 26:28, 40, 42, 45, 50:54))
select_1 <- c("ADM_RATE", "SAT_AVG", "PCTPELL", "PCTFLOAN", "GRAD_DEBT_MDN",
              "C150_L4", "NPT4_PUB", "NPT4_PRIV", "COMPL_RPY_5YR_RT", "UNITID",
              "LONGITUDE", "LATITUDE", "MD_EARN_WNE_P6", "MN_EARN_WNE_P6",
              "COUNT_NE_P6", "COUNT_WNE_P6")
# Every variable whose name ends in "_P6" (measured 6 years after entry).
earnings <- str_match(term_small$variable_name, ".*_P6")
earnings <- earnings[!is.na(earnings)]
school <- c("INSTNM", "CITY", "STABBR", "SCH_DEG", "NUMBRANCH",
            "PREDDEG", "CONTROL", "LOCALE", "RELAFFIL", "AVGFACSAL",
            "PFTFAC", "ICLEVEL", "TUITFTE", "WOMENONLY", "MENONLY", "PBI",
            "HBCU", "ANNHI", "TRIBAL", "AANAPII", "NANTI")
# Student has a lot of matches so we will create it's own data set to filter from
student_set <- term_small %>% filter(dev_category == "student")
#
# Save the regex as a string and then filter for the names
match_vars <- "(^PAR_ED.*)|(^UGDS_.*)|(^APPL_.*)|(^DEP_INC.*)|(^FSEND.*)"
par <- str_match(student_set$variable_name, match_vars)
par <- par[!is.na(par)]
# add those names to the other chosen varaibles and save
student <- c("RET_FT4", "UG25ABV", "AGE_ENTRY", "FEMALE",
             "VETERAN", "MD_FAMINC", "FAMINC", "UG12MN", "G12MN", par)
# save the terms
selected_vars <- terms %>%
  filter(variable_name %in% c(academics, select_1, earnings, school, student))
# ++++++++ Saving the short version of the data ++++++++++++
# write_rds(selected_vars, "data/processed/terms.rds")
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

# Get the selected variable names
variable_filter <- selected_vars$variable_name
# Filter on those names.
# all_of() makes the external-character-vector selection explicit; passing
# `variable_filter` bare is ambiguous and deprecated in tidyselect.
small_colleges <- colleges %>%
  select(all_of(variable_filter))
# Take a quick looks at the data
small_colleges %>% skimr::skim()
# Remove the empty columns
small_colleges <- small_colleges %>%
  janitor::remove_empty(which = "cols") %>%
  janitor::clean_names()
# Keep institutions with preddeg == 3 and control 1 or 2 (per the Scorecard
# dictionary these are predominantly-bachelor's, public/private-nonprofit --
# confirm against the data dictionary).
small_colleges <- small_colleges %>%
  filter(preddeg == 3 & control %in% c(1, 2))
# -------------------------------------------------------------------------
# ========================== SAVE CLEANED DATA ============================
# -------------------------------------------------------------------------
# write_rds(small_colleges, "data/processed/colleges.rds")
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# terms %>%
#   filter(label == "Private nonprofit")
|
1746e476d6bdb3da4fa40c0fd85fe9a19904d83b
|
25c86104b6123005f91ec250a6958c13112e8885
|
/R/dilog.R
|
bc8cb422caa76492556f0abb302f13ffa31de76d
|
[] |
no_license
|
cran/HMMcopula
|
32770d797f93f4362399bed5c42194f757897ec7
|
fd96e87761cb8dff3ffc1b291f676647c4c70209
|
refs/heads/master
| 2023-04-09T16:21:37.078422
| 2020-04-21T06:50:02
| 2020-04-21T06:50:02
| 155,122,276
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 334
|
r
|
dilog.R
|
#' @title Dilogarithm function
#'
#' @description This function computes the dilogarithm of a number by
#'   numerically integrating log(y) / (1 - y) from 1 to x.
#'
#' @param x a real number
#'
#' @return \item{out}{dilogarithm}
#'
#' @export
dilog <- function(x) {
  # The point x = 1 is special-cased to NA (the integration range collapses).
  if (x == 1) {
    return(NA)
  }
  integrand <- function(y) log(y) / (1 - y)
  stats::integrate(integrand, lower = 1, upper = x)$value
}
|
a6fdce34c958d049d734a28d9aa7f6b13ac2c3d2
|
6a21f44be0787a48dcc07239bb306be76150fa72
|
/demo.R
|
41f144b46b85e99b9b12a0942dd476f2557404de
|
[] |
no_license
|
lozsandino/MarkovMusic
|
ff7ada79ad31d0b22f66083d382720f4324b43e1
|
e42bf50f996f3cd7f6a25b5a0e547d8019b95036
|
refs/heads/master
| 2020-03-14T08:30:26.391904
| 2018-04-29T20:09:36
| 2018-04-29T20:09:36
| 131,526,283
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,876
|
r
|
demo.R
|
# Demo: generate a constrained blues melody from first- and second-order
# Markov chains trained on an example MusicXML file.
library(MarkovMusic)
# Read the training data.
Cm <- readMusicXML(paste0(system.file(package = "MarkovMusic"),
                          "/TrainingBlues.xml"))
Cm <- Cm$song[, "note"] # Keep only the pitch information.
Cm <- c(Cm, rev(Cm)) # Melody in the C blues minor scale.
Fm <- Cm + 5 # Melody in the F blues minor scale (up 5 semitones).
Gm <- Fm + 2 # Melody in the G blues minor scale (2 more semitones up).
# Convert the melody from MIDI numbers to letter (Anglo-Saxon) notation.
Cm <- fromMidiToNote(Cm)
Fm <- fromMidiToNote(Fm)
Gm <- fromMidiToNote(Gm)
# Common state space for the three sequences.
state.space <- unique(unlist(c(Cm, Fm, Gm)))
# Transition matrices of the processes.
M0Cm <- priorVec(Cm, state.space = state.space) # Initial probabilities.
M1Cm <- transMat(Cm, state.space = state.space) # Order 1.
M2Cm <- transMat(Cm, order = 2, state.space = state.space) # Order 2.
M2Fm <- transMat(Fm, order = 2, state.space = state.space)
M2Gm <- transMat(Gm, order = 2, state.space = state.space)
# Unconstrained Markov chains: one matrix per note position.
M <- c(list(M0Cm), list(M1Cm), rep(list(M2Cm), 46), # bars 1-4.
       rep(list(M2Fm), 24), # 24 notes, bars 5 and 6.
       rep(list(M2Cm), 24), # 24 notes, bars 7 and 8.
       rep(list(M2Gm), 12), # 12 notes, bar 9.
       rep(list(M2Fm), 12),
       rep(list(M2Cm), 24))
# Constraint definitions: list(position, required note).
constraints <- list(list(1, "C"), # The first note must be C.
                    list(49, "F"), # Note 49 must be F.
                    list(73, "C"),
                    list(97, "G"),
                    list(109, "F"),
                    list(121, "C"),
                    list(144, "C"))
# Arc consistency.
Z <- arcConsistency(M, constraints)
# Generate the melody (fixed seed for reproducibility).
set.seed(69)
melody <- genMelody(Z, state.space)
writeMusicXML(melody, file = "MarkovMelody2_69.xml",
              beat = 12, beat.type = 8)
|
fe4720f5607a61cff32905302cdf8af44d8df2a8
|
b3921db7e6ac213db389b4f2f5c4cb19e32a3411
|
/ECP/boxp_k_ecp_fast.R
|
bc76193a70be13419c3e8533aec4afdace6af8e1
|
[] |
no_license
|
12ramsake/MVT-WBS-RankCUSUM
|
cebb8c84aeec47c57d816b3281baca5cfd326a2b
|
e227f96fbf8ac752d78c6f755b71d79647297199
|
refs/heads/master
| 2023-08-09T08:13:50.768287
| 2023-07-19T20:02:00
| 2023-07-19T20:02:00
| 195,120,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,006
|
r
|
boxp_k_ecp_fast.R
|
# Post-processing script: build boxplots of change-point estimation error
# (k-hat/N - theta) from saved ECP simulation results.
library(MASS)
library(stringi)
library(xtable)
# NOTE(review): dirr is empty, so setwd("") will error; point it at the
# directory holding the .Rda result files before running.
dirr<-""
setwd(dirr)
# Simulation grid: series lengths, number of replications, change-point
# locations, dimensions, and error distributions.
Ns<-c(1000,2500,5000)
sim.size=100
thetas<-list(c(.333,.666),
             c(.25,.5,.75),
             c(1/6,2/6,3/6,4/6,5/6))
ds=c(2,3,5,10)
distributions1=1:3
names(distributions1)<-c("Normal", "Cauchy", "Skew Normal")
##Create Parameter Vector
# Enumerate every (N, theta, distribution, d) combination; each row stores
# the four index positions into the vectors above.
numUniqueRuns<-length(Ns)*length(thetas)*length(ds)*length(distributions1)
paramterIndices<-matrix(0,ncol=4,nrow=numUniqueRuns)
curr=0
for(i1 in 1:length(Ns)){
  for(i2 in 1:length(thetas)){
    for(i3 in 1:length(distributions1)){
      for(i4 in 1:length(ds)){
        curr=curr+1
        paramterIndices[curr,]=c(i1,i2,i3,i4)
      }
    }
  }
}
distributions<-list(1,2,3)
names(distributions)=c("Normal","Cauchy","Skew Normal")
# Summarise estimated change points over simulation replicates.
#
# results: list with one element per replicate, each a vector of detected
#          change-point indices (possibly empty or NULL).
# N:       series length.
# theta:   true change-point locations as fractions of N.
#
# Returns a matrix (one row per replicate, one column per true change point)
# of errors (khat - k)/N; NA marks an undetected change point.
getSummaryK <- function(results, N, theta) {
  l <- length(theta)                        # number of true change points
  lhat <- unlist(lapply(results, length))   # number detected per replicate
  ## k-khat
  trueKs <- floor(N * theta)                # true change-point indices
  khat <- lapply(results, sort)
  # matrix of k-khat, one col for each cp
  khatk <- matrix(0, nrow = length(lhat), ncol = l)
  for (i in seq_along(results)) {
    if (lhat[[i]] == l) {
      # All change points detected: align sorted estimates with the truth.
      khatk[i, ] <- khat[[i]] - trueKs
    }
    else if (lhat[[i]] > l) {
      # Over-detection: greedily match each true k to the nearest estimate
      # that has not been claimed yet; leftovers are spurious detections.
      spurious <- seq_along(khat[[i]])
      for (j in seq_along(trueKs)) {
        check <- abs(khat[[i]] - trueKs[j])
        ind <- which.min(check)
        while (sum(ind == spurious) == 0) {
          check[ind] <- 10000  # sentinel: exclude an already-claimed estimate
          ind <- which.min(check)
          print("looping")
        }
        spurious <- spurious[spurious != ind]
        khatk[i, j] <- khat[[i]][ind] - trueKs[j]
      }
    }
    else if (is.null(khat[[i]]) || length(khat[[i]]) == 0) {
      # BUG FIX: the original assigned rep(NA, l) to khat[[i]] here, leaving
      # the khatk row at its initial zeros -- i.e. a replicate that detected
      # nothing was reported as perfectly estimated. Mark the row NA instead.
      khatk[i, ] <- rep(NA, l)
    }
    else {
      # Under-detection: match each estimate to the nearest unmatched true k;
      # true change points left unmatched are recorded as NA.
      undett <- 1:l
      for (j in seq_len(lhat[[i]])) {
        check <- abs(khat[[i]][j] - trueKs)
        ind <- which.min(check)
        # if already detected
        while (sum(ind == undett) == 0) {
          check[ind] <- 10000
          ind <- which.min(check)
          print("looping2")
        }
        # remove detected point
        undett <- undett[undett != ind]
        khatk[i, ind] <- khat[[i]][j] - trueKs[ind]
      }
      # undetected
      if (!is.null(undett)) {
        khatk[i, undett] <- rep(NA, length(undett))
      }
    }
  }
  # Report errors as a fraction of the series length.
  return(khatk / N)
}
# Convenience wrapper: summarise the currently loaded simulation output.
# Relies on the globals `N`, `theta`, and `results` (presumably loaded from
# the .Rda file in the calling loop) being set before it is called.
getDist <- function() {
  getSummaryK(results, N, theta)
}
# Collect the k-hat error distributions for the d = 10, Normal runs only and
# combine them column-wise for the boxplot below.
output=NULL
cols=NULL
for(i in 1:36){
  params<-paramterIndices[i,]
  N=Ns[params[1]]
  theta=thetas[[params[2]]]
  distr=distributions1[[params[3]]]
  d=ds[params[4]]
  # NOTE(review): `mod` is not defined anywhere in this script as shown, and
  # numInt is never used below -- presumably leftovers from the simulation
  # script; confirm before running.
  numInt=floor(log(Ns[params[1]]))*mod
  dName=names(distributions1)[params[3]]
  if(d==10&&distr==1){
    # Load the saved simulation results (presumably defines `results`,
    # which getDist() reads).
    fileName<-paste0(N,"_",length(theta),"_",dName,"_",d,"_ecp_delta_simsize_",sim.size,sep="")
    fileName1<-paste(dirr,fileName,".Rda",sep="")
    load(fileName1)
    vals<-getDist()
    output<-cbind(output,vals)
    cols=c(cols,params[3])
  }
}
par(mfrow=c(1,1),mgp=c(1.5,0,0),mar=c(0,2,0,0)+5)
# Draw grouped boxplots of (k-hat/N - theta), grouping the columns by the
# number of change points l = 2, 3, 5.
# NOTE(review): TeX() comes from the latex2exp package, which is not loaded
# in this file -- confirm it is attached elsewhere before running.
makeBP<-function(vals){
  boxplot(vals,ylim=c(-.1,.1),border=1,xaxt="n",yaxt="n",frame=F,lwd=2,ylab="")
  mtext(TeX("$\\hat{k}/N-\\theta$"),2,cex=2,line=1)
  axis(2,line=-1.5,cex=1.75)
  txt <-rep(c("2","3","5"),length.out=9)
  # Unicode \u2113 is the script letter "l" used as the axis label.
  mtext(stri_unescape_unicode('\\u2113'),1,line=2,cex=2.75)
  # axis(1.5, at=0.1, tick=F, labels=stri_unescape_unicode('\\u2113'), mgp=c(0,0,0),cex.axis=2)
  # axis(1.5, at=0.15, tick=F, labels=":", mgp=c(0,0,0),cex.axis=2)
  #
  axis(1.5, at=c(1,1.5,2), tcl=0, labels=c("","2",""), mgp=c(0,1,0),cex.axis=1.75)
  axis(1.5, at=c(3,4,5), tcl=0, labels=c("","3",""), mgp=c(0,1,0),cex.axis=1.75)
  axis(1.5, at=c(6,8.5,12), tcl=0, labels=c("","5",""), mgp=c(0,1,0),cex.axis=1.75)
}
# Preview on screen, then write the same figure to a PDF.
makeBP(output)
Cairo::CairoPDF(file="fast_ECP_kpb.pdf",height=6,width=10)
makeBP(output)
dev.off()
|
9a346a2ccf882890afcb8ad2cc7c4e07b37a75bc
|
ddd5dd6898d18fa111a54cfa7a130b7bc1b8718a
|
/man/mode.Rd
|
7215c3c004f5aa2b5f2c7f19a99488ca2ada5048
|
[
"MIT"
] |
permissive
|
ben-williams/funcr
|
ed30c0863aabdc9624ae6506100bc9ceb3eae65b
|
c828d4f464d2d6921644352cc3e4fd6891e9b9fb
|
refs/heads/master
| 2021-07-09T05:09:22.053009
| 2020-11-18T15:13:10
| 2020-11-18T15:13:10
| 214,511,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 366
|
rd
|
mode.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mode.R
\name{mode}
\alias{mode}
\title{Get the mode of a variable}
\usage{
mode(x)
}
\arguments{
\item{x}{= input variable}
}
\value{
mode
}
\description{
Get the mode of a variable
}
\examples{
df <- data.frame(year = 1970:2019,
catch = rnorm(50, 10, 2))
mode(df$catch)
}
|
707d5676ad6b44249547929d18030b66dd6f2d76
|
aa26052173994c5ce2363f11340f771d83d380a4
|
/R/printFFTrees_function.R
|
43564fb9e28ebde1a7fece969826607a5fb9a1ec
|
[] |
no_license
|
ronypik/FFTrees
|
ff92103e0c7d3105d9da96580da66d311e5a71ff
|
21421d9e7a48db3508bc721cd5b2ed9e60b0b19b
|
refs/heads/master
| 2021-01-11T16:31:20.673528
| 2017-01-26T08:04:48
| 2017-01-26T08:04:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,050
|
r
|
printFFTrees_function.R
|
#' Prints summary information from an FFTrees object
#'
#' @description Printing function for an FFTrees object. Reports how many
#'   trees the object holds, which training tree maximizes the goal
#'   statistic (and the cues it uses), and a table of accuracy statistics
#'   for the training data plus, when present, the test data.
#' @param x FFTrees. A FFTrees object created from FFTrees()
#' @param ... additional arguments passed to print.
#' @return Invisibly returns the printed summary data frame.
#' @export
print.FFTrees <- function(
  x = NULL,
  ...
) {
  goal <- x$params$goal
  n.trees <- nrow(x$tree.stats$train)
  n.cues.total <- x$data.desc$train$features
  n.train.ex <- x$data.desc$train$cases
  # Best training tree: lowest-numbered tree maximizing the goal statistic.
  train.tree <- min(x$tree.stats$train$tree[x$tree.stats$train[[goal]] == max(x$tree.stats$train[[goal]])])
  # Cues used by the best tree, and by all trees, as "cue1,cue2,..." strings.
  train.cues <- paste(unique(unlist(strsplit(x$tree.stats$train$cues[train.tree], ";"))), collapse = ",")
  train.cues.n <- length(unique(unlist(strsplit(train.cues, ","))))
  #
  all.cues <- paste(unique(unlist(strsplit(x$tree.stats$train$cues, ";"))), collapse = ",")
  all.cues.n <- length(unique(unlist(strsplit(x$tree.stats$train$cues, ";"))))
  # Training statistics of the best tree (rounded for display).
  train.sens <- round(x$tree.stats$train$sens[train.tree], 2)
  train.far <- round(x$tree.stats$train$far[train.tree], 2)
  train.spec <- 1 - round(x$tree.stats$train$far[train.tree], 2)
  train.dp <- round(x$tree.stats$train$dprime[train.tree], 2)
  train.bacc <- round(x$tree.stats$train$bacc[train.tree], 2)
  train.frugality <- round(x$tree.stats$train$frugality[train.tree], 2)
  train.mcpc <- round(mean(x$levelout$train[,train.tree]), 2)
  train.auc <- round(x$auc$FFTrees[1], 2)
  train.acc <- round((x$tree.stats$train$hi[train.tree] + x$tree.stats$train$cr[train.tree]) / x$tree.stats$train$n[train.tree], 2)
  if(is.null(x$tree.stats$test) == FALSE) {
    # Test data present: compute the same statistics on the test set and
    # build a two-column summary table.
    n.test.ex <- x$data.desc$test$cases
    test.sens <- round(x$tree.stats$test$sens[train.tree], 2)
    test.far <- round(x$tree.stats$test$far[train.tree], 2)
    test.spec <- 1 - round(x$tree.stats$test$far[train.tree], 2)
    test.bacc <- round(x$tree.stats$test$bacc[train.tree], 2)
    test.frugality <- round(x$tree.stats$test$frugality[train.tree], 2)
    test.mcpc <- round(mean(x$levelout$test[,train.tree]), 2)
    test.auc <- round(x$auc$FFTrees[2], 2)
    test.acc <- round((x$tree.stats$test$hi[train.tree] + x$tree.stats$test$cr[train.tree]) / x$tree.stats$test$n[train.tree], 2)
    summary.df <- data.frame("train" = c(n.train.ex,
                                         train.frugality,
                                         train.mcpc,
                                         train.acc,
                                         train.bacc,
                                         train.sens,
                                         train.spec),
                             "test" = c(n.test.ex,
                                        test.frugality,
                                        test.mcpc,
                                        test.acc,
                                        test.bacc,
                                        test.sens,
                                        test.spec)
    )
  }
  if(is.null(x$tree.stats$test)) {
    # No test data: show placeholders and a train-only summary table.
    n.test.ex <- 0
    test.frugality <- "--"
    test.mcpc <- "--"
    test.sens <- "--"
    test.far <- "--"
    test.spec <- "--"
    test.auc <- "--"
    test.acc <- "--"
    test.bacc <- "--"
    summary.df <- data.frame("train" = c(n.train.ex,
                                         train.frugality,
                                         train.mcpc,
                                         train.acc,
                                         train.bacc,
                                         train.sens,
                                         train.spec)
    )
  }
  rownames(summary.df) <- c("n", "frugality", "mcpc", "acc", "bacc", "sens", "spec")
  summary.text <- paste("FFTrees object containing ", n.trees, " trees using up to ", all.cues.n,
                        " predictors of an original ", n.cues.total, sep = "")
  # BUG FIX: test.auc is never NULL at this point (it is set to "--" when
  # there are no test data), so the original is.null(test.auc) check always
  # produced the two-value format. Branch on the presence of test data.
  if(is.null(x$tree.stats$test)) {
    auc.text <- paste("FFTrees AUC: (Train = ", train.auc, ")", sep = "")
  } else {
    auc.text <- paste("FFTrees AUC: (Train = ", train.auc, ", Test = ", test.auc, ")", sep = "")
  }
  accuracy.text <- paste("Best training tree: #", train.tree, ", using ", train.cues.n, " cues {", train.cues, "}:", sep = "")
  print(summary.text)
  #print(auc.text)
  print(accuracy.text)
  print(summary.df)
}
|
d080663360fe89fc8a6618225f97a594ab023fc0
|
184a5b70c5bf8642501c82610e8ea5562445029b
|
/R/norm.R
|
2816d2d9feb4d4e5f208bc82ce1238236469b1cd
|
[] |
no_license
|
cran/QuantumOps
|
4910e53dda44803981801cbe1545e4ab63154538
|
35e2a8be5a6bbefbdc53a732eb6145a04dcd9e8e
|
refs/heads/master
| 2020-04-07T09:57:13.065211
| 2020-02-03T08:20:18
| 2020-02-03T08:20:18
| 158,270,510
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 148
|
r
|
norm.R
|
# Compute the norm of a wavefunction: the square root of the inner product
# of the state vector with itself.
#' @export
norm <- function(v){
  self.overlap <- inner(v, v)
  # Index [1] kept from the original so a single value is returned even if
  # `inner` yields more than one element.
  sqrt(self.overlap)[1]
}
|
385b028a27bfa9a77e029578ca8f06da83e147c3
|
e4cb1e8aadcd175c4fd23d38b2689f0740c4f21d
|
/basketball_win_prediction/basketball_scrape.R
|
fe694b7dadb1e28c0101179f8e6299e31ad55dab
|
[] |
no_license
|
thpossidente/COGS-298-Project
|
52add812856e4a3713f65045b92f03173a1ad9de
|
5dc626f8111cbb7c7d679d680d7284f3b5a564af
|
refs/heads/master
| 2021-01-24T09:50:28.592240
| 2018-11-10T17:44:38
| 2018-11-10T17:44:38
| 123,028,382
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,514
|
r
|
basketball_scrape.R
|
library("rvest")
library(dplyr)

# Scrape the 2017-18 NBA season's per-game team box-score totals from
# basketball-reference.com and save them to Data/GameData.csv.
#
# The original script repeated the same scraping loop once per month; the
# logic is factored into two helpers so each month is a single entry in the
# `months` list below.

# Column names of the box-score footer as scraped; the trailing "Blank"
# column is a placeholder that is dropped immediately after naming.
stat.names <- c("Minutes", "FG", "FGA", "FGP", "ThreePoint",
                "ThreePointA", "ThreePointP", "FT", "FTA",
                "FTP", "ORB", "DRB", "TRB", "AST",
                "STL", "BLK", "TOV", "PF", "PTS", "Blank")

# Scrape one team's footer totals from an open game page.
#   game.link: rvest session positioned on the box-score page.
#   team.name: team abbreviation used in the page's CSS ids (from teams*.csv).
# Returns a one-row data frame whose row name is the team's heading text.
scrape.team <- function(game.link, team.name) {
  team <- game.link %>%
    html_nodes(paste("#all_box_", team.name, "_basic h2", sep = "")) %>%
    html_text()
  totals <- game.link %>%
    html_nodes(paste("#box_", team.name, "_basic tfoot .right", sep = "")) %>%
    html_text()
  totals.df <- totals %>% as.data.frame()
  names(totals.df) <- c(team)          # becomes the row name after transposing
  totals.df <- data.frame(t(totals.df))
  colnames(totals.df) <- stat.names
  subset(totals.df, select = -c(Blank))
}

# Scrape every game listed on one month's schedule page.
#   url:       schedule page for the month.
#   teams.csv: csv with Team1/Team2 abbreviations, one row per game, in the
#              same order as the schedule page's box-score links.
#   n.games:   number of box-score links to follow on the page.
# Returns a data frame with two rows (one per team) per game.
scrape.month <- function(url, teams.csv, n.games) {
  session <- html_session(url)
  team.names <- read.csv(teams.csv)
  rows <- vector("list", 2 * n.games)
  for (i in seq_len(n.games)) {
    # Follow the i-th "Box Score" link on the schedule page.
    game.link <- follow_link(session, css = paste("tr:nth-child(", i, ") .center a"))
    rows[[2 * i - 1]] <- scrape.team(game.link, team.names$Team1[[i]])
    rows[[2 * i]] <- scrape.team(game.link, team.names$Team2[[i]])
  }
  do.call(rbind, rows)
}

base.url <- 'https://www.basketball-reference.com/leagues/'
months <- list(
  list(url = paste0(base.url, 'NBA_2018_games.html'),           # October
       csv = "Data/teamsOctober.csv", n = 104),
  list(url = paste0(base.url, 'NBA_2018_games-november.html'),
       csv = "Data/teamsNovember.csv", n = 213),
  list(url = paste0(base.url, 'NBA_2018_games-december.html'),
       csv = "Data/teamsDecember.csv", n = 227),
  list(url = paste0(base.url, 'NBA_2018_games-january.html'),
       csv = "Data/teamsJanuary.csv", n = 216),
  list(url = paste0(base.url, 'NBA_2018_games-february.html'),
       csv = "Data/teamsFebruary.csv", n = 160),
  # Note: not all March games had been played; only the first 83 are scraped.
  list(url = paste0(base.url, 'NBA_2018_games-march.html'),
       csv = "Data/teamsMarch.csv", n = 83)
)

main.table <- do.call(rbind, lapply(months, function(m) {
  scrape.month(m$url, m$csv, m$n)
}))

# All data is in. Drop the minutes column and write out the csv.
main.table <- subset(main.table, select = -c(Minutes))
write.csv(main.table, "Data/GameData.csv")
|
68bbd7da5864bd391aa368c96e3efae225871cfd
|
4c29b2e8cd6d5d12889958a408ecde2498a25eba
|
/man/bandit-package.Rd
|
e195297103859ac3d5f12d02860d5f7f3d213a14
|
[] |
no_license
|
ssimeonov/bandit-1
|
f5fc38cc2c4e97dca8039b65fba89dddc8a76422
|
485deccebf06b77d68d00df45b877dc28da45e33
|
refs/heads/master
| 2021-01-18T00:22:34.032009
| 2012-08-23T22:07:22
| 2012-08-23T22:07:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 651
|
rd
|
bandit-package.Rd
|
\name{bandit-package}
\alias{bandit-package}
\alias{bandit}
\docType{package}
\title{
Functions for simple A/B split test and multi-armed bandit analysis
}
\description{
A set of functions (intended to be used by the ruby gem metricizer) for doing basic analysis of A/B split test data and recommending distribution of next probabilities, with some additional convenience functions for web metrics analysis.
}
\details{
\tabular{ll}{
Package: \tab bandit\cr
Type: \tab Package\cr
Version: \tab 0.3.0\cr
Date: \tab 2012-08-23\cr
Depends: \tab boot\cr
License: \tab GPL-3\cr
}
}
\author{
Thomas Lotze <thomaslotze@thomaslotze.com>
}
\keyword{ package }
|
eba85aa37fd61ae2c3dd3a5cda48f32b14ab5141
|
ada3800c6de8acfc2d0b7bbe7426ce5bcbc1901f
|
/micro-services/api/R/example/test_micsvc.R
|
a7181e28e776ed78fd23923d4239f526b2b25e1d
|
[] |
no_license
|
chaitanyawolfe/docs
|
9fb6728166fd30226c5930456bf29957060edee4
|
c4de365c1972450577192fdc2f40378b4092ab24
|
refs/heads/master
| 2020-04-28T15:22:10.572483
| 2019-03-06T11:05:04
| 2019-03-06T11:05:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,181
|
r
|
test_micsvc.R
|
## Author: Kartik Arora
## Date : 13-Feb-2018
##
## File shows simple example of using Risk Model/Optimization API:
## build a risk model, download its data, upload a portfolio, and browse
## the catalog of portfolios and factors.
## Source the API file
source('micsvc.R')
# Setup a connection object (replace the placeholders with real credentials)
conn1 <- qes.microsvc.Conn$new(username = '<username here>', password = '<password>')
# Get instance of risk model builder
risk_model_builder <- conn1$get_risk_model_builder()
# Submit a new risk model builder request over 2018
# (freq = '1me' presumably means month-end -- confirm in the API docs)
risk_model_builder$new_request(universe = 'SP500',
                               template = 'default',
                               startDate = '2018-01-31',
                               endDate = '2018-12-31',
                               freq = '1me')
# Wait for it to finish (up to 600 seconds)
risk_model_builder$wait(max_wait_secs = 600)
# Download all data to a directory
risk_model_builder$download_all('QES-Risk-Model-Data')
# Uploading the Portfolio
conn1$upload_portfolio(id = 'Custom_Port1', filename = 'sample-port.csv')
# See the uploaded portfolio
catalog <- conn1$get_catalog()
portfolios <- catalog$get_portfolios()
View(portfolios)
# See the new factor
factors <- catalog$get_factors()
# Factors derived from the uploaded portfolio carry its id as an ID prefix.
factors[which(startsWith(factors$ID,'Custom_Port1')),]
View(factors)
|
995a5e03b9a45e4efb892ee7c9d2142675a15ea0
|
d666f106be302a4b676c10736461dc94390fc0bf
|
/R/functions.R
|
9d60d4583077e2aeb1c7d0969fd6dbf21ba2b7f1
|
[] |
no_license
|
travitzki/censoescolaR
|
d7bf48542ca75c34825507f2eb19facff5755ea8
|
c2f4babdde1cdcebf42c71050cf867011226d931
|
refs/heads/master
| 2023-02-02T12:35:29.750304
| 2020-12-16T17:40:47
| 2020-12-16T17:40:47
| 311,745,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,804
|
r
|
functions.R
|
# 0- funcao para download do censo 2019 ------------------------------------
#' Download school census microdata
#'
#' @param year numeric, at the moment works with 2018 and 2019 data
#' @param method character, default is "wget". Other options: "internal", "libcurl", "curl"
#'
#' @return .zip file with all microdata, which must be unzipped
#' @export
#'
#' @details This function works only with 2019 and 2018 data.
#' @details Other years can be downloaded in: http://inep.gov.br/microdados
#' @details This package works fine with 2019 data, and at least partially with 2015 data. Other years were not tested till now.
#'
#' @examples
#' \donttest{
#' download_microdata()
#' download_microdata(year=2018)}
#'
#' # for a more complete view, see ?insert_labels
download_microdata<-function(year=2019, method = "wget")
{
warning('You can download the microdata in: http://inep.gov.br/microdados')
source=paste0('http://download.inep.gov.br/microdados/microdados_educacao_basica_',year,'.zip')
download.file(url=source, destfile='microdados_educacao_basica_',year,'.zip', method = method)
}
# 1- funcao para importar dados para o R -------------------------------------
#' Import csv microdata to R
#'
#' @description Import csv microdata to R, optimizing for big data (student and teacher levels)
#'
#' @param file_path_origin character, path to downloaded .csv file
#' @param file_path_destiny character, path to .rda file to be created
#'
#' @return data.frame saved in .rda file
#' @export
#'
#' @examples
#' \dontrun{
#' file.downloaded='~/YOUR_PATH/downloaded_data/ESCOLA.CSV'
#' file.imported='~/YOUR_PATH/temp_data.rda'
#'
#' import_csv2rda(file_path_origin=file.downloaded,
#' file_path_destiny=file.imported)}
#'
#' # for a more complete view, see ?insert_labels
import_csv2rda=function(file_path_origin, file_path_destiny)
{
# funcao supostamente valida para importar todos arquivos do censo escolar desde 2013
censo=data.table::fread(file_path_origin, sep="|", dec=",", encoding="Latin-1")
print('SEE THE DATA IMPORTED:')
print(head(censo))
save(censo,file=file_path_destiny)
}
# 2- funcao para incluir rotulos do dicionario no data frame ------------------------------------------
# usar o que era arquivo destino agora como origem
#' Insert labels in factor variables (in portuguese).
#'
#' @description The dictionary used to label is from 2019 data
#'
#' @param file_path_origin character, path to .rda file generated by function import_csv2rda()
#' @param file_path_destiny character, path to .rda file to be created, with labels in factors
#' @param data_level character, defines census data level according to data file imported ('Escola','Docente','Gestor','Turma','Matricula')
#' @param add_variables logical, if TRUE add some useful variables to the data.frame
#'
#' @details Works fine with 2019 data, and at least partially with 2015 data. Other years not tested yet.
#'
#' @return data.frame saved in .rda file
#' @export
#'
#' @examples
#' # you must first download the .zip file. See ?download_microdata
#' # then you must unzip it and choose a data file (eg. ESCOLA.CSV)
#' # then you can run the code below, changing the first 3 lines as you wish
#' # note that 'data_level' must also be defined, in function insert_labels()
#'
#' \dontrun{
#' file.downloaded='~/YOUR_PATH/downloaded_data/ESCOLA.CSV'
#' file.imported='~/YOUR_PATH/temp_data.rda'
#' file.labelled='~/YOUR_PATH/censusData_ESCOLA.rda'
#'
#' import_csv2rda(file_path_origin=file.downloaded,
#' file_path_destiny=file.imported)
#'
#' insert_labels (file_path_origin=file.imported,
#' file_path_destiny=file.labelled,
#' data_level='Escola',
#' add_variables=TRUE)}
insert_labels=function(file_path_origin, file_path_destiny,
data_level=c('Escola','Docente','Gestor','Turma','Matricula'), add_variables=TRUE)
{
# carregar dados desse nivel para recodificar
load(system.file("recodes", paste0('dados-recode_',data_level,'.rda'), package = 'BRschoolData'))
dd <- as.data.frame(dd) # declarar pra nao dar warning no pacote
# carregar arquivo importado
load(file_path_origin)
censo <- as.data.frame(censo)
# loop para recodificar
variaveis=names(censo)
for(lop.var in 1:length(variaveis)){
# lop.var=1
if(!variaveis[lop.var]%in%dd$nome) {
warning(paste('INCOMPLETE LABELING: variable',variaveis[lop.var],'is not in the dicionary.'))
next
}
indice.dicionario=which(dd$nome%in%variaveis[lop.var])
if(length(indice.dicionario)>1) stop('Tem duplicacao de variaveis')
if(is.na(dd$trad.fatores[indice.dicionario])){ # se nao for fator
censo[,lop.var][censo[,lop.var]==999]=NA
censo[,lop.var][censo[,lop.var]==8887]=NA
censo[,lop.var][censo[,lop.var]==8888]=NA
censo[,lop.var][censo[,lop.var]==88888]=NA
next
}
## recodificar variavel ----------------
if(add_variables==TRUE){
# criar antes variaveis sinteticas para 'TP_ETAPA_ENSINO'
if(variaveis[lop.var]=='TP_ETAPA_ENSINO'){
# identificar turmas de ensino fundamental e medio
turmas.infantil=1:3
turmas.mista.InfantFundam=56
turmas.fundamental1=c(4:7,14:18)
turmas.fundamental2=c(8:11,19:21,41)
turmas.mista.Fundam=c(12:13,22:24)
turmas.medio=c(25:38) # todos os tipos, menos EJA, menos educacao profissional
turmas.eja=c(65,67,69:74)
turmas.prof=c(39,40,64,68)
# fazer variavel 'ciclo'
censo$ciclo=NA
censo$ciclo[which(censo$TP_ETAPA_ENSINO%in%turmas.infantil)]="EI"
censo$ciclo[which(censo$TP_ETAPA_ENSINO%in%turmas.mista.InfantFundam)]="EIeEFmix"
censo$ciclo[which(censo$TP_ETAPA_ENSINO%in%turmas.fundamental1)]="EF1"
censo$ciclo[which(censo$TP_ETAPA_ENSINO%in%turmas.fundamental2)]="EF2"
censo$ciclo[which(censo$TP_ETAPA_ENSINO%in%turmas.mista.Fundam)]="EFmix"
censo$ciclo[which(censo$TP_ETAPA_ENSINO%in%turmas.medio)]="EM"
censo$ciclo[which(censo$TP_ETAPA_ENSINO%in%turmas.eja)]="EJA"
censo$ciclo[which(censo$TP_ETAPA_ENSINO%in%turmas.prof)]="Prof"
table(censo$ciclo)
## fazer variavel so para serie do ensino medio regular
censo$serieEM=NA
censo$serieEM[which(censo$TP_ETAPA_ENSINO%in%c(25,30,35))]="1a serie"
censo$serieEM[which(censo$TP_ETAPA_ENSINO%in%c(26,31,36))]="2a serie"
censo$serieEM[which(censo$TP_ETAPA_ENSINO%in%c(27,32,37))]="3a serie"
table(censo$serieEM, useNA = 'ifany')
}
}
# recodificar
censo[,lop.var]=car::Recode(var=censo[,lop.var], recodes=dd$trad.fatores[indice.dicionario])
}
save(censo,file=file_path_destiny)
print('SEE THE DATA LABELED:')
print(head(censo))
}
|
65133a827169fcda98cd47c9a0ac784716e09a96
|
4e3580312132efb5da4a385878c9394e16a9d70a
|
/man/modify_max_lk.Rd
|
0cdc32a2add251138a149b02672df650752a02ba
|
[] |
no_license
|
KerstinSpitzer/ModifyMaxLkAndPairwise
|
ed1c0bb9ba2e95789936a44f641659a70a447a1d
|
5599edf27bd14a2254b82af65391f5b0325efe47
|
refs/heads/master
| 2022-11-23T08:59:50.856527
| 2020-07-18T17:00:05
| 2020-07-18T17:00:05
| 257,632,907
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,832
|
rd
|
modify_max_lk.Rd
|
\name{modify_max_lk}
\alias{modify_max_lk}
\alias{ModifyMaxLkAndPairwise}
\title{
Rescaling of the max_lk output in LDhelmet to reduce the mean squared error
}
\description{
The output of the max_lk function in LDhelmet is improved in terms of the mean squared error by rescaling it with a certain constant, see Gaertner and Futschik (2016).
}
\usage{
modify_max_lk(rhohat,theta,n,l,all.ests=TRUE)
}
\arguments{
\item{rhohat}{
Output of the max_lk function in LDhelmet (population recombination rate in 1/bp)
}
\item{theta}{
Population mutation rate in 1/bp
}
\item{n}{
Sample size
}
\item{l}{
Sequence length in bp
}
\item{all.ests}{
Binary vector to choose if every input for the estimated recombination rate \code{rhohat} is modified (\code{all.ests=TRUE}) or if \code{rhohat} is modified only if it is in the range tested in Gaertner and Futschik (2016) and left unchanged otherwise (\code{all.ests=FALSE}).
}
}
\value{
Rescaled recombination rate in 1/bp
}
\references{
Chan, A.H., Jenkins, P.A., and Song, Y.S., 2012. Genome-wide fine-scale recombination
rate variation in Drosophila melanogaster. PLoS Genet, 8(12): e1003090.
Gaertner, K., and Futschik, A., 2016. Improved versions of common estimators of the recombination rate. Journal of Computational Biology 23(9), 756-768.
}
\author{
Kerstin Gaertner, Andreas Futschik
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
%\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
%}
\examples{
EstRho <- 0.031 #Output of max_lk
modify_max_lk(EstRho,0.02,10,15000)
modify_max_lk(EstRho,0.02,10,15000,all.ests=FALSE)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{shrinkage}
\keyword{estimate population recombination rate}% __ONLY ONE__ keyword per line
|
22a16aaef0d26694ee94f8a2c7e7cde96ba17898
|
31362fdab2193f92b64f9a82b0fe1ca732fcf6df
|
/Covid19VaccineAesiIncidenceCharacterization/server.R
|
3affb08d375244dab9077f77f647842d5d7c8969
|
[] |
no_license
|
OHDSI/ShinyDeploy
|
a5c8bbd5341c96001ebfbb1e42f3bc60eeceee7c
|
a9d6f598b10174ffa6a1073398565d108e4ccd3c
|
refs/heads/master
| 2023-08-30T17:59:17.033360
| 2023-08-26T12:07:22
| 2023-08-26T12:07:22
| 98,995,622
| 31
| 49
| null | 2023-06-26T21:07:33
| 2017-08-01T11:50:59
|
R
|
UTF-8
|
R
| false
| false
| 12,349
|
r
|
server.R
|
shiny::shinyServer(function(input, output, session) {
filteredSexGroups <- reactiveVal(NULL)
shiny::observeEvent(eventExpr = {
list(input$sexFilter_open,
input$tabs)
}, handlerExpr = {
if (isFALSE(input$sexFilter_open) || !is.null(input$tabs)) {
result <- input$sexFilter
filteredSexGroups(result)
}
})
filteredAgeGroups <- reactiveVal(NULL)
shiny::observeEvent(eventExpr = {
list(input$ageFilter_open,
input$tabs)
}, handlerExpr = {
if (isFALSE(input$ageFilter_open) || !is.null(input$tabs)) {
result <- input$ageFilter
filteredAgeGroups(result)
}
})
filteredDatabaseIds <- reactiveVal(NULL)
shiny::observeEvent(eventExpr = {
list(input$databaseFilter_open,
input$tabs)
}, handlerExpr = {
if (isFALSE(input$databaseFilter_open) || !is.null(input$tabs)) {
result <- input$databaseFilter
filteredDatabaseIds(result)
}
})
IRFilteredPlotdata <- shiny::reactive({
validate(need(length(filteredSexGroups()) > 0, "No gender selected"))
validate(need(length(filteredAgeGroups()) > 0, "No age groups selected"))
validate(need(length(filteredDatabaseIds()) > 0, "No databases selected"))
data <- ir_for_plot
if(!is.null(filteredSexGroups())) {
data <- data %>%
dplyr::filter(.data$sex_group %in% filteredSexGroups())
}
if(!is.null(filteredAgeGroups())) {
data <- data %>%
dplyr::filter(.data$age_group %in% filteredAgeGroups())
}
if(!is.null(filteredDatabaseIds())) {
data <- data %>%
dplyr::filter(.data$db_name %in% filteredDatabaseIds())
}
return(data)
})
output$outputPlot <- renderPlot({
shiny::withProgress(message = "Building Plot. . .", {
validate(need((nrow(IRFilteredPlotdata()) > 0), paste0("Data is not loaded.")))
if ('Meta-Analysis' %in% filteredDatabaseIds()) {
presentMetaAnalysisInPlot <- TRUE
} else {
presentMetaAnalysisInPlot <- FALSE
}
p0 <- plotIRv3(outcomeCohortDefinitionId, "Common",data = IRFilteredPlotdata(), metaAnalysis = presentMetaAnalysisInPlot)
p1 <-
plotIRv3(outcomeCohortDefinitionId, "Common",data = IRFilteredPlotdata(), metaAnalysis = presentMetaAnalysisInPlot) + theme_my(base_size = 9) +
scale_y_continuous(
trans = 'log10',
limits = c(0.1, 10000),
breaks = c(0.1, 1, 10, 100, 1000, 10000)
) +
theme(legend.position = "none")
p2 <-
plotIRv3(outcomeCohortDefinitionId, "Rare",data = IRFilteredPlotdata(), metaAnalysis = presentMetaAnalysisInPlot) + theme_my(base_size = 10) +
scale_y_continuous(
trans = 'log10',
limits = c(.1, 1000),
breaks = c(0.1, 1, 10, 100, 1000, 10000)
) +
theme(legend.position = "none")
p3 <-
plotIRv3(outcomeCohortDefinitionId, "Very rare",data = IRFilteredPlotdata(), metaAnalysis = presentMetaAnalysisInPlot) + theme_my(base_size =
10) +
scale_y_continuous(
trans = 'log10',
limits = c(.05, 1000),
breaks = c(0.05, 0.1, 1, 10, 100, 1000, 1000)
) +
theme(legend.position = "none")
legend <- cowplot::get_legend(p0 + guides(color = guide_legend(nrow = 15)) +
theme(legend.position = "right"))
pcol <- cowplot::plot_grid(p1, p2, p3, nrow = 3) #,hjust=-1
p123_v3 <-
cowplot::plot_grid(
pcol,
legend,
rel_widths = c(1, .15),
axis = 'l',
labels = "AUTO",
label_y = -8
)
})
return(p123_v3)
})
# observeEvent(eventExpr = input$sexFilter,handlerExpr = {
# if(!is.null(input$sexFilter)) {
# ir_for_plot_data(ir_for_plot_data()[ir_for_plot_data()$sex_group %in% input$sexFilter,])
# }
# })
output$resultTable <- DT::renderDT({
data <- IRFilteredPlotdata() %>%
dplyr::select(
.data$outcomeName,
.data$databaseName,
.data$age_group,
.data$sex_group,
.data$numOutcomes,
.data$personYears,
.data$IR_P_100000py
) %>%
dplyr::rename("Outcome" = "outcomeName",
"Data Source" = "databaseName",
"Age" = "age_group",
"Sex" = "sex_group",
"Incidence Rate/100,000 py" = "IR_P_100000py",
"Person Years" = "personYears",
"Case Count" = "numOutcomes") %>%
dplyr::mutate_if(is.numeric, ~round(., 2))
table <- standardDataTable(data)
return(table)
})
observe({
shinyWidgets::updatePickerInput(
session = session,
inputId = "sexFilter",
choices = sexGroups,
selected = sexGroups
)
shinyWidgets::updatePickerInput(
session = session,
inputId = "ageFilter",
choices = as.vector(ageGroups),
selected = as.vector(ageGroups)
)
shinyWidgets::updatePickerInput(
session = session,
inputId = "databaseFilter",
choices = db_names,
selected = db_names
)
})
output$dataSourceTable <- DT::renderDT({
data <- dataSource
# %>%
# dplyr::rename('Data Source' = "dataSource",
# 'Database Id' = 'shortName',
# 'Description' = 'description')
# colnames(data) <- camelCaseToTitleCase(colnames(data))
dataTable <- standardDataTable(data)
return(dataTable)
})
output$cohortTable <- DT::renderDT({
data <- cohort %>%
dplyr::select(.data$phenotype,.data$cohortId,.data$cohortName,.data$link) %>%
dplyr::mutate(cohortName = paste0("<a href='",.data$link,"'>",.data$cohortName,"</a>")) %>%
dplyr::select(-.data$link)
colnames(data) <- camelCaseToTitleCase(colnames(data))
table <- standardDataTable(data)
return(table)
}, selection = "single")
selectedCohortDefinitionRow <- reactive({
idx <- input$cohortTable_rows_selected
if (is.null(idx)) {
return(NULL)
} else {
subset <- cohort
if (nrow(subset) == 0) {
return(NULL)
}
row <- subset[idx[1],]
return(row)
}
})
output$cohortDefinitionRowIsSelected <- reactive({
return(!is.null(selectedCohortDefinitionRow()))
})
outputOptions(output,
"cohortDefinitionRowIsSelected",
suspendWhenHidden = FALSE)
output$cohortDetailsText <- shiny::renderUI({
row <- selectedCohortDefinitionRow()
if (!'logicDescription' %in% colnames(row)) {
row$logicDescription <- row$cohortName
}
if (is.null(row)) {
return(NULL)
} else {
tags$table(
style = "margin-top: 5px;",
tags$tr(
tags$td(tags$strong("Cohort ID: ")),
tags$td(HTML(" ")),
tags$td(row$cohortId)
),
tags$tr(
tags$td(tags$strong("Cohort Name: ")),
tags$td(HTML(" ")),
tags$td(row$cohortName)
),
tags$tr(
tags$td(tags$strong("Logic: ")),
tags$td(HTML(" ")),
tags$td(row$logicDescription)
)
)
}
})
cohortDefinitionCirceRDetails <- shiny::reactive(x = {
progress <- shiny::Progress$new()
on.exit(progress$close())
progress$set(message = "Rendering human readable cohort description using CirceR (may take time)", value = 0)
data <- selectedCohortDefinitionRow()
if (nrow(selectedCohortDefinitionRow()) > 0) {
details <- list()
circeExpression <-
CirceR::cohortExpressionFromJson(expressionJson = data$json)
circeExpressionMarkdown <-
CirceR::cohortPrintFriendly(circeExpression)
circeConceptSetListmarkdown <-
CirceR::conceptSetListPrintFriendly(circeExpression$conceptSets)
details <- data
details$circeConceptSetListmarkdown <-
circeConceptSetListmarkdown
details$htmlExpressionCohort <-
convertMdToHtml(circeExpressionMarkdown)
details$htmlExpressionConceptSetExpression <-
convertMdToHtml(circeConceptSetListmarkdown)
details <- dplyr::bind_rows(details)
} else {
return(NULL)
}
return(details)
})
output$cohortDefinitionText <- shiny::renderUI(expr = {
cohortDefinitionCirceRDetails()$htmlExpressionCohort %>%
shiny::HTML()
})
cohortDefinistionConceptSetExpression <- shiny::reactive({
row <- selectedCohortDefinitionRow()
if (is.null(row)) {
return(NULL)
}
expression <- RJSONIO::fromJSON(row$json, digits = 23)
if (is.null(expression)) {
return(NULL)
}
expression <-
getConceptSetDetailsFromCohortDefinition(cohortDefinitionExpression = expression)
return(expression)
})
output$conceptsetExpressionTable <- DT::renderDataTable(expr = {
data <- cohortDefinistionConceptSetExpression()
if (is.null(data)) {
return(NULL)
}
if (!is.null(data$conceptSetExpression) &&
nrow(data$conceptSetExpression) > 0) {
data <- data$conceptSetExpression %>%
dplyr::select(.data$id, .data$name)
} else {
return(NULL)
}
dataTable <- standardDataTable(data = data)
return(dataTable)
}, server = TRUE)
cohortDefinitionConceptSetExpressionRow <- shiny::reactive(x = {
idx <- input$conceptsetExpressionTable_rows_selected
if (length(idx) == 0 || is.null(idx)) {
return(NULL)
}
if (!is.null(cohortDefinistionConceptSetExpression()$conceptSetExpression) &&
nrow(cohortDefinistionConceptSetExpression()$conceptSetExpression) > 0) {
data <-
cohortDefinistionConceptSetExpression()$conceptSetExpression[idx, ]
if (!is.null(data)) {
return(data)
} else {
return(NULL)
}
}
})
output$conceptSetExpressionRowSelected <- shiny::reactive(x = {
return(!is.null(cohortDefinitionConceptSetExpressionRow()))
})
shiny::outputOptions(x = output,
name = "conceptSetExpressionRowSelected",
suspendWhenHidden = FALSE)
cohortDefinitionConceptSets <- shiny::reactive(x = {
if (is.null(cohortDefinitionConceptSetExpressionRow())) {
return(NULL)
}
data <-
cohortDefinistionConceptSetExpression()$conceptSetExpressionDetails
data <- data %>%
dplyr::filter(.data$id == cohortDefinitionConceptSetExpressionRow()$id)
data <- data %>%
dplyr::select(
.data$conceptId,
.data$conceptName,
.data$standardConcept,
.data$invalidReason,
.data$conceptCode,
.data$domainId,
.data$vocabularyId,
.data$conceptClassId
)
return(data)
})
output$cohortDefinitionConceptSetsTable <-
DT::renderDataTable(expr = {
data <- cohortDefinitionConceptSets()
if (is.null(cohortDefinitionConceptSets())) {
return(NULL)
}
dataTable <- standardDataTable(data = data)
return(dataTable)
}, server = TRUE)
output$cohortConceptsetExpressionJson <- shiny::renderText({
if (is.null(cohortDefinitionConceptSetExpressionRow())) {
return(NULL)
}
cohortDefinitionConceptSetExpressionRow()$json
})
output$cohortDefinitionJson <- shiny::renderText({
row <- selectedCohortDefinitionRow()
if (is.null(row)) {
return(NULL)
} else {
row$json
}
})
output$cohortDefinitionSql <- shiny::renderText({
row <- selectedCohortDefinitionRow()
if (is.null(row)) {
return(NULL)
} else {
circeExpression <-
CirceR::cohortExpressionFromJson(expressionJson = row$json)
circeoptions <-
CirceR::createGenerateOptions(cohortId = row$cohortId)
sql <-
CirceR::buildCohortQuery(expression = circeExpression, options = circeoptions)
return(sql)
}
})
})
|
eac3ba8076821d50feff67e8f1f1b0e892a66d88
|
e0ce905fc0d5cdde31fcb70219da6ae5585afdf2
|
/R/package.R
|
1487e2746b99ec791d5c67127abb157e35a42ba4
|
[] |
no_license
|
cran/easypackages
|
57febd9e044bdb4a4421d8eef72caeed751cbe37
|
1d5734cc996eb0cf46970e84322f086580541adb
|
refs/heads/master
| 2016-08-11T15:10:41.647828
| 2016-01-28T10:09:32
| 2016-01-28T10:09:32
| 50,570,866
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,871
|
r
|
package.R
|
## ============================================================================
##
## package - an S3 class that contains attributes about an R package that can
## be used to load or install that package
##
## ============================================================================
get_repo_info <- function(userRepoSplitSymbol) {
# Returns a function that, given a symbol, breaks a package name and repo
# combination into its username and repo components if need be
function(package_name, username_and_repo = FALSE) {
symbol_pos <- get_symbol_pos(package_name,
userRepoSplitSymbol)
username <- substr(package_name, 1, (symbol_pos - 1))
repo <- substr(package_name, (symbol_pos + 1),
nchar(package_name))
list(username = username, repo = repo,
userrepo = paste0(username, "/", repo))
}
}
get_github_info <- get_repo_info("/")
get_bitbucket_info <- get_repo_info("\\$")
## For export -----------------------------------------------------------------
#' package()
#'
#' A package object that contains at minimum the name of a package. If the
#' package exists on CRAN, the name of the package is used to install that
#' package from CRAN. A forward slash may be used in the package name to
#' indicate a GitHub username and repo that a package is located in. A dollar
#' sign may be used in the package name to indicate a Bitbucket username and
#' repo that a package is located in.
#'
#' If a package is not inteded to be installed from CRAN, Github (public), or
#' Bitbucket (public) you may optionally supply a function to the
#' \code{install} argument that installs the package, with additional arguments
#' to the function supplied via the \code{...} argument.
#'
#' @param name the name of a package, or, if the package is on Github, the
#' username and repo of the package, ex. Rdatatable/data.table, where
#' Rdatatable is the GitHub username, and data.table is the repo name. The
#' same process works with Bitbucket, except \code{$} is used instead of
#' \code{/} to separate the username and repo.
#' @param install (optional) a function used to install a package.
#' @param ... (optional) additional arguments to the \code{install} function.
#' Another way of supplying additonal parameters to \code{install} is to use
#' the \code{purrr::partial} function.
#' @param load_type (default is "attach") should the package be loaded, or
#' attached? See http://r-pkgs.had.co.nz/namespace.html to learn more.
#' @export
#' @examples
#' \dontrun{
#' this_package <- package("dplyr")
#' github_package <- package("Rdatatable/data.table")
#' that_package <- package("jakePackage", devtools::install_bitbucket,
#' repo = "repo", username = "user", password = "password")
#' }
package <- function(name, install = utils::install.packages, ...,
load_type = "attach") {
# Error handling
arguments <- list(...)
assert_that(is_function(install))
if (!load_type %in% c("load", "attach"))
stop("load_type may only be 'load' or 'attach'")
# Store the function name (for printing), same for ...
func_name <- deparse(substitute(install))
arg_names <- "None"
if (length(arguments) > 0)
arg_names <- "Additional arguments supplied for the install function"
# If :: is appended to the package name, set load_type to "load"
if (grepl("::", name)) {
load_type <- "load"
name <- sub("::", "", name)
}
# Determine if the user inputted a GitHub package
if (grepl("/", name)) {
# GitHub
github_info <- get_github_info(name)
install <- install_github
func_name <- "install_github"
arguments <- list(repo = github_info$userrepo)
name <- github_info$repo
} else if (grepl("\\$", name)) {
# Bitbucket
bitbucket_info <- get_bitbucket_info(name)
install <- install_bitbucket
func_name <- "install_bitbucket"
arguments <- list(repo = bitbucket_info$userrepo)
name <- bitbucket_info$repo
}
# Create, output the package object
if (length(arguments) > 0) {
# User supplies install function and separate arguments for that func
package_obj <- list(name = name, install_func = install,
install_args = arguments, load_type = load_type,
func_name = func_name, arg_names = arg_names)
} else if (!all.equal(install, utils::install.packages)) {
# User supplies an install function but no install arguments
package_obj <- list(name = name, install_func = install,
install_args = NULL, load_type = load_type,
func_name = func_name, arg_names = arg_names)
} else {
# Package going to be installed via CRAN
package_obj <- list(name = name, install_func = "default",
install_args = NULL, load_type = load_type,
func_name = func_name, arg_names = arg_names)
}
structure(package_obj, class = "package_obj")
}
#' is.package_obj()
#' Returns TRUE if obj is a package_obj object
#' @param obj an R object
#' @export
is.package_obj <- function(obj) inherits(obj, "package_obj")
#' load_package()
#' Function to load a package given a package object
#' @param pack a \code{package_obj} object
#' @export
load_package <- function(pack) UseMethod("load_package")
# Loads a package object
#' @export
load_package.package_obj <- function(pack) {
# Either load or attach the package, and return a logical to indicate
# whether or not the package loaded/attached correctly
load_type <- pack$load_type
if (load_type == "attach") {
status <- require(pack$name, character.only = TRUE)
} else if (load_type == "load") {
status <- requireNamespace(pack$name)
} else {
stop("load_type can only be 'load' or 'attach'")
}
status
}
#' install_package()
#' Function to install a package given a package object
#' @param pack a \code{package_obj} object
#' @export
install_package <- function(pack) UseMethod("install_package")
# Installs a package object
#' @export
install_package.package_obj <- function(pack) {
# Construct our install arguments
if (is.null(pack$install_args)) {
if (all.equal(pack$install_func, "default")) {
install_args <- list(pack$name)
pack$install_func <- utils::install.packages
} else {
install_args <- list()
}
} else {
install_args <- list(pack$install_args)
}
# Try installing the package, but don't throw an error
x <- try(do.call(pack$install_func, install_args))
# Return TRUE if installation was successful, FALSE otherwise
if (class(x) == "try-error") {
return(FALSE)
} else {
return(TRUE)
}
}
# Print method for package_obj
#' @export
print.package_obj <- function(x, ...) {
# Package name
cat("Package:", x$name, "\n")
# Function name
if (x$func_name == "utils::install.packages") {
cat("Install function: (default) utils::install.packages\n")
} else {
cat("Install function:", x$func_name, "\n")
}
# Additional argument names
if (x$arg_names == "None") {
cat("Additional install arguments: (default) None\n")
} else {
cat("Additional install arguments:", x$arg_names, "\n")
}
# Load type
if (x$load_type == "attach") {
cat("Load/attach: (default) attach\n")
} else {
cat("Load/attach:", x$load_type, "\n")
}
}
|
1d21e612959b564d1dda538e594709b756357f73
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/sqlutils/R/sqlexec.R
|
8f9df6d25020a7f8ffcfb13ee0da64784f95c98e
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,316
|
r
|
sqlexec.R
|
#' Generic function for executing a query.
#'
#' @param connection the database connection.
#' @param sql the query to execute.
#' @param ... other parameters passed to the appropriate \code{sqlexec} function.
#' @return a data frame.
#' @export sqlexec
sqlexec <- function(connection, sql, ...) { UseMethod("sqlexec") }
#' Executes queries for RODBC package.
#' @inheritParams sqlexec
sqlexec.RODBC <- function(connection, sql, ...) {
library(RODBC)
RODBC::sqlQuery(connection, sql) #TODO: Why doesn't this work with ... passed through
}
#' Executes queries for RSQLite package.
#' @inheritParams sqlexec
sqlexec.SQLiteConnection <- function(connection, sql, ...) {
library(RSQLite)
RSQLite::dbGetQuery(connection, sql, ...)
}
#' Executes queries for RMySQL package.
#' @inheritParams sqlexec
sqlexec.RMySQL <- function(connection, sql, ...) {
library(RMySQL)
RMySQL::dbSendQuery(connection, sql, ...)
}
#' Executes queries for RPostgreSQL
#' @inheritParams sqlexec
sqlexec.PostgreSQLConnection <- function(connection, sql, ...) {
library(RPostgreSQL)
rs <- RPostgreSQL::dbSendQuery(connection, sql)
RPostgreSQL::fetch(rs, n=-1)
}
#' Executes queries for RJDBC
#' @inheritParams sqlexec
sqlexec.JDBCConnection <- function(connection, sql, ...) {
library(RJDBC)
RJDBC::dbGetQuery(connection, sql)
}
|
b12ed7aec81664b7ce138327666a6a6245c4b2da
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/darch/R/rbm.Getter.R
|
10b2479678fff514be25c46f25618163ec76c6bd
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,256
|
r
|
rbm.Getter.R
|
# Copyright (C) 2013-2015 Martin Drees
#
# This file is part of darch.
#
# darch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# darch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with darch. If not, see <http://www.gnu.org/licenses/>.
#' Returns a list with the states of the visible units.
#'
#' @param rbm A instance of the class \code{\link{RBM}}.
#' @usage getVisibleUnitStates(rbm)
#' @seealso \code{\link{RBM}}
#' @return The states of the visible units.
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getVisibleUnitStates-methods
setGeneric("getVisibleUnitStates",function(rbm){standardGeneric("getVisibleUnitStates")})
#' @rdname getVisibleUnitStates-methods
#' @aliases getVisibleUnitStates,RBM-method
setMethod(
f="getVisibleUnitStates",
signature="RBM",
definition=function(rbm){
return (rbm@visibleUnitStates)
}
)
#' Returns a list with the states of the hidden units.
#'
#' @param rbm A instance of the class \code{\link{RBM}}.
#' @usage getHiddenUnitStates(rbm)
#' @return The states of the hidden units.
#' @seealso \code{\link{RBM}}
#'
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getHiddenUnitStates-methods
setGeneric("getHiddenUnitStates",function(rbm){standardGeneric("getHiddenUnitStates")})
#' @rdname getHiddenUnitStates-methods
#' @aliases getHiddenUnitStates,RBM-method
setMethod(
f="getHiddenUnitStates",
signature="RBM",
definition=function(rbm){
return (rbm@hiddenUnitStates)
}
)
#' Returns the output of the \code{\link{RBM}}
#'
#' @param rbm A instance of the class \code{\link{RBM}}.
#' @usage getOutput(rbm)
#' @seealso \code{\link{RBM}}
#' @return The output of the \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getOutput-methods
setGeneric("getOutput",function(rbm){standardGeneric("getOutput")})
#' @rdname getOutput-methods
#' @aliases getOutput,RBM-method
setMethod(
f="getOutput",
signature="RBM",
definition=function(rbm){
if (rbm@ff){
return(rbm@ffOutput[])
}
return (rbm@output)
}
)
#' Returns the number of hidden units of the \code{\link{RBM}}
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getNumHidden(rbm)
#' @return The number of hidden units of the \code{\link{RBM}}
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getNumHidden-methods
setGeneric("getNumHidden", function(rbm) standardGeneric("getNumHidden"))

#' @rdname getNumHidden-methods
#' @aliases getNumHidden,RBM-method
setMethod("getNumHidden", signature = "RBM",
          definition = function(rbm) rbm@numHidden)
#' Returns the number of visible units of the \code{\link{RBM}}
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getNumVisible(rbm)
#' @return The number of visible units of the \code{\link{RBM}}
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getNumVisible-methods
setGeneric("getNumVisible", function(rbm) standardGeneric("getNumVisible"))

#' @rdname getNumVisible-methods
#' @aliases getNumVisible,RBM-method
setMethod("getNumVisible", signature = "RBM",
          definition = function(rbm) rbm@numVisible)
#' Returns the learning rate for the visible biases.
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getLearnRateBiasVisible(rbm)
#' @return The learning rate for the visible biases
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getLearnRateBiasVisible-methods
setGeneric("getLearnRateBiasVisible", function(rbm) standardGeneric("getLearnRateBiasVisible"))

#' @rdname getLearnRateBiasVisible-methods
#' @aliases getLearnRateBiasVisible,RBM-method
setMethod("getLearnRateBiasVisible", signature = "RBM",
          definition = function(rbm) rbm@learnRateBiasVisible)
#' Returns the learning rate for the hidden biases.
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getLearnRateBiasHidden(rbm)
#' @return The learning rate for the hidden biases
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getLearnRateBiasHidden-methods
setGeneric("getLearnRateBiasHidden", function(rbm) standardGeneric("getLearnRateBiasHidden"))

#' @rdname getLearnRateBiasHidden-methods
#' @aliases getLearnRateBiasHidden,RBM-method
setMethod("getLearnRateBiasHidden", signature = "RBM",
          definition = function(rbm) rbm@learnRateBiasHidden)
#' Returns the weight cost for the training
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getWeightCost(rbm)
#' @return The weight cost for the training
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getWeightCost-methods
setGeneric("getWeightCost", function(rbm) standardGeneric("getWeightCost"))

#' @rdname getWeightCost-methods
#' @aliases getWeightCost,RBM-method
setMethod("getWeightCost", signature = "RBM",
          definition = function(rbm) rbm@weightCost)
#' Returns the weights of the \code{\link{RBM}}.
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getWeights(rbm)
#' @return The weights of the \code{\link{RBM}}.
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getWeights-methods
setGeneric("getWeights", function(rbm) standardGeneric("getWeights"))

#' @rdname getWeights-methods
#' @aliases getWeights,RBM-method
setMethod("getWeights", signature = "RBM",
          definition = function(rbm) {
            # "[]" materializes the (presumably ff file-backed) matrix
            if (rbm@ff) rbm@ffWeights[] else rbm@weights
          })
#' Returns the biases of the hidden units.
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getHiddenBiases(rbm)
#' @return The biases of the hidden units.
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getHiddenBiases-methods
setGeneric("getHiddenBiases", function(rbm) standardGeneric("getHiddenBiases"))

#' @rdname getHiddenBiases-methods
#' @aliases getHiddenBiases,RBM-method
setMethod("getHiddenBiases", signature = "RBM",
          definition = function(rbm) {
            if (rbm@ff) rbm@ffHiddenBiases[] else rbm@hiddenBiases
          })
#' Returns the update value for the biases of the hidden units.
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getHiddenBiasesInc(rbm)
#' @return The update value for the biases of the hidden units.
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getHiddenBiasesInc-methods
setGeneric("getHiddenBiasesInc", function(rbm) standardGeneric("getHiddenBiasesInc"))

#' @rdname getHiddenBiasesInc-methods
#' @aliases getHiddenBiasesInc,RBM-method
setMethod("getHiddenBiasesInc", signature = "RBM",
          definition = function(rbm) rbm@hiddenBiasesInc)
#' Returns the biases of the visible units.
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getVisibleBiases(rbm)
#' @return The biases of the visible units.
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getVisibleBiases-methods
setGeneric("getVisibleBiases", function(rbm) standardGeneric("getVisibleBiases"))

#' @rdname getVisibleBiases-methods
#' @aliases getVisibleBiases,RBM-method
setMethod("getVisibleBiases", signature = "RBM",
          definition = function(rbm) {
            if (rbm@ff) rbm@ffVisibleBiases[] else rbm@visibleBiases
          })
#' Returns the update value for the biases of the visible units.
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getVisibleBiasesInc(rbm)
#' @return The update value for the biases of the visible units.
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getVisibleBiasesInc-methods
setGeneric("getVisibleBiasesInc", function(rbm) standardGeneric("getVisibleBiasesInc"))

#' @rdname getVisibleBiasesInc-methods
#' @aliases getVisibleBiasesInc,RBM-method
setMethod("getVisibleBiasesInc", signature = "RBM",
          definition = function(rbm) rbm@visibleBiasesInc)
#' Returns the update value for the weights.
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getWeightInc(rbm)
#' @return The update value for the weights.
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getWeightInc-methods
setGeneric("getWeightInc", function(rbm) standardGeneric("getWeightInc"))

#' @rdname getWeightInc-methods
#' @aliases getWeightInc,RBM-method
setMethod("getWeightInc", signature = "RBM",
          definition = function(rbm) rbm@weightInc)
#' Returns the data for the positive phase.
#'
#' @param rbm An instance of the class \code{\link{RBM}}.
#' @usage getPosPhaseData(rbm)
#' @return The data for the positive phase.
#' @seealso \code{\link{RBM}}
#' @include rbm.R
#'
#' @export
#' @docType methods
#' @rdname getPosPhaseData-methods
setGeneric("getPosPhaseData", function(rbm) standardGeneric("getPosPhaseData"))

#' @rdname getPosPhaseData-methods
#' @aliases getPosPhaseData,RBM-method
setMethod("getPosPhaseData", signature = "RBM",
          definition = function(rbm) rbm@posPhaseData)
|
f96934190855834fcd8d4405a0799ded875e8970
|
f7105536a44be844d652f28e5c5b5bab0db66aa8
|
/R/CMF/ml/hw3/hw2.R
|
29f0532c7a60d333e496ab3e9009ace1e181264b
|
[] |
no_license
|
DmitryZheglov/code
|
e6f143c21287a250c01c639659b672fdef089bbe
|
fdec951c8dcf3145109076bc78f0646217b1b822
|
refs/heads/master
| 2022-12-13T11:43:00.962046
| 2019-08-12T18:58:55
| 2019-08-12T18:58:55
| 93,278,292
| 1
| 0
| null | 2022-12-07T23:49:07
| 2017-06-03T23:00:52
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 3,221
|
r
|
hw2.R
|
# Election ("vybory") SVM homework: grid-search an RBF-kernel SVM over
# (C, sigma) on 1996 US presidential election survey data.
library(kernlab)
source("C:/Users/Dmitriy/Desktop/proga/R/cmf/learn/hw2/SVM_func.R")
# file with user-defined functions (provides fitStats(), used below)
# NOTE(review): absolute Windows paths make this script non-portable.
datX= read.csv("C:/Users/Dmitriy/Desktop/proga/R/cmf/learn/hw2/mtrain.csv",header=TRUE,stringsAsFactors = FALSE,sep=",")
datY= read.csv("C:/Users/Dmitriy/Desktop/proga/R/cmf/learn/hw2/mtest.csv",header=TRUE,stringsAsFactors = FALSE,sep=",")
# Recode the ordinal survey labels into numeric codes 1..7.
# Replaces three copy-pasted replace() loops with a single helper.
# Labels not present in the lookup are left untouched and therefore become
# NA after as.numeric() (with a coercion warning) -- same behavior as the
# original loops.
recode_levels <- function(x, lookup) {
  for (lvl in names(lookup)) {
    # numeric code is coerced to character inside x, then converted below
    x[x == lvl] <- lookup[[lvl]]
  }
  as.numeric(x)
}

# Political self-placement scale: extremely liberal ... extremely conservative.
polit <- c(extLib = 1, Lib = 2, sliLib = 3, Mod = 4, sliCon = 5, Con = 6, extCon = 7)
ClinLR <- recode_levels(datX$ClinLR, polit)
DoleLR <- recode_levels(datX$DoleLR, polit)

# Education levels, lowest to highest.
educ <- recode_levels(
  datX$educ,
  c(MS = 1, HSdrop = 2, HS = 3, Coll = 4, CCdeg = 5, BAdeg = 6, MAdeg = 7)
)
# Convert income bands like "$22K-$23K" to a single numeric value (in $K):
# the midpoint of the band; the two open-ended bands map to their boundary.
parse_income <- function(band) {
  if (band == "$105Kplus") return(105)
  if (band == "$3Kminus") return(3)
  bounds <- unlist(strsplit(band, "-"))
  # Each bound looks like "$22K": cut at the "K", drop the leading "$".
  to_num <- function(s) as.numeric(substring(unlist(strsplit(s, "K"))[1], 2))
  (to_num(bounds[1]) + to_num(bounds[2])) / 2
}
income <- vapply(datX$income, parse_income, numeric(1), USE.NAMES = FALSE)
# Encode the response: 1 = not Dole (i.e. Clinton), 0 = Dole.
# Vectorized replacement for the original element-wise loop, which also
# shadowed the base constant `pi`.
vote <- as.numeric(datX$vote != "Dole")
y <- vote

# Design matrix with an explicit intercept column.
# NOTE(review): the column label "TWnews" looks like a typo for "TVnews";
# kept as-is because only the values (not the name) are used downstream.
X <- cbind(1, popul = datX$popul, TWnews = datX$TVnews, ClinLR, DoleLR,
           age = datX$age, educ, income)

# 80/20 train / cross-validation split.
# NOTE(review): no set.seed(), so the split is not reproducible -- confirm
# whether that is intended.
m <- nrow(X)
m.train <- round(0.8 * m)
m.cv <- m - m.train
train.obs <- sample(seq_len(m), size = m.train, replace = FALSE)
cv.obs <- seq_len(m)[-train.obs]
X.train <- X[train.obs, ]; X.cv <- X[cv.obs, ]
y.train <- y[train.obs]; y.cv <- y[cv.obs]
# Exogenous-parameter grid for the SVM: all (C, sigma) combinations.
par <- c(0.01,0.05,0.1,0.5,1,5,10,50,
         100,500,1000)
par <- expand.grid(par,par)
# column headers
dimnames(par)[[2]] <- c("C","sigma")
res <- NULL # modeling results will be collected here
for (i in 1:nrow(par)) { # for each combination of exogenous parameters
  # fit the model parameters (theta) on the training sample
  model <- ksvm(X.train, y.train, type="C-svc",
                C = par$C[i], kern = "rbfdot",
                kpar = list(sigma=par$sigma[i]))
  # predictive classification on the hold-out (validation) sample
  y.pred <- predict(model, newdata = X.cv, type = "response")
  # record this parameter combination together with the forecast
  # statistics returned by the user-defined function fitStats()
  res <- rbind(res, c(par$C[i],par$sigma[i],fitStats(y.cv,y.pred)) )
}
dimnames(res)[[2]][1:2] <- c("C","sigma") # column headers
b=res[,6]
# NOTE(review): which.max(b[28:121]) returns an index *relative to the
# subset*; the true row of res is which.max(b[28:121]) + 27 -- confirm intent.
which.max(b[28:121])
|
9aee8f0e240ff4a957a0698d3ac95667b78459a9
|
25a0c00d980650b8549e88002f30d542a7ebb42b
|
/WordCloud_Laxman.R
|
48d00892d2c3b322a7817937e06d4759fec9d564
|
[] |
no_license
|
bhavish2207/Airline-Customer-Churn-Analysis
|
6f499f07c2cc364375dc7b2dc6c198ea506eac70
|
b124c72b03e32cf745b241eb7c226de60af36adc
|
refs/heads/master
| 2022-03-23T09:53:19.422564
| 2019-12-15T21:43:05
| 2019-12-15T21:43:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,164
|
r
|
WordCloud_Laxman.R
|
# Word cloud of the free-text survey responses.
#
# NOTE(review): package installation is a one-time interactive step; the
# original script installed "wordcloud" twice and re-installed on every run.
# Install once manually with:
#   install.packages(c("wordcloud", "SnowballC", "RColorBrewer"),
#                    dependencies = TRUE)
library(wordcloud)
library(tm)
library(NLP)
library(SnowballC)
library(RColorBrewer)
library(tidyverse)

# `fall` is expected to exist in the workspace with a free-text column
# `freeText` -- confirm it is loaded upstream of this script.
textualReviews <- fall %>%
  select(freeText) %>%
  drop_na(freeText)
row.names(textualReviews) <- NULL

# Standard tm cleaning pipeline: lower-case, strip numbers and punctuation,
# drop English stop words, stem.
reviewsCorpus <- Corpus(VectorSource(textualReviews))
reviewsCorpus <- tm_map(reviewsCorpus, content_transformer(tolower))
reviewsCorpus <- tm_map(reviewsCorpus, removeNumbers)
reviewsCorpus <- tm_map(reviewsCorpus, removePunctuation)
# tm's stopwords() kinds are lower-case ("english"); the original
# stopwords("English") can fail on case-sensitive file systems.
reviewsCorpus <- tm_map(reviewsCorpus, removeWords, stopwords("english"))
reviewsCorpus <- tm_map(reviewsCorpus, stemDocument)

# Term frequencies, sorted decreasing, as a data frame for wordcloud().
reviewMatrix <- TermDocumentMatrix(reviewsCorpus)
tdmMatrix <- as.matrix(reviewMatrix)
freq <- sort(rowSums(tdmMatrix), decreasing = TRUE)
tdmDat <- data.frame(word = names(freq), freq = freq)
rownames(tdmDat) <- NULL

wordcloud(tdmDat$word, tdmDat$freq,
          rot.per = .15, min.freq = 15,
          random.order = FALSE, random.color = TRUE,
          colors = brewer.pal(8, "Dark2"))
|
66918125e486586e164f49a70f5266bde2a5cdce
|
b64995fe2715647319869f7b4d411dc6f846e559
|
/partitionComparison/tests/testthat/test-helper.R
|
9ab8a1543f2c72774f91a1cf753ac6eb6891793e
|
[
"MIT"
] |
permissive
|
KIT-IISM-EM/partitionComparison
|
6040346e48a9a3d497724bc6d33abfbec8afc5ca
|
741b25768d0669e4ed7feff513bdac3d2ef7b3ec
|
refs/heads/master
| 2021-10-22T00:39:28.757424
| 2019-03-07T10:58:38
| 2019-03-07T10:58:38
| 114,737,331
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,501
|
r
|
test-helper.R
|
# Unit tests for the helper functions of the partitionComparison package.
library(testthat)
library(partitionComparison)
context("Test helper functions")
# NOTE(review): the partitions p, q, r, s used below are not defined in this
# file; they presumably come from a testthat helper/setup file -- confirm.
test_that("setOverlap produces the correct results", {
  expect_equal(setOverlap(c(0, 0, 0, 0), c(1, 1, 1, 1), 0, 1), 4)
  expect_equal(setOverlap(c(0, 0, 0, 0), c(1, 1, 1, 1), 0, 0), 0)
  expect_equal(setOverlap(c(0, 0, 0, 0), c(0, 0, 1, 1), 0, 0), 2)
  expect_equal(setOverlap(c(0, 0, 0, 0), c(0, 0, 1, 1), 0, 1), 2)
})
test_that("projectionNumber produces the correct results", {
  # projectionNumber is not symmetric; both argument orders are pinned.
  expect_equal(projectionNumber(p, q), 4)
  expect_equal(projectionNumber(q, p), 4)
  expect_equal(projectionNumber(p, r), 4)
  expect_equal(projectionNumber(r, p), 3)
  expect_equal(projectionNumber(p, s), 4)
  expect_equal(projectionNumber(s, p), 5)
  expect_equal(projectionNumber(r, s), 3)
  expect_equal(projectionNumber(s, r), 5)
  expect_equal(projectionNumber(rep(0, 10), 1:10), 1)
})
test_that("entropy computation is correct", {
  # Create a large partition of n objects into k clusters
  k <- 1000
  n <- 1000000
  large_partition <- new("Partition", sample(k, n, replace = TRUE))
  # Compute the relative frequencies
  large_partition_relfreq <- as.vector(table(large_partition) / n)
  # entropy is bounded by log(k); the uniform distribution attains it and a
  # point mass has entropy 0.
  expect_true(entropy(large_partition_relfreq) - log(k) <= .Machine$double.eps^.5)
  expect_equal(entropy(rep(1/k, k)), log(k))
  expect_equal(entropy(c(rep(0, k-1), 1)), 0)
  expect_equal(entropy(new("Partition", rep(0, 10))), 0)
  expect_equal(entropy(p), entropy(c(2/5, 3/5)))
  expect_equal(entropy(q), entropy(c(2/5, 3/5)))
  expect_equal(entropy(r), entropy(c(1/5, 4/5)))
  expect_equal(entropy(s), entropy(c(2/5, 2/5, 1/5)))
  expect_true(entropy(large_partition) - log(k) <= .Machine$double.eps^.5)
  expect_true(-entropy(large_partition) <= .Machine$double.eps^.5)
})
test_that("registration of vector signatures for measures works correct", {
  # Not registered => error
  pattern <- "unable to find an inherited method for function 'randIndex' for signature '\"numeric\", \"numeric\"'"
  expect_error(randIndex(c(0, 0, 0, 1, 1), c(0, 0, 1, 1, 1)), regexp = pattern)
  # Only the "Partition" signature exists
  expect_true(hasMethod("randIndex", c("Partition", "Partition")))
  expect_false(hasMethod("randIndex", c("vector", "vector")))
  # Register
  expect_silent(registerPartitionVectorSignatures(environment()))
  # Now the plain vector signature exists also
  expect_true(hasMethod("randIndex", c("vector", "vector")))
  expect_equal(randIndex(c(0, 0, 0, 1, 1), c(0, 0, 1, 1, 1)), randIndex(p, q))
})
|
26c4d36475bb1f10b3b0c20a540d92c8c7c8129a
|
72ad4953ea2c100a03a9ddd364857988a9d1b2de
|
/man/eeg_ica_summary_tbl.Rd
|
222d90baf86ea3ba5fb6fd24effb97b363009e73
|
[
"MIT"
] |
permissive
|
bnicenboim/eeguana
|
28b46a8088f9ca0a370d955987b688542690547a
|
3b475ac0472e6bedf2659a3f102abb9983e70b93
|
refs/heads/master
| 2023-05-23T09:35:20.767216
| 2022-10-10T11:53:21
| 2022-10-10T11:53:21
| 153,299,577
| 22
| 9
|
NOASSERTION
| 2022-11-06T13:40:13
| 2018-10-16T14:26:07
|
R
|
UTF-8
|
R
| false
| true
| 1,292
|
rd
|
eeg_ica_summary_tbl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/out.R
\name{eeg_ica_summary_tbl}
\alias{eeg_ica_summary_tbl}
\title{Show a table with a summary of the results of the ICA.}
\usage{
eeg_ica_summary_tbl(.data, ...)
}
\arguments{
\item{.data}{An \code{eeg_ica_lst} object}
\item{...}{If left empty, the function will assume that EOG channels include eog/EOG in their names, alternatively, EOG channels can be selected here.}
}
\value{
A table with the variance explained by each component, and the correlation between EOG channels and each components in each recording.
}
\description{
This function generates a table with the variance explained by each ICA component, and the correlations
between ICA components and EOG channels. See more details in \link{eeg_ica_cor_tbl} and \link{eeg_ica_var_tbl}.
}
\seealso{
Other ICA functions:
\code{\link{eeg_ica_cor_tbl}()},
\code{\link{eeg_ica_keep}()},
\code{\link{eeg_ica_show}()},
\code{\link{eeg_ica_var_tbl}()},
\code{\link{eeg_ica}()},
\code{\link{plot_components}()}
Other summary functions:
\code{\link{count_complete_cases_tbl}()},
\code{\link{eeg_ica_cor_tbl}()},
\code{\link{eeg_ica_var_tbl}()},
\code{\link{summaries}},
\code{\link{summary.eeg_lst}()}
}
\concept{ICA functions}
\concept{summary functions}
|
c094b6ff36f8ab7fefb9abfce6f4db61729f3055
|
6e5cb9a8d877de59c7290263929d2045962128d5
|
/v1/FanCodeV1/R/my_calc_cor.R
|
775e9fb4213ef06e8d5c70a94fff89ee0e2291b0
|
[
"Apache-2.0"
] |
permissive
|
FanJingithub/MyCode_Project
|
92a3c6732cdb4cafbaac537a85c3c28e213e9f45
|
eb356d243f2c0b6548326d5cd1baffed96dfde63
|
refs/heads/master
| 2022-04-26T07:02:07.134102
| 2020-04-27T18:17:51
| 2020-04-27T18:17:51
| 259,377,390
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,648
|
r
|
my_calc_cor.R
|
#' Calculate the correlation for two kinds of score
#'
#' Computes the matrix of pairwise correlations between the rows (default)
#' or the columns of two score tables.
#'
#' @param data_1 tibble/data.frame (first column = names, excluded from the
#'   computation) or matrix
#' @param data_2 tibble/data.frame or matrix with the same score columns as
#'   \code{data_1}
#' @param score string, "each_row" - score by row, "each_col" - score by col
#'
#' @return matrix of correlations; on invalid input returns 1 (unsupported
#'   type) or 0 (mismatched columns). These sentinel returns are kept for
#'   backward compatibility with existing callers, although stop() would be
#'   cleaner.
#' @export
#'
#' @examples
my_calc_cor <- function(data_1, data_2, score = "each_row") {
  # Classes that identify a tabular (tibble / data.frame) input.
  # Fixes the original "tble_df" typo; inputs were still matched before via
  # the "tbl" / "data.frame" entries, so behavior is unchanged.
  tabular <- c("tbl_df", "tbl", "data.frame")
  if (length(intersect(class(data_1), tabular)) > 0 &&
      length(intersect(class(data_2), tabular)) > 0) {
    message("The first col would not calculate, and used as colname and rowname...\n")
  } else if (inherits(data_1, "matrix") && inherits(data_2, "matrix")) {
    # inherits() replaces class(x) == "matrix": since R 4.0 a matrix has
    # class c("matrix", "array"), so the old scalar comparison produced a
    # length-2 logical, which errors under `&&` in R >= 4.3.
    if (is.null(rownames(data_1))) { rownames(data_1) <- c(1:dim(data_1)[1]) }
    if (is.null(rownames(data_2))) { rownames(data_2) <- c(1:dim(data_2)[1]) }
    data_1 <- as_tibble(data_1, rownames = "name")
    data_2 <- as_tibble(data_2, rownames = "name")
  } else {
    message("The data type is not supported!")
    return(1)
  }
  if (score == "each_col") {
    # transpose both tables so the row-wise code below works on columns
    data_1 <- my_t_tibble(data = data_1, new_name_col_1 = "name")
    data_2 <- my_t_tibble(data = data_2, new_name_col_1 = "name")
  }
  if (!all(colnames(data_1)[-1] == colnames(data_2)[-1])) {
    message("Error: two data not match!")
    return(0)
  }
  col_name_1 <- data_1[[1]]
  col_name_2 <- data_2[[1]]
  cor_result <- matrix(rep(0, length(col_name_1) * length(col_name_2)),
                       nrow = length(col_name_1))
  rownames(cor_result) <- col_name_1
  colnames(cor_result) <- col_name_2
  for (j in seq_along(col_name_1)) {
    for (k in seq_along(col_name_2)) {
      # my_get_tibble_row: project helper, presumably returns the numeric
      # values of row j as a vector -- confirm
      cor_result[j, k] <- cor(my_get_tibble_row(data_1, j),
                              my_get_tibble_row(data_2, k))
    }
  }
  cor_result
}
|
e3e472974e7d75bc9699a927ee5ec8d38cfd5979
|
0b776535f2b134ec26c5dfbb0095e7a4dba49beb
|
/bin/ppca/pca_ppca.R
|
dd90e71beffa2db7e9613dcf40542bed157965a6
|
[] |
no_license
|
RonakSumbaly/Ancestry-Mapping
|
3fc65aab87aea672470b2bfe8083d3293ff145b4
|
5ac774020b1ebda09d85d4cb7ca698529e110ab1
|
refs/heads/master
| 2021-01-09T20:38:12.717319
| 2017-10-09T03:14:47
| 2017-10-09T03:14:47
| 60,070,665
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,516
|
r
|
pca_ppca.R
|
############################################################################
# Project Title: Ancestry Mapping
# Done By: Ronak Sumbaly
# Description: perform pca and ppca
############################################################################
library(pcaMethods)
library(flashpcaR)
# pca mapping
# NOTE(review): pca.baseline is assigned twice -- the flashpca() result is
# immediately overwritten by prcomp(diploid). Confirm which one is intended;
# the plotting below uses the prcomp object (pca.baseline$x, $sdev).
# `pruned.data`, `diploid` and `numeric.mapping` must already exist in the
# workspace.
pca.baseline = flashpca(pruned.data, do_loadings=TRUE, verbose=TRUE, stand="binom", ndim=3,
                        nextra=100)
pca.baseline = prcomp(diploid)
# plot pca with color coding (colors = continental ancestry groups)
pairs(pca.baseline$x[,1:3], col = numeric.mapping, oma=c(4,4,6,12), labels = c("PCA1","PCA2","PCA3"), pch = 20)
par(xpd=FALSE)
legend(0.85, 0.6, as.vector(c("EU", "AM", "AS", "AF")),
       fill=c("red", "green3", "blue", "black"))
# scree plot: proportion of variance explained by the first 10 PCs
plot((pca.baseline$sdev^2/sum(pca.baseline$sdev^2))[1:10],type = "o",xlab = "Principal Components",ylab = "Variance Explained", main = "Variance Associated")
plot(pca.baseline, type = "l")
# NOTE(review): bare symbol `individuals` -- prints the object if it exists,
# errors otherwise; looks like a leftover interactive statement.
individuals
pairs(pca.baseline$x[,1:3], col = numeric.mapping, labels = c("PCA1","PCA2","PCA3"), pch= 20)
# ppca mapping (probabilistic PCA from pcaMethods)
ppca.baseline = ppca(pruned.data, nPcs = 3)
# plot ppca with color coding
pairs(ppca.baseline@scores, col = numeric.mapping, oma=c(4,4,6,12), labels = c("PPCA1","PPCA2","PPCA3"), pch = 20)
par(xpd=FALSE)
legend(0.85, 0.6, as.vector(c("EU", "AM", "AS", "AF")),
       fill=c("red", "green3", "blue", "black"))
plot((ppca.baseline@sdev^2/sum(ppca.baseline@sdev^2))[1:10],type = "o",xlab = "Principal Components",ylab = "Variance Explained", main = "Variance Associated")
|
f2941308f4a22a1509aa17e62011c9f71c7e0ec3
|
7c5bd5fd8072e57dd8742b2cbcad27d256b2573d
|
/cdashQC-master/R/summary_stats.R
|
d48473f7034c2bbb89e3ce43e448bc9355577bf4
|
[] |
no_license
|
statswork/cdashQC
|
101a03c276ca0d4156f4316ac7c14c59f8bee938
|
d69ca24ccca12f73c625d48dcef5a356617d1b08
|
refs/heads/master
| 2021-01-24T10:53:17.631165
| 2017-04-24T19:20:19
| 2017-04-24T19:20:19
| 69,898,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,759
|
r
|
summary_stats.R
|
#' Summary statistics for vital signs or eg
#'
#' Builds per-test / per-time-point / per-treatment summary statistics
#' (count, mean, sd, min, quartiles, max, cv) from cleaned VS or EG data,
#' optionally as change from baseline.
#'
#' @title summary statistics
#' @param data_clean the clean data set returned by \code{\link{replicate_clean}}.
#' @param inter_digit if rounding happens for the intermediate averages, what
#'   digits should be kept (\code{NULL} = no intermediate rounding).
#' @param final_digits what is the digit for the final summary?
#' @param na_rm should missing values be excluded? Default set to be \code{TRUE}.
#' @param ischangefrombase Is this summary for change from baseline? Default set to be \code{FALSE}
#' @return a data frame with summary statistics by test, time point and treatment.
#' @export
summary_vs_eg <- function(data_clean, inter_digit = NULL, final_digits = 3, na_rm=TRUE, ischangefrombase = FALSE){
  # guess_test() (project helper) returns the name of the test column,
  # "VS_TEST" or "EG_TEST" -- used for grouping and sorting below.
  d1 <- guess_test(data_clean)
  # Average replicates, then tag each row with a seq_period key.
  c1 <- replicate_average(data_clean, digits = inter_digit, na_rm = na_rm) %>% ungroup() %>%
    unite_("seq_period", c("SEQ", "PERIOD"), remove = FALSE)
  if(ischangefrombase){ # if it's for change from baseline
    # Split into baseline and post-dose rows, join them per subject/test/SEQ.
    baseline <- c1 %>% filter(status == "BASELINE") %>%
      mutate(baseline = outcome) %>% ungroup() %>%
      select(-outcome, -status, -seq_period, -PERIOD, -PHOUR)
    postdose <- c1 %>% filter(status == "POSTDOSE") %>%
      mutate(postdose = outcome) %>% ungroup() %>%
      select(-outcome, -status)
    c2 <- left_join(postdose %>% arrange_("CLIENTID", d1, "SEQ"),
                    baseline %>% arrange_("CLIENTID", d1, "SEQ"),
                    by = c("CLIENTID", d1, "SEQ"))
    # change from baseline
    c3 <- c2 %>% mutate(outcome = postdose - baseline)
  } else{ # if it's not for change from baseline
    c3 <- c1 %>% filter(status %in% c("BASELINE", "POSTDOSE"))
  }
  # statistical summaries
  c4 <- c3 %>% group_by_(d1, "seq_period", "PHOUR") %>%
    summarise(COUNT = n(),
              mean = mean(outcome, na.rm = na_rm),
              sd = sd(outcome, na.rm = na_rm),
              mininum = min(outcome, na.rm = na_rm),
              q1 = quantile(outcome, probs = 0.25, na.rm = na_rm),
              median = median(outcome, na.rm = na_rm),
              q3 = quantile(outcome, probs = 0.75, na.rm = na_rm),
              maximu = max(outcome, na.rm = na_rm)) %>%
    mutate(cv = sd/mean)
  # transpose: one column per seq_period, one row per statistic
  if (d1 == "VS_TEST"){
    c5 <- c4 %>% gather(statistic, value, -VS_TEST, -seq_period, -PHOUR) %>%
      spread(key = seq_period, value)
  } else {
    c5 <- c4 %>% gather(statistic, value, -EG_TEST, -seq_period, -PHOUR) %>%
      spread(key = seq_period, value)
  }
  # the unique time points, used to sort the result by time point
  timepoint <- data_clean %>% ungroup() %>%
    select(PHOUR, DAY, HOUR) %>%
    distinct(PHOUR, DAY, .keep_all = T)
  c6 <- left_join(c5 %>% arrange(PHOUR),
                  timepoint %>% arrange(PHOUR),
                  by = c("PHOUR"))
  result <- round_df(c6, final_digits) %>% arrange_(d1, "DAY", "HOUR") %>%
    select(-DAY, -HOUR)   # round_df() is included in the useful.R file.
  return(result)
}
#' Get the averages of the replicates
#'
#' @title get the averages of the replicates for \code{vs} or \code{eg} data.
#' @param data_clean an object returned from \code{replicate_clean}
#' @param digits should the averages be rounded? Default NO.
#' @param na_rm should missing values be excluded? Default \code{TRUE}.
#' @return the averages
#' @export
#' @examples
#' # NOTE(review): the example calls replicate_data(), not this function --
#' # confirm the intended example.
#' eg2 <- replicate_data(eg)  # find the triplicates
#' @seealso \code{\link{replicate_clean}}
replicate_average <- function(data_clean, digits = NULL, na_rm = TRUE){
  # guess_test() (project helper) detects whether this is VS or EG data
  # from the *_TEST column name.
  d1 <- guess_test(data_clean, var_identifier = "_TEST")
  clean2 <- data_clean
  # average by time points wherever applicable.
  if (d1 == "VS_TEST") {
    # drop free-text rows; VS results are averaged within body position too
    clean3 <- clean2 %>% filter(VS_TEST != "OVERALL COMMENT") %>%
      mutate(outcome = as.numeric(VS_RES_R))
    # average the replicates by position wherever applicable
    clean4 <- clean3 %>% group_by(CLIENTID, VS_TEST, status, PERIOD, PHOUR, SEQ, DAY, HOUR, VS_POS) %>%
      summarise(outcome = mean(outcome, na.rm = na_rm))
  } else { # if it's eg data
    clean3 <- clean2 %>% filter(EG_TEST != "OVERALL INTERPRETATION") %>%
      mutate(outcome = as.numeric(EG_ORRES))
    clean4 <- clean3 %>% group_by(CLIENTID, EG_TEST, status, PERIOD, PHOUR, SEQ) %>%
      summarise(outcome = mean(outcome, na.rm = na_rm))
  }
  if (!is.null(digits)){ # do you need to round the averages?
    clean4 <- round_df(clean4, digits = digits)
  }
  return(clean4)
}
|
bfa608a2141ecfb98374e54ea55a4fa97aa8127c
|
fd3e853bdee84f2aa5641b8d58c29f9e9290e608
|
/zip_1km_population.R
|
458594639394ea92e1459f18916ed7a287bfb26a
|
[] |
no_license
|
jsimkins2/urban_climate
|
3f3aff201c3055403bc8886f6fab5b04ebece96f
|
f8bcccbd7f6ac6e55a517aa3deb6cb67539ee7d3
|
refs/heads/master
| 2023-01-03T12:23:50.042359
| 2020-10-26T18:00:32
| 2020-10-26T18:00:32
| 177,999,563
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,373
|
r
|
zip_1km_population.R
|
# Package the 1 km urban-population-projection NetCDF files into zip
# archives: one archive per SSP scenario x population type x 5-decade span,
# plus a second pass that wraps each archive again (<name>.zip.zip),
# matching the original script's behavior.
years1 <- seq(2010, 2050, 10)   # first half of the century
years2 <- seq(2060, 2100, 10)   # second half of the century
ssp <- paste0('SSP', seq(1, 5), '_', '1km_NetCDF')
typelist <- c("rural", "total", "urban")
ssp2 <- paste0('ssp', seq(1, 5))

# Build and run the two /usr/bin/zip commands for one scenario, population
# type and 5-element year vector. Factored out of the two near-identical
# copy-pasted loops in the original script.
zip_decades <- function(s, ty, yrs) {
  prefix <- ssp2[which(ssp == s)]
  zip_name <- paste0(substr(s, 10, 16), '_', substr(s, 1, 5), ty, '_',
                     yrs[[1]], '-', yrs[[5]], '.zip')
  # one "*sspN_type_YEAR*" wildcard per decade, space separated
  patterns <- paste0('*', prefix, '_', ty, '_', yrs, '* ', collapse = "")
  system(paste0("/usr/bin/zip -r ", zip_name, " ", patterns,
                "-x '*.DS_Store'"))
  system(paste0("/usr/bin/zip -r ", zip_name, ".zip ", zip_name,
                " -x '*.DS_Store'"))
}

# NOTE(review): setwd() inside a loop is fragile but kept -- the zip
# wildcards must resolve inside each scenario folder.
for (yrs in list(years2, years1)) {   # years2 first, as in the original
  for (s in ssp) {
    for (ty in typelist) {
      setwd(paste0("/Users/james/Documents/Delaware/urban_climate/datasets/1km_urban_population_projections/",
                   "UrbanPopulationProjections_SSPs1-5_2010-2100_v1_NETCDF_1km/", s))
      zip_decades(s, ty, yrs)
    }
  }
}

# NOTE(review): the lines below were raw shell text / a stray symbol in the
# original file and made it unparseable as R; commented out to fix parsing.
# zip -r GeoTIFF_Base_year_urban.zip *urban* -x "*.DS_Store"
# ssp1_rural_2010
|
e4f28a45d1dbac90ae93723147a99f1b7f3a456a
|
1c5de214a549fde2a2ac0baeac1982fccb126f40
|
/inst/doc/billboarder.R
|
24564ed390ecc7bf32e9187cf74a493114940b3c
|
[] |
no_license
|
cran/billboarder
|
59fefd44336f1a6da8e80f57fa8abbf3cfd6454f
|
07fb86e80f6effe899e9c080c40e671a62b081ae
|
refs/heads/master
| 2023-01-22T22:21:51.452308
| 2023-01-08T18:00:06
| 2023-01-08T18:00:06
| 101,809,914
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,072
|
r
|
billboarder.R
|
## ----setup, include = FALSE---------------------------------------------------
# knitr chunk options for the vignette; screenshot.force = FALSE keeps the
# htmlwidgets rendered as live HTML instead of static screenshots.
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>",
  screenshot.force = FALSE
)
library(billboarder)

## -----------------------------------------------------------------------------
# Global billboard.js theme and 9-color Set1 palette for all charts below.
set_theme("insight")
set_color_palette(scales::brewer_pal(palette = "Set1")(9))
## ----barchart-----------------------------------------------------------------
# Simple bar chart: one series (hydraulic production) mapped from the data.
library(billboarder)
# data
data("prod_par_filiere")
billboarder(data = prod_par_filiere) %>%
  bb_barchart(
    mapping = aes(x = annee, y = prod_hydraulique),
    color = "#102246"
  ) %>%
  bb_y_grid(show = TRUE) %>%
  bb_y_axis(
    tick = list(format = suffix("TWh")),
    label = list(text = "production (in terawatt-hours)", position = "outer-top")
  ) %>%
  bb_legend(show = FALSE) %>%
  bb_labs(
    title = "French hydraulic production",
    caption = "Data source: RTE (https://opendata.rte-france.com)"
  )

## ----barchart-dodge-----------------------------------------------------------
# Dodged (side-by-side) bars: one series per renewable energy source.
library(billboarder)
# data
data("prod_par_filiere")
billboarder() %>%
  bb_barchart(
    data = prod_par_filiere[, c("annee", "prod_hydraulique", "prod_eolien", "prod_solaire")]
  ) %>%
  bb_data(
    names = list(prod_hydraulique = "Hydraulic", prod_eolien = "Wind", prod_solaire = "Solar")
  ) %>%
  bb_y_grid(show = TRUE) %>%
  bb_y_axis(
    tick = list(format = suffix("TWh")),
    label = list(text = "production (in terawatt-hours)", position = "outer-top")
  ) %>%
  bb_legend(position = "inset", inset = list(anchor = "top-right")) %>%
  bb_labs(
    title = "Renewable energy production",
    caption = "Data source: RTE (https://opendata.rte-france.com)"
  )

## ----barchart-stacked---------------------------------------------------------
# Same data, stacked bars with value labels and manual per-series colors.
library(billboarder)
# data
data("prod_par_filiere")
# stacked bar chart !
billboarder() %>%
  bb_barchart(
    data = prod_par_filiere[, c("annee", "prod_hydraulique", "prod_eolien", "prod_solaire")],
    stacked = TRUE
  ) %>%
  bb_data(
    names = list(prod_hydraulique = "Hydraulic", prod_eolien = "Wind", prod_solaire = "Solar"),
    labels = TRUE
  ) %>%
  bb_colors_manual(
    "prod_eolien" = "#41AB5D", "prod_hydraulique" = "#4292C6", "prod_solaire" = "#FEB24C"
  ) %>%
  bb_y_grid(show = TRUE) %>%
  bb_y_axis(
    tick = list(format = suffix("TWh")),
    label = list(text = "production (in terawatt-hours)", position = "outer-top")
  ) %>%
  bb_legend(position = "inset", inset = list(anchor = "top-right")) %>%
  bb_labs(
    title = "Renewable energy production",
    caption = "Data source: RTE (https://opendata.rte-france.com)"
  )
## ----scatter------------------------------------------------------------------
# Scatter plot of mtcars, colored by number of cylinders.
billboarder() %>%
  bb_scatterplot(
    data = mtcars,
    x = "wt", y = "mpg", group = "cyl",
    point_opacity = 1
  ) %>%
  # don't display all values on x-axis
  bb_axis(x = list(tick = list(fit = FALSE))) %>%
  bb_point(r = 5) %>%
  # add grids
  bb_x_grid(show = TRUE) %>%
  bb_y_grid(show = TRUE)

## ----scatter-bubble-----------------------------------------------------------
# Bubble variant: point size encodes qsec, rescaled to a visible radius range.
billboarder(data = mtcars) %>%
  bb_scatterplot(
    mapping = aes(wt, mpg, group = cyl, size = scales::rescale(qsec, to = c(0.2, 7))),
    point_opacity = 1
  ) %>%
  bb_axis(x = list(tick = list(fit = FALSE))) %>%
  bb_x_grid(show = TRUE) %>%
  bb_y_grid(show = TRUE)
## ----pie----------------------------------------------------------------------
# Two-slice pie: nuclear production vs everything else for 2016.
library(billboarder)
# data
data("prod_par_filiere")
nuclear2016 <- data.frame(
  sources = c("Nuclear", "Other"),
  production = c(
    prod_par_filiere$prod_nucleaire[prod_par_filiere$annee == "2016"],
    prod_par_filiere$prod_total[prod_par_filiere$annee == "2016"] -
      prod_par_filiere$prod_nucleaire[prod_par_filiere$annee == "2016"]
  )
)
# pie chart !
billboarder() %>%
  bb_piechart(data = nuclear2016) %>%
  bb_labs(title = "Share of nuclear power in France in 2016",
          caption = "Data source: RTE (https://opendata.rte-france.com)")

## ----donut--------------------------------------------------------------------
# Donut variant; the JS() callback formats each slice label as "<id>: <pct>".
billboarder() %>%
  bb_donutchart(data = nuclear2016) %>%
  bb_donut(
    title = "Share of nuclear\nin France",
    label = list(
      format = JS("function(value, ratio, id) { return id + ': ' + d3.format('.0%')(ratio);}")
    )
  ) %>%
  bb_legend(show = FALSE) %>%
  bb_labs(caption = "Data source: RTE (https://opendata.rte-france.com)")
## ----lines-date---------------------------------------------------------------
# Spline time series of monthly consumption vs production, with a subchart
# (mini range selector) below the main plot.
library(billboarder)
# data
data("equilibre_mensuel")
# line chart
billboarder() %>%
  bb_linechart(
    data = equilibre_mensuel[, c("date", "consommation", "production")],
    type = "spline"
  ) %>%
  bb_x_axis(tick = list(format = "%Y-%m", fit = FALSE)) %>%
  bb_x_grid(show = TRUE) %>%
  bb_y_grid(show = TRUE) %>%
  bb_colors_manual("consommation" = "firebrick", "production" = "forestgreen") %>%
  bb_legend(position = "right") %>%
  bb_subchart(show = TRUE, size = list(height = 30)) %>%
  bb_labs(title = "Monthly electricity consumption and production in France (2007 - 2017)",
          y = "In megawatt (MW)",
          caption = "Data source: RTE (https://opendata.rte-france.com)")

## ----lines-zoom1--------------------------------------------------------------
# Same chart, but with drag-to-zoom instead of the subchart.
billboarder() %>%
  bb_linechart(
    data = equilibre_mensuel[, c("date", "consommation", "production")],
    type = "spline"
  ) %>%
  bb_x_axis(tick = list(format = "%Y-%m", fit = FALSE)) %>%
  bb_x_grid(show = TRUE) %>%
  bb_y_grid(show = TRUE) %>%
  bb_colors_manual("consommation" = "firebrick", "production" = "forestgreen") %>%
  bb_legend(position = "right") %>%
  bb_zoom(
    enabled = TRUE,
    type = "drag",
    resetButton = list(text = "Unzoom")
  ) %>%
  bb_labs(title = "Monthly electricity consumption and production in France (2007 - 2017)",
          y = "In megawatt (MW)",
          caption = "Data source: RTE (https://opendata.rte-france.com)")
## ----lines-time---------------------------------------------------------------
library(billboarder)
# data
data("cdc_prod_filiere")
# Retrieve sunrise and and sunset data with `suncalc`
# library("suncalc")
# sun <- getSunlightTimes(date = as.Date("2017-06-12"), lat = 48.86, lon = 2.34, tz = "CET")
sun <- data.frame(
sunrise = as.POSIXct("2017-06-12 05:48:14"),
sunset = as.POSIXct("2017-06-12 21:55:32")
)
# line chart
billboarder() %>%
bb_linechart(
data = cdc_prod_filiere,
mapping = aes(date_heure, prod_solaire)
) %>%
bb_x_axis(tick = list(format = "%H:%M", fit = FALSE)) %>%
bb_y_axis(min = 0, padding = 0) %>%
bb_regions(
list(
start = as.numeric(cdc_prod_filiere$date_heure[1]) * 1000,
end = as.numeric(sun$sunrise)*1000
),
list(
start = as.numeric(sun$sunset) * 1000,
end = as.numeric(cdc_prod_filiere$date_heure[48]) * 1000
)
) %>%
bb_x_grid(
lines = list(
list(value = as.numeric(sun$sunrise)*1000, text = "sunrise"),
list(value = as.numeric(sun$sunset)*1000, text = "sunset")
)
) %>%
bb_labs(title = "Solar production (2017-06-12)",
y = "In megawatt (MW)",
caption = "Data source: RTE (https://opendata.rte-france.com)")
## ----area-stacked-------------------------------------------------------------
library(billboarder)
# data
data("cdc_prod_filiere")
billboarder() %>%
bb_linechart(
data = cdc_prod_filiere[, c("date_heure", "prod_eolien", "prod_hydraulique", "prod_solaire")],
type = "area"
) %>%
bb_data(
groups = list(list("prod_eolien", "prod_hydraulique", "prod_solaire")),
names = list("prod_eolien" = "Wind", "prod_hydraulique" = "Hydraulic", "prod_solaire" = "Solar")
) %>%
bb_legend(position = "inset", inset = list(anchor = "top-right")) %>%
bb_colors_manual(
"prod_eolien" = "#238443", "prod_hydraulique" = "#225EA8", "prod_solaire" = "#FEB24C",
opacity = 0.8
) %>%
bb_y_axis(min = 0, padding = 0) %>%
bb_labs(title = "Renewable energy production (2017-06-12)",
y = "In megawatt (MW)",
caption = "Data source: RTE (https://opendata.rte-france.com)")
## ----lines-range--------------------------------------------------------------
# Generate data
dat <- data.frame(
date = seq.Date(Sys.Date(), length.out = 20, by = "day"),
y1 = round(rnorm(20, 100, 15)),
y2 = round(rnorm(20, 100, 15))
)
dat$ymin1 <- dat$y1 - 5
dat$ymax1 <- dat$y1 + 5
dat$ymin2 <- dat$y2 - sample(3:15, 20, TRUE)
dat$ymax2 <- dat$y2 + sample(3:15, 20, TRUE)
# Make chart : use ymin & ymax aes for range
billboarder(data = dat) %>%
bb_linechart(
mapping = aes(x = date, y = y1, ymin = ymin1, ymax = ymax1),
type = "area-line-range"
) %>%
bb_linechart(
mapping = aes(x = date, y = y2, ymin = ymin2, ymax = ymax2),
type = "area-spline-range"
) %>%
bb_y_axis(min = 50)
## ----histo--------------------------------------------------------------------
# Histogram of 100k standard-normal draws, 0.25-wide bins.
# NOTE(review): no set.seed() anywhere in these chunks, so the drawn data
# (here and below) differ on every run.
billboarder() %>%
bb_histogram(data = rnorm(1e5), binwidth = 0.25) %>%
bb_colors_manual()
## ----histo-group--------------------------------------------------------------
# Generate some data
# Two overlapping normal samples (means 1 and 2), labelled A and B.
dat <- data.frame(
sample = c(rnorm(n = 1e4, mean = 1), rnorm(n = 1e4, mean = 2)),
group = rep(c("A", "B"), each = 1e4), stringsAsFactors = FALSE
)
# Mean by groups
samples_mean <- tapply(dat$sample, dat$group, mean)
# histogram !
# Grouped histogram with a vertical grid line at each group mean.
billboarder() %>%
bb_histogram(data = dat, x = "sample", group = "group", binwidth = 0.25) %>%
bb_x_grid(
lines = list(
list(value = unname(samples_mean['A']), text = "mean of sample A"),
list(value = unname(samples_mean['B']), text = "mean of sample B")
)
)
## ----density-group------------------------------------------------------------
# Density version of the previous chart; reuses `dat` and `samples_mean`
# from the histo-group chunk above.
billboarder() %>%
bb_densityplot(data = dat, x = "sample", group = "group") %>%
bb_x_grid(
lines = list(
list(value = unname(samples_mean['A']), text = "mean of sample A"),
list(value = unname(samples_mean['B']), text = "mean of sample B")
)
)
|
2e9c0db30632e9abbedc34574c09f6fa0230dbe0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sloop/examples/otype.Rd.R
|
4847d2fa269350a110c485f5ed839cfc54434066
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 142
|
r
|
otype.Rd.R
|
# Purled example script from the sloop package's otype() help page.
library(sloop)
### Name: otype
### Title: Determine the type of an object
### Aliases: otype
### ** Examples
# otype() reports which object system a value belongs to (presumably
# "base" for an atomic vector and "S3" for a data.frame such as
# mtcars -- confirm against the sloop documentation).
otype(1:10)
otype(mtcars)
|
8f71ebb4a01b56e2220cd698e31e47837558c11d
|
f67c710d5ff00df3dbafe5ca07e0447d7acdfe7e
|
/cachematrix.R
|
15fdd93808e0df5741679fd43e1344ef8eba64b3
|
[] |
no_license
|
RezaJalayer/datasciencecoursera
|
48968f789b6f4711a95b3df868140b044464445e
|
fa2a0941b12e8a978c9e9ae44c4aadf4a4f83e06
|
refs/heads/master
| 2020-03-17T12:19:10.451485
| 2018-05-26T18:02:16
| 2018-05-26T18:02:16
| 133,583,213
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 965
|
r
|
cachematrix.R
|
## This is to compute the inverse of a matrix efficiently.
## The efficiency comes from calculating the inverse only once until the matrix is changed
## This function gets a matrix and returns a list of 4 functions that will be used in the next function for inverse calculation
## Build a "cache-aware" matrix: a list of four accessor closures sharing a
## private environment that holds the matrix `x` and a memoised inverse.
## Storing a new matrix via set() discards any previously cached inverse.
##
## Returns a list with elements set, get, setInv, getInv (used by
## cacheSolve() below).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # Replace the stored matrix and invalidate the cache.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() {
      x
    },
    setInv = function(inverse) {
      cached_inverse <<- inverse
    },
    getInv = function() {
      cached_inverse
    }
  )
}
## This function returns the matrix inverse by either returning the previuosly calculated inverse (no calculation) or calculating the inverse
## Return the inverse of the cache-aware matrix `x` (as produced by
## makeCacheMatrix). A previously cached inverse is returned directly
## (announced via a message); otherwise the inverse is computed with
## solve(), stored back into the cache, and returned. Any extra
## arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getInv()
  if (is.null(cached)) {
    # Cache miss: compute, then remember for next time.
    cached <- solve(x$get(), ...)
    x$setInv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
5fd1e14b95eb9dec8ae292a2d3b7dd0cfdc4db4d
|
2011b48a27d5efd3b3267e565026d82019fd13cf
|
/Rscripts/01_ExtracciónValores.rsx
|
3670008b714e347c191f6c7bf30b86ebfd94f1c4
|
[
"MIT"
] |
permissive
|
klauswiese/QGIS-R
|
9203f367d6525a7b7c34e547ac6b4ea6d1cd0838
|
d4da3aa782427c082c82299b7374dfc79843f019
|
refs/heads/main
| 2023-09-06T07:13:07.192458
| 2021-10-28T13:51:11
| 2021-10-28T13:51:11
| 395,768,565
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 951
|
rsx
|
01_ExtracciónValores.rsx
|
# QGIS Processing R script: samples raster values under training polygons
# for supervised classification (course CTE534).
# Lines starting with ## are QGIS script parameter declarations and must
# stay verbatim; lines with a single # are disabled declarations.
##CTE534_ClasificacionSupervisada=group
##Imagen=raster
##Vector=vector
#Directorio=string
##Codigo=field Vector
##Usos=field Vector
#TablaUsos=output table
#CodigoUsos=output table
#EntrenamientoRaster=output raster
#Covariables=output raster
#Trainingbrick=output raster
##ValueTable=output table
#showplots
#showplots
# Sample every raster band at the training polygons; df=TRUE yields a data.frame.
Salida<-raster::extract(Imagen, Vector, df=TRUE)
Vector$ID<-row.names(Vector)
# NOTE(review): "x.by" is not a merge() argument (should be by.x); as
# written it is absorbed by "..." -- verify this join actually works as
# intended with by.y="ID" alone.
TablaUsos<-merge(Salida, Vector, x.by="ID", by.y="ID")
# Rasterize the training polygons using the class-code field.
EntrenamientoRaster <- raster::rasterize(Vector, Imagen[[1]], field=Codigo)
# Covariates clipped to the training-area mask.
Covariables <- raster::mask(Imagen, EntrenamientoRaster)
# Training brick: covariates plus the rasterized class layer.
Trainingbrick <- raster::addLayer(Covariables, EntrenamientoRaster)
# Extract every cell value into a matrix.
valuetable<- raster::getValues(Trainingbrick)
# Convert to data frame.
valuetable <- as.data.frame(valuetable)
# Drop rows containing NA.
ValueTable <- na.omit(valuetable)
# Unique land-use labels (note: overwrites the parameter name "Usos").
Usos <- unique(Vector[[Usos]])
|
0d26233157f7eaf7660fd83dd2adebd4a844d955
|
9262b19dac497b74eb35c29baf7daa2c41da69d3
|
/src/OptimzationLab.R
|
b7c26f65c6899127579168715e02f36e1d2a96de
|
[] |
no_license
|
uwesterr/CoronaPredict
|
dfde5c84b6041555684413b5ca45035a253dc4bd
|
8d2c1a6d3026a12a8f1bc99e6e00f50d6a202962
|
refs/heads/master
| 2021-04-17T15:41:41.005467
| 2020-05-18T19:40:29
| 2020-05-18T19:40:29
| 249,456,068
| 1
| 2
| null | 2020-05-15T07:58:45
| 2020-03-23T14:38:28
|
HTML
|
UTF-8
|
R
| false
| false
| 3,127
|
r
|
OptimzationLab.R
|
library(GA)
library(staTools)
library(shinyWidgets)
library(shinyalert)
library(writexl)
library(rlang)
library(DT)
library(modelr)
library(tidyr)
library(jsonlite)
library(shiny)
library(tidyverse)
library(lubridate)
library(zoo)
library(plotly)
library(readxl)
library(scales)
library(optimx)
source(file = "src/Rechenkern.R")
source(file = "src/createLandkreisR0_no_erfasstDf.R")
source(file = "src/createDfBundLandKreis.R")
source(file = "src/optimizerLoopingR0N0.R")
source(file = "src/helperForCovid19.R")
source(file = "src/Rechenkern.R")
source(file = "src/createLandkreisR0_no_erfasstDf.R")
source(file = "src/createDfBundLandKreis.R")
source(file = "src/optimizerLoopingR0N0.R")
source(file = "src/helperForCovid19.R")
load("data/inputExample.RData")
input <- isolate(reactiveValuesToList(inputExample))
vals <- list(Flag = "Bundesland")
input$BundeslandSelected <- "Thüringen"
r0_no_erfasstDf <- createLandkreisR0_no_erfasstDf(historyDfBundesLand, historyDfBund, regionSelected, vals, input,session)
dfRoNo <- r0_no_erfasstDf[[1]]
n0_erfasst_nom_min_max <- r0_no_erfasstDf[[2]]
R0_conf_nom_min_max <- r0_no_erfasstDf[[3]]
startDate <- r0_no_erfasstDf[[4]]
rechenDf_nom <- cbind(dfRoNo,n0_erfasst=n0_erfasst_nom_min_max$n0_erfasst_nom, R0 =R0_conf_nom_min_max$R0_nom)
# Objective function for the optimizers below (optimr / GA): evaluates the
# epidemic model for a candidate triple of contact-reduction values and
# returns the resulting fit metric.
#
# reduzierung_rt1/2/3 -- candidate reduction values written into a copy of
#                        the app `input` list before running the model.
# R0Opt, n0Opt        -- passed by the callers below but never read in
#                        this body.
# startDate, rechenDf_nom, input -- forwarded model context.
calcPredictionsForOptimization = function(reduzierung_rt1, reduzierung_rt2, reduzierung_rt3, R0Opt, n0Opt, startDate, rechenDf_nom, input) {
#browser()
inputForOptimization <- input# to make setting reduzierung_rtx easy and fast
inputForOptimization$reduzierung_rt1 <- reduzierung_rt1
inputForOptimization$reduzierung_rt2 <- reduzierung_rt2
inputForOptimization$reduzierung_rt3 <- reduzierung_rt3
# Run the model (Rechenkern is sourced from src/Rechenkern.R) and score it.
dfRechenKern <- (Rechenkern(rechenDf_nom, inputForOptimization, startDate))
metric <- calcMetric(dfRechenKern, rechenDf_nom)
cat("metric: ", metric)
# NOTE(review): the original trailing comment said "minus because GA
# maximizes", but no sign flip is applied here -- the GA call below will
# therefore MAXIMIZE this metric. Confirm whether it should be negated.
return(metric)
}
# Fit metric between observed case counts and model predictions.
#
# Joins the observations (`data`: columns MeldeDate, SumAnzahl) to the
# model output (`dfRechenKern`: columns Tag, ErfassteInfizierteBerechnet)
# on date, drops dates without a prediction, and returns the L2 norm of
# the residuals (smaller is better).
#
# BUG FIX: the previous expression was (sum(d)^2)^0.5 == abs(sum(d)) --
# the square and square root cancelled each other, so positive and
# negative residuals cancelled out. The residuals must be squared
# elementwise before summing.
calcMetric <- function(dfRechenKern, data) {
  res <- data %>%
    left_join(
      dfRechenKern %>% select(Tag, ErfassteInfizierteBerechnet),
      by = c("MeldeDate" = "Tag")
    ) %>%
    filter(!is.na(ErfassteInfizierteBerechnet))
  sqrt(sum((res$SumAnzahl - res$ErfassteInfizierteBerechnet)^2))
}
lower = c(10, 10,-20 )
upper = c(30, 30, 20)
# https://bergant.github.io/nlexperiment/flocking_bfgs.html
tic()
optimrRes <- optimr(c(30, 20,-0 ), function(x) calcPredictionsForOptimization(x[1], x[2], x[3], R0_conf_nom_min_max, n0_erfasst_nom_min_max, startDate, rechenDf_nom, input),
lower = lower, upper = upper, method="L-BFGS-B",control = list(maxit = 1, trace = 1))
toc()
proptimr(optimrRes)
optimrRes
optimrRes$par
GA <- ga(type = "real-valued",
fitness = function(x) calcPredictionsForOptimization(x[1], x[2], x[3], R0_conf_nom_min_max, n0_erfasst_nom_min_max, startDate, rechenDf_nom, input),
seed = 2020,
lower = lower, upper = upper,
popSize = 10, maxiter = 30, run = 5)
# browser()
|
8a7c03564985463c8b79a5d95f866f75b4f8c166
|
692e4a75bd72a4deba02328a10bade2b67a17d70
|
/ShinyApp/server.R
|
efd74726fdc0957a917b38af89a7d1db821e6d0d
|
[] |
no_license
|
reemabdullah/Visu_Superheroes
|
96d367f8c28843fb872045efcba5eff299249a5a
|
e4e484e1e9b9c98c4f83ac8061175423509ce76f
|
refs/heads/master
| 2021-03-29T14:29:10.719568
| 2020-03-23T15:59:51
| 2020-03-23T15:59:51
| 247,957,804
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 803
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
# Define server logic required to draw a histogram
# Server: reads the superheroes dataset and renders a scatter plot (with a
# smoothed trend line) of the two columns the user selects in the UI.
shinyServer(function(input, output) {
# Read inside the server function, so the CSV is loaded per session; the
# file must sit in the app's working directory.
Superheroes <- read.csv("superheroes.csv")
output$distPlot <- renderPlot({
# .data[[...]] resolves the user-selected column names tidy-eval-safely.
ggplot(Superheroes, aes(x = .data[[input$var1]], y = .data[[input$var2]]))+
theme_bw() +
geom_point() +
geom_smooth() +
# Title/axis labels echo the selected variables (user-facing French text).
labs(title = paste("Corrélation entre", as.character(input$var1),"et", as.character(input$var2)),
x = as.character(input$var1),
y = as.character(input$var2))
})
})
|
580d8f3ab76fa65e8ad8e1901bc194344675b611
|
38988692dd3e89dcc6fde5a0c48cfc98a3d16f51
|
/tests/testthat/test-ct_search.R
|
5fbd1b514c259959da4278c91014fa06d87d43b1
|
[] |
no_license
|
rtaph/comtradr
|
7c8ae748b872b0110f556443876fbbee8526b223
|
8acf1e0a76173940ffff3ecd815da80d48371450
|
refs/heads/master
| 2021-01-19T15:55:32.179919
| 2017-08-09T03:56:31
| 2017-08-09T03:56:31
| 100,980,391
| 1
| 0
| null | 2017-08-21T18:21:42
| 2017-08-21T18:21:41
| null |
UTF-8
|
R
| false
| false
| 5,862
|
r
|
test-ct_search.R
|
context("ct_search")
# All tests on the expected return data.
test_that("search return values are correct, and fail when expected", {
#skip_on_cran()
#skip_on_travis()
countrydf <- ct_countries_table()
# Get monhtly data on all German imports into Canada,
# 2011-01-01 thru 2011-05-01.
ex1 <- ct_search(reporters = "Canada",
partners = "Germany",
countrytable = countrydf,
tradedirection = "imports",
freq = "monthly",
startdate = "2011-01-01",
enddate = "2011-05-01")
# Get yearly data on Canadian shrimp exports into Germany and Thailand,
# for all years on record.
shrimp_codes <- c("030613",
"030623",
"160520",
"160521",
"160529")
ex2 <- ct_search(reporters = "USA",
partners = c("Germany", "Thailand"),
countrytable = countrydf,
tradedirection = "exports",
freq = "annual",
startdate = "all",
enddate = "all",
commodcodes = shrimp_codes)
## ex1 tests
# Data type.
expect_is(ex1, "list")
expect_is(ex1$data, "data.frame")
# Number of variables.
expect_equal(ncol(ex1$data), 35)
# Variable "Reporter".
expect_equal(unique(ex1$data$Reporter), "Canada")
# Variable "Partner".
expect_equal(unique(ex1$data$Partner), "Germany")
# Variable "Trade Flow".
expect_equal(unique(ex1$data$`Trade Flow`), "Imports")
# Variable "Period".
expect_equal(ex1$data$Period[1], 201101)
# Variable "Commodity Code".
expect_equal(unique(ex1$data$`Commodity Code`), "TOTAL")
## ex2 tests
# Data type.
expect_is(ex2, "list")
expect_is(ex2$data, "data.frame")
# Number of variables.
expect_equal(ncol(ex2$data), 35)
# Variable "Reporter".
expect_equal(unique(ex2$data$Reporter), "USA")
# Variable "Partner".
expect_equal(sort(unique(ex2$data$Partner)), c("Germany", "Thailand"))
# Variable "Trade Flow".
expect_equal(unique(ex2$data$`Trade Flow`), "Export")
# Variable "Period".
expect_equal(sort(unique(ex2$data$Period))[1:3], c(1991, 1992, 1993))
# Variable "Commodity Code".
expect_equal(sort(unique(ex2$data$`Commodity Code`)), shrimp_codes)
# Variable "Netweight (kg)".
expect_is(ex2$data$`Netweight (kg)`, "integer")
## Check that ct_search is failing as expected.
# Throw error with invalid input for param "reporters".
expect_error(ct_search(reporters = "invalid_reporter",
partners = "Germany",
countrytable = countrydf,
tradedirection = "imports"))
# Throw error with invalid input for param "partners".
expect_error(ct_search(reporters = "Canada",
partners = "invalid_partner",
countrytable = countrydf,
tradedirection = "imports"))
# Throw error with invalid input for param "countrytable".
expect_error(ct_search(reporters = "Canada",
partners = "Germany",
countrytable = data.frame(),
tradedirection = "imports"))
# Throw error with invalid input for param "tradedirection".
expect_error(ct_search(reporters = "Canada",
partners = "Germany",
countrytable = countrydf,
tradedirection = "invalid_td"))
# Throw error with invalid input for param "type".
expect_error(ct_search(reporters = "Canada",
partners = "Germany",
countrytable = countrydf,
tradedirection = "imports",
type = "invalid_type"))
# Throw error with invalid input for param "freq".
expect_error(ct_search(reporters = "Canada",
partners = "Germany",
countrytable = countrydf,
tradedirection = "imports",
freq = "invalid_freq"))
# Throw error with invalid input for params "startdate" and "endate".
expect_error(ct_search(reporters = "Canada",
partners = "Germany",
countrytable = countrydf,
tradedirection = "imports",
freq = "monthly",
startdate = "1/1/2011",
enddate = "5/1/2011"))
# Returned error msg from the API with invalid input for param "commodcodes".
ex1 <- ct_search(reporters = "Canada",
partners = "Germany",
countrytable = countrydf,
tradedirection = "imports",
commodcodes = "invalid_codes")
expect_equal(ex1$details, "invalid_codes is an invalid commodity code.")
# Throw error with invalid input for param "fmt".
expect_error(ct_search(reporters = "Canada",
partners = "Germany",
countrytable = countrydf,
tradedirection = "imports",
fmt = "invalid_fmt"))
# Throw error with invalid input for param "colname".
expect_error(ct_search(reporters = "Canada",
partners = "Germany",
countrytable = countrydf,
tradedirection = "imports",
colname = "invalid_fmt"))
# Throw error with invalid input for param "codetype".
expect_error(ct_search(reporters = "Canada",
partners = "Germany",
countrytable = countrydf,
tradedirection = "imports",
codetype = "invalid_codetype"))
})
|
79b989879b09ccd56cef092a398cf55200cdbdfb
|
5874ae0a22213c3a692763f285a64433bd512f94
|
/R/d3_R_eg1.R
|
e818fa36b305a2292618690e89dd3900d63b98a2
|
[] |
no_license
|
d8aninja/code
|
8356486291a2db9f419549edaa525d9bbe39abfc
|
80a836db49a31ecd6d0e62aaf8b6e4417c49df68
|
refs/heads/master
| 2021-09-06T11:35:13.956195
| 2018-02-06T04:11:33
| 2018-02-06T04:11:33
| 80,898,202
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,134
|
r
|
d3_R_eg1.R
|
library(xml2)
library(htmltools)
library(rvest)
library(dplyr)
library(sunburstR)
url <- "http://www.rolltide.com/services/responsive-roster-bio.ashx?type=stats&rp_id=3153&path=football&year=2016&player_id=0"
ridley <- read_html(url)
#
games <- ridley %>%
html_node('table') %>%
html_nodes('tbody tr th') %>%
html_text()
run_yards <- ridley %>%
html_node('table') %>%
html_nodes('td:nth-child(4)') %>%
html_text() %>%
purrr::map_dbl(as.numeric)
pass_yards <- ridley %>%
html_node('table') %>%
html_nodes('td:nth-child(8)') %>%
html_text() %>%
purrr::map_dbl(as.numeric)
ridley_df <- data.frame(
game = games,
run = run_yards[-length(run_yards)],
pass = pass_yards[-length(pass_yards)],
stringsAsFactors = FALSE
) %>%
tidyr::gather(type, value, -game)
(
sb <- ridley_df %>%
mutate(path = paste(type, game, sep="-")) %>%
select(path,value) %>%
sunburstR::sunburst(percent=TRUE, count=TRUE)
)
# now build the e2d3 dot-bar chart
# see https://github.com/timelyportfolio/e2d3R/blob/master/prototype.R
e2d3 <- htmlDependency(
name = "e2d3",
version = "0.6.4",
src = c(href = "https://cdn.rawgit.com/timelyportfolio/e2d3/master/dist/lib"),
script = "e2d3.js"
)
# make a function for now as convenience
# to allow R data.frame in proper format
# but eventually rewrite e2-dot-bar with arguments
# to allow other column names for the hierarchy
e2d3_dot_builder <- function(data = NULL) {
browsable(
attachDependencies(
tagList(
tags$div(
id = "chart"
),
tags$script(HTML(
sprintf(
"
var root = document.getElementById('chart');
var data = '%s';
%s
var dim = { width: 600, height: 400 };
var margin = { top: 30, bottom: 50, left: 50, right: 20 };
var inputHeight = 20;
var numberFormat = d3.format('.0f');
dim.graphWidth = dim.width - margin.left - margin.right;
dim.graphHeight = dim.height - margin.top - margin.bottom;
require(['e2d3model'],function(model){
var rows = d3.csv.parseRows(data);
update(new model.ChartDataTable(rows));
})
",
paste0(
capture.output(write.csv(data, row.names=FALSE)),
collapse="\\n"
),
paste0(
readLines("C:\\Users\\jvangeete\\Google Drive\\Code\\R\\d3js\\dot-bar-chart\\d3.min.js"),
collapse="\n"
)
)
))
),
list(e2d3)
)
)
}
e2db <- ridley_df %>%
tidyr::spread(type, value) %>%
mutate(Year = game) %>%
select(Year,everything()) %>%
select(-game) %>%
e2d3_dot_builder()
browsable(
tagList(
tags$h1(
"University of Alabama | ",
tags$a(
href="http://www.rolltide.com/roster.aspx?rp_id=556",
"Calvin Ridley"
)
),
tags$h3("Sunburst from sunburstR"),
sb,
tags$h3("e2d3 Dot Bar Chart"),
e2db
)
)
|
d7d7f3bd685d70af9c1dbd356bcd6b9ab7090b7d
|
56878bcfe97163e0320567ae1c762087fefecfc0
|
/Untitled.R
|
7360cb135b38d1d05ac8721b98bf14b647cc294e
|
[] |
no_license
|
beehoover/EcoDataSci
|
78e2d0eb91fcd039416eda1716cff255fc8c0cd4
|
4cbd17f43d91f4849c37fa5c6608ede46b839a89
|
refs/heads/master
| 2020-04-28T12:21:49.673074
| 2019-03-12T19:51:19
| 2019-03-12T19:51:19
| 175,273,886
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 199
|
r
|
Untitled.R
|
#merging my datasets
old_data<-read.csv(here("raw_data", "iris.csv"))
new_data<-read.csv(here("raw_data", "iris_mythica.csv"))
data<-rbind(old_data, new_data)
write.csv(data, file="iris_four.csv")
|
0a068f6a0707b66edc88f8a3a1597405cecbf155
|
fe612f81a3118bf3ebef644bae3281bd1c156442
|
/man/h2o.ifelse.Rd
|
ab562d397dfe215a5e70900c6c85e3cccf271a45
|
[] |
no_license
|
cran/h2o
|
da1ba0dff5708b7490b4e97552614815f8d0d95e
|
c54f9b40693ae75577357075bb88f6f1f45c59be
|
refs/heads/master
| 2023-08-18T18:28:26.236789
| 2023-08-09T05:00:02
| 2023-08-09T06:32:17
| 20,941,952
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,079
|
rd
|
h2o.ifelse.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frame.R
\name{h2o.ifelse}
\alias{h2o.ifelse}
\alias{ifelse}
\title{H2O Apply Conditional Statement}
\usage{
h2o.ifelse(test, yes, no)
ifelse(test, yes, no)
}
\arguments{
\item{test}{A logical description of the condition to be met (>, <, =, etc...)}
\item{yes}{The value to return if the condition is TRUE.}
\item{no}{The value to return if the condition is FALSE.}
}
\value{
Returns a vector of new values matching the conditions stated in the ifelse call.
}
\description{
Applies conditional statements to numeric vectors in H2O parsed data objects when the data are
numeric.
}
\details{
Both numeric and categorical values can be tested. However, the \code{yes} and \code{no}
return values must be of the same kind: either both categorical or both numeric.
}
\examples{
\dontrun{
library(h2o)
h2o.init()
australia_path <- system.file("extdata", "australia.csv", package = "h2o")
australia <- h2o.importFile(path = australia_path)
australia[, 9] <- ifelse(australia[, 3] < 279.9, 1, 0)
summary(australia)
}
}
|
adfcd7daae3c6158fb5151e006bfdb3db37b0663
|
d46e60e1246b3899bf6f91ddb62fe01f9fc568b4
|
/Project2/plot3.R
|
937454f9591f4ef2d8fdf9005da886c059ef70a0
|
[] |
no_license
|
BLKhoo/Exploratory-Data-Analysis
|
deaf72172f1fd6c17f20c26851e4f7c06eb1220e
|
b478cdcd805d4745da2f32fce7f852111f9f5438
|
refs/heads/master
| 2016-08-04T18:57:20.484337
| 2015-07-26T19:42:32
| 2015-07-26T19:42:32
| 38,964,175
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 498
|
r
|
plot3.R
|
# Plot 3: yearly PM2.5 emissions in Baltimore City, faceted by source type.
# NOTE(review): the script calls ggplot() but no library(ggplot2) is
# visible here -- confirm it is loaded before sourcing.
# Read the R object data
NEI <- readRDS("data//summarySCC_PM25.rds")
SCC <- readRDS("data//Source_Classification_Code.rds")   # loaded but unused in this plot
BC <- subset(NEI, fips =="24510") # select only Baltimore City, Maryland records
# One stacked bar per year, faceted by the emission `type` column.
ggplot(BC, aes(x=factor(year),y=Emissions)) + geom_bar(stat="identity") + xlab("Year") +
ylab(expression('PM'[2.5])) + facet_grid( . ~ type ) +
labs(title="Baltimore City, Maryland Emissions across four sources")
# Copy the on-screen plot to a 480x480 PNG, then close the PNG device.
dev.copy(png,file="plot3.png",height = 480, width = 480)
dev.off()
|
8d9d9227ae219917a1251b1199458fe737c310fd
|
df2efa599c39310e4c6c52be6d17163828b04aa4
|
/R Programming/rprog-data-specdata/corr.R
|
7e0f16ca973cff7e88ccb35b9a8bd8441970c95b
|
[] |
no_license
|
pappjr/datasciencecoursera
|
ba6f7dd7294d68d05879a8252fdd92f794200947
|
bb77b89e6ab8962651f73611d9ddc4b3b1a8382b
|
refs/heads/master
| 2021-01-13T02:26:30.963874
| 2015-08-21T23:42:07
| 2015-08-21T23:42:07
| 38,457,637
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 761
|
r
|
corr.R
|
corr <- function(directory, threshold = 0) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files
  ## 'threshold' is a numeric vector of length 1 indicating the
  ## number of completely observed observations (on all
  ## variables) required to compute the correlation between
  ## nitrate and sulfate; the default is 0
  ## Return a numeric vector of correlations
  ## NOTE: Do not round the result!

  # complete() (defined elsewhere in this project) returns one row per
  # monitor file, with the count of complete cases in column 2.
  comp <- complete(directory)
  keep <- comp[, 2] > threshold

  # Keep only files whose complete-case count exceeds the threshold.
  # Assumes list.files() order matches the order used by complete(),
  # same as the original implementation.
  fs <- list.files(directory)[keep]

  # FIX: the original grew the result with rbind() inside a loop, which
  # is O(n^2) and actually produced an n x 1 matrix rather than the
  # numeric vector promised above. vapply() preallocates and returns a
  # plain, unnamed numeric vector.
  vapply(fs, function(fname) {
    d <- read.csv(file.path(directory, fname))
    cor(d$sulfate, d$nitrate, use = "complete.obs")
  }, numeric(1), USE.NAMES = FALSE)
}
|
131ddddd40931759a3a68a6ecb0776596f9d0093
|
a361f14c000fc1c153eaeb5bf9f4419951c7e3aa
|
/tests/testthat/test-covars_make.R
|
b8059ff197fef4a1ac5cee37ad6621f0372eaf28
|
[] |
no_license
|
kbenoit/sophistication
|
0813f3d08c56c1ce420c7d056c10c0d8db4c420e
|
7ab1c2a59b41bd9d916383342b6ace23df2b1906
|
refs/heads/master
| 2021-06-04T23:19:22.944076
| 2021-05-08T13:40:26
| 2021-05-08T13:40:26
| 101,664,780
| 43
| 7
| null | 2020-08-25T11:19:22
| 2017-08-28T16:40:21
|
R
|
UTF-8
|
R
| false
| false
| 5,265
|
r
|
test-covars_make.R
|
context("test covariate creation")
test_that("covars_make works as expected", {
# 6 4 words
# 1 2 3 4 5 6 7 8 9 10 1 2 3 4 5 6 7 syllables
# 3 + 5 + 6 + 3 + 3 + 11 + 3 + 6 + 5 + 8 characters
txt <- "The silly ginger cat ate gingerbread. The gopher drank lemonade."
result <- covars_make(txt)
expect_equal(result$meanSentenceLength, (6 + 4)/2)
expect_equal(result$meanWordSyllables, 17 / (6 + 4))
expect_equal(result$meanWordChars, sum(nchar(as.character(quanteda::tokens(txt, remove_punct = TRUE)))) / (6 + 4))
expect_equal(result$meanSentenceChars, sum(nchar(as.character(quanteda::tokens(txt, remove_punct = TRUE)))) / 2)
expect_equal(result$meanSentenceSyllables, (10 + 7) / 2)
expect_equal(result$W3Sy, 2 / 10)
expect_equal(result$W2Sy, 5 / 10)
expect_equal(result$W2Sy, 5 / 10)
expect_equal(result$W6C, 4 / 10)
expect_equal(result$Wlt3Sy, 8 / 10)
# note: Dale-Chall is the proportion of words NOT in the Dale-Chall list
expect_equal(
result$W_wl.Dale.Chall,
length(which(! quanteda::char_tolower(as.character(quanteda::tokens(txt, remove_punct = TRUE))) %in%
quanteda.textstats::data_char_wordlists$dalechall)) / 10
)
})
txt <- c(test1 = "One two cat. One two cat. Always eat apples.")
frompredict <- as.data.frame(sophistication:::get_covars_from_newdata.character(txt))
test_that("meanSentenceChars computed the same in predict v component function", {
# should be: (9 + 9 + 15) / 3 = 11
expect_equivalent(covars_make(txt)[, c("meanSentenceChars")], 11)
expect_equivalent(frompredict[, "meanSentenceChars"], 11)
})
test_that("google_min_2000 computed the same in predict v component function", {
expect_equal(
covars_make_baselines(txt)[, c("google_min_2000")],
frompredict[, "google_min"]
)
})
test_that("meanWordChars computed the same in predict v component function", {
# should be: ((3 + 3 + 3) * 2 + 6 + 3 + 6) / 9 = 3.6667
expect_equal(as.numeric(covars_make(txt)[, c("meanWordChars")]), 3.6667, tol = .0001)
expect_equal(as.numeric(frompredict[, "meanWordChars"]), 3.6667, tol = .0001)
})
test_that("pr_noun computed the same in predict v component function", {
# should be: (1 + 1 + 1) / 9 = 0.33333
# doc_id sentence_id token_id token lemma pos entity
# 1 test1 1 1 One one NUM CARDINAL_B
# 2 test1 1 2 two two NUM CARDINAL_B
# 3 test1 1 3 cat cat NOUN
# 4 test1 1 4 . . PUNCT
# 5 test1 1 5 SPACE
# 6 test1 2 1 One one NUM CARDINAL_B
# 7 test1 2 2 two two NUM CARDINAL_B
# 8 test1 2 3 cat cat NOUN
# 9 test1 2 4 . . PUNCT
# 10 test1 2 5 SPACE
# 11 test1 3 1 Always always ADV
# 12 test1 3 2 eat eat VERB
# 13 test1 3 3 apples apple NOUN
# 14 test1 3 4 . . PUNCT
expect_equal(covars_make_pos(txt)[, c("pr_noun")], 0.333, tol = .001)
expect_equal(frompredict[, "pr_noun"], 0.333, tol = .001) # 0.214
})
test_that("paper example texts are correctly computed", {
library("quanteda")
txt_clinton <- "If we do these things---end social promotion; turn around failing schools; build modern ones; support qualified teachers; promote innovation, competition and discipline - then we will begin to meet our generation's historic responsibility to create 21st century schools. Now, we also have to do more to support the millions of parents who give their all every day at home and at work."
txt_bush <- "And the victory of freedom in Iraq will strengthen a new ally in the war on terror, inspire democratic reformers from Damascus to Tehran, bring more hope and progress to a troubled region, and thereby lift a terrible threat from the lives of our children and grandchildren. We will succeed because the Iraqi people value their own liberty---as they showed the world last Sunday."
corp_example <- corpus(c(Clinton_1999 = txt_clinton, Bush_2005 = txt_bush))
frompredict <- as.data.frame(sophistication:::get_covars_from_newdata.corpus(corp_example))
row.names(frompredict) <- docnames(corp_example)
expect_equivalent(
covars_make(corp_example)[, "meanSentenceChars"],
frompredict["meanSentenceChars"]
)
expect_equivalent(
covars_make_baselines(corp_example)["google_min_2000"],
frompredict["google_min"]
)
expect_equivalent(
covars_make(corp_example)[, "meanWordChars"],
frompredict["meanWordChars"],
tol = .016
)
expect_equivalent(
covars_make_pos(corp_example)[, "pr_noun", drop = FALSE],
frompredict["pr_noun"]
)
})
|
d6929cc572eb7b95dbbc4f29871ed89f0c16d3f8
|
d475ef97805a1b25f00b1f1aea15ae2abd44768b
|
/load_data.r
|
089bc0379f6c283201ebd10b6f43d6b05c740f6a
|
[] |
no_license
|
MaximeJumelle/Prediction-of-air-quality-in-Paris-subway
|
f48e32c495f4ac793a944b123a00aa36afffe84e
|
8fc68905ccdbecece3a58f34d75523727049435e
|
refs/heads/master
| 2021-05-06T19:02:30.929248
| 2017-12-22T20:59:29
| 2017-12-22T20:59:29
| 112,007,751
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 976
|
r
|
load_data.r
|
# load_data.r
# Maxime JUMELLE / Youva MANSOUR / Wassil BENTARKA
#
# Loads the Paris-subway air-quality dataset, orders it chronologically,
# coerces the measurement columns to numeric, sub-samples every 50th
# observation, and plots the PM10 series with ggplot2.
# You must install this package to read the dataset file
#install.packages("rjson")
library(purrr)
library(ggplot2)

# NOTE(review): hard-coded absolute path -- adjust per machine (ideally make
# it relative to the project root or take it as an argument).
dataset <- read.csv("/home/maxime/Documents/R/TSA/data/dataset.csv", sep = ";", header = TRUE)

# Sort rows chronologically; DATE is stored as "dd/mm/YYYY" strings.
dataset <- dataset[order(as.Date(dataset$DATE, format = "%d/%m/%Y")), ]

# Coerce the measurement columns to numeric in one pass (equivalent to the
# original per-column as.numeric() calls).
num_cols <- c("TEMP", "HUMI", "NO", "NO2", "PM10", "CO2")
dataset[num_cols] <- lapply(dataset[num_cols], as.numeric)

dates <- as.Date(dataset$DATE, "%d/%m/%Y")
par(mfrow = c(1, 1))

# Take every `jumpValue`-th observation.  The original grew datesJump/tempJump
# with c() inside a while loop (quadratic); this vectorized form selects the
# exact same indices (1, 51, 101, ... for index < length(dates)).
jumpValue <- 50
keep <- seq.int(1L, length(dates), by = jumpValue)
keep <- keep[keep < length(dates)]
datesJump <- dates[keep]   # now retains its Date class (unused by the active plot)
tempJump <- dataset$TEMP[keep]

# NOTE(review): the row count 35039 is hard-coded -- confirm it matches the
# dataset (indexing with 0 is a no-op in R, so 0:35039 selects rows 1..35039).
df <- data.frame(x = dates[0:35039], y = dataset$PM10[0:35039])
#df <- data.frame(x = datesJump, y = tempJump)
ggplot(df, aes(x = x, y = y)) + geom_line(position = 'jitter')
|
84280eda30b932a93e65859cf9d688d2184a90cc
|
6ac1931eeba6b0e0e4e073d67a3167b9186bfed4
|
/R/lrEMplus.R
|
a24ab795a42786e5ca81325b0f5e7309f58bd15b
|
[] |
no_license
|
Japal/zCompositions
|
cdebd661ee5c2a5bf24e384d03a9df2b1abf0924
|
773f2afe00311c676cc88bcd8ff47b6bb43db9e5
|
refs/heads/master
| 2023-09-01T05:55:34.994363
| 2023-08-23T17:47:23
| 2023-08-23T17:47:23
| 23,953,744
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,100
|
r
|
lrEMplus.R
|
# lrEMplus: log-ratio EM imputation of BOTH zeros (values below detection
# limits `dl`) and missing values (NA) in a compositional data set X.
# The routine alternates two lrEM() passes (zeros, then missing) until the
# alr-space mean vector and covariance matrix stabilize, or `max.iter`
# alternations have run.  Errors out unless X contains at least one NA AND
# at least one zero (otherwise plain lrEM() should be used).
# Returns the imputed data set as a data frame.
lrEMplus <- function(X, dl = NULL, rob = FALSE, ini.cov = c("complete.obs", "multRepl"), frac = 0.65,
                     tolerance = 0.0001, max.iter = 50,
                     rlm.maxit=150, suppress.print = FALSE, closure=NULL, z.warning=0.8, delta=NULL){
  # Input validation: non-negative data matrix, numeric detection limits with
  # dimensions compatible with X, and the required mixed zero/NA pattern.
  if (any(X<0, na.rm=T)) stop("X contains negative values")
  if (is.character(dl) || is.null(dl)) stop("dl must be a numeric vector or matrix")
  if (is.vector(dl)) dl <- matrix(dl,nrow=1)
  dl <- as.matrix(dl) # Avoids problems when dl might be multiple classes
  if ((is.vector(X)) | (nrow(X)==1)) stop("X must be a data matrix")
  if (ncol(dl)!=ncol(X)) stop("The number of columns in X and dl do not agree")
  if ((nrow(dl)>1) & (nrow(dl)!=nrow(X))) stop("The number of rows in X and dl do not agree")
  if (any(is.na(X))==FALSE) stop("No missing data were found in the data set")
  if (any(X==0, na.rm=T)==FALSE) stop("No zeros were found in the data set")
  # Backward compatibility: `delta` was renamed to `frac`.
  if (!missing("delta")){
    warning("The delta argument is deprecated, use frac instead: frac has been set equal to delta.")
    frac <- delta
  }
  ini.cov <- match.arg(ini.cov)
  # Geometric mean helper (used for the initial fill of missing cells).
  gm <- function(x, na.rm=TRUE){
    exp(sum(log(x), na.rm=na.rm) / length(x))
  }
  ## Preliminaries ----
  X <- as.data.frame(X,stringsAsFactors=TRUE)
  nn <- nrow(X); D <- ncol(X)
  X <- as.data.frame(apply(X,2,as.numeric),stringsAsFactors=TRUE)
  # Row totals; used later to detect/restore a closed (constant-sum) composition.
  c <- apply(X,1,sum,na.rm=TRUE)
  # Number of zeros or missing per column for warning
  # Columns/rows dominated by zeros+NAs (>= z.warning fraction) are dropped
  # with a warning before imputation.
  checkNumZerosCol <- apply(X,2,function(x) sum(is.na(x) | (x==0)))
  if (any(checkNumZerosCol/nrow(X) >= z.warning)) {
    cases <- which(checkNumZerosCol/nrow(X) >= z.warning)
    X <- X[,-cases]
    warning(paste("Column ",cases," containing more than ",z.warning*100,"% zeros/unobserved values was deleted (pre-check out using function zPatterns/modify threshold using argument z.warning).\n",sep=""))
  }
  checkNumZerosRow <- apply(X,1,function(x) sum(is.na(x) | (x==0)))
  if (any(checkNumZerosRow/ncol(X) >= z.warning)) {
    cases <- which(checkNumZerosRow/ncol(X) >= z.warning)
    X <- X[-cases,]
    warning(paste("Row ",cases," containing more than ",z.warning*100,"% zeros/unobserved values was deleted (pre-check out using function zPatterns/modify threshold using argument z.warning).\n",sep=""))
  }
  # Expand a single row of detection limits to one row per observation.
  if (nrow(dl)==1) dl <- matrix(rep(1,nn),ncol=1)%*%dl
  # Check for closure
  closed <- 0
  if (all( abs(c - mean(c)) < .Machine$double.eps^0.3 )) closed <- 1
  # Starting point depends on which problem is larger.  If NAs outnumber
  # zeros: fill zeros with frac*dl, then lrEM-impute the missing values.
  if (sum(is.na(X)) > sum(X==0,na.rm=T)){
    X.old <- X
    # Initial simple imputation of zero
    for (i in 1:nn){
      if (any(X.old[i, ]==0,na.rm=T)){
        z <- which(X.old[i, ]==0)
        X.old[i,z] <- frac*dl[i,z]
      }
    }
    # Initial lrEM imputation of missing data
    X.old <- lrEM(X.old, label = NA, imp.missing = TRUE, ini.cov = ini.cov, rob = rob,
                  tolerance = tolerance, max.iter = max.iter, rlm.maxit = rlm.maxit,
                  suppress.print = TRUE, closure = closure)
  }
  # Otherwise: fill missing cells with column geometric means (computed over
  # the non-zero entries), then lrEM-impute the zeros.
  if (sum(is.na(X)) <= sum(X==0,na.rm=T)){
    X.old <- X
    # Initial ordinary geo mean imputation of missing (ignores 0s in the column if any)
    gmeans <- apply(X.old,2,function(x) gm(x[x!=0]))
    for (i in 1:nn){
      if (any(is.na(X.old[i, ]))){
        z <- which(is.na(X.old[i, ]))
        X.old[i,z] <- gmeans[z]
      }
    }
    # Initial lrEM imputation of zeros
    X.old <- lrEM(X.old, label = 0, dl = dl, ini.cov = ini.cov, rob = rob,
                  tolerance = tolerance, max.iter = max.iter, rlm.maxit = rlm.maxit,
                  suppress.print = TRUE, closure = closure)
  }
  # Initial parameter estimates
  # Additive log-ratio (alr) transform w.r.t. the last column D; mean vector
  # M and covariance C in alr space drive the convergence test below.
  X.old_alr <- log(X.old)-log(X.old[,D])
  X.old_alr <- as.matrix(X.old_alr[,-D])
  M.old <- matrix(colMeans(X.old_alr),ncol=1)
  C.old <- cov(X.old_alr)
  iter_again <- 1
  niters <- 0
  # Main alternation loop: re-mark the original zeros, lrEM-impute them,
  # re-mark the original NAs, lrEM-impute those, then compare alr parameters.
  while (iter_again == 1){
    niters <- niters+1
    if (niters > 1) {X.old <- X.new; M.old <- M.new; C.old <- C.new}
    X.old <- as.matrix(X.old)
    # Restore the positions that were zeros in the ORIGINAL X so lrEM treats
    # them as censored again this round.
    X.old[which(X==0)] <- 0
    X.new <- lrEM(X.old, label = 0, dl = dl, ini.cov = ini.cov, rob = rob,
                  tolerance = tolerance, max.iter = max.iter, rlm.maxit = rlm.maxit, suppress.print = TRUE,
                  closure = closure)
    # Likewise restore the originally-missing positions before the NA pass.
    X.new[is.na(X)] <- NA
    X.new <- lrEM(X.new, label = NA, imp.missing = TRUE, ini.cov = ini.cov, rob = rob,
                  tolerance = tolerance, max.iter = max.iter, rlm.maxit = rlm.maxit, suppress.print = TRUE,
                  closure = closure)
    X.new_alr <- log(X.new)-log(X.new[,D])
    X.new_alr <- as.matrix(X.new_alr[,-D])
    M.new <- matrix(colMeans(X.new_alr),ncol=1)
    C.new <- cov(X.new_alr)
    # Convergence check
    Mdif <- max(abs(M.new-M.old))
    Cdif <- max(max(abs(C.new-C.old)))
    if ((max(c(Mdif,Cdif)) < tolerance) | (niters == max.iter)) iter_again <- 0
  }
  ## Final section ----
  # Re-close the composition to the original constant row sum if needed.
  if (closed==1) X.new <- t(apply(X.new,1,function(x) x/sum(x)*c[1])) # If not closed lrEM above takes care of it
  if (suppress.print==FALSE) cat(paste("No. iterations to converge: ",niters,"\n\n"))
  return(as.data.frame(X.new,stringsAsFactors=TRUE))
}
|
a6a01b11f4b62d98873faf8bc13d05fe5a1fcabd
|
f91369d3ff4584d909ff5f0f4be382e54594d95c
|
/man/make_double_pipe.Rd
|
fa186271d28eb3fc94e0a4bf79413184c6076814
|
[
"Apache-2.0"
] |
permissive
|
Novartis/tidymodules
|
e4449133f5d299ec7b669b02432b537de871278d
|
daa948f31910686171476865051dcee9e6f5b10f
|
refs/heads/master
| 2023-03-06T01:18:55.990139
| 2023-02-23T15:01:28
| 2023-02-23T15:01:28
| 203,401,748
| 147
| 13
|
NOASSERTION
| 2020-04-02T16:09:32
| 2019-08-20T15:16:40
|
R
|
UTF-8
|
R
| false
| true
| 620
|
rd
|
make_double_pipe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipes.R
\name{make_double_pipe}
\alias{make_double_pipe}
\title{make_double_pipe: Pipe function generator}
\usage{
make_double_pipe(l, r, f = FALSE, rev = FALSE)
}
\arguments{
\item{l}{Left module.}
\item{r}{Right module.}
\item{f}{fast boolean. Used with the '>>' or '<<' signs to return the item at the opposite of the arrow.}
\item{rev}{Reverse operation. Boolean that indicates if this is a reverse operation, i.e. '<' or '<<'.}
}
\description{
Create a pipe function for mapping a module output to a module input
}
\keyword{internal}
|
e04bb0a921a448b5a5bf16c6b181cc7c32025582
|
924108a0fc572ad2cbad4230c582f19e2164fd00
|
/server.R
|
d294c6f179662af868b1ba24439c8b8f6e23578d
|
[] |
no_license
|
Athiette/diceThresholdometer
|
9180778736e9cc13d52f76b696bc5f73efcb829f
|
5d69c0f987ebd0c34799e9596e59f16599f47f79
|
refs/heads/master
| 2021-05-28T01:42:14.892817
| 2014-07-26T23:46:06
| 2014-07-26T23:46:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 722
|
r
|
server.R
|
library(shiny)
# chanceSuccess: probability of rolling at least `nSuccess` successes on
# `nD6` six-sided dice.  `Type` selects the per-die success threshold:
#   "4s" -> a die succeeds on 4+ (p = 3/6)
#   "5s" -> a die succeeds on 5+ (p = 2/6)
#   "6s" -> a die succeeds on 6  (p = 1/6)
# Any other Type returns 0, matching the original accumulator behavior.
chanceSuccess <- function(nD6, nSuccess, Type) {
  # Per-die success probability implied by the threshold type.
  p <- switch(Type, "4s" = 3 / 6, "5s" = 2 / 6, "6s" = 1 / 6, NA_real_)
  if (is.na(p)) {
    return(0)
  }
  # Zero (or negative) required successes is always satisfied.
  if (nSuccess <= 0) {
    return(1)
  }
  # Bug fix: the original looped over `nSuccess:nD6`, which counts BACKWARDS
  # when nSuccess > nD6 and summed spurious terms (e.g. (2, 3, "4s") gave
  # 0.25); the correct probability is 0.
  if (nSuccess > nD6) {
    return(0)
  }
  # P(X >= nSuccess) for X ~ Binomial(nD6, p); replaces the three duplicated
  # hand-rolled choose()/power loops with the upper-tail binomial CDF.
  pbinom(nSuccess - 1, size = nD6, prob = p, lower.tail = FALSE)
}
# Server wiring: echo the chosen dice count and success threshold back to the
# UI, and render the probability computed by chanceSuccess() whenever any of
# the three inputs (inD6, inSuccess, iType) changes.
shinyServer(
  function(input, output) {
    output$onD6 <- renderPrint({input$inD6})
    output$onSuccess <- renderPrint({input$inSuccess})
    output$oProb <- renderPrint({chanceSuccess(input$inD6,input$inSuccess,input$iType)})
  }
)
|
92ed85222b45948358c2e3b2d49a47b16ee98bb4
|
206aab419371d80bab70ab01ef9a7f78e3d8232f
|
/man/get_data.Rd
|
f8919e72030f0ea3e291b72e93853c24dadef76c
|
[] |
no_license
|
anubhav-dikshit/rLab5
|
c09b5e4913887b38849b5e6deddefc983191c969
|
c6ab9f3dc2381206960b2b6221fdbd3670652b80
|
refs/heads/master
| 2020-03-29T23:54:20.340409
| 2018-10-01T10:01:19
| 2018-10-01T10:01:19
| 150,486,389
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 522
|
rd
|
get_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_data.R
\name{get_data}
\alias{get_data}
\title{Title Function to Get Weather Data for a given city}
\usage{
get_data(input_city, forecast_days)
}
\arguments{
\item{input_city}{The name of the city that weather needs to be fetched}
\item{forecast_days}{The number of days that need to be forecasted}
}
\value{
current_weather_data
}
\description{
Title Function to Get Weather Data for a given city
}
\examples{
get_data("Bangalore", 1)
}
|
ac222baffb43091920fbca14dca95e0b3d6f99d9
|
39da6ebcf2d578230dbaf05713864757ac85da0c
|
/Plot2.R
|
d434deeda59fe898d5f81a6e1d5f10b89659b214
|
[] |
no_license
|
HuichunChien/ExData_Plotting1
|
6b106fa75d6f4ce4528e5ab564260b5a418941cb
|
b7d78d1c5c1d0323fdbc2e32122a4514b87056e6
|
refs/heads/master
| 2021-01-17T15:49:20.792191
| 2014-10-12T22:57:34
| 2014-10-12T22:57:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 857
|
r
|
Plot2.R
|
# Plot2.R -- reads the UCI household power consumption file, keeps the two
# days 1-2 Feb 2007, and writes a line chart of Global_active_power to
# plot2.png with a manual Thu/Fri/Sat x axis.
data=read.table("household_power_consumption.txt", na.strings = "?", sep=";", header=TRUE)
#get subdata with data are 1/2/2007 and 2/2/2007
subdata<-subset(data,data$Date=='1/2/2007'|data$Date=='2/2/2007', select=c(Date,Time,Global_active_power,Global_reactive_power,Voltage,Global_intensity ,Sub_metering_1,Sub_metering_2,Sub_metering_3))
# Parse Date (dd/mm/YYYY) and build a combined "date time" label column.
subdata$Date<-as.Date(subdata$Date,format="%d/%m/%Y")
datetime<-(paste(subdata$Date,subdata$Time, sep=" "))
# NOTE(review): storing datetime as a factor is unusual; the plot only lines
# up because the rows are already in chronological order.
subdata$datetime<-as.factor(datetime)
# plot plot2.png
png("plot2.png", height=480, width=480)
plot(subdata$datetime,subdata$Global_active_power,type="l", xaxt="n", ylab="Global active power(kilowatts)")
# NOTE(review): this lines() call appears to redraw the series already drawn
# by plot(type="l") above, and `ylab` is not a lines() argument -- likely
# redundant; confirm before removing.
lines(subdata$datetime,subdata$Global_active_power,ylab="Global Active Power (kilowatts)")
# Manual x axis: ticks at the first, middle and last observation.
axis(1,at=c(1,length(subdata$datetime)/2,length(subdata$datetime)),lab=c("Thu","Fri","Sat"))
dev.off()
|
bad974b57e9e3a78377db4f7dcab29b8dff3dd33
|
69d96f973b5e3a782d1e6ea05ad32e15db00f762
|
/2_covs/4_read_campus.R
|
791432327a89e61281f8e694cfe4022b5757be2a
|
[] |
no_license
|
meghapsimatrix/retention
|
5d639e8149baa7cf2c8d6fb7152663866a3b28b0
|
fbed5e5715be27d060d423c8b7890ce6f3fd3b0d
|
refs/heads/master
| 2022-12-06T07:59:00.324608
| 2020-09-02T19:39:23
| 2020-09-02T19:39:23
| 278,442,784
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 959
|
r
|
4_read_campus.R
|
# 4_read_campus.R -- reads a batch of TEA "p_campus" files (8 tab-delimited
# plus 1 comma-delimited), stacks them into one data frame, tags each row
# with the year parsed from its file path, and saves the result as .RData.
library(tidyverse)
# Read data ---------------------------------------------------------------
# get to the TEA folder and extract all files with class in it
files <- list.files("NewFilesReleased/TEA", pattern = "p_campus", full.names = TRUE)
# NOTE(review): this drops every path containing a lowercase "f" anywhere in
# the FULL path (full.names = TRUE), then keeps files 10-19 -- confirm that
# is the intended filter and that at least 19 files exist (otherwise [10:19]
# yields NAs).
files <- files[!str_detect(files, "f")][10:19]
# go through each files and read in the data and select particular columns
# Thin wrapper so pmap() can pair each path with its delimiter.
read_dat <- function(path, type){
  dat <- read_delim(path, delim = type)
  return(dat)
}
# create a tibble (modern data frame) with path and separator (comma or tab)
# NOTE(review): `type` supplies 9 delimiters for the 10 paths selected above;
# tibble() will error on the length mismatch -- verify the intended counts.
params <- tibble(path = files, type = c(rep("\t", 8), ","))
# for each file read in the data, create a big data frame of all the datasets together
campus_dat <- params %>%
  mutate(
    res = pmap(., .f = read_dat)
  ) %>%
  unnest(cols = res)
# parse out the year from the path
# NOTE(review): parse_number() grabs the first number in the whole path --
# only safe if the year is the sole numeric token.
campus_dat <- campus_dat %>%
  mutate(year = parse_number(path))
save(campus_dat, file = "Revised Datasets/R/campus_dat.RData")
|
bd6079d2720bc103b6976ae39f04b22c49cf468c
|
66b97061d0e512b898cdc5736da6ac1d98edd644
|
/R/setMBOControlInfill.R
|
8b96962d4f87936ec4f0e72e590d395500d872a6
|
[] |
no_license
|
jakobbossek/mlrMBO
|
3e28a1de56dda76bac7f8974a3c73b9903c82d14
|
d383ea239591c41717c2a861424d642fb78dfdbe
|
refs/heads/master
| 2021-01-18T08:21:44.437766
| 2016-02-09T22:02:56
| 2016-02-09T22:02:56
| 51,073,599
| 1
| 0
| null | 2016-02-04T12:07:42
| 2016-02-04T12:07:40
|
R
|
UTF-8
|
R
| false
| false
| 13,573
|
r
|
setMBOControlInfill.R
|
#' @title Extends mbo control object with infill criteria and infill optimizer options.
#'
#' @template arg_control
#' @param crit [\code{character(1)}]\cr
#' How should infill points be rated. Possible parameter values are:
#' \dQuote{mean}: Mean response.
#' \dQuote{ei}: Expected improvement.
#' \dQuote{aei}: Augmented expected improvement.
#' \dQuote{eqi}: Expected quantile improvement.
#' \dQuote{lcb}: Lower confidence bound.
#' \dQuote{random}: Random infill point. Optimization of this criteria won't be performed.
#' Alternatively, you may pass a function name as string.
#' @param interleave.random.points [\code{integer(1)}]\cr
#' Add \code{interleave.random.points} uniformly sampled points additionally to the
#' regular proposed points in each step.
#' If \code{crit="random"} this value will be neglected.
#' Default is 0.
#' @param crit.eqi.beta [\code{numeric(1)}]\cr
#' Beta parameter for expected quantile improvement criterion.
#' Only used if \code{crit == "eqi"}, ignored otherwise.
#' Default is 0.75.
#' @param crit.lcb.lambda [\code{numeric(1)}]\cr
#' Lambda parameter for lower confidence bound infill criterion.
#' Only used if \code{crit == "lcb"}, ignored otherwise.
#' Default is 1.
# FIXME: does this only make sense for multicrit? or single crit too?
#' @param crit.lcb.pi [\code{numeric(1)}]\cr
#' Probability-of-improvement value to determine the lambda parameter for lcb infill criterion.
#' It is an alternative to set the trade-off between \dQuote{mean} and \dQuote{se}.
#' Only used if \code{crit == "lcb"}, ignored otherwise.
#' If specified, \code{crit.lcb.lambda == NULL} must hold.
#' Default is \code{NULL}.
#' @param crit.lcb.inflate.se [\code{logical(1)}]\cr
#' Try to inflate or deflate the estimated standard error to get to the same scale as the mean?
#' Calculates the range of the mean and standard error and multiplies the standard error
#' with the quotient of theses ranges.
#' Default is \code{FALSE}.
#' @param crit.aei.use.nugget [\code{logical(1)}]\cr
#' Only used if \code{crit == "aei"}. Should the nugget effect be used for the
#' pure variance estimation? Default is \code{FALSE}.
#' @param filter.proposed.points [\code{logical(1)}]\cr
#' Design points located too close to each other can lead to
#' numerical problems when using e.g. kriging as a surrogate model.
#' This parameter activates or deactivates a heuristic to handle this issue.
#' If \code{TRUE}, proposed points whose distance to design points or other current
#' candidate points is smaller than \code{filter.proposed.points.tol}, are replaced by random points.
#' If enabled, a logical column \dQuote{filter.replace} is added to the resulting \code{opt.path},
#' so you can see whether such a replacement happened.
#' Default is \code{FALSE}.
#' @param filter.proposed.points.tol [\code{numeric(1)}]\cr
#' Tolerance value filtering of proposed points. We currently use a maximum metric
#' to calculate the distance between points.
#' Default is 0.0001.
#' @param opt [\code{character(1)}]\cr
#' How should SINGLE points be proposed by using the surrogate model. Possible values are:
#' \dQuote{focussearch}: In several iteration steps the parameter space is
#' focused on an especial promising region according to infill criterion.
#' \dQuote{cmaes}: Use CMAES to optimize infill criterion. If all CMAES runs fail, a random point is generated
#' instead and a warning informs about it.
#' \dQuote{ea}: Use an (mu+1) EA to optimize infill criterion.
#' \dQuote{nsga2}: NSGA2 for multi obj. optimizationen. Needed for mspot.
#' Default is \dQuote{focussearch}.
#' Alternatively, you may pass a function name as string.
#' @param opt.restarts [\code{integer(1)}]\cr
#' Number of independent restarts for optimizer of infill criterion.
#' If \code{opt == "cmaes"} the first start point for the optimizer is always the
#' currently best point in the design of already visited points.
#' Subsequent restarts are started at random points.
#' Default is 1.
#' @param opt.focussearch.maxit [\code{integer(1)}]\cr
#' For \code{opt = "focussearch"}:
#' Number of iteration to shrink local focus.
#' Default is 5.
#' @param opt.focussearch.points [\code{integer(1)}]\cr
#' For \code{opt = "focussearch"}:
#' Number of points in each iteration of the focus search optimizer.
#' Default is 10000.
#' @param opt.cmaes.control [\code{list}]\cr
#' For \code{opt = "cmaes"}:
#' Control argument for cmaes optimizer.
#' Default is empty list.
#' @param opt.ea.maxit [\code{integer(1)}]\cr
#' For \code{opt = "ea"}:
#' Number of iterations / generations of EA.
#' Default is 500.
#' @param opt.ea.mu [\code{integer(1)}]\cr
#' For \code{opt = "ea"}:
#' Population size of EA.
#' Default is 10.
#' @param opt.ea.pm.eta [\code{numeric(1)}]\cr
#' For \code{opt = "ea"}:
#' Distance parameter of mutation distribution, see \code{\link[emoa]{pm_operator}}.
#' Default is 15.
#' @param opt.ea.pm.p [\code{numeric(1)}]\cr
#' For \code{opt = "ea"}:
#' Probability of 1-point mutation, see \code{\link[emoa]{pm_operator}}.
#' Default is 0.5.
#' @param opt.ea.sbx.eta [\code{numeric(1)}]\cr
#' For \code{opt = "ea"}:
#' Distance parameter of crossover distribution , see \code{\link[emoa]{sbx_operator}}.
#' Default is 15.
#' @param opt.ea.sbx.p [\code{numeric(1)}]\cr
#' For \code{opt = "ea"}:
#' Probability of 1-point crossover, see \code{\link[emoa]{sbx_operator}}.
#' Default is 0.5.
#' @param opt.ea.lambda [\code{numeric{1}}]\cr
#' For \code{opt.ea = "ea"}.
#' Number of children generated in each generation.
#' Default is 1.
#' @param opt.nsga2.popsize [\code{numeric{1}}]\cr
#' For \code{opt.multicrit.method = "nsga2"}.
#' Population size of nsga2.
#' Default is 100.
#' @param opt.nsga2.generations [\code{numeric{1}}]\cr
#' For \code{opt.multicrit.method = "nsga2"}.
#' Number of populations for of nsga2.
#' Default is 50.
#' @param opt.nsga2.cprob [\code{numeric{1}}]\cr
#' For \code{opt.multicrit.method = "nsga2"}.
#' nsga2 param. Default is 0.7.
#' @param opt.nsga2.cdist [\code{numeric{1}}]\cr
#' For \code{opt.multicrit.method = "nsga2"}.
#' nsga2 param. Default is 5.
#' @param opt.nsga2.mprob [\code{numeric{1}}]\cr
#' For \code{opt.multicrit.method = "nsga2"}.
#' nsga2 param. Default is 0.2.
#' @param opt.nsga2.mdist [\code{numeric{1}}]\cr
#' For \code{opt.multicrit.method = "nsga2"}.
#' nsga2 param. Default is 10.
#' @return [\code{\link{MBOControl}}].
#' @note See the other setMBOControl... functions and \code{makeMBOControl} for referenced arguments.
#' @seealso makeMBOControl
#' @export
# Extends an MBOControl object with infill-criterion and infill-optimizer
# settings.  Resolution pattern used throughout the body: each setting is
# computed as coalesce(newly supplied value, value already stored in
# `control`, hard default), then validated with a checkmate assertion.
setMBOControlInfill = function(control,
  crit = NULL,
  interleave.random.points = 0L,
  crit.eqi.beta = 0.75,
  crit.lcb.lambda = 1,
  crit.lcb.pi = NULL,
  crit.lcb.inflate.se = NULL,
  crit.aei.use.nugget = NULL,
  filter.proposed.points = NULL,
  filter.proposed.points.tol = NULL,
  opt = "focussearch", opt.restarts = NULL,
  opt.focussearch.maxit = NULL, opt.focussearch.points = NULL,
  opt.cmaes.control = NULL,
  opt.ea.maxit = NULL, opt.ea.mu = NULL,
  opt.ea.sbx.eta = NULL, opt.ea.sbx.p = NULL,
  opt.ea.pm.eta = NULL, opt.ea.pm.p = NULL,
  opt.ea.lambda = NULL,
  #opt.multicrit.randomsearch.points = 50000L,
  opt.nsga2.popsize = NULL, opt.nsga2.generations = NULL,
  opt.nsga2.cprob = NULL, opt.nsga2.cdist = NULL,
  opt.nsga2.mprob = NULL, opt.nsga2.mdist = NULL) {

  assertClass(control, "MBOControl")

  # Infill criterion and the number of extra uniformly random points proposed
  # alongside the regular ones in each step.
  control$infill.crit = coalesce(crit, control$infill.crit, "mean")
  assertChoice(control$infill.crit, choices = getSupportedInfillCritFunctions())
  assertCount(interleave.random.points)
  control$interleave.random.points = interleave.random.points

  # EQI criterion parameter (beta quantile level in [0.5, 1]).
  control$infill.crit.eqi.beta = coalesce(crit.eqi.beta, control$infill.crit.eqi.beta, 0.75)
  assertNumber(control$infill.crit.eqi.beta, na.ok = FALSE, lower = 0.5, upper = 1)

  # lambda value for lcb - either given, or set via given pi, the other one must be NULL!
  if (!is.null(crit.lcb.lambda) && !is.null(crit.lcb.pi))
    stop("Please specify either 'crit.lcb.lambda' or 'crit.lcb.pi' for the lcb crit, not both!")
  if (is.null(crit.lcb.pi))
    assertNumeric(crit.lcb.lambda, len = 1L, any.missing = FALSE, lower = 0)
  if (is.null(crit.lcb.lambda)) {
    assertNumeric(crit.lcb.pi, len = 1L, any.missing = FALSE, lower = 0, upper = 1)
    # This is the formula from TW diss for setting lambda.
    # Note, that alpha = -lambda, so we need the negative values
    crit.lcb.lambda = -qnorm(0.5 * crit.lcb.pi^(1 / control$number.of.targets))
  }
  control$infill.crit.lcb.lambda = coalesce(crit.lcb.lambda, control$infill.crit.lcb.lambda, 1)

  # Remaining criterion flags: se inflation for lcb, nugget usage for aei.
  control$infill.crit.lcb.inflate.se = coalesce(crit.lcb.inflate.se, control$infill.crit.lcb.inflate.se, FALSE)
  assertFlag(control$infill.crit.lcb.inflate.se)
  control$infill.crit.aei.use.nugget = coalesce(crit.aei.use.nugget, control$infill.crit.aei.use.nugget, FALSE)
  assertFlag(control$infill.crit.aei.use.nugget)

  # Heuristic that replaces proposed points lying too close to existing
  # design points (within tolerance) by random points.
  control$filter.proposed.points = coalesce(filter.proposed.points, control$filter.proposed.points, FALSE)
  assertFlag(control$filter.proposed.points)
  control$filter.proposed.points.tol = coalesce(filter.proposed.points.tol, control$filter.proposed.points.tol, 1e-4)
  assertNumber(control$filter.proposed.points.tol, na.ok = FALSE, lower = 0)

  # Infill optimizer selection plus its restart count.
  control$infill.opt = coalesce(opt, control$infill.opt, "focussearch")
  assertChoice(control$infill.opt, choices = getSupportedInfillOptFunctions())
  control$infill.opt.restarts = coalesce(opt.restarts, control$infill.opt.restarts, 1L)
  control$infill.opt.restarts = asCount(control$infill.opt.restarts)
  assertCount(control$infill.opt.restarts, na.ok = FALSE)

  # Focus search settings.
  control$infill.opt.focussearch.maxit = coalesce(opt.focussearch.maxit, control$infill.opt.focussearch.maxit, 5L)
  control$infill.opt.focussearch.maxit = asCount(control$infill.opt.focussearch.maxit)
  assertCount(control$infill.opt.focussearch.maxit, na.ok = FALSE, positive = TRUE)
  control$infill.opt.focussearch.points = coalesce(opt.focussearch.points, control$infill.opt.focussearch.points, 10000L)
  control$infill.opt.focussearch.points = asCount(control$infill.opt.focussearch.points)
  assertCount(control$infill.opt.focussearch.points, na.ok = FALSE, positive = TRUE)

  # CMA-ES settings (passed straight through to the cmaes optimizer).
  control$infill.opt.cmaes.control = coalesce(opt.cmaes.control, control$infill.opt.cmaes.control, list())
  assertList(control$infill.opt.cmaes.control)

  # (mu+1) EA settings: iterations, population size, SBX crossover and
  # polynomial mutation parameters, offspring count.
  control$infill.opt.ea.maxit = coalesce(opt.ea.maxit, control$infill.opt.ea.maxit, 500L)
  control$infill.opt.ea.maxit = asCount(control$infill.opt.ea.maxit)
  assertCount(control$infill.opt.ea.maxit, na.ok = FALSE, positive = TRUE)
  control$infill.opt.ea.mu = coalesce(opt.ea.mu, control$infill.opt.ea.mu, 10L)
  control$infill.opt.ea.mu = asCount(control$infill.opt.ea.mu)
  assertCount(control$infill.opt.ea.mu, na.ok = FALSE, positive = TRUE)
  control$infill.opt.ea.sbx.eta = coalesce(opt.ea.sbx.eta, control$infill.opt.ea.sbx.eta, 15)
  assertNumber(control$infill.opt.ea.sbx.eta, na.ok = FALSE, lower = 0)
  control$infill.opt.ea.sbx.p = coalesce(opt.ea.sbx.p, control$infill.opt.ea.sbx.p, 0.5)
  assertNumber(control$infill.opt.ea.sbx.p, na.ok = FALSE, lower = 0, upper = 1)
  control$infill.opt.ea.pm.eta = coalesce(opt.ea.pm.eta, control$infill.opt.ea.pm.eta, 15)
  assertNumber(control$infill.opt.ea.pm.eta, na.ok = FALSE, lower = 0)
  control$infill.opt.ea.pm.p = coalesce(opt.ea.pm.p, control$infill.opt.ea.pm.p, 0.5)
  assertNumber(control$infill.opt.ea.pm.p, na.ok = FALSE, lower = 0, upper = 1)
  control$infill.opt.ea.lambda = coalesce(opt.ea.lambda, control$infill.opt.ea.lambda, 1L)
  assertCount(control$infill.opt.ea.lambda, na.ok = FALSE)

  # FIXME: Don't use for now
  #control$infill.opt.multicrit.randomsearch.points = coalesce(opt.multicrit.randomsearch.points, control$infill.opt.multicrit.randomsearch.points)
  #control$infill.opt.multicrit.randomsearch.points = asCount(control$infill.opt.multicrit.randomsearch.points)
  #assertCount(control$infill.opt.multicrit.randomsearch.points, na.ok = FALSE, positive = TRUE)

  # NSGA2 settings (multi-objective infill optimization); population must be
  # large enough to cover the number of proposed points.
  control$infill.opt.nsga2.popsize = coalesce(opt.nsga2.popsize, control$infill.opt.nsga2.popsize, 100L)
  control$infill.opt.nsga2.popsize = asCount(control$infill.opt.nsga2.popsize)
  assertCount(control$infill.opt.nsga2.popsize, na.ok = FALSE, positive = TRUE)
  if (control$infill.opt.nsga2.popsize < control$propose.points)
    stop("Population size of nsga2 must be greater or equal than propose.points.")
  control$infill.opt.nsga2.generations = coalesce(opt.nsga2.generations, control$infill.opt.nsga2.generations, 50L)
  control$infill.opt.nsga2.generations = asCount(control$infill.opt.nsga2.generations)
  control$infill.opt.nsga2.cprob = coalesce(opt.nsga2.cprob, control$infill.opt.nsga2.cprob, 0.7)
  assertNumber(control$infill.opt.nsga2.cprob, lower = 0, upper = 1, na.ok = FALSE)
  control$infill.opt.nsga2.cdist = coalesce(opt.nsga2.cdist, control$infill.opt.nsga2.cdist, 5)
  assertNumber(control$infill.opt.nsga2.cdist, lower = 1e-16, na.ok = FALSE, finite = TRUE)
  control$infill.opt.nsga2.mprob = coalesce(opt.nsga2.mprob, control$infill.opt.nsga2.mprob, 0.2)
  assertNumber(control$infill.opt.nsga2.mprob, lower = 0, upper = 1, na.ok = FALSE)
  control$infill.opt.nsga2.mdist = coalesce(opt.nsga2.mdist, control$infill.opt.nsga2.mdist, 10)
  assertNumber(control$infill.opt.nsga2.mdist, lower = 1e-16, na.ok = FALSE, finite = TRUE)

  return(control)
}
|
577872b9e213876303b3a56520245de08dd1136e
|
cbdeb7b5f3e8df763c5a36c487a1ac81eab130a6
|
/InitialDataCombineAndExplore.R
|
511322f3615ce7d971ad8b0d0137855869d70848
|
[] |
no_license
|
raywoo32/MSCI718
|
80b8faa0bc3639a689cc50c5ab3ef54de410ac23
|
73834982be1c2e1af7786a5ca53de851988aa4a5
|
refs/heads/main
| 2023-04-03T21:17:30.263383
| 2021-04-23T03:32:23
| 2021-04-23T03:32:23
| 360,751,698
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,073
|
r
|
InitialDataCombineAndExplore.R
|
# Intial Data Visualization
# Combines the active LTC (long-term care) outbreak data with webscraped
# accreditation and non-compliance tables, keeping only the most recent
# record per home.
# Imports
library("tidyverse")
library(dplyr)
library(ggplot2)
#rm(list = ls())
# Read in
active <- read.csv("./Data/activeltcoutbreak.csv", stringsAsFactors = FALSE)
colnames(active) <- c("Date", "PHU_Num", "PHU", "LTC_Home", "LTCH_Num", "City", "Beds", "Total_LTC_Resident_Cases", "Total_LTC_Resident_Deaths", "Total_LTC_HCW_Cases")
#Get most recent
LTC_Home <- n_distinct(active$LTC_Home) #514 LTC homes, need to get the most recent
# Keep only the most recent row per LTC home.
mostRecent <- active %>%
  group_by(LTC_Home) %>%
  slice(which.max(as.Date(Date, '%Y-%m-%d')))
# Censored counts reported as "<5" are imputed with 4 (the maximum value
# consistent with the censoring) -- an analytic choice, not observed data.
mostRecent$Total_LTC_Resident_Cases[mostRecent$Total_LTC_Resident_Cases == "<5"] <- 4
mostRecent$Total_LTC_Resident_Deaths[mostRecent$Total_LTC_Resident_Deaths == "<5"] <- 4
mostRecent$Total_LTC_HCW_Cases[mostRecent$Total_LTC_HCW_Cases == "<5"] <- 4
# Add back in webscraped data:
# Full outer join on LTCH_Num, then left join of non-compliance records.
accredit <- read.csv("./Data/AccreditationById.csv", stringsAsFactors = FALSE)
merged <- merge(x = mostRecent, y = accredit, by = "LTCH_Num", all = TRUE)
nonCompliance <- read.csv("./Data/nonCompliance.csv", stringsAsFactors = FALSE)
merged2 <- merge(x = merged, y = nonCompliance, by = "LTCH_Num", all.x = TRUE)
#write.csv(merged2,"./Data/CombinedLTC.csv", row.names = FALSE)
#Deprecated
#cities <- unique(merged2$City) #514 LTC homes, need to get the most recent
# Add data on population density:
#medianIncome <- read.csv("./Data/canadacities.csv", stringsAsFactors = FALSE)
#medianIncome <- medianIncome %>% filter(province_id == "ON")
#common <- medianIncome %>% filter(city_ascii %in% cities ) #95 matched no filtering
#medianIncome <- read.csv("./Data/medianHouseholdIncome2015.CSV", stringsAsFactors = FALSE)
#medianIncome <- medianIncome %>% select(Geographic.name, Geographic.code..Province.or.territory, Median.household.total.income..2015.constant.dollars...2015)
#colnames(medianIncome) <- c("City", "ProvinceCode", "MedianHHIncome2015")
#medianIncome <- medianIncome %>% filter(ProvinceCode == 35)
#medianIncome <- medianIncome %>% select(City, MedianHHIncome2015)
|
dcb33c926950cf631acc06e44c74e3c3cc0b2db0
|
14abd6e0490877b881bce35711865ec310e43621
|
/milestone1/lab2britsearch.R
|
a710c850b5cabfcd4d332191028ef38a17f95a2c
|
[] |
no_license
|
planetbridging/R
|
3f4f4aa111841cec3e033f0870e1a92c01de33bd
|
f83516448f4d8f1f041bbc02c9cccab4e03c73d4
|
refs/heads/master
| 2022-11-20T10:32:23.511716
| 2020-07-16T01:34:16
| 2020-07-16T01:34:16
| 280,020,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,714
|
r
|
lab2britsearch.R
|
# Collects tweets matching "Britney Spears" via vosonSML, then builds and
# exports actor and semantic network graphs with page-rank summaries.
# Import and attach libraries/packages
pkgs <- c("devtools","vosonSML","magrittr","tm","igraph","stringr")
lapply(pkgs, library, character.only = TRUE)
#library(writexl)
#library(openxlsx)
# Set up authentication variables
# SECURITY(review): live Twitter API credentials are hard-coded and committed
# to source control -- revoke these keys and load them from environment
# variables or a gitignored config file instead.
appname <- "declair"
my_api_key <- "33iRrzrqiUk28ChE1kaZN7bIv"
my_api_secret <- "VPtlcriRrID3sjYaS5cvz8SnNSsuBWZ5TENwhWAfXwvrWm0fFv"
my_access_token <- "1161974032146677761-3Y7CN4vb4lbli662VVOmqoAL1R7egt"
my_access_token_secret <- "AUbfdyK4VaPUKregw88qZJ5wLqaiaBLnCATSX965fWahW"
# Authenticate and get data
# Collect up to 2000 English tweets; writeToFile also caches them locally.
myTwitterData <- Authenticate("twitter",
                              appName= appname,
                              apiKey=my_api_key,
                              apiSecret=my_api_secret,
                              accessToken=my_access_token,
                              accessTokenSecret=my_access_token_secret,
                              useCachedToken = F) %>%
  Collect(searchTerm="Britney Spears", language="en", numTweets=2000, writeToFile=TRUE)
# View Collected Twitter Data
#write_xlsx(x = myTwitterData, path = "daily.xlsx", col_names = TRUE)
# NOTE(review): `?write.table` is a stray interactive help call left in the
# script -- harmless, but it can be removed.
?write.table
write.table(myTwitterData, file="BritB.csv", sep = ",")
#write.csv(myTwitterData, file = "foo.csv")
#write.xlsx(myTwitterData,file="BritTweets.xlsx")
View(myTwitterData)
# Create Actor Network and Graph
g_twitter_actor_network <- myTwitterData %>% Create("actor")
g_twitter_actor_graph <- Graph(g_twitter_actor_network)
# Label vertices with screen names for readability in Gephi/graphml viewers.
V(g_twitter_actor_graph)$name <- V(g_twitter_actor_graph)$screen_name
# Write Graph to File
write.graph(g_twitter_actor_graph, "TwitterActor.graphml", format="graphml")
# Run Page Rank Algorithm to Find Important Users
pageRank_auspol_actor <- sort(page.rank(g_twitter_actor_graph)$vector, decreasing=TRUE)
head(pageRank_auspol_actor, n=3)
# Create Semantic Network and Graph (and Write to File)
g_twitter_semantic_network <- myTwitterData %>% Create("semantic", stopwordsEnglish = T)
g_twitter_semantic_graph <- Graph(g_twitter_semantic_network)
write.graph(g_twitter_semantic_graph, "TwitterSemantic.graphml", format="graphml")
# Run Page Rank Algorithm to Find Top 10 Important Terms
pageRank_auspol_semantic <- sort(page_rank(g_twitter_semantic_graph)$vector, decreasing=TRUE)
head(pageRank_auspol_semantic, n=10)
# Try the Analysis Again but with a Semantic Network of the 50% most Frequent Terms (Complete this part of the script yourself!)
g_twitter_semantic_network_allTerms <- myTwitterData %>%
  Create("semantic", termFreq=50, removeTermsOrHashtags=c("#auspol"))
g_twitter_semantic_graph_allTerms <- Graph(g_twitter_semantic_network_allTerms)
write.graph(g_twitter_semantic_graph_allTerms, "TwitterSemanticAllTerms.graphml", format="graphml")
|
5047f9fc3929cc937267cf3e0e38428741326540
|
fa55322eb3674cb43ca87445c20dc53abfa70082
|
/man/summarised_lm.Rd
|
091299c997874e62158313b63e22fac1b207eedf
|
[] |
no_license
|
alberto-mateos-mo/data.analyseR
|
4b942b08d764132ced392a6b962236d630437335
|
75283eed47f9130504a03650bd8f2f444c2e48c2
|
refs/heads/master
| 2021-07-14T11:46:56.302131
| 2021-03-23T02:17:50
| 2021-03-23T02:17:50
| 237,685,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 433
|
rd
|
summarised_lm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcs_reg_models.R
\name{summarised_lm}
\alias{summarised_lm}
\title{Runs a summarised linear regression model}
\usage{
summarised_lm(formula, data, ...)
}
\arguments{
\item{formula}{An object of class formula}
\item{data}{A data frame with data}
\item{...}{other params to be passed to lm()}
}
\description{
Runs a summarised linear regression model
}
|
970b358e12c72156748a6ba7f0caca230f704293
|
8d7d4e1cfba177ed658c2fee39f67012e4b61a95
|
/code/pre_process.R
|
06d3c77660011e2ee047223328f82c59adfa020c
|
[] |
no_license
|
leixxli/NYC-uberpickup-prediction
|
ce6ab5d36a4a084d5fdf9fadf6ffb48fb968e212
|
75172966426b9f4587e30c4a89254e8c3486eedf
|
refs/heads/master
| 2022-11-07T20:56:22.489892
| 2020-06-16T22:20:09
| 2020-06-16T22:20:09
| 265,139,285
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,655
|
r
|
pre_process.R
|
# pre_process.R: load and clean the NYC Uber pickups dataset, derive time and
# borough features, and prepare train/test splits (with lag features) for
# GLM and ARIMA modelling. Expects "uber_nyc_enriched.csv" in the working dir.
library(lubridate)
library(ggplot2)
library(cowplot)
library(MASS)
library(ggpubr)
library(PerformanceAnalytics)
library(mctest)
library(fastDummies)
# --- import data
#setwd("/Users/spyker/Desktop")
data = read.csv(file = "uber_nyc_enriched.csv", header=TRUE, sep=",")
# ---data cleaning and mining
# Parse the pickup timestamp string into a POSIX datetime (stored as "Month").
data$Month = parse_date_time(data$pickup_dt, orders= 'ymd HMS')
# extract only hour information
data$Hour = hour(data$Month) #extract only hour information
pickups = data$pickups
# add feature->weekday or weekends
data$week_mark = wday(data$Month, label = TRUE)
data$weekends_ornot = data$week_mark == 'Sun' | data$week_mark == 'Sat'
# remove missing values
data = subset(data, !is.na(borough))
# --- for plotting
# get seperate borough information
ewr = data[ which(data$borough == 'EWR'),]
staten_island = data[which(data$borough == 'Staten Island'),]
queen = data[ which(data$borough == 'Queens'),]
bronx = data[ which(data$borough == 'Bronx'),]
brooklyn = data[which(data$borough == 'Brooklyn'),]
manhattan = data[ which(data$borough == 'Manhattan'),]
# set col names for ui interface
time_col = c('Month','Hour')
bor_col = c("EWR","Staten Island","Queens","Bronx","Brooklyn","Manhattan")
use_col = c("Queens","Bronx","Brooklyn","Manhattan")
# NOTE(review): the weather frame selects columns by position (4:8, 11:12);
# this silently breaks if the CSV column order changes -- verify against the file.
weather = data.frame(data[,4:8], data[,11:12],pickups = data$pickups)
weather_data = data.frame(weather, borough= data$borough)
# --- deciding to remove borough 'EWR' and 'Staten Island' for prediction
use_data = subset(data, data$borough!='EWR')
use_data = subset(use_data, use_data$borough!='Staten Island')
# change
# One-hot encode the holiday flag; .data_Y is the dummy for hday == "Y".
dummy_hday = fastDummies::dummy_cols(use_data$hday)
use_data = data.frame(use_data[,2:12],y_hday=dummy_hday$.data_Y,pickup_dt=use_data$Month)
# correlation matrix
# chart.Correlation(use_data[,3:12], histogram=TRUE, method = 'spearman',pch=19)
# --- prediction1---GLM
lag_col = c('lag_1_hour','lag_1_day','lag_1_week')
feature_list = c("pickups"="pickups", "spd" ="spd", "vsb"="vsb", "temp"='temp', "dewp"='dewp',"slp"='slp',"pcp01"='pcp01',
"pcp06"='pcp06',"pcp24"='pcp24', "sd" ='sd',"lag_1_hour"="lag_1_hour",
"lag_1_day"="lag_1_day","lag_1_week"="lag_1_week")
#introduce lag variables
# NOTE(review): with only the packages attached above, lag() resolves to
# stats::lag, which does NOT shift the values of a plain vector the way
# dplyr::lag does -- confirm dplyr is attached at runtime or these lag
# columns will be unshifted copies of pickups.
use_data$lag_1_hour = lag(use_data$pickups)
use_data$lag_1_day = lag(use_data$pickups,24)
use_data$lag_1_week = lag(use_data$pickups,168)
use_data = subset(use_data, !is.na(lag_1_week)) # remove missing vlues since lag features brought some NAs
# split into train and test data
# NOTE(review): the split index 16701 is hard-coded; verify it matches the
# intended train/test boundary for the current dataset size.
train = use_data[-c(16701:nrow(use_data)),]
test = use_data[c(16701:nrow(use_data)),]
# --- for arima model
library('forecast')
library('tseries')
|
5ba0f78a5ad810b5077b236ea5adfb03b4da443c
|
3d329fe1bc2c17007859e1fee013516c769a75d0
|
/Lab_06.R
|
85127a70fb12e470c5051b927380b17cc1e0feab
|
[] |
no_license
|
KarenZhuqianZhou/HUDM5123_Lab06_TwoWayANOVA
|
ca94e75e46ea4b646e2c1961906822a186493867
|
6e7bf4793a50599a7809cd0fb6f394934d060ab3
|
refs/heads/master
| 2022-06-14T03:34:30.753131
| 2020-05-05T19:25:26
| 2020-05-05T19:25:26
| 260,261,164
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,418
|
r
|
Lab_06.R
|
# Lab_06.R: two-way ANOVA walkthrough on two Prestige datasets (dat1 is a
# constructed example with no interaction; dat2 is the real Prestige data
# with a significant interaction). Uses car::Anova and emmeans follow-ups.
# Load dat1 and dat2 by loading file "Lab_06_Prestige.Rdata"
load(file = "Lab_06_Prestige.Rdata")
# Start with dat1
head(dat1)
# Reorder the levels of the type factor
levels(dat1$type)
dat1$type <- factor(x = dat1$type,
levels = c("bc", "wc", "prof"))
dat2$type <- factor(x = dat2$type,
levels = c("bc", "wc", "prof"))
# Table of type by income factor
table(dat1$type, dat1$incomeF)
# Inspect settings
options("contrasts")
# Set the contrast coding for undordered factors
# to deviation coding (contr.sum)
# NOTE: this mutates a global session option; Type III Anova() results
# below depend on it being set before the models are fit.
options(contrasts = c("contr.sum", "contr.poly"))
# check the contrast coding
contrasts(x = dat1$type)
contrasts(x = dat1$incomeF)
# Fit the full model (interaction and both main effects)
# with dat1
lm1 <- lm(formula = prestige ~ incomeF*type, data = dat1)
summary(lm1)
# examine the actual design matrix for lm1
model.matrix(lm1)
# Create the Two-Way ANOVA table
library(car)
Anova(lm1, type = 3)
# Create an interaction plot using emmeans
library(emmeans)
emm1 <- emmeans(object = lm1,
specs = ~ incomeF*type)
emm1
p <- emmip(object = emm1,
formula = incomeF ~ type,
xlab = "Type of Profession",
ylab = "Mean Prestige Score")
p$labels$colour <- "Income" # Change legend title
print(p)
# Interaction is not significant nor visually
# important, so examine main effects.
# Main effects of income factor, averaging over
# levels of job type:
emm2 <- emmeans(object = lm1,
specs = ~ incomeF)
emm2
# Main effects pairwise comparisons of income,
# averaging over job type levels:
pairs(emm2, adjust = "none")
# Main effects of job type factor, averaging over
# levels of income factor:
emm3 <- emmeans(object = lm1,
specs = ~ type)
emm3
# Main effects pairwise comparisons of job
# type, averaging over income levels:
pairs(emm3, adjust = "none")
################
### Now move to data set 2 (real Prestige data)
################
# Fit the full model with dat2
lm2 <- lm(formula = prestige ~ incomeF*type, data = dat2)
summary(lm2)
# Produce the Two-Way ANOVA table
Anova(lm2, type = 3)
# Create an interaction plot
emm4 <- emmeans(object = lm2,
specs = ~ incomeF*type)
emm4
emmip(object = emm4,
formula = incomeF ~ type,
xlab = "Type of Profession",
ylab = "Mean Prestige Score")
# Test the simple (main) effects of job type at
# both levels of income separately. We have to
# do conditional tests because of the significant
# two-way interaction.
joint_tests(lm2, by = "incomeF")
# Create an emm object that examines type conditional
# on level of income and use it to do simple
# pairwise comparisons.
emm5 <- emmeans(object = lm2,
specs = ~ type | incomeF)
emm5
pairs(emm5, adjust = "none")
# Look at the job titles that are bc and high income.
dat2$X[which(dat2$type == "bc" & dat2$incomeF == "high")]
# both blue collar and in the lower income category:
dat2$X[which(dat2$type == "bc" & dat2$incomeF == "low")]
# Could go the other way and test the simple (main) effects
# of income category at each of the three job types.
joint_tests(object = lm2, by = "type")
# Follow up with simple pairwise comparisons (though these
# will be the same as the joint test results because the
# income factor only has two levels).
emm6 <- emmeans(object = lm2,
specs = ~ incomeF | type)
pairs(emm6, adjust = "none")
|
d10168a3b398307224ff7318aba281d3cd5b5dbc
|
c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d
|
/rgl/demo/bivar.r
|
2589e036952135e60207ad0819412d19c4ad27e4
|
[
"MIT"
] |
permissive
|
solgenomics/R_libs
|
bcf34e00bf2edef54894f6295c4f38f1e480b3fc
|
e8cdf30fd5f32babf39c76a01df5f5544062224e
|
refs/heads/master
| 2023-07-08T10:06:04.304775
| 2022-05-09T15:41:26
| 2022-05-09T15:41:26
| 186,859,606
| 0
| 2
|
MIT
| 2023-03-07T08:59:16
| 2019-05-15T15:57:13
|
C++
|
UTF-8
|
R
| false
| false
| 1,041
|
r
|
bivar.r
|
# rgl demo: rgl-bivar.r
# author: Daniel Adler

# Visual comparison of a kernel-smoothed density estimate against the true
# parametric density of a bivariate standard normal, drawn as two 3D
# surfaces over the sampled points.
rgl.demo.bivar <- function() {
  if (!requireNamespace("MASS", quietly = TRUE))
    stop("This demo requires MASS")

  # sample size and grid resolution
  n_samples <- 50
  grid_size <- 40

  # reproducible bivariate standard-normal draws
  set.seed(31415)
  xs <- rnorm(n_samples)
  ys <- rnorm(n_samples)

  # non-parametric density surface via 2D kernel smoothing
  kde <- MASS::kde2d(xs, ys, n = grid_size)
  smoothed_z <- kde$z

  # parametric density surface: product of the two marginal normal densities
  gx <- kde$x
  gy <- kde$y
  normal_z <- dnorm(gx) %*% t(dnorm(gy))

  # vertical exaggeration factor so the surfaces are visible
  z_scale <- 20

  # fresh rgl window with a cleared scene, tinted background, and a light
  open3d()
  clear3d("all")
  bg3d(color="#887777")
  light3d()

  # samples as spheres on the baseline plane (z = 0)
  spheres3d(xs, ys, rep(0, n_samples), radius=0.1, color="#CCCCFF")
  # kernel-density surface (semi-transparent red)
  surface3d(gx, gy, smoothed_z * z_scale, color="#FF2222", alpha=0.5)
  # true parametric surface (wireframe)
  surface3d(gx, gy, normal_z * z_scale, color="#CCCCFF", front="lines")
}
rgl.demo.bivar()
|
bd6e68531e27cc2cc5e50a19aac4d019df32c0a1
|
38bf1be78abe8aa0337c2df690d8856b0dab278b
|
/data-wrangling-automation/main.R
|
e0565cfb933e06f4b2affcb31e01ac6894f4427f
|
[] |
no_license
|
GrejSegura/my-projects
|
8d3954b6f3afc0e8eb9dca1061edeab33a5dfc9e
|
d4d50fb2078e8cb79b2244b7acfff1c14b9b4d75
|
refs/heads/master
| 2022-01-08T14:29:18.623861
| 2019-04-28T19:31:20
| 2019-04-28T19:31:20
| 111,678,070
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 493
|
r
|
main.R
|
# main.R: interactive Windows workflow -- prompt for a working directory,
# load a CSV chosen by the user, transform it with save_on_one_sheet(),
# and write the result to an Excel workbook.
gc()
# NOTE(review): rm(list = ls()) wipes the entire workspace; intentional here
# since this is the entry-point script, but dangerous if sourced elsewhere.
rm(list = ls())
options(java.parameters = "-Xmx4g") ## memory control in RJava
library(tidyverse)
library(lubridate)
library(data.table)
library(xlsx)
library(XLConnect)
# winDialogString() is Windows-only; this script will not run on other OSes.
fileDirectory <- winDialogString("Please enter the file path below.", "")
setwd(fileDirectory)
# NOTE(review): the sourced file is save_one_sheet.R but the function called
# below is save_on_one_sheet() -- confirm the sourced file defines that name.
source("./src/save_one_sheet.R")
data <- read.csv(file.choose())
# warnings are suppressed only around the transformation call, then restored
options(warn = -1)
data <- save_on_one_sheet(data)
options(warn = 0)
# NOTE(review): sessions is computed but never used in this script.
sessions <- unique(data$session)
write.xlsx(data, "./output/final_output.xlsx")
|
6b0892e0070d6fd65a52c7e16ce6012123722dd0
|
956f493986a2e4836cd7d5565fb23be636645a24
|
/man/Variable.Rd
|
9488af65cd6baa9c43119193c3983fd53ce85faf
|
[
"MIT"
] |
permissive
|
Bhaskers-Blu-Org2/CNTK-R
|
d2fcb0eab33a5f6a9134fa20937622b308590b4a
|
5e2b8f5320a48dc492fa7dd1654532dd9e3e856a
|
refs/heads/master
| 2021-08-20T10:36:09.764093
| 2017-11-28T22:55:53
| 2017-11-28T22:55:53
| 276,122,540
| 0
| 1
|
MIT
| 2020-06-30T14:28:11
| 2020-06-30T14:28:10
| null |
UTF-8
|
R
| false
| true
| 552
|
rd
|
Variable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variables.R
\name{Variable}
\alias{Variable}
\title{Variable}
\usage{
Variable(shape = NULL, dtype = "auto", needs_gradient = FALSE,
is_sparse = FALSE, dynamic_axes = rep(c(get_default_batch_axis()), 2),
name = "")
}
\arguments{
\item{shape}{- list of ints representing tensor shape}
\item{dtype}{- data type to be used ("float32", "float64", or "auto")}
\item{name}{- optional name for the Variable}
}
\description{
Denotes a symbolic entity corresponding to the inputs and outputs of a
Function.
}
|
670694cc1117c638b441bbc7534cd128547f5c11
|
0de866f206f7aef72ac01061e69bd85c58ff8956
|
/ncaaPredictions/plumber.R
|
58eb9cafdcfd86ea3f9dd97c3d2ed18a75c84f8d
|
[] |
no_license
|
rahuljungbahadur/NCAA538
|
55f90b6076ee62f609d6b96ec14b88bc641be7be
|
e13f9e1217a8ca8e7372966530e07794041734ef
|
refs/heads/master
| 2023-01-07T03:22:34.649524
| 2020-11-08T22:04:33
| 2020-11-08T22:04:33
| 311,163,011
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 882
|
r
|
plumber.R
|
#
# This is a Plumber API. You can run the API by clicking
# the 'Run API' button above.
#
# Find out more about building APIs with Plumber here:
#
# https://www.rplumber.io/
#
library(plumber)
library(tidymodels)
#* @apiTitle Plumber Example API
#* Echo back the input
#* @param msg The message to echo
#* @get /echo
function(msg = "") {
list(msg = paste0("The message is: '", msg, "'"))
}
#* Return the sum of two numbers
#* @param a The first number to add
#* @param b The second number to add
#* @post /sum
function(a, b) {
as.numeric(a) + as.numeric(b)
}
# Load the fitted model once at API start-up so /predict requests reuse it.
# NOTE(review): the model path is an absolute Windows path; the API will fail
# to start on any other machine -- consider a relative path or config value.
fittedModel <- readRDS("E:\\TidyTuesdays\\NCAAWomensBasketball\\NCAA538\\bestFitModel.rds")
#* Returns the prediction based on the seed
#* @param newSeed The seed on which the prediction is made
#* @get /predict
function(newSeed) {
predict(fittedModel, new_data = tibble(seed = as.numeric(newSeed)))
}
|
8a83ebfe45c2f7a59d53e1034882e329bf2efca6
|
9b419f8d44b87b4913b26c267a02bc3bdc5e6b44
|
/Class_18/mangaManipulate.R
|
59ef0b4b1502cd7a6abfa9e2b3707d45bdaf19b2
|
[] |
no_license
|
djwrisley/IMUH1511
|
62502fddae3323f5d6116244d04e997536130ee9
|
a625b4256260964bfc98e8ddf2f5acb81b70bc01
|
refs/heads/master
| 2020-04-21T11:37:33.735425
| 2019-04-10T06:48:01
| 2019-04-10T06:48:01
| 169,532,342
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 748
|
r
|
mangaManipulate.R
|
# exercise adapted from ch 8 of Humanities Data in R (Arnold/Tilton)
# Demonstrates basic raster-image manipulation: crop, rotate, and channel
# isolation, all done by operating directly on the image's numeric array.
# loading the jpeg library for reading jpeg images
library(jpeg)
# reading the image into an array
# readJPEG returns a numeric array in [0, 1]; for a colour image the third
# dimension holds the R, G, B channels.
manga <- readJPEG("a114.jpeg")
# explore the array; can you tell how the image has been turned into numbers?
class(manga)
dim(manga)
range(manga)
# crop the image by pixels and save it
mangaCrop <- manga[1:130,1:150,]
# NOTE(review): plot() on a raw 3D array just scatter-plots the pixel values
# as a vector, not the image itself -- rasterImage() would display it; confirm
# this call is intentional.
plot(mangaCrop)
writeJPEG(mangaCrop, "mangaCrop.jpeg")
# rotate the image by changing the array
# aperm swaps rows and columns (transpose); reversing the first dimension
# afterwards turns the transpose into a proper 90-degree rotation.
mangaRotate <- aperm(a=manga,perm= c(2, 1, 3))
mangaRotate <- mangaRotate[dim(mangaRotate) [1]:1,,]
writeJPEG(mangaRotate,paste0("mangaRotate.jpeg"))
# isolate a color channel
# zeroing channels 2 and 3 (green, blue) leaves only the red channel
mangaRed <- manga
mangaRed[,,2:3] <- 0
writeJPEG(mangaRed, "mangaRed.jpeg")
|
55cff3294be2e4760c30ed7329042bd2a25082d1
|
68e768c4761f93c9745a033c3fbcf97ebee417b3
|
/R/coef.est_multi_poly_within.R
|
ac73af32649e84d800a08463471111dabd8f2f8f
|
[] |
no_license
|
cran/MLCIRTwithin
|
2b15f70102e15514c8fed30dc635f449ee79734c
|
8fcce562ed6e258c152c265a09c8e618eff1de55
|
refs/heads/master
| 2021-01-21T04:19:31.549149
| 2019-09-30T14:20:06
| 2019-09-30T14:20:06
| 36,813,236
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 836
|
r
|
coef.est_multi_poly_within.R
|
coef.est_multi_poly_within <- function(object, ...) {
  # coef() S3 method for class est_multi_poly_within: prints the estimated
  # abilities, item parameters, and regression coefficients, and invisibly
  # returns them as a list.
  #
  # object: fitted model with components Th1, Th2 (ability estimates for the
  #         two latent variables), Bec (item difficulty matrix), ga1c / ga2c
  #         (item discrimination parameters), and De1 / De2 (regression
  #         coefficient matrices).
  # ...:    unused; kept for S3 method signature compatibility.
  #
  # Returns (invisibly) a list with components Th1, Th2, Items, De1, De2.
  out <- object
  # print output
  cat("\nEstimated abilities for the 1st latent variable:\n")
  print(round(out$Th1, 4))
  cat("\nEstimated abilities for the 2nd latent variable:\n")
  print(round(out$Th2, 4))
  cat("\nEstimated item parameters:\n")
  # Label the difficulty columns beta1..betaK before binding in the
  # discrimination parameters.
  Tmp <- out$Bec
  for (j in seq_len(ncol(out$Bec))) colnames(Tmp)[j] <- paste("beta", j, sep = "")
  Items <- cbind(gamma1 = out$ga1c, gamma2 = out$ga2c, Tmp)
  print(round(Items, 4))
  cat("\nEstimated regression coefficients for the 1st latent variable:\n")
  print(round(out$De1, 4))
  cat("\nEstimated regression coefficients for the 2nd latent variable:\n")
  print(round(out$De2, 4))
  cat("\n")
  # BUG FIX: the original built the output list with out$Items, which is NULL
  # because Items is a local variable (the model object has no Items
  # component); use the locally assembled Items matrix instead. Also return
  # the list invisibly and explicitly, the usual convention for coef()
  # methods that print their output.
  out <- list(Th1 = out$Th1, Th2 = out$Th2, Items = Items, De1 = out$De1, De2 = out$De2)
  invisible(out)
}
|
b93c2eeecc8943dada671a2b69348acc03646b3b
|
bfcebe2f5231d8a6e446b086c82dc1a65879d985
|
/pollutantmean.R
|
7fc91b0531eb5c0d526c4a627b28e5ef6a39736c
|
[] |
no_license
|
dadhichmohit/program
|
cbe039d940b90d9c18651950eb145f718d72d759
|
9b962582b78050e299db7a86cc56fef454987783
|
refs/heads/master
| 2020-05-16T14:54:58.219775
| 2015-01-09T20:36:54
| 2015-01-09T20:36:54
| 29,032,376
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 691
|
r
|
pollutantmean.R
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # Compute the mean concentration of a pollutant ("sulfate" or "nitrate",
  # case-insensitive) across the monitor files <id>.csv found under
  # D:/<directory>/, ignoring missing values.
  #
  # directory: folder name under D:/ containing the monitor CSV files
  # pollutant: "sulfate" or "nitrate"
  # id:        integer vector of monitor ids, each in 1:332
  #
  # Returns the overall mean across all requested monitors, or NULL (with a
  # message printed) on invalid input.
  #
  # BUG FIX: the original only checked the first and last elements of id, so
  # an out-of-range id in the middle of the vector slipped through; validate
  # every element.
  if (length(id) == 0 || !all(id >= 1 & id <= 332)) {
    print("id out of range")
    return(invisible(NULL))
  }
  pollutant <- tolower(pollutant)
  if (!(pollutant %in% c("sulfate", "nitrate"))) {
    print("no pollutant of this name exists")
    return(invisible(NULL))
  }
  # NOTE(review): the data location is hard-coded to the D: drive, as in the
  # original; consider making the root path a parameter.
  files <- paste("D:/", directory, "/", sprintf("%03d", id), ".csv", sep = "")
  # Pool the pollutant column from every file, then average over non-NA
  # values. This equals the original totalsum / number computation (Sum/Mean
  # per file is the non-NA count), without the debug printing, and avoids
  # the NaN the original produced when a file's column was entirely NA.
  values <- unlist(lapply(files, function(f) read.csv(f)[[pollutant]]))
  mean(values, na.rm = TRUE)
}
|
b0fcc1012eef60ff92697a73b127769ebc9e9a17
|
58332751f752592649b7c1b2491b8e800fe6532d
|
/man/R.t.Rd
|
8a7606d81e84909e887c7821ce8c5da4c94b9c76
|
[] |
no_license
|
JPNotts/Package
|
57c1e38651ddbd557fd4c0ad1640e938878613dd
|
b6fb969cac267bc69b6bb246819e98cf970c8718
|
refs/heads/main
| 2023-08-14T08:02:27.972089
| 2021-10-04T12:05:10
| 2021-10-04T12:05:10
| 316,275,012
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 357
|
rd
|
R.t.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/QQ_KS.R
\name{R.t}
\alias{R.t}
\title{A function that returns R(t), the integrated intensity function.}
\usage{
R.t(q, time.step)
}
\arguments{
\item{q}{q}
\item{time.step}{time.step}
}
\value{
R(t), the integrated intensity function evaluated for \code{q} at the given \code{time.step}.
}
\description{
A function that returns R(t), the integrated intensity function.
}
|
57a2060144a945bd1c4d1a75a9761073361a5c21
|
fe612f81a3118bf3ebef644bae3281bd1c156442
|
/man/h2o.get_best_model_predictors.Rd
|
ffdc43b45b8003c9ad44702bafebe3a04c43097a
|
[] |
no_license
|
cran/h2o
|
da1ba0dff5708b7490b4e97552614815f8d0d95e
|
c54f9b40693ae75577357075bb88f6f1f45c59be
|
refs/heads/master
| 2023-08-18T18:28:26.236789
| 2023-08-09T05:00:02
| 2023-08-09T06:32:17
| 20,941,952
| 3
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 521
|
rd
|
h2o.get_best_model_predictors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modelselection.R
\name{h2o.get_best_model_predictors}
\alias{h2o.get_best_model_predictors}
\title{Extracts the subset of predictor names that yield the best R2 value for each predictor subset size.}
\usage{
h2o.get_best_model_predictors(model)
}
\arguments{
\item{model}{is a H2OModel with algorithm name of modelselection}
}
\description{
Extracts the subset of predictor names that yield the best R2 value for each predictor subset size.
}
|
80be2be7eaf07c404ae3bde8688ecd4788f3d963
|
09d57d28dc46e6a1029ac7370ae9fffdd84113d6
|
/iris_machineLearning_knn.R
|
c55f31a97c0837b890701c7f056147f187f85664
|
[] |
no_license
|
QSChou/Rlearning
|
2a7780a67c077e337d74a058f776b1c582bfd137
|
afafb67390d85c9f57dd7628076341afb1d881b3
|
refs/heads/master
| 2020-05-21T22:59:20.905405
| 2017-09-12T15:31:28
| 2017-09-12T15:31:28
| 48,810,298
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,469
|
r
|
iris_machineLearning_knn.R
|
# Below example use the iris data and knn to do classification
# Author: QS Chou Version 0.1 Date: 2016/01/12
# Although normaliztion is not required in the iris data set, below code still includes the normalization step for learning purpose
attach(iris)
# Min-max rescale a numeric vector into [0, 1].
normalize <- function(x) {
num <- x-min(x)
denum <- max(x) - min(x)
return (num/denum)
}
# test normalize; case when divided 0, it will return Null automatically.
# (division by zero actually yields NaN values, not NULL)
n1 <- normalize(c(100,100,100))
# test normalize; usual case
n2 <- normalize(c(200,100,300,29,39,19,10))
# test normalize; negative value case
n3 <- normalize(c(-200,100,300,-29,39,-19,10))
iris_norm <- as.data.frame(lapply(iris[1:4], normalize))
iris_n <- cbind(iris_norm,iris[5])
# sampling data set into training & test
# seed fixed so the ~70/30 split is reproducible
set.seed(1234)
ind <- sample(2, nrow(iris),replace = TRUE, prob = c(0.7,0.3))
# Original data set
iris_training <- iris[ind==1,1:4]
iris_training_labels <- iris[ind==1,5]
iris_test <- iris[ind==2, 1:4]
iris_test_lables <- iris[ind==2,5]
# Normalized data set
iris_n_training <- iris_n[ind==1,1:4]
iris_n_training_labels <- iris_n[ind==1,5]
iris_n_test <- iris_n[ind==2, 1:4]
iris_n_test_lables <- iris_n[ind==2,5]
# Apply knn for learning; it will need to install the 'class' packages
## Try with k= 3 & use the original data set
library(class)
iris_pred <- knn(train = iris_training, test = iris_test, cl = iris_training_labels, k=3)
iris_test_pred <- cbind(iris_test,iris_test_lables,iris_pred)
summary(iris_test_pred)
# confusion matrix: actual label vs predicted label
table(iris_test_pred$iris_test_lables,iris_test_pred$iris_pred)
## Try with k= 5 & use the original data set
library(class)
iris_pred <- knn(train = iris_training, test = iris_test, cl = iris_training_labels, k=5)
iris_test_pred <- cbind(iris_test,iris_test_lables,iris_pred)
summary(iris_test_pred)
table(iris_test_pred$iris_test_lables,iris_test_pred$iris_pred)
## Try with k= 3 & use the normalized data set
library(class)
iris_pred <- knn(train = iris_n_training, test = iris_n_test, cl = iris_n_training_labels, k=3)
iris_test_pred <- cbind(iris_n_test,iris_n_test_lables,iris_pred)
summary(iris_test_pred)
table(iris_test_pred$iris_n_test_lables,iris_test_pred$iris_pred)
## Try with k= 5 & use the normalized data set
# NOTE(review): the comment says k=5 but the call below uses k=11, and it
# binds the UNnormalized iris_test features to the normalized-set labels
# (row order is the same, so labels still match) -- confirm both are intended.
library(class)
iris_pred <- knn(train = iris_n_training, test = iris_n_test, cl = iris_n_training_labels, k=11)
iris_test_pred <- cbind(iris_test,iris_n_test_lables,iris_pred)
summary(iris_test_pred)
table(iris_test_pred$iris_n_test_lables,iris_test_pred$iris_pred)
|
0a977128b244615722c7746eab6a076effc4407c
|
619c4321ffc122fa0333a7e35e6fba16d1f9368b
|
/man/FarmerI_yld_henrys_2018.Rd
|
092b1e0e91fd47d9150b35c8d7f7b8442caead97
|
[
"MIT"
] |
permissive
|
paulhegedus/OFPEDATA
|
4a6c3add78208f0250139c55d061f31775d56789
|
ae70c5d8ad4fe154fd4b746a7ec43f3031a6e0a9
|
refs/heads/master
| 2022-12-22T21:51:29.566593
| 2020-09-14T00:14:19
| 2020-09-14T00:14:19
| 278,233,610
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,564
|
rd
|
FarmerI_yld_henrys_2018.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FarmerI_yld_henrys_2018.R
\docType{data}
\name{FarmerI_yld_henrys_2018}
\alias{FarmerI_yld_henrys_2018}
\title{FarmerI_yld_henrys_2018}
\format{
A data frame with 31276 rows and 39 variables:
\describe{
\item{\code{gid}}{integer Observation identifier.}
\item{\code{field}}{character Field name.}
\item{\code{dataset}}{character Date and time of collection.}
\item{\code{product}}{character Product identifier.}
\item{\code{obj__id}}{double Observation identifier.}
\item{\code{dstnc_f}}{double Distance between observations in feet.}
\item{\code{trck_d_}}{double Track distance.}
\item{\code{durtn_s}}{double Duration of collection in seconds.}
\item{\code{elevtn_}}{double Elevation of collection.}
\item{\code{time}}{double Date of collection.}
\item{\code{are_cnt}}{character On/Off counter.}
\item{\code{swth_w_}}{double Width of swath.}
\item{\code{dff_stt}}{character Yes/No option.}
\item{\code{crp_flw_m}}{double Mass of crop flowing through combine.}
\item{\code{mostr__}}{double Moisture content (\%).}
\item{\code{yld_mss_w}}{double Mass of wet yield.}
\item{\code{yld_vl_w}}{double Volume of wet yield.}
\item{\code{yld_mss_d}}{double Mass of dry yield.}
\item{\code{yld_vl_d}}{double Volume of dry yield, this is the harvest yield in bushels per acre.}
\item{\code{wrk_stt}}{character In/Out option.}
\item{\code{y_ffst_}}{double Yield offset.}
\item{\code{prtn___}}{double Unknown.}
\item{\code{ful_sd_}}{double Unknown.}
\item{\code{fl_ff__}}{double Unknown.}
\item{\code{eng_pw_}}{double Engine power.}
\item{\code{eng_l__}}{double Engine load.}
\item{\code{pass_nm}}{double Number of pass.}
\item{\code{spd_mp_}}{double Speed in MPH.}
\item{\code{prd_c__}}{double Unknown.}
\item{\code{ful_cn_}}{double Unknown.}
\item{\code{fl_cn_t}}{double Unknown.}
\item{\code{fl_cn_m}}{double Unknown.}
\item{\code{fl_cn_v}}{double Unknown.}
\item{\code{crp_flw_v}}{double Crop flow volume.}
\item{\code{fl_ff_m}}{double Unknown.}
\item{\code{date}}{double Date of observation}
\item{\code{yl18_r_}}{character Unknown.}
\item{\code{orig_file}}{character Original shapefile name.}
\item{\code{geometry}}{list Coordinates of harvest points.}
}
}
\usage{
FarmerI_yld_henrys_2018
}
\description{
Winter wheat crop yield (bushels per acre) data
collected from Farmer I's Case combine in 2018 Yield
data is automatically collected ~3 seconds during harvest.
Data is raw yield data with attributes recorded by
Farmer I in the field named henrys.
}
\details{
DETAILS
}
\keyword{datasets}
|
dcb60e62940a72ccf212b571c8453a4d9050a127
|
bbf40006c142498db5b8e6d62f65a2b1023757ec
|
/test.r
|
f13130c7ccfd667c73ef5e7808edadc2ccba7019
|
[] |
no_license
|
amymscott/test
|
7e2f50d75b4fd88000da39608126d8b35359112f
|
756dd5b330350f8b69caf1804a777ea1647899de
|
refs/heads/master
| 2021-06-26T07:31:03.736033
| 2017-09-15T15:36:38
| 2017-09-15T15:36:38
| 103,672,551
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18
|
r
|
test.r
|
# Simple test script: a numeric sum and a string constant.
f <- 4 + 5
a <- "apple"
|
6a21f0083d04d680fed63622f5a1dd22bc743dab
|
e431281e3de2743f23355c20fb378fc4cc534c34
|
/R/Intermediate Practice.R
|
822c81928dec3f4b4905c3ad53de1b531979b349
|
[] |
no_license
|
sue-wallace/datacamp_datascience
|
4f02a207a4e78c9c440a77b3f6145d3b7637c564
|
80216a8dbf0b93e02b719dae3d6613ea16d02065
|
refs/heads/master
| 2020-03-11T08:04:02.707467
| 2018-04-23T13:13:28
| 2018-04-23T13:13:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,620
|
r
|
Intermediate Practice.R
|
#Sue Wallace
#30/12/2017
#Intermediate Practice
##dim(x) - to see how many variables and rows there are
####LAPPY----
#useful for applying a function to lots of variables within the data
lapply(x, my_fun)
#where x is the data and my_fun is the function (i.e. sum)
d <- mtcars
sum(d$carb)
lapply(d, class)
lapply(d, sum)
#you can create your own function and then apply it to all variables
get_mpg <- function(x) {
d$mpg
}
# Apply get_timestamp() over all elements in logs
lapply(d, get_mpg)
#The below adds each number in the list by 2
a <- list(3, 2, 5, 8, 9, 10, 76)
lapply(a, function(x) { 2 + x })
x <- list(a = 1, b = 2, c = 3)
x[["a"]]
`[[`(x, "a")
####SAPPLY----
#performs an lapply(), and sees whether the result can be simplified to a vector.
sapply(d, length)
sapply(d, sum)
str(d[[5]])
# Implement function get_failure_carb
#the below function will return carb, unless carn is null, then it will return mpg
get_failure_carb <- function(x) {
if (d$carb) {
return(NULL)
} else {
return(d$mpg)
}
}
# Use sapply() to call get_failure_loc on logs
sapply(d,get_failure_carb)
####VAPPLY----
#You have to explicitly mention what the outcome of the function you're applying will be with vapply(). the below should
#return TRUE or FALSE
vapply(d, `[[`, "mpg", FUN.VALUE = logical(1))
####EXPLORATORY QUERIES----
# Print out total value of fares
sum(titanic$Fare)
# Print out proportion of passengers that survived
round (mean(titanic$Survived)*100,1)
####GREPL
#grepl returns TRUE if a string contains the pattern, otherwise FALSE;
#if the parameter is a string vector, returns a logical vector
#(match or not for each element of the vector).
#grepl(pattern, x, ignore.case = FALSE, perl = FALSE,
#fixed = FALSE, useBytes = FALSE)
#.pattern: regular expression, or string for fixed=TRUE
#.x: string, the character vector
#.ignore.case: case sensitive or not
#.perl: logical. Should perl-compatible regexps be used? Has priority over extended
#.fixed: logical. If TRUE, pattern is a string to be matched as is. Overrides all conflicting arguments
#.useBytes: logical. If TRUE the matching is done byte-by-byte rather than character-by-character
# Extract the name column from titanic
pass_names <- titanic$Name
# Create the logical vectror is_man
is_man <- grepl(", Mr\\.", pass_names)
# Count the number of men
sum(is_man)
# Count number of men based on gender
sum(titanic$Sex == "male")
# Extract the name column from titanic
pass_names <- titanic$Name
# Create titles
titles <- gsub("^.*, (.*?)\\..*$", "\\1", pass_names)
# Call unique() on titles
unique(titles)
#The below craetes a variable with all possible titles. vapply and grepl are then used to work out how many passengers
#have one of the titles.
pass_names <- titanic$Name
titles <- paste(",", c("Mr\\.", "Master", "Don", "Rev", "Dr\\.", "Major", "Sir", "Col", "Capt", "Jonkheer"))
# Finish the vapply() command
hits <- vapply(titles,
FUN = grepl,
FUN.VALUE = logical(length(pass_names)),
pass_names)
# Calculate the sum() of hits
sum(hits)
##The below cleans the name coloumn
# pass_names is available in your workspace
# Finish the convert_name() function
convert_name <- function(name) {
# women: take name from inside parentheses
if (grepl("\\(.*?\\)", name)) {
gsub("^.*?\\((.*?)\\)$", "\\1", name)
# men: take name before comma and after title
} else {
gsub("^(.*?),\\s[a-zA-Z\\.]*?\\s(.*?)$", "\\2 \\1", name)
}
}
# Call convert_name on name
clean_pass_names <- vapply(pass_names, FUN = convert_name,
FUN.VALUE = character(1), USE.NAMES = FALSE)
# Print out clean_pass_names
# Count number of men based on gender
sum(titanic$Sex == "male")
#converting to dates
# titanic, dob1 and dob2 are preloaded
# Have a look at head() of dob1 and dob2
head(dob1)
head(dob2)
# Convert dob1 to dob1d, convert dob2 to dob2d
dob1d <- as.Date(dob1)
dob2d <- as.Date(dob2, format = "%B %d, %Y")
# Combine dob1d and dob2d into single vector: birth_dates
birth_dates <- c(dob1d, dob2d)
#adding a column----
# titanic, dob1 and dob2 are preloaded
dob1d <- as.Date(dob1)
dob2d <- as.Date(dob2, format = "%B %d, %Y")
birth_dates <- c(dob1d, dob2d)
disaster_date <- as.Date("1912-04-15")
# Add birth_dates to titanic (column Birth)
titanic$Birth <- birth_dates
# Create subset: survivors
survivors <- subset(titanic, Survived == 1)
# Calculate average age of survivors
mean(disaster_date - survivors$Birth, na.rm = TRUE)
d <- mtcars
mtcars$day <- sum(mtcars$mpg, mtcars$cyl)
|
c89c138b85e967e94a67aed12f2570ec4fb17b33
|
c2f3e71eb4842cb5b02f367dad4680634017b92c
|
/.init.R
|
78fe2c2167a200d83c43bfb2c7ddaee4f8ec12db
|
[] |
no_license
|
MElhalawany/R_Intro-Environment
|
9b7c40f1e4944be2302fc8a0bdbc2de3f890b20d
|
0671311f61f5f1dd86a4e9fca07a90d4e06fc1f9
|
refs/heads/master
| 2022-02-01T07:48:18.677752
| 2016-09-13T21:37:52
| 2016-09-13T21:37:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 189
|
r
|
.init.R
|
# .init.R
# Initialize this tutorial session: opens the main course script in the
# editor when the project is loaded.
# Boris Steipe
# ====================================================================
file.edit("R_Intro-Environment.R")
# [End]
|
3e58e754e1eb5c376a4557c12d51231630edf4ae
|
3fdb12a1fe34aca6b96aa9047df4593404a5fc52
|
/rhocnps.R
|
b369abab801131e01184ee2308727b1c32f93afe
|
[] |
no_license
|
carnegie-dpb/bartonlab-modeling
|
06c90e10df8fc37973a02db41f2c882bc8ceedfd
|
7d875f16f675bf94fc04a360ae8f6855d4642619
|
refs/heads/master
| 2021-01-22T03:48:47.674881
| 2018-04-18T22:29:04
| 2018-04-18T22:29:04
| 81,460,423
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,135
|
r
|
rhocnps.R
|
##
## Numerically solve a linear transcription model for a primary and a
## secondary target by integrating the ODE system over an input time array.
##
# NOTE(review): require() returns FALSE (with only a warning) when deSolve is
# missing; library(deSolve) would fail loudly at the point of loading.
require("deSolve")
# Integrate the four-species linear transcription ODE system.
#
# State variables: rhoc (cytoplasmic species), rhon (nuclear species),
# rhop (primary target), rhos (secondary target).
#
# Arguments:
#   turnOff  time after which the primary production rate etap is set to 0;
#            0 (the default) disables the switch-off entirely
#   rhoc0, rhon0, rhop0, rhos0  initial conditions; rhon0/rhop0/rhos0 are
#            also the offsets the decay terms relax toward
#   nu, gammae, gamman  transport/decay rates coupling rhoc and rhon
#   etap, gammap        production/decay rates for the primary target
#   etas, gammas        production/decay rates for the secondary target
#   t        numeric vector of time points at which to report the solution
#
# Returns a data.frame with columns t, rhoc, rhon, rhop, rhos.
rhocnps = function(turnOff=0, rhoc0, rhon0, nu, gammae, gamman, rhop0, etap, gammap, rhos0, etas, gammas, t) {
    ## derivatives for rhoc, rhon, rhop and rhos at time t
    derivs = function(t, y, parms) {
        rhoc = y[1]
        rhon = y[2]
        rhop = y[3]
        rhos = y[4]
        ## TURN OFF etap for t>turnOff (sharp step; the commented-out block
        ## below is an alternative smooth logistic turn-off)
        if (turnOff>0 && t>turnOff) etap = 0
        ## continuous logistic function to model turn-off
        ## if (turnOff>0) {
        ##   sigma = 100.0
        ##   etap = etap*2*exp(-sigma*(t-turnOff))/(1+exp(-sigma*(t-turnOff)))
        ## }
        return( list( c(
            -nu*rhoc + gammae*(rhon-rhon0),
            +nu*rhoc - gammae*(rhon-rhon0) - gamman*(rhon-rhon0),
            +etap*rhon - gammap*(rhop-rhop0),
            +etas*rhop - gammas*(rhos-rhos0)
        )))
    }
    ## integrate the equations with lsoda at the requested time points
    out = ode(y=c(rhoc0,rhon0,rhop0,rhos0), times=t, func=derivs, parms=NULL, method="lsoda")
    ## return the solution matrix repackaged as a data frame
    return(data.frame(t=out[,1], rhoc=out[,2], rhon=out[,3], rhop=out[,4], rhos=out[,5]))
}
|
47f83cfb3d8c0c4ece6981eabc7ea00eddd8cfcd
|
5ffffcfbf8c869c9ff1ff3620f61fe31bb7ece36
|
/plot4.R
|
6600853eb3ffb7fa69d989900f2707303830294c
|
[] |
no_license
|
litannalex/ExData_Plotting1
|
1aa7af08fbda70f34c2c22ef7d7d893ecbd2567e
|
70e67c458f4b55afba5945c22ab184b53ddeb67c
|
refs/heads/master
| 2021-01-01T18:57:19.021576
| 2017-07-27T01:22:49
| 2017-07-27T01:22:49
| 98,469,133
| 0
| 0
| null | 2017-07-26T21:58:46
| 2017-07-26T21:58:46
| null |
UTF-8
|
R
| false
| false
| 1,729
|
r
|
plot4.R
|
# Plot 4 of the Exploratory Data Analysis course project: a 2x2 panel of
# household power consumption plots for 2007-02-01 and 2007-02-02.
# NOTE(review): clears the entire workspace; fine for a standalone script,
# but destructive when sourced into an interactive session.
remove(list = ls())
# downloading and unzipping files (re-downloads on every run)
URL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
f <- file.path(getwd(), "household_power_consumption.zip")
download.file(URL, f)
unzip("household_power_consumption.zip", exdir = getwd())
# reading data; "?" encodes missing values in this dataset
data <- read.table("./household_power_consumption.txt", sep = ";", na.strings = "?", header=TRUE)
# Converting the Date column from character to Date
data$Date <- as.Date(data$Date, format = "%d/%m/%Y")
# Subsetting the two days of interest
mydata <- subset(data, Date == "2007-02-01" | Date == "2007-02-02")
# Combining date and time into timestamps for the x axis
DateTime <- paste(mydata$Date, mydata$Time, sep = " ")
mydata$Time <- strptime(DateTime, format = "%Y-%m-%d %H:%M:%S")
# 2x2 grid of plots on a transparent background
par(mfrow = c(2, 2), bg = "transparent")
# Top-left: global active power over time
plot(mydata$Time, mydata$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)", bty = "o")
# Top-right: voltage over time
plot(mydata$Time, mydata$Voltage, type = "l", xlab = "datetime",
     ylab = "Voltage", bty = "o")
# Bottom-left: the three sub-metering series overlaid, with a legend
plot(mydata$Time, mydata$Sub_metering_1, type = "l", xlab = "",
     ylab = "Energy sub metering", bty = "o")
points(mydata$Time, mydata$Sub_metering_2, type = "l", col = "red")
points(mydata$Time, mydata$Sub_metering_3, type = "l", col = "blue")
legend("topright", lwd = c(1, 1) , col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       bty = "n", cex = 1, y.intersp = 0.5)
# Bottom-right: global reactive power over time
plot(mydata$Time, mydata$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power", bty = "o")
# Copy the screen device to a 480x480 PNG file
# NOTE(review): dev.copy() after on-screen plotting can render slightly
# differently from opening png() first — confirm this is acceptable.
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
|
bbc1900676e4a1bad89fc385dc1ce9cc5adfb0f0
|
6f81b6ed71739feaf017ab7e8fbc011d3bd2920b
|
/ui.R
|
221fef2a7085a96cdaa5782bfe87349415071ead
|
[] |
no_license
|
Invictus666/Portfolio
|
24f7d0ee37d083a7d415e9c67a6bb4fbbdcbed67
|
7d3a772524cb09d709a10e40453d6ad95df23dc5
|
refs/heads/master
| 2020-06-05T16:47:22.102820
| 2014-06-12T08:49:20
| 2014-06-12T08:49:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 707
|
r
|
ui.R
|
library(shiny)
# Shiny UI: sidebar layout for a stock-portfolio simulator. The user picks a
# single ETF or the composite "Permanent Portfolio" (PP); the three numeric
# inputs are the PP component weights and are expected to sum to 100.
shinyUI(pageWithSidebar(
  headerPanel("Stock Portfolio Simulator"),
  sidebarPanel(
    width=2,
    # asset selector; values are Yahoo-style tickers, plus "PP" for the mix
    radioButtons("radio", label = h3("Select your asset:"), choices = list("STI ETF" = "ES3.SI", "Asian Bond ETF" = "A35.SI","Commodities ETF" = "A0W.SI","Permanent Portfolio" = "PP"),selected = "PP"),
    # component weights in percent; server side presumably validates the sum
    numericInput('ei', 'Equity Component', 34, min = 0, max = 100, step = 1),
    numericInput('bi', 'Bond Component', 33, min = 0, max = 100, step = 1),
    numericInput('ci', 'Commodities Component', 33, min = 0, max = 100, step = 1),
    h4("Do ensure that all components sum to 100%"),
    # inputs only take effect when the user presses Submit
    submitButton('Submit')
  ),
  mainPanel(width=10, plotOutput('diagram', height="800px"))
)
)
|
447cc49c5b2d8f16befddea7c3bd18c94318fcc5
|
13fd537c59bf51ebc44b384d2b5a5d4d8b4e41da
|
/R/tests/testdir_autoGen/runit_simpleFilterTest_tnc3_49.R
|
e3ee57c737940da54c35984bdc7730ad1ba5e677
|
[
"Apache-2.0"
] |
permissive
|
hardikk/h2o
|
8bd76994a77a27a84eb222a29fd2c1d1c3f37735
|
10810480518d43dd720690e729d2f3b9a0f8eba7
|
refs/heads/master
| 2020-12-25T23:56:29.463807
| 2013-11-28T19:14:17
| 2013-11-28T19:14:17
| 14,797,021
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,067
|
r
|
runit_simpleFilterTest_tnc3_49.R
|
##
# Author: Autogenerated on 2013-11-27 18:13:58
# gitHash: c4ad841105ba82f4a3979e4cf1ae7e20a5905e59
# SEED: 4663640625336856642
##
# NOTE: this file is machine-generated (see header above). Do not hand-edit;
# regenerate instead. It exercises the h2o "<=" filter operation on the tnc3
# dataset with randomly drawn threshold values, checking only that each
# filter expression executes without error (filterHex is overwritten each
# time and never inspected).
source('./findNSourceUtils.R')
Log.info("======================== Begin Test ===========================")
simpleFilterTest_tnc3_49 <- function(conn) {
  Log.info("A munge-task R unit test on data <tnc3> testing the functional unit <<=> ")
  Log.info("Uploading tnc3")
  hex <- h2o.uploadFile(conn, locate("../../smalldata/tnc3.csv"), "rtnc3.hex")
  # --- simple "<=" filters: each value is applied once via column-name
  # --- indexing and once via the `$` accessor ---
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"pclass\" using value 1.16859362136")
  filterHex <- hex[hex[,c("pclass")] <= 1.16859362136,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"pclass" <= 1.16859362136,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"pclass\" using value 2.79915321422")
  filterHex <- hex[hex[,c("pclass")] <= 2.79915321422,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"pclass" <= 2.79915321422,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"fare\" using value 88.740951772")
  filterHex <- hex[hex[,c("fare")] <= 88.740951772,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"fare" <= 88.740951772,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"age\" using value 48.3412972842")
  filterHex <- hex[hex[,c("age")] <= 48.3412972842,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"age" <= 48.3412972842,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"survived\" using value 0.852817634299")
  filterHex <- hex[hex[,c("survived")] <= 0.852817634299,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"survived" <= 0.852817634299,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"age\" using value 14.130200359")
  filterHex <- hex[hex[,c("age")] <= 14.130200359,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"age" <= 14.130200359,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"fare\" using value 331.426035691")
  filterHex <- hex[hex[,c("fare")] <= 331.426035691,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"fare" <= 331.426035691,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"age\" using value 61.5949293089")
  filterHex <- hex[hex[,c("age")] <= 61.5949293089,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"age" <= 61.5949293089,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"parch\" using value 5.56499170464")
  filterHex <- hex[hex[,c("parch")] <= 5.56499170464,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"parch" <= 5.56499170464,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"age\" using value 24.1257964371")
  filterHex <- hex[hex[,c("age")] <= 24.1257964371,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"age" <= 24.1257964371,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"fare\" using value 328.159534609")
  filterHex <- hex[hex[,c("fare")] <= 328.159534609,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"fare" <= 328.159534609,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"age\" using value 42.699587709")
  filterHex <- hex[hex[,c("age")] <= 42.699587709,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"age" <= 42.699587709,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"fare\" using value 74.5407804005")
  filterHex <- hex[hex[,c("fare")] <= 74.5407804005,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"fare" <= 74.5407804005,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"age\" using value 40.8114758731")
  filterHex <- hex[hex[,c("age")] <= 40.8114758731,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"age" <= 40.8114758731,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"age\" using value 67.2132285495")
  filterHex <- hex[hex[,c("age")] <= 67.2132285495,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"age" <= 67.2132285495,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"fare\" using value 310.268503916")
  filterHex <- hex[hex[,c("fare")] <= 310.268503916,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"fare" <= 310.268503916,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"survived\" using value 0.169838394841")
  filterHex <- hex[hex[,c("survived")] <= 0.169838394841,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"survived" <= 0.169838394841,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"parch\" using value 5.74615990691")
  filterHex <- hex[hex[,c("parch")] <= 5.74615990691,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"parch" <= 5.74615990691,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"sibsp\" using value 6.31277981312")
  filterHex <- hex[hex[,c("sibsp")] <= 6.31277981312,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"sibsp" <= 6.31277981312,]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"survived\" using value 0.930697390234")
  filterHex <- hex[hex[,c("survived")] <= 0.930697390234,]
  Log.info("Perform filtering with the '$' sign also")
  filterHex <- hex[hex$"survived" <= 0.930697390234,]
  # --- same "<=" filters combined with column subsetting: first selecting
  # --- only the filtered column, then selecting the full column list ---
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"pclass\" using value 1.39454878494, and also subsetting columns.")
  filterHex <- hex[hex[,c("pclass")] <= 1.39454878494, c("pclass")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("pclass")] <= 1.39454878494, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"fare\" using value 265.539319381, and also subsetting columns.")
  filterHex <- hex[hex[,c("fare")] <= 265.539319381, c("fare")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("fare")] <= 265.539319381, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"sibsp\" using value 1.0408691284, and also subsetting columns.")
  filterHex <- hex[hex[,c("sibsp")] <= 1.0408691284, c("sibsp")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("sibsp")] <= 1.0408691284, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"age\" using value 45.2853331205, and also subsetting columns.")
  filterHex <- hex[hex[,c("age")] <= 45.2853331205, c("age")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("age")] <= 45.2853331205, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"parch\" using value 8.15570387869, and also subsetting columns.")
  filterHex <- hex[hex[,c("parch")] <= 8.15570387869, c("parch")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("parch")] <= 8.15570387869, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"fare\" using value 74.7500195684, and also subsetting columns.")
  filterHex <- hex[hex[,c("fare")] <= 74.7500195684, c("fare")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("fare")] <= 74.7500195684, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"parch\" using value 6.49202590977, and also subsetting columns.")
  filterHex <- hex[hex[,c("parch")] <= 6.49202590977, c("parch")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("parch")] <= 6.49202590977, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"pclass\" using value 1.75094296678, and also subsetting columns.")
  filterHex <- hex[hex[,c("pclass")] <= 1.75094296678, c("pclass")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("pclass")] <= 1.75094296678, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"pclass\" using value 2.97840783955, and also subsetting columns.")
  filterHex <- hex[hex[,c("pclass")] <= 2.97840783955, c("pclass")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("pclass")] <= 2.97840783955, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"pclass\" using value 2.42120664856, and also subsetting columns.")
  filterHex <- hex[hex[,c("pclass")] <= 2.42120664856, c("pclass")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("pclass")] <= 2.42120664856, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"parch\" using value 6.62654127699, and also subsetting columns.")
  filterHex <- hex[hex[,c("parch")] <= 6.62654127699, c("parch")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("parch")] <= 6.62654127699, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"sibsp\" using value 2.34649732698, and also subsetting columns.")
  filterHex <- hex[hex[,c("sibsp")] <= 2.34649732698, c("sibsp")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("sibsp")] <= 2.34649732698, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"age\" using value 0.614716608698, and also subsetting columns.")
  filterHex <- hex[hex[,c("age")] <= 0.614716608698, c("age")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("age")] <= 0.614716608698, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"fare\" using value 375.589101704, and also subsetting columns.")
  filterHex <- hex[hex[,c("fare")] <= 375.589101704, c("fare")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("fare")] <= 375.589101704, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"pclass\" using value 1.67215301188, and also subsetting columns.")
  filterHex <- hex[hex[,c("pclass")] <= 1.67215301188, c("pclass")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("pclass")] <= 1.67215301188, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"sibsp\" using value 5.44157049141, and also subsetting columns.")
  filterHex <- hex[hex[,c("sibsp")] <= 5.44157049141, c("sibsp")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("sibsp")] <= 5.44157049141, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"pclass\" using value 1.07260709597, and also subsetting columns.")
  filterHex <- hex[hex[,c("pclass")] <= 1.07260709597, c("pclass")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("pclass")] <= 1.07260709597, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"age\" using value 45.9928629432, and also subsetting columns.")
  filterHex <- hex[hex[,c("age")] <= 45.9928629432, c("age")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("age")] <= 45.9928629432, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"age\" using value 63.3059281732, and also subsetting columns.")
  filterHex <- hex[hex[,c("age")] <= 63.3059281732, c("age")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("age")] <= 63.3059281732, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
  Log.info("Filtering out rows by <= from dataset tnc3 and column \"sibsp\" using value 0.761499201582, and also subsetting columns.")
  filterHex <- hex[hex[,c("sibsp")] <= 0.761499201582, c("sibsp")]
  Log.info("Now do the same filter & subset, but select complement of columns.")
  filterHex <- hex[hex[,c("sibsp")] <= 0.761499201582, c("boat","name","sex","fare","pclass","survived","ticket","cabin","age","body","embarked","home.dest","sibsp","parch")]
}
# Run the generated test against the local h2o instance; warnings and errors
# are routed to the harness's WARN/FAIL reporters.
conn = new("H2OClient", ip=myIP, port=myPort)
tryCatch(test_that("simpleFilterTest_ on data tnc3", simpleFilterTest_tnc3_49(conn)), warning = function(w) WARN(w), error = function(e) FAIL(e))
PASS()
|
90746ba141aeb33fda43a9e17fea2f8a5ad85f2f
|
b1f52acdcf0dc387702357cd18d26a54c0defbc1
|
/R/get_log_density_ph.R
|
0ae8c6bd7de1168012c5d16ea3e3e338bd2529bb
|
[] |
no_license
|
oliviayu/robust-HHJMs
|
b0af6060a38af9d56a8f5c1f008c422b17ccf188
|
87179785c6a0b394871db3366967059346484a92
|
refs/heads/master
| 2023-06-05T23:05:20.965988
| 2021-06-28T23:33:30
| 2021-06-28T23:33:30
| 216,898,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 984
|
r
|
get_log_density_ph.R
|
#' Get the log density function of a proportional hazards model
#'
#' Builds, as a character string, one subject's contribution to the
#' log-likelihood of a proportional hazards survival submodel.
#'
#' @param model_object A list describing the submodel, with elements
#'   \code{event} (name of the event indicator), \code{reg_equation}
#'   (linear predictor as a string), \code{response} (name of the survival
#'   time variable; used only for Weibull), and optionally
#'   \code{distribution}: \code{NULL} for a Cox model with an externally
#'   supplied baseline hazard, or \code{"weibull"} for a Weibull baseline.
#' @return A list with \code{log_density} (the log-density expression as a
#'   string) and \code{additional_param} (names of the extra baseline
#'   parameters the expression introduces).
get_log_density_ph <- function(model_object){
  if(is.null(model_object$distribution)){
    # Cox PH model: nonparametric baseline hazard, supplied externally as
    # hazard0 / cum_hazard0
    log_density <- paste(model_object$event, "*( log(hazard0) +",
                         model_object$reg_equation, ") - exp(",
                         model_object$reg_equation, ")*cum_hazard0")
    additional_param <- c("hazard0", "cum_hazard0")
  } else if(model_object$distribution == "weibull"){
    # Weibull PH model: parametric baseline with log-scale and shape
    log_density <- paste(model_object$event, "*( Wlogscale+log(Wshape)+log(",
                         model_object$response, ")*(Wshape-1) +",
                         model_object$reg_equation, ") - exp(Wlogscale+",
                         model_object$reg_equation, ")*",
                         model_object$response, "^Wshape")
    additional_param <- c("Wlogscale", "Wshape")
  } else {
    # BUG FIX: previously an unsupported distribution fell through and the
    # final list() failed with "object 'log_density' not found"; fail with a
    # clear message instead.
    stop("Unsupported distribution: ", model_object$distribution, call. = FALSE)
  }
  list(log_density = log_density,
       additional_param = additional_param)
}
|
ecb1c0521d076b665328c857a47fc4dc22e2372a
|
9e8936a8cc7beae524251c8660fa755609de9ce5
|
/inst/add-in/parsnip_model_db.R
|
37cb8c1cc37f312f432300873dad8e6992b3c8cc
|
[
"MIT"
] |
permissive
|
tidymodels/parsnip
|
bfca10e2b58485e5b21db64517dadd4d3c924648
|
907d2164a093f10cbbc1921e4b73264ca4053f6b
|
refs/heads/main
| 2023-09-05T18:33:59.301116
| 2023-08-17T23:45:42
| 2023-08-17T23:45:42
| 113,789,613
| 451
| 93
|
NOASSERTION
| 2023-08-17T23:43:21
| 2017-12-10T22:48:42
|
R
|
UTF-8
|
R
| false
| false
| 2,798
|
r
|
parsnip_model_db.R
|
# ------------------------------------------------------------------------------
# Build the parsnip model database consumed by the RStudio add-in: scan the
# model-defining packages for model specifications, record each
# model/engine/mode combination plus its tunable parameters, and save the
# result as package data.
# ------------------------------------------------------------------------------
library(tidymodels)
library(usethis)
# Packages whose models are indexed; each must also be installed:
packages <- c("parsnip", "discrim", "plsmod", "rules", "baguette", "poissonreg",
              "multilevelmod", "modeltime", "modeltime.gluonts")
# Detect the model specification functions in package `x` via their print
# methods: every model spec class gets a `print.<model>` S3 method, so listing
# those in the package namespace and stripping the `print.` prefix recovers
# the model function names. Returns one row per model/engine/mode with a
# `package` column appended.
print_methods <- function(x) {
  # library() rather than require(): a missing package then fails here with a
  # clear error instead of an obscure one from asNamespace() below
  library(x, character.only = TRUE)
  ns <- asNamespace(ns = x)
  mthds <- ls(envir = ns, pattern = "^print\\.")
  mthds <- gsub("^print\\.", "", mthds)
  purrr::map(mthds, get_engines) %>% purrr::list_rbind() %>% dplyr::mutate(package = x)
}
# Look up the available engine/mode combinations for model function `x`.
# Models that parsnip does not recognise yield a single placeholder row with
# NA engine and mode, so downstream row-binds still keep one row per model.
get_engines <- function(x) {
  engines <- try(parsnip::show_engines(x), silent = TRUE)
  if (!inherits(engines, "try-error")) {
    engines$model <- x
    return(engines)
  }
  tibble::tibble(engine = NA_character_, mode = NA_character_, model = x)
}
# Collect the tunable parameter names for one model/engine/mode combination,
# then apply hand-maintained exclusions for parameters the add-in should not
# expose (e.g. data-dependent ones such as mtry for certain engines).
get_tunable_param <- function(mode, package, model, engine) {
  # build e.g. parsnip::rand_forest(), then attach the engine and mode
  cl <- rlang::call2(.ns = package, .fn = model)
  obj <- rlang::eval_tidy(cl)
  obj <- parsnip::set_engine(obj, engine)
  obj <- parsnip::set_mode(obj, mode)
  res <-
    tune::tunable(obj) %>%
    dplyr::select(parameter = name)
  # ------------------------------------------------------------------------------
  # Hand-edit the parameter lists for specific models/engines
  if (model == "rand_forest") {
    res <- res[res$parameter != "trees",]
  }
  if (model == "mars") {
    res <- res[res$parameter == "prod_degree",]
  }
  if (engine %in% c("rule_fit", "xgboost")) {
    res <- res[res$parameter != "mtry",]
  }
  if (model %in% c("bag_tree", "bag_mars")) {
    res <- res[0,]
  }
  if (engine %in% c("rpart")) {
    # BUG FIX: parsnip tunable parameter ids use underscores ("tree_depth");
    # the previous "tree-depth" literal never matched, making this exclusion
    # a silent no-op for rpart models.
    res <- res[res$parameter != "tree_depth",]
  }
  res
}
# ------------------------------------------------------------------------------
# Assemble the model database: one row per model/engine/mode, with engines
# the add-in cannot support removed and a human-readable label added.
model_db <-
  purrr::map(packages, print_methods) %>%
  purrr::list_rbind() %>%
  dplyr::filter(engine != "liquidSVM") %>%
  dplyr::filter(model != "surv_reg") %>%
  dplyr::filter(engine != "spark") %>%
  dplyr::filter(!is.na(engine)) %>%
  dplyr::mutate(label = paste0(model, " (", engine, ")")) %>%
  dplyr::arrange(model, engine, mode)
# Flag model/engine pairs that support only a single mode, so the add-in can
# skip the mode selector for them.
num_modes <-
  model_db %>%
  dplyr::group_by(package, model, engine) %>%
  dplyr::count() %>%
  dplyr::ungroup() %>%
  dplyr::mutate(single_mode = n == 1) %>%
  dplyr::select(package, model, engine, single_mode)
# Attach the single-mode flag plus the per-combination tunable parameters,
# then persist the table as package data.
model_db <-
  dplyr::left_join(model_db, num_modes, by = c("package", "model", "engine")) %>%
  dplyr::mutate(parameters = purrr::pmap(list(mode, package, model, engine), get_tunable_param))
usethis::use_data(model_db, overwrite = TRUE)
|
60c42e17dd03232dd909f81fb441441484c4a849
|
706bab2a3d9a6b6372820609f7f2154d0df38fae
|
/R/localize.R
|
a8a8ca8638eb49e9bd752f416901e9c64bc0ea85
|
[] |
no_license
|
vjcitn/AnVIL
|
3c2ea79e6dcd72d7e2fc37dcc2ec94a5d11d6171
|
1e2790a22b5128209297b5a5ff8dbe032f279f48
|
refs/heads/master
| 2021-07-02T17:27:27.074280
| 2021-06-29T10:40:16
| 2021-06-29T10:40:16
| 172,901,323
| 0
| 0
| null | 2019-02-27T11:16:39
| 2019-02-27T11:16:39
| null |
UTF-8
|
R
| false
| false
| 6,947
|
r
|
localize.R
|
#' @rdname localize
#'
#' @title Copy packages, folders, or files to or from google buckets.
#'
#' @description `localize()`: recursively synchronizes files from a
#'     Google storage bucket (`source`) to the local file system
#'     (`destination`). The copy is recursive over `source`, and files
#'     present in `destination` but absent from `source` are never
#'     deleted.
#'
#' @param source `character(1)`, a google storage bucket or local file
#'     system directory location.
#'
#' @param destination `character(1)`, a google storage bucket or local
#'     file system directory location.
#'
#' @param dry `logical(1)`, when `TRUE` (default), report the
#'     consequences of the operation without actually performing it.
#'
#' @return `localize()`: exit status of function `gsutil_rsync()`.
#'
#' @export
localize <- function(source, destination, dry = TRUE) {
    ## validate arguments before doing any work
    stopifnot(
        .gsutil_is_uri(source),
        .is_scalar_character(destination), dir.exists(destination),
        .is_scalar_logical(dry)
    )
    if (dry) {
        warning("use 'dry = FALSE' to localize source / destination")
    }
    ## FIXME: return destination paths of copied files
    gsutil_rsync(source, destination, delete = FALSE, recursive = TRUE, dry = dry)
}
#' @rdname localize
#'
#' @description `delocalize()`: synchronize files from a local file
#'     system (`source`) to a Google storage bucket (`destination`).
#'     The copy is recursive over `source`, and files present in
#'     `destination` but absent from `source` are never deleted.
#'
#' @param unlink `logical(1)` remove (unlink) the file or directory
#'     in `source`. Default: `FALSE`.
#'
#' @return `delocalize()`: exit status of function `gsutil_rsync()`
#'
#' @export
delocalize <- function(source, destination, unlink = FALSE, dry = TRUE) {
    ## validate arguments before doing any work
    stopifnot(
        .is_scalar_character(source), file.exists(source),
        .gsutil_is_uri(destination),
        .is_scalar_logical(unlink),
        .is_scalar_logical(dry)
    )
    if (dry) {
        warning("use 'dry = FALSE' to delocalize source / destination")
    }
    ## sync to the bucket, then optionally remove the local copy (only when
    ## the sync was actually performed, i.e. not a dry run)
    result <- gsutil_rsync(source, destination, delete = FALSE, recursive = TRUE, dry = dry)
    if (!dry && unlink) {
        unlink(source, recursive = TRUE)
    }
    result
}
#' @rdname localize
#'
#' @description `install()`: install R / Bioconductor packages, using
#'     fast pre-built 'binary' libraries if available.
#'
#' @param pkgs `character()` packages to install from binary repository.
#'
#' @param lib `character(1)` library path (directory) in which to
#'     install `pkgs`; defaults to `.libPaths()[1]`.
#'
#' @param ... additional arguments, passed to `install.packages()`.
#'
#' @param version `character(1)` or `package_version` Bioconductor
#'     version, e.g., "3.12".
#'
#' @param binary_base_url `character(1)` host and base path for binary
#'     package 'CRAN-style' repository; not usually required by the
#'     end-user.
#'
#' @param verbose `logical(1)` report on package installation
#'     progress?
#'
#' @return `install()`: return value of `install.packages()`.
#'
#' @examples
#' \dontrun{install(c('BiocParallel', 'BiocGenerics'))}
#'
#' @importFrom utils contrib.url install.packages
#'
#' @export
install <- function(pkgs, lib = .libPaths()[1], ...,
                    version = BiocManager::version(),
                    binary_base_url = "https://storage.googleapis.com/bioconductor_docker/packages",
                    verbose = getOption("verbose")) {
    ## validate arguments before touching the library
    stopifnot(
        .is_character(pkgs),
        .is_scalar_character(lib), dir.exists(lib),
        .is_scalar_character(version) || is.package_version(version),
        .is_scalar_character(binary_base_url),
        .is_scalar_logical(verbose)
    )
    ## resolve repositories (binary repo first, when one exists) and install
    repos <- repositories(version, binary_base_url)
    install.packages(pkgs, repos = repos, lib = lib, ..., verbose = verbose)
}
#' @rdname localize
#'
#' @description `repositories()`: repositories to search for binary
#'     (if available), Bioconductor, and CRAN packages.
#'
#' @details `repositories()` prepends an additional repository URI to
#'     `BiocManager::repositories()`. The URI is formed by
#'     concatenating `binary_base_url`, the environment variables
#'     `TERRA_R_PLATFORM` and the 'major' and 'minor' components of
#'     `TERRA_R_PLATFORM_BINARY_VERSION` and
#'     `BiocManager::version()`. The URI is only prepended if a
#'     CRAN-style repository exists at that location, with binary
#'     package tar.gz content described by `src/contrib/PACKAGES.gz`.
#'
#' @return `repositories()`: character() of binary (if available),
#'     Bioconductor, and CRAN repositories.
#'
#' @examples
#' repositories()
#'
#' @export
repositories <-
    function(
        version = BiocManager::version(),
        binary_base_url = "https://storage.googleapis.com/bioconductor_docker/packages")
{
    stopifnot(
        .is_scalar_character(version) || is.package_version(version),
        .is_scalar_character(binary_base_url)
    )
    repositories <- BiocManager::repositories()
    ## are we running on a docker container? (env var set by the image)
    bioconductor_docker_version <- Sys.getenv("BIOCONDUCTOR_DOCKER_VERSION")
    if (!nzchar(bioconductor_docker_version))
        return(repositories)
    ## is the docker container configured correctly? the image's
    ## major.minor version must match the requested Bioconductor version
    bioconductor_version <- package_version(version)
    docker_version <- package_version(bioconductor_docker_version)
    test <-
        (bioconductor_version$major == docker_version$major) &
        (bioconductor_version$minor == docker_version$minor)
    if (!test) {
        return(repositories)
    }
    ## does the binary repository exist? probe for src/contrib/PACKAGES.gz;
    ## the connection is closed on both the success and the failure path
    binary_repos0 <- paste0(binary_base_url, "/", bioconductor_version, "/bioc")
    packages <- paste0(contrib.url(binary_repos0), "/PACKAGES.gz")
    url <- url(packages)
    binary_repos <- tryCatch({
        suppressWarnings(open(url, "rb"))
        close(url)
        binary_repos0
    }, error = function(...) {
        close(url)
        NULL
    })
    ## c() drops a NULL binary_repos, so the prefix only appears when found
    c(BiocBinaries = binary_repos, repositories)
}
#' @rdname localize
#'
#' @description `add_libpaths()`: Add local library paths to
#' `.libPaths()`.
#'
#' @param paths `character()`: vector of directories to add to
#' `.libPaths()`. Paths that do not exist will be created.
#'
#' @return `add_libpaths()`: updated .libPaths(), invisibly.
#'
#' @examples
#' \dontrun{add_libpaths("/tmp/host-site-library")}
#'
#' @export
## Prepend local library directories to .libPaths(), creating any
## directories that do not yet exist.
add_libpaths <-
    function(paths)
{
    stopifnot(is.character(paths))
    ## partition paths into existing and to-be-created directories
    missing <- !vapply(paths, dir.exists, logical(1))
    created <- vapply(paths[missing], dir.create, logical(1))
    if (!all(created)) {
        failed <- paths[missing][!created]
        stop(
            "'add_libpaths()' failed to create directories:\n",
            " '", paste(failed, collapse="'\n '"), "'"
        )
    }
    ## new paths go first so they take precedence during library lookup
    .libPaths(c(paths, .libPaths()))
}
|
217f76bd24f5b222c4617c794ac646692ea924ac
|
421ae58289f144dfe3d8b6797ab5a3d76c491ae1
|
/script.R
|
301cb29d365d419caa3cab141cbc7a6ea0bc25c6
|
[] |
no_license
|
Jkang-alien/Predictive_Model
|
4970d25cf7a5d9b234e886bc5ebdf9b5340d2e58
|
22457ddc2049d4e1b34c7598a13a848da6eea1ec
|
refs/heads/master
| 2020-04-07T10:52:56.953500
| 2018-12-18T03:52:04
| 2018-12-18T03:52:04
| 158,303,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,518
|
r
|
script.R
|
## Exploratory analysis of the Ames housing data: clean column names,
## log-transform the sale price, then set up a train/test split and
## 10-fold cross-validation with rsample.
library(AmesHousing)
data("ames_raw")
library(purrr)
map(ames_raw, names)
## replace spaces in column names with underscores so they can be used
## in formulas without backticks
names(ames_raw) <- gsub(' ', '_', names(ames_raw))
map(ames_raw, class)
library(dplyr)
ames_raw <- ames_raw %>%
  mutate(log_sale_price = log10(SalePrice))
summary(lm(log_sale_price ~ Lot_Area, ames_raw))
library(tidyverse)
library(rsample)
################################################################
############ DATA SPLITTING ####################################
## stratified split keeps the outcome distribution similar in the
## training and test sets
data_split <- initial_split(ames_raw, strata = 'log_sale_price')
dim(training(data_split))
dim(testing(data_split))
summary(lm(log_sale_price ~ Lot_Area, training(data_split)))
################################################################
#################### Model assessment ##########################
cv_splits <- vfold_cv(training(data_split))
library(yardstick)
## fit a linear model on the analysis (in-sample) portion of a split
lm_fit <- function(data_split, ...)
  lm(..., data = analysis(data_split))
# A formula is also needed for each model:
form <- as.formula(
  log10(Sale_Price) ~ Longitude + Latitude
)
## compute RMSE and R^2 of a fitted model on the assessment (held-out)
## portion of a resampling split
model_perf <- function(data_split, mod_obj) {
  vars <- rsample::form_pred(mod_obj$terms)
  assess_dat <- assessment(data_split) %>%
    select(!!!vars, Sale_Price) %>%
    mutate(
      pred = predict(
        mod_obj,
        newdata = assessment(data_split)
      ),
      Sale_Price = log10(Sale_Price)
    )
  rmse <- assess_dat %>%
    rmse(truth = Sale_Price, estimate = pred)
  rsq <- assess_dat %>%
    rsq(truth = Sale_Price, estimate = pred)
  data.frame(rmse = rmse, rsq = rsq)
}
## NOTE(review): this fits each fold's model on the *assessment* set;
## fitting is usually done on analysis(x) -- confirm intent
map(cv_splits$splits, function(x){lm(log_sale_price ~ ., assessment(x))})
assessment(cv_splits$splits[[1]])
#####################################################################
##################### Caret #########################################
## Regression models on the car fuel-economy data: a tuned glmnet and
## MARS (earth) models, trained with caret + recipes.  Assumes the file
## "car_data.RData" is present in the working directory.
library(caret)
load("car_data.RData")
summary(car_data)
hist(car_data$mpg)
qplot(car_data$mpg, bins = 30)
qplot(car_data$model_year)
library(dplyr)
## hold out the most recent model year as the test set
test_set <- car_data %>%
  filter (model_year == 2018)
train_set <- car_data %>%
  filter (model_year < 2018)
library(recipes)
## base preprocessing: keep carline as an identifier, pool rare
## divisions, dummy-encode the remaining nominal columns, drop
## zero-variance predictors
basic_rec <- recipe(mpg ~ ., data = train_set) %>%
  update_role(carline, new_role = "car name") %>%
  step_other(division, threshold = 0.005) %>%
  step_dummy(all_nominal(), -carline) %>%
  step_zv(all_predictors())
## 5 mixing values x 20 penalties = 100 glmnet candidates
glmn_grid <- expand.grid(alpha = seq(0, 1, by = .25), lambda = 10^seq(-3, -1, length = 20))
nrow(glmn_grid)
glmn_grid
car_data$carline[1:10]
ctrl <- trainControl(
  method = 'cv',
  savePredictions = 'final',
  verboseIter = TRUE)
## glmnet needs centered/scaled predictors; add a polynomial term for
## engine displacement
glmn_rec <- basic_rec %>%
  step_poly(eng_displ) %>%
  step_center(all_predictors()) %>%
  step_scale(all_predictors())
set.seed(3544)
glmn_mod <- train(
  glmn_rec,
  data = train_set,
  method = "glmnet",
  trControl = ctrl,
  tuneGrid = glmn_grid
)
## tuning profile and observed-vs-predicted diagnostics
ggplot(glmn_mod) + scale_x_log10() + theme(legend.position = 'top')
ggplot(glmn_mod$pred, aes(x = obs, y = pred)) +
  geom_abline(col = 'green', alpha = .5) +
  geom_point(alpha = 0.3) +
  geom_smooth(se = FALSE, col = 'red', lty = 2, lwd = 1, alpha = 0.5)
ggplot(glmn_mod$pred, aes(x = obs, y = pred)) +
  geom_abline() +
  geom_point()
reg_imp <- varImp(glmn_mod, scale = FALSE)
ggplot(reg_imp, top = 30) + xlab('' )
library(glmnet)
plot(glmn_mod$finalModel, xvar = 'lambda')
glmn_mod$finalModel
## manually prep/bake the recipe to inspect the processed predictors
rec_trained <- prep(glmn_rec, training = train_set, retain = TRUE)
baked_data <- bake(rec_trained, new_data = train_set, all_predictors())
######################################################
################# MARS ###############################
ctrl$verboseIter <- FALSE
mars_grid <- expand.grid(degree = 1:2, nprune = seq(2, 60, by = 2))
set.seed(3544)
mars_mod <- train(
  basic_rec,
  data = train_set,
  method = "earth",
  tuneGrid = mars_grid,
  trControl = ctrl
)
## evaluate the glmnet model on the held-out 2018 cars
test_set <- test_set %>%
  mutate(pred = predict(glmn_mod, test_set))
library(yardstick)
rmse(test_set, truth = mpg, estimate = pred)
ggplot(test_set, aes(x = mpg, y = pred)) +
  geom_abline() +
  geom_point() +
  geom_smooth()
## bagged MARS (50 bootstrap bags, GCV pruning)
set.seed(3544)
mars_gcv_bag <- train(
  basic_rec,
  data = train_set,
  method = "bagEarthGCV",
  tuneGrid = data.frame(degree = 1:2),
  trControl = ctrl,
  B = 50
)
mars_gcv_bag
## collect resampling results for model comparison
rs <- resamples (
  list(glmn = glmn_mod, MARS = mars_mod, bagged = mars_gcv_bag)
)
library(tidyposterior)
###################################################################
############# Classification ######################################
## Two-class metrics with yardstick/pROC, then CART, bagged CART and
## naive Bayes models on `okc_train` (assumed already loaded in the
## session -- it is not created in this script; confirm upstream).
library(yardstick)
library(dplyr)
two_class_example %>% head(4)
two_class_example %>% conf_mat(truth = truth, estimate = predicted)
two_class_example %>% accuracy(truth = truth, estimate = predicted)
library(pROC)
## ROC curve for the Class1 probabilities; levels are reversed so the
## event of interest is treated as the first level
roc_obj <- roc(
  response = two_class_example$truth,
  predictor = two_class_example$Class1,
  levels = rev(levels(two_class_example$truth))
)
auc(roc_obj)
plot(
  roc_obj,
  legacy.axes = TRUE,
  print.thres = c(.2, .5, .8),
  print.thres.pattern = "cut = %.2f (Spec = %.2f, Sens = %.2f)",
  print.thres.cex = .8
)
table(okc_train$Class)
library(caret)
## 10-fold CV with class probabilities; down-sampling handles the class
## imbalance visible in table(okc_train$Class)
ctrl <- trainControl(
  method = 'cv',
  classProbs = TRUE,
  summaryFunction = twoClassSummary,
  savePredictions = 'final',
  sampling = 'down'
)
set.seed(5515)
cart_mod <- train(
  x = okc_train[, names(okc_train) != "Class"],
  y = okc_train$Class,
  method = 'rpart2',
  metric = "ROC",
  tuneGrid = data.frame(maxdepth = 1:20),
  trControl = ctrl
)
cart_mod$finalModel
## plot an ROC curve from a caret hold-out predictions data frame
plot_roc <- function(x, ...) {
  roc_obj <- roc(
    response = x[["obs"]],
    predictor = x[["stem"]],
    levels = rev(levels(x$obs))
  )
  plot(roc_obj, ...)
}
plot_roc(cart_mod$pred)
cart_mod$pred
confusionMatrix(cart_mod)
car_imp <- varImp(cart_mod)
ggplot(car_imp, top = 7)
## bagged CART.  BUG FIX: the original passed `methods = 'treebag'`;
## train() has no `methods` argument, so the typo was swallowed by `...`
## and the default model was fit instead.
set.seed(5515)
cart_bag <- train(
  x = okc_train[, names(okc_train) != "Class"],
  y = okc_train$Class,
  method = 'treebag',
  metric = 'ROC',
  trControl = ctrl
)
cart_bag
confusionMatrix(cart_bag)
plot_roc(cart_mod$pred)
plot_roc(cart_bag$pred,
         col = 'darkred',
         add = TRUE)
bag_imp <- varImp(cart_bag, scale = FALSE)
ggplot(bag_imp, top = 30)
library(recipes)
## columns that are 0/1 numeric dummies must be turned back into
## factors for naive Bayes
is_dummy <- vapply(okc_train, function(x) length(unique(x)) == 2 & is.numeric(x), logical(1))
dummies <- names(is_dummy)[is_dummy]
no_dummies <- recipe(Class ~ ., data = okc_train) %>%
  step_bin2factor(!!! dummies) %>%
  step_zv(all_predictors())
smoothing_grid <- expand.grid(usekernel = TRUE, fL = 0, adjust = seq(0.5, 3.5, by = 0.5))
## naive Bayes.  BUG FIX: `methods = 'nb'` -> `method = 'nb'` (same typo
## as above) and `metric = 'roc'` -> `'ROC'`: caret metric names are
## case-sensitive and twoClassSummary produces "ROC".
set.seed(5515)
nb_mod <- train(
  no_dummies,
  data = okc_train,
  method = 'nb',
  metric = 'ROC',
  tuneGrid = smoothing_grid,
  trControl = ctrl
)
|
4524da6c48242b585eba93cb7ad25b47e82cbf7b
|
24140a55b535e2207ebc717590a961f737a31674
|
/FantasyFootball/Tests.R
|
e95efbbf9b8eea3f1af7f380a0d59d675544e156
|
[] |
no_license
|
im281/R
|
066410decf6a783947873049bd5f65df60312bb9
|
26ed130bdfd9abc5f3a84a9cc2fca1e5863aaa9a
|
refs/heads/master
| 2020-05-25T15:43:53.482411
| 2017-12-01T23:06:40
| 2017-12-01T23:06:40
| 69,046,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,255
|
r
|
Tests.R
|
## Training data assembly: read the weekly FanDuel fantasy-point tables
## for the 2015 season (weeks 1-17) and the first three weeks of 2016,
## stack them, and drop defense rows.
library(data.table)
source('FFfunctions.R')
source('FootballOptimizer.R')
#fanduel data http://rotoguru1.com/cgi-bin/fyday.pl?week=1&game=fd&scsv=1
#2015 stats
#Training#
#read the player FFP tables (one semicolon-separated file per week)####
w1 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week1.txt', sep = ';',stringsAsFactors = FALSE)
w2 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week2.txt', sep = ';',stringsAsFactors = FALSE)
w3 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week3.txt', sep = ';',stringsAsFactors = FALSE)
w4 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week4.txt', sep = ';',stringsAsFactors = FALSE)
w5 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week5.txt', sep = ';',stringsAsFactors = FALSE)
w6 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week6.txt', sep = ';',stringsAsFactors = FALSE)
w7 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week7.txt', sep = ';',stringsAsFactors = FALSE)
w8 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week8.txt', sep = ';',stringsAsFactors = FALSE)
w9 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week9.txt', sep = ';',stringsAsFactors = FALSE)
w10 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week10.txt', sep = ';',stringsAsFactors = FALSE)
w11 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week11.txt', sep = ';',stringsAsFactors = FALSE)
w12 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week12.txt', sep = ';',stringsAsFactors = FALSE)
w13 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week13.txt', sep = ';',stringsAsFactors = FALSE)
w14 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week14.txt', sep = ';',stringsAsFactors = FALSE)
w15 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week15.txt', sep = ';',stringsAsFactors = FALSE)
w16 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week16.txt', sep = ';',stringsAsFactors = FALSE)
w17 <- read.csv('C:/Users/Owner/Source/Repos/R/FantasyFootball/2015/Week17.txt', sep = ';',stringsAsFactors = FALSE)
###################################################################################################################
#2016 stats (relative paths: current working directory)############################################################
w18 <- read.csv('Week1.txt', sep = ';',stringsAsFactors = FALSE)
w19 <- read.csv('Week2.txt', sep = ';',stringsAsFactors = FALSE)
w20 <- read.csv('Week3.txt', sep = ';',stringsAsFactors = FALSE)
###################################################################################################################
#append the tables on the row
p <- data.table(rbind(w1,w2,w3,w4,w5,w6,w7,w8,w9,w10,w11,w12,w13,w14,w15,w16,w17,w18,w19,w20))
#subset the player tables so it doesn't contain defensive data
#(%like% binds tighter than ==, so this keeps rows NOT matching "Defense")
p <- p[which(p$Name %like% "Defense" == FALSE)]
#add offensive stats per player to the table for each player
#read the multidimensional defense tables
#NFL stats page http://www.nfl.com/stats/team?seasonId=2015&seasonType=REG&Submit=Go
#Merging tables: build one defense table per season, tag each with its
#season year, and stack the two seasons.
#Each week get the latest defensive stats and retrain the model
#2015 run pass and total defense
runD15 <- data.table(read.csv('2015RunD.txt',sep = "\t"))
passD15 <- data.table(read.csv('2015PassD.txt',sep = "\t"))
totalD15 <- data.table(read.csv('2015TotalD.txt',sep ="\t"))
#merge 2015 stats to one table and tag with the season year
runandpass2015 <- merge(runD15,passD15,by="Team")
runandpassandtotal2015 <- merge(runandpass2015 ,totalD15,by="Team")
runandpassandtotal2015[,"Year"] <- 2015
#2016 run pass and total defense
runD16 <- data.table(read.csv('2016RunD.txt',sep = "\t"))
passD16 <- data.table(read.csv('2016PassD.txt',sep = "\t"))
totalD16 <- data.table(read.csv('2016TotalD.txt',sep ="\t"))
#merge 2016 stats to one table and tag with the season year
runandpass2016 <- merge(runD16,passD16,by="Team")
runandpassandtotal2016 <- merge(runandpass2016 ,totalD16,by="Team")
#BUG FIX: the original assigned Year 2016 to the *2015* table (and did so
#before the 2016 table even existed), overwriting the 2015 tag and
#leaving the 2016 rows with Year = NA after the fill = TRUE rbind below.
runandpassandtotal2016[,"Year"] <- 2016
#row bind the two seasons
finalD <- rbind(runandpassandtotal2015,runandpassandtotal2016,fill=TRUE)
d <- finalD
#####################################################################################################################
#add team ID column names (to cbind using column IDs)
p[,"TeamID"] <- 0
d[,"TeamID"] <- 0
## AssignTeamIDs / AssignTeamIDsDefense come from FFfunctions.R
players <- AssignTeamIDs(p)
defenses <- AssignTeamIDsDefense(d)
# setkey(players,'TeamID')
# setkey(defenses,'TeamID')
#join the tables on Team ID. Player and the opposing team
matrix = merge(players,defenses, by='TeamID',allow.cartesian = TRUE)
write.csv(matrix,'ffd7.csv')
## NOTE(review): `myexp` is not defined until further below; this call
## will fail if the script is run top-to-bottom -- confirm intent
UploadToMLStudio(myexp,'ffd7.csv')
#End Training
#Experiment: build the scoring input from a FanDuel player list#######################################################
path <- 'C:/Users/Owner/Source/Repos/R/FantasyFootball/Fanduel/FanDuel-NFL-2016-09-29-16456-players-W4-Thurs-Sun-list.csv'
t <- data.table(read.csv(path,stringsAsFactors = FALSE))
# remove injured players
t <- t[which(t$Injury.Indicator == "")]
idData<- data.table(read.csv('RotoguruPlayerIds.csv',stringsAsFactors = FALSE))
input <- ConvertFanduelCSV(t,idData)
input[,"TeamID"] <- 0
experiment <- AssignTeamIDs(input)
#setkey(experiment,'TeamID')
myexp <- merge(experiment,defenses, by='TeamID',allow.cartesian = TRUE)
#rename points and salary columns to match the training schema
colnames(myexp)[colnames(myexp)=="FD.Points"] <- "FD.points"
colnames(myexp)[colnames(myexp)=="FD.Salary"] <- "FD.salary"
write.csv(myexp,'Week4_Thurs_Sun_Exp.csv')
UploadToMLStudio(myexp,'Week4_Thurs_Sun_Exp.csv')
#####################################################################################################################
#Optimize the lineup from the scored model output#################
#csvFile < 'THE FILE PATH TO CSV FILE WITH EXTENSION'
csvFile <- 'W4_Thurs_Mon_FullModel_P.csv'
data <- data.table(read.csv(csvFile))
d <- OptimizeFBLineup(data)
##TESTS: scratch exploration of correlations and a linear model#####################################################
po <- data.table(read.csv('2016PassO.txt', sep = "\t"))
View(po)
#only numeric columns
dt <- data[sapply(data,is.numeric)]
cor(dt)
#regression
# Multiple Linear Regression Example
fit <- lm(data$Scored.Labels ~ data$Rk.y + data$Yds.x + data$FD.salary +
            data$TD.y + data$TotPts.y + data$TD.x + data$X3rd.Pct + data$Att.x +
            data$Yds.G.x,data=data)
summary(fit) # show result
# diagnostic plots
#layout(matrix(c(1,2,3,4),2,2)) # optional 4 graphs/page
plot(fit)
# Other useful functions
coefficients(fit) # model coefficients
confint(fit, level=0.95) # CIs for model parameters
fitted(fit) # predicted values
residuals(fit) # residuals
anova(fit) # anova table
vcov(fit) # covariance matrix for model parameters
influence(fit) # regression diagnostics
#example###############
y <- c(1,2,3,4,5)
x <-c(1,2,3,4,5)
fit <- lm(y ~ x)
summary(fit) # show results
######################################################################################################################
|
3e653249a461ac054325bd30215e31252b7b9634
|
13e8faf5de82d75d115dcbe2863719798523f160
|
/workout02-jacqueline-wood/workout02-jacqueline-wood.R
|
6698153eac61dbd49c0fe15390cfb5c13a709089
|
[] |
no_license
|
stat133-sp19/hw-stat133-jacquiwood1
|
5c8cee72388b35b557459e41c35d844ba91e7066
|
2243a55762227c4cd2470c5d7b73a9976d6d2d67
|
refs/heads/master
| 2020-04-28T19:10:58.065360
| 2019-05-03T05:01:58
| 2019-05-03T05:01:58
| 175,503,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,351
|
r
|
workout02-jacqueline-wood.R
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(reshape2)
# Define UI for application that draws a histogram
## UI: three rows of input sliders controlling the savings simulation,
## a facet toggle, the timeline plot, and a printed balance table.
ui <- fluidPage(
   
   # Application title
   titlePanel("$avings"),
   
   fluidRow(
     column(4,
            # Slider input for the initial deposit (dollars)
            sliderInput("initial",
                        "Initial Amount",
                        min = 1,
                        max = 100000,
                        value = 1000,
                        step = 20000,
                        pre = "$",
                        sep = ",")),
     column(4,
            # Slider input for the annual return rate (percent)
            sliderInput("return",
                        "Return Rate (in %)",
                        min = 0,
                        max = 20,
                        value = 5,
                        step = 2)),
     column(4,
            # Slider input for the investment horizon (years)
            sliderInput("years",
                        "Years",
                        min = 0,
                        max = 50,
                        value = 10,
                        step = 5))
   ),
   
   fluidRow(
     column(4,
            # Slider input for the annual contribution (dollars)
            sliderInput("contrib",
                        "Annual Contribution",
                        min = 0,
                        max = 50000,
                        value = 2000,
                        step = 10000,
                        pre = "$",
                        sep = ",")),
     column(4,
            # Slider input for the contribution growth rate (percent)
            sliderInput("growth",
                        "Growth Rate (in %)",
                        min = 0,
                        max = 20,
                        value = 2,
                        step = 2)),
     column(4,
            # Toggle: one combined plot (No) or one panel per modality (Yes)
            selectInput("facet",
                        "Facet",
                        choices = list("No" = 1, "Yes" = 2),
                        selected = 1))
   ),
   
   h4("Timelines"),
   # Show timelines
   plotOutput("timelines"),
   
   h4("Balances"),
   # Table of values
   verbatimTextOutput("balances")
)
# Define server logic required to draw a histogram
## Server: computes the three investment modalities reactively and
## renders the timeline plot and the balance table.
server <- function(input, output) {
   
   modalities <- reactive({
     ## Future value of a single initial deposit after `years` years.
     future_value <- function(amount, rate, years) {
       amount * ((1 + rate)^years)
     }
     ## Future value of a fixed annual contribution.  BUG FIX: the
     ## closed form is 0/0 when rate == 0 (the return slider allows 0);
     ## the limit is contrib * years.
     annuity <- function(contrib, rate, years) {
       if (rate == 0) return(contrib * years)
       contrib * ((((1 + rate)^years) - 1) / rate)
     }
     ## Future value of an annually growing contribution.  BUG FIX: the
     ## closed form divides by zero when rate == growth (both sliders
     ## step by 2, so equal values are reachable); the limit is
     ## contrib * years * (1 + rate)^(years - 1).
     growing_annuity <- function(contrib, rate, growth, years) {
       if (rate == growth) return(contrib * years * (1 + rate)^(years - 1))
       contrib * ((((1 + rate)^years) - ((1 + growth)^years)) / (rate - growth))
     }
     
     yrs <- 0:input$years
     r <- input$return / 100
     g <- input$growth / 100
     ## all three helpers are vectorized over `years`, so no element-wise
     ## loop (and no growing vectors) is needed
     fv <- future_value(input$initial, r, yrs)
     data.frame(
       year = yrs,
       no_contrib = fv,
       fixed_contrib = fv + annuity(input$contrib, r, yrs),
       growing_contrib = fv + growing_annuity(input$contrib, r, g, yrs)
     )
   })
   
   output$timelines <- renderPlot({
     ## melt() already names the long-format columns 'variable'/'value'
     melted <- melt(modalities(), id.vars = 'year')
     if (input$facet == 1) {
       ggplot(melted, aes(year, value, col = variable)) +
         geom_point() +
         geom_line() +
         ggtitle("Three modes of investing")
     } else {
       ggplot(melted, aes(year, value, col = variable)) +
         facet_grid(. ~ variable) +
         geom_area(aes(fill = variable), alpha = 0.4) +
         geom_point() +
         geom_line() +
         ggtitle("Three modes of investing")
     }
   })
   
   output$balances <- renderPrint(modalities())
}
# Run the application
shinyApp(ui = ui, server = server)
|
32e4641fa93ac1a8d68f2343eebd13b0dad6520c
|
e573bc7fd968068a52a5144a3854d184bbe4cda8
|
/Recommended/survival/R/print.survreg.R
|
7b87054e799c5676eec77cff376373c325bfb6eb
|
[] |
no_license
|
lukaszdaniel/ivory
|
ef2a0f5fe2bc87952bf4471aa79f1bca193d56f9
|
0a50f94ce645c17cb1caa6aa1ecdd493e9195ca0
|
refs/heads/master
| 2021-11-18T17:15:11.773836
| 2021-10-13T21:07:24
| 2021-10-13T21:07:24
| 32,650,353
| 5
| 1
| null | 2018-03-26T14:59:37
| 2015-03-21T21:18:11
|
R
|
UTF-8
|
R
| false
| false
| 1,878
|
r
|
print.survreg.R
|
## S3 print method for 'survreg' objects (parametric survival models).
## Prints the call, coefficients, scale parameter(s), log-likelihoods,
## the likelihood-ratio chi-square test, and the number of observations.
## All user-visible strings go through gettext for translation.
print.survreg <- function(x, ...)
{
    ## echo the original call, when recorded
    if(!is.null(cl <- x$call)) {
	cat(gettext("Call:", domain = "R-survival"), "\n", sep = "")
	dput(cl)
	}
    ## a non-NULL `fail` component means the fit did not converge
    if (!is.null(x$fail)) {
	cat(gettext(" Survreg failed. ", domain = "R-survival"), x$fail, "\n", sep = "")
	return(invisible(x))
	}
    coef <- x$coef
    ## flag coefficients dropped due to singular design columns
    if(any(nas <- is.na(coef))) {
	if(is.null(names(coef))) names(coef) <- paste("b", 1:length(coef), sep = "")
	cat("\n", sprintf(ngettext(sum(nas), "Coefficients: (%d not defined because of singularity)", "Coefficients: (%d not defined because of singularities)", domain = "R-survival"), sum(nas)), "\n", sep = "")
	}
    else cat("\n", gettext("Coefficients:", domain = "R-survival"), "\n", sep = "")
    print(coef, ...)
    ## when the variance matrix has no extra row, scale was held fixed
    if (nrow(x$var)==length(coef)) 
	    cat("\n", gettextf("Scale fixed at %s",format(x$scale), domain = "R-survival"), "\n", sep = "")
    else if (length(x$scale)==1) cat("\n", gettextf("Scale= %s", format(x$scale), domain = "R-survival"), "\n", sep = "")
    else {
	cat("\n", gettext("Scale:", domain = "R-survival"), "\n", sep = "")
	print(x$scale, ...)
	}
    pdig <- max(1, getOption("digits")-4)  # default is too high IMO
    nobs <- length(x$linear)
    ## likelihood-ratio test against the intercept-only model
    chi <- 2*diff(x$loglik)
    df  <- sum(x$df) - x$idf   # The sum is for penalized models
    cat("\n", gettextf("Loglik(model)= %s   Loglik(intercept only)= %s", format(round(x$loglik[2],1)), format(round(x$loglik[1],1)), domain = "R-survival"), sep = "")
	
    if (df > 0)
	    cat("\n\t", gettextf("Chisq= %s on %d degrees of freedom, p= %s", format(round(chi,2)), round(df,1), format.pval(pchisq(chi, df, lower.tail=FALSE), digits=pdig), domain = "R-survival"), "\n", sep = "")
    else cat("\n")
    ## report observations dropped by the na.action, if any
    omit <- x$na.action
    if (length(omit))
	cat("n=", nobs, " (", naprint(omit), ")\n", sep="")
    else cat("n=", nobs, "\n")
    invisible(x)
    }
|
e2148b50ee44b81d572d40681aef58fae127426e
|
3de36a93bafc5f58aaaeb316d2d7bf7c774e2464
|
/R/ordiArrowTextXY.R
|
08c43935719d01d1f69ca7ee085de657d9a38f5f
|
[] |
no_license
|
vanderleidebastiani/vegan
|
fc94bdc355c0520c383942bdbfb8fd34bd7b4438
|
dd2c622d0d8c7c6533cfd60c1207a819d688fd1f
|
refs/heads/master
| 2021-01-14T08:27:15.372938
| 2013-12-17T18:19:10
| 2013-12-17T18:19:10
| 15,258,339
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 968
|
r
|
ordiArrowTextXY.R
|
### Location of the text at the point of the arrow. 'vect' are the
### coordinates of the arrow heads, and 'labels' are the text used to
### label these heads, '...' passes arguments (such as 'cex') to
### strwidth() and strheight().  Requires an open graphics device with
### an active plot (strwidth/strheight use the current user coordinates).
`ordiArrowTextXY` <-
    function (vect, labels, ...)
{
    ## bounding-box dimensions of each label in user coordinates
    w <- strwidth(labels, ...)
    h <- strheight(labels, ...)
    ## slope of arrows
    b <- vect[,2]/vect[,1]
    ## offset based on string dimensions
    off <- cbind(sign(vect[,1]) * (w/2 + h/4), 0.75 * h * sign(vect[,2]))
    ## move the centre of the string to the continuation of the arrow.
    ## BUG FIX: use seq_len() instead of 1:nrow(vect) so zero-row input
    ## returns an empty matrix rather than failing on 1:0 indexing.
    for(i in seq_len(nrow(vect))) {
        move <- off[i,2] / b[i]
        ## arrow points to the top/bottom of the text box
        if (is.finite(move) && abs(move) <= abs(off[i, 1]))
            off[i, 1] <- move
        else {
            ## arrow points to a side of the text box
            move <- b[i] * off[i,1]
            off[i, 2] <- move
        }
    }
    off + vect
}
|
5a824045b7e65f7b153ee3e0c66c248a8b90e663
|
93d1fcc7758e5e99927be0529fb9d681db71e70c
|
/man/ma_r_ad.int_rbOrig.Rd
|
c143ba7d798ba6f580316c01b57def7f9f043ae5
|
[] |
no_license
|
psychmeta/psychmeta
|
ef4319169102b43fd87caacd9881014762939e33
|
b790fac3f2a4da43ee743d06de51b7005214e279
|
refs/heads/master
| 2023-08-17T20:42:48.778862
| 2023-08-14T01:22:19
| 2023-08-14T01:22:19
| 100,509,679
| 37
| 15
| null | 2023-08-14T01:06:53
| 2017-08-16T16:23:28
|
R
|
UTF-8
|
R
| false
| true
| 1,217
|
rd
|
ma_r_ad.int_rbOrig.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ma_r_ad_rb_orig.R
\name{ma_r_ad.int_rbOrig}
\alias{ma_r_ad.int_rbOrig}
\title{Interactive artifact-distribution meta-analysis correcting for Case II direct range restriction and measurement error}
\usage{
ma_r_ad.int_rbOrig(x)
}
\arguments{
\item{x}{List of bare-bones meta-analytic data, artifact-distribution objects for X and Y, and other meta-analysis options.}
}
\value{
A meta-analysis class object containing all results.
}
\description{
Interactive artifact-distribution meta-analysis correcting for Case II direct range restriction and measurement error
}
\references{
Schmidt, F. L., & Hunter, J. E. (2015).
\emph{Methods of meta-analysis: Correcting error and bias in research findings (3rd ed.)}.
Sage. \doi{10.4135/9781483398105}. Chapter 4.
Law, K. S., Schmidt, F. L., & Hunter, J. E. (1994).
Nonlinearity of range corrections in meta-analysis: Test of an improved procedure.
\emph{Journal of Applied Psychology, 79}(3), 425.
Raju, N. S., & Burke, M. J. (1983). Two new procedures for studying validity generalization.
\emph{Journal of Applied Psychology, 68}(3), 382. \doi{10.1037/0021-9010.68.3.382}
}
\keyword{internal}
|
c88b3ed765546e87cdab643ea64290dfcfa369c7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/BSDA/examples/Homework.Rd.R
|
5c79144cb4409c5a83ad83b0e03406fc33efe2e4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 340
|
r
|
Homework.Rd.R
|
## Example script for the BSDA 'Homework' dataset: compare weekly
## homework hours between private and public high-school students.
library(BSDA)
### Name: Homework
### Title: Number of hours per week spent on homework for private and
###   public high school students
### Aliases: Homework
### Keywords: datasets
### ** Examples
## side-by-side boxplots of hours by school type
boxplot(time ~ school, data = Homework,
        ylab = "Hours per week spent on homework")
#
## Welch two-sample t-test of mean hours by school type
t.test(time ~ school, data = Homework)
|
0919d16c74e701c2bd396127430a6eae2f037672
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615831425-test.R
|
4317fd6f931940bcb7ba5393fd78fb4c283ed32a
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 985
|
r
|
1615831425-test.R
|
testlist <- list(doy = numeric(0), latitude = numeric(0), temp = c(1.11755167473006e+282, 4.71368177582468e+139, 5.65292823326516e-40, 1.53140141627925e+224, 1.50419290762429e+129, -4.04978652716019e-308, 2.86826423903782e+281, -9.75369706896322e-89, -4.15938612166902e-209, 2.49946050374498e+237, 1.24978552383655e-221, 5.89400746775842e+234, -2.96105566795792e+282, -1.76848426558946e+304, -1.62865892671009e+184, 3.08145811467279e-231, 3.17300784295522e-221, 7.68567995330369e+281, -30637194.0141005, 7.64798547302967e-304, 9.57731098981491e+281, 3.86590694248572e+255, 4.08029877039107e+281, 8.38170143000329e-12, -1.24778528604136e+175, 2.79475965897434e-261, 4.71354375265258e+139, 9.87545322999083e-231, 35754250477336788992, -7.16626527610671e-69, 4.17274798185696e-255, 1.7232716023617e+44, -1.8615864475762e+106, -1.29901941325862e-227, 8.49227152323366e-314, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
7eb9b0672710516cdfa3ef92c531841a764ea520
|
d4d2d370f8cb50e002a3489d2e2b9186651ef92f
|
/man/kolmogorov_smirnov_test.Rd
|
d905c2bc6d4811cca9e674ede0b4239cdd8d62b0
|
[] |
no_license
|
momeara/RosettaFeatures
|
2c45012b042a76b0176a0924f1cc60fe3ba06e8b
|
2700b0735071971bbd2af91a6b1e7454ceeaa2a6
|
refs/heads/master
| 2021-01-19T03:54:05.386349
| 2017-03-24T14:07:21
| 2017-03-24T14:07:21
| 47,008,643
| 1
| 3
| null | 2016-06-16T23:00:32
| 2015-11-28T03:28:34
|
R
|
UTF-8
|
R
| false
| true
| 486
|
rd
|
kolmogorov_smirnov_test.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/support-comparison_statistics.R
\name{kolmogorov_smirnov_test}
\alias{kolmogorov_smirnov_test}
\title{Prefer to use the anderson_darling_2_sample comparison
cf https://asaip.psu.edu/Articles/beware-the-kolmogorov-smirnov-test}
\usage{
kolmogorov_smirnov_test(a, b)
}
\description{
Prefer to use the anderson_darling_2_sample comparison
cf https://asaip.psu.edu/Articles/beware-the-kolmogorov-smirnov-test
}
|
b38a42d38bffbe46e43b04fe304914eaa83d591a
|
bced4e9f5c173c81572eb12c131b7e8fb3b13816
|
/R/summary.BsProb.R
|
54f06b438929ac52833ceca5a00eea7c46334b0c
|
[] |
no_license
|
cran/BsMD
|
6708ccdab9632b351066d06088a2be2bb612d8a8
|
19040212ddc6a08f8629fb1395084a0efea10424
|
refs/heads/master
| 2023-07-19T11:15:59.320221
| 2023-07-07T18:10:11
| 2023-07-07T18:10:11
| 17,678,187
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,356
|
r
|
summary.BsProb.R
|
## S3 summary method for 'BsProb' objects (Bayesian posterior
## probabilities for factor screening, Box & Meyer method).  Prints the
## calculation settings, the marginal factor probabilities and -- when a
## single gamma was used -- the top `nMod` model probabilities, or --
## when a range of gamma values was used -- the posterior probability
## for each gamma.  Returns the printed pieces invisibly as a list.
summary.BsProb <-
function (object, nMod = 10, digits = 3, ...) 
{
    nFac <- ncol(object$X) - object$blk
    cat("\n Calculations:\n")
    ## assemble the named vector of run settings; the labels depend on
    ## whether one gamma, two gammas, or a gamma range was supplied
    if (object$INDGAM == 0) {
        if (object$INDG2 == 0) {
            calc <- c(object$N, object$COLS, object$BLKS, object$MXFAC, 
                object$MXINT, object$P, object$GAMMA, object$mdcnt)
            names(calc) <- c("nRun", "nFac", "nBlk", "mFac", 
                "mInt", "p", "g", "totMod")
        }
        else {
            calc <- c(object$N, object$COLS, object$BLKS, object$MXFAC, 
                object$MXINT, object$P, object$GAMMA[1], object$GAMMA[2], 
                object$mdcnt)
            names(calc) <- c("nRun", "nFac", "nBlk", "mFac", 
                "mInt", "p", "g[main]", "g[int]", "totMod")
        }
    }
    else {
        calc <- c(object$N, object$COLS, object$BLKS, object$MXFAC, 
            object$MXINT, object$P, object$GAMMA[1], object$GAMMA[object$NGAM], 
            object$mdcnt)
        names(calc) <- c("nRun", "nFac", "nBlk", "mFac", "mInt", 
            "p", "g[1]", paste("g[", object$NGAM, "]", sep = ""), 
            "totMod")
    }
    out.list <- list(calc = calc)
    print(round(calc, digits = digits))
    ## marginal posterior probability of each factor being active
    prob <- data.frame(Factor = names(object$sprob), Code = rownames(object$prob), 
        Prob = round(object$sprob, digits), row.names = seq(length(object$sprob)))
    if (object$INDGAM == 0) {
        cat("\n Factor probabilities:\n")
        print(prob, digits = digits)
        ## top models, truncated to the requested nMod
        cat("\n Model probabilities:\n")
        ind <- seq(min(nMod, object$NTOP))
        Prob <- round(object$ptop, digits)
        NumFac <- object$nftop
        Sigma2 <- round(object$sigtop, digits)
        ## render each model's factor set as a comma-separated list
        Factors <- apply(object$jtop, 1, function(x) ifelse(all(x == 
            0), "none", paste(x[x != 0], collapse = ",")))
        dd <- data.frame(Prob, Sigma2, NumFac, Factors)[ind, 
            ]
        print(dd, digits = digits, right = FALSE)
        out.list[["probabilities"]] <- prob
        out.list[["models"]] <- dd
    }
    if (object$INDGAM == 1) {
        ## gamma-range run: one posterior probability column per gamma
        cat("\n Posterior probabilities for each gamma value:\n")
        print(dd <- round(rbind(gamma = object$GAMMA, object$prob), 
            digits = digits))
        out.list[["probabilities"]] <- dd
    }
    invisible(out.list)
}
|
b92896e400f15384fd93b72b2ba32140e659f7e4
|
be93098682095c32c706f55c1e424669d5ea4971
|
/man/scREhurdle-package.Rd
|
dcef8c41d1328f261947f539492be8a4d24c1087
|
[] |
no_license
|
mnsekula/scREhurdle
|
2aa7f77ce9ce5ab64311a05a7f8a5254615bfa21
|
1a99d834673f1585011f588e3001b409925c5071
|
refs/heads/master
| 2020-04-17T22:40:51.653391
| 2019-02-24T21:27:36
| 2019-02-24T21:27:36
| 167,004,331
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 525
|
rd
|
scREhurdle-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scREhurdle-package.R
\docType{package}
\name{scREhurdle-package}
\alias{scREhurdle-package}
\title{The 'scREhurdle' package.}
\description{
R package implementing the methods from "Detection of differentially expressed genes in discrete single-cell RNA sequencing
data using a hurdle model with correlated random effects".
}
\references{
Stan Development Team (2018). RStan: the R interface to Stan. R package version 2.18.2. http://mc-stan.org
}
|
dddff4306fcfdf80a9263c1e6fc6e6cb3f37ac02
|
631f70c82fb93562f0836ea4f17e0f18cf0b2b12
|
/man/schoenersD.Rd
|
0e13d9ef9182a0a67f8d028474cb800b4212918d
|
[] |
no_license
|
cjcampbell/isocat
|
3ec1b78d64b081e50479f020fbb6cb5466cecaa3
|
3c1a50cdfa250a657244d19b570845b61652d920
|
refs/heads/master
| 2022-03-08T18:08:56.885218
| 2022-03-02T21:05:54
| 2022-03-02T21:05:54
| 130,004,512
| 1
| 2
| null | 2020-07-02T16:19:21
| 2018-04-18T04:38:47
|
R
|
UTF-8
|
R
| false
| true
| 1,219
|
rd
|
schoenersD.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/schoenersD.R
\name{schoenersD}
\alias{schoenersD}
\title{Calculates Schoener's D-value between two RasterLayers.}
\usage{
schoenersD(rast1, rast2)
}
\arguments{
\item{rast1}{Input RasterLayer}
\item{rast2}{Input RasterLayer 2}
}
\description{
RasterLayers must have identical resolutions and extents. The function will
automatically normalize surfaces to sum to 1.
}
\details{
Calculates similarity value of two RasterLayers using Schoener's D-metric.
}
\examples{
# Generate example probability surfaces.
myiso <- raster::rasterFromXYZ(isoscape)
myiso_sd <- raster::rasterFromXYZ(isoscape_sd)
df <- data.frame(
ID = c(-100, -80, -50),
isotopeValue = c(-100, -80, -50),
SD_indv = rep(5, 3)
)
assignmentModels <- isotopeAssignmentModel(
ID = df$ID,
isotopeValue = df$isotopeValue,
SD_indv = df$SD_indv,
precip_raster = myiso,
precip_SD_raster = myiso_sd,
nClusters = FALSE
)
# Calculate Schoener's D-metric of spatial similarity between two of the
# example probability surfaces.
schoenersD(assignmentModels[[1]], assignmentModels[[2]])
## 0.969156
}
|
22d0659aec2e8af9dc6733ec76553f28c7a1badc
|
7527b44b1ad97f7b7481aa7d2677f308925fdda1
|
/R/submit.r
|
71ccbc3e1421ef36ba1e6914660ec7f91d30e1ac
|
[] |
no_license
|
turbaevsky/indicators
|
7c8c6d6497cab872068f90f9c0205aad55c99456
|
7e1fbca4e071384739bd9199d168f2bf0da3e9e1
|
refs/heads/master
| 2020-09-14T10:02:20.815776
| 2019-01-22T16:41:42
| 2019-01-22T16:41:42
| 67,811,823
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,505
|
r
|
submit.r
|
#####################################################################
# To show the units numbers did not send (submit) data in time
#####################################################################
#place <- readRDS('DBCopy/PI_Place.rds')
#placeAttributes <- readRDS('DBCopy/PI_PlaceAttribute.rds')
#submit <- readRDS('DBCopy/PI_DataSubmittal.rds')
#elements <- readRDS('DBCopy/PI_IndValues.rds')
#elements <- data #<<<<<<<<<<<<<<<<<<<< if needed
#relation <- readRDS('DBCopy/PI_PlaceRelationship.rds')
#Getting active units list
#activeStation <- merge(place,placeAttributes,by='LocId')
#activeStation <- subset(activeStation,activeStation$PlaceTypeId %in% c(19,22) #19 for stations, 22 for units
# & activeStation$AttributeTypeId == 7 & activeStation$EndDate >= '9999-01-01'
# & IsDeleted == 0, LocId)
#activeStation <- unlist(activeStation)
#Remove some extra units and fuel reprocessing factories
#activeStation <- activeStation[which(!activeStation %in% c(10159,10111,10115,1871,1872,1360,1569,1330))]
#source('functions.r')
#aDate <- 201606
# Build a display label "<centre code> <LocId> <abbreviated name>" for unit `u`.
# Relies on the globally loaded `place` table (PI_Place.rds).
uName <- function(centre, u) {
  abbrev <- unlist(subset(place, place$LocId == u, select = AbbrevLocName))
  paste(centre, u, abbrev)
}
# Report (and optionally plot) how many active units did NOT submit their
# data for the reporting period `aDate` (format YYYYMM), broken down by
# regional centre (A/M/P/T).
#
# Relies on globals loaded elsewhere in this file: `submit`, `relation`,
# `place`, the helpers `activeStation()` and `uName()`, and shiny's
# `incProgress()` — so it must be called inside a withProgress() context.
#
# aDate: reporting period, e.g. 201606.
# plot:  if TRUE, draw a barplot of the per-centre counts.
submitProgress <- function(aDate, plot = TRUE) {
  centres <- c("A", "M", "P", "T")
  as <- activeStation(aDate, "u")
  uList <- c()                                    # LocIds of non-submitting units
  uByCentre <- setNames(rep(0, length(centres)), centres)
  for (u in as) {
    incProgress(1 / length(as))
    if (!(u %in% submit[submit$YrMn == aDate, 1])) {   # no submission row for this period
      uList <- c(uList, u)
      # Walk up the active parent relationship to find the unit's regional centre.
      centre <- unique(subset(relation, relation$LocId == u & relation$RelationId == 1
                              & relation$EndDate >= '9999-01-01', select = ParentLocId))
      # Column 5 of `place` holds the centre name; its 4th character is the
      # centre code letter (A/M/P/T) — TODO confirm against PI_Place schema.
      centre <- substr(as.character(place[place$LocId == unlist(centre), 5]), 4, 4)
      # Explicit length check (the old `if (length(centre))` with a scalar
      # `==` comparison would misbehave if `unique()` returned > 1 row),
      # plus a single table-driven lookup instead of four copy-pasted ifs.
      if (length(centre) == 1 && centre %in% centres) {
        uByCentre[centre] <- uByCentre[centre] + 1
        print(uName(paste0(centre, "C"), u))
      }
    }
  }
  print('Units did not submit data to the RCs (AC/MC/PC/TC):')
  print(uByCentre)
  if (plot) {
    barplot(uByCentre, names.arg = centres,
            main = paste('Num of units did not \nsubmit their data for', aDate))
  }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.