blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
213c5830c10bfd0306d3b0fe3f35f2d72a44d03b
|
80f7469eb388ae3ba95b11042dd00ff48d62e424
|
/man/trello_get_token.Rd
|
7caa7d01c4a20ad08f3343d5b15a4a0db7c983f7
|
[] |
no_license
|
vfulco/trelloR
|
b6bbb505093fb5b6259e3ebdf4982180bc7c6572
|
566e0a3ec1841cee25572452de98d18b98c6cdf9
|
refs/heads/master
| 2021-05-01T07:54:38.666171
| 2017-01-08T12:28:57
| 2017-01-08T12:28:57
| 79,654,801
| 0
| 0
| null | 2017-01-21T15:43:27
| 2017-01-21T15:43:27
| null |
UTF-8
|
R
| false
| true
| 1,346
|
rd
|
trello_get_token.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trello_get_token.R
\name{trello_get_token}
\alias{trello_get_token}
\title{Get A Secure Token}
\usage{
trello_get_token(key, secret, appname = "trello")
}
\arguments{
\item{key}{developer key}
\item{secret}{developer secret}
\item{appname}{optional app name, defaults to "trello"}
}
\description{
Authorize access to Trello API.
}
\details{
To access private data, a secure token is required. In order to create it, you will need your developer credentials ('key' and 'secret') - these can be obtained in \href{https://developers.trello.com/get-started/start-building#connect}{Trello developer guide} after login.
First time you create a token, you will be prompted to confirm the authorization in a browser (you only need to do this once). You will also be offered an option to store the authentication data in your working directory. Keep in mind you have to store your credentials in a secure, non-shared location.
\code{\link{trello_get_token}} calls authentication functions from \code{\link{httr}}.
}
\examples{
# Source credentials from non-shared location (important!)
\dontrun{
source("mykeys.R")
token = trello_get_token(key, secret)
}
}
\seealso{
\code{\link[httr]{oauth_app}}, \code{\link[httr]{oauth_endpoint}}, \code{\link[httr]{oauth1.0_token}}
}
|
8f0052542f1e535ec21279a67ea8f7fc6696bbd3
|
e0aac8b978f4fe440a62de240b1fcf2bad45f8c4
|
/create_shiny_data/01_get-ccbio-quants.R
|
dee55b01904b25d1123aaa635415152f6c2651f3
|
[] |
no_license
|
vanichols/shiny_ryeSalus
|
1f73daefa0646444b039319806a595ef5b1d4cb6
|
4371895a3afb981533cbaa029ed3eded1647ba45
|
refs/heads/master
| 2023-06-27T15:36:20.581873
| 2021-07-22T03:06:20
| 2021-07-22T03:06:20
| 298,075,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,407
|
r
|
01_get-ccbio-quants.R
|
# Gina, modified the map.R code 9/22/2020
# trying to get Iowa data to make a shiny
# updated 11/2, making it generalizable to state
library(tidyverse)
library(maps)
library(mapdata)
library(magrittr)
# create quantile database ------------------------------------------------
# find 20, 50, and 80% quantiles
# termination dates (day of year) to keep:
#--doy 92 = april 1
#--doy 106 = april 15
#--doy 122 = May 1
#--doy 136 = May 15
#--doy 153 = June 1
#--doy 167 = June 15
#--doy 183 = July 1
#--doy 197 = July 15
mytermdates <- c(92, 106, 122, 136, 153, 167, 183, 197)
# NOTE(review): the second positional argument of list.files() is `pattern`,
# so this lists files inside "regionalSim" whose names match the regex "raw"
# — it does NOT look in a "raw" subdirectory. Confirm this is intended.
rawdata <- list.files("regionalSim","raw")
# empty accumulator used as the default seed for funGetQuants()
myemptydf <- data.frame()
# Compute the 20/50/80% cover-crop biomass (CWAD) quantiles per site,
# planting date and termination DOY for one state's simulation files.
#
# Args:
#   fun.filelist    character vector of file names found under "regionalSim"
#   fun.state       two-letter state code; filters the file list and (via the
#                   file-name prefix) labels the output rows
#   fun.mytermdates termination dates (DOY) to keep
#   fun.quants      data frame the per-file results are appended to
#                   (normally the empty `myemptydf`)
#
# Returns: a data frame with columns lat, long, dop, DOY,
#          CWAD20/CWAD50/CWAD80 and state.
funGetQuants <- function(fun.filelist = rawdata,
                         fun.state = "IA",
                         fun.mytermdates = mytermdates,
                         fun.quants = myemptydf){

  # keep only this state's files (paste() around a single string was a no-op)
  tmp.filelist <- fun.filelist[grepl(fun.state, fun.filelist)]

  # seq_along() is safe when nothing matches; 1:length() would loop over c(1, 0)
  for (i in seq_along(tmp.filelist)) {

    # crude progress indicator, overwritten in place
    cat("\r",round(i/length(tmp.filelist),2)*100,"%")

    tmp.quants <-
      readRDS(file.path("regionalSim", tmp.filelist[i])) %>%
      as_tibble() %>%
      # BUG FIX: filter on the fun.mytermdates argument; the original used
      # the global `mytermdates`, silently ignoring the parameter
      filter(!is.na(CWAD),
             DOY %in% fun.mytermdates) %>% #--only keep days in my list
      group_by(ExpID) %>%
      # ExpID encodes "lat_long_year_dop"
      separate(ExpID,c("lat","long","year","dop"),sep = "_") %>%
      group_by(lat, long, dop, DOY) %>%
      summarise(CWAD20 = quantile(CWAD, 0.2),
                CWAD50 = quantile(CWAD, 0.5),
                CWAD80 = quantile(CWAD, 0.8)) %>%
      ungroup() %>%
      mutate(state = substr(tmp.filelist[i], 1, 2),
             # "41N"/"93W"-style coordinates -> signed numerics
             lat = as.numeric(gsub("N","",lat)),
             long = -as.numeric(gsub("W","",long)))

    fun.quants <-
      fun.quants %>%
      bind_rows(tmp.quants)
    # NOTE: the original also incremented i here; that is a no-op inside a
    # for loop (i is reset by the loop each iteration) and has been removed.
  }
  return(fun.quants)
}
#quants_IN <- funGetQuants(rawdata, "IN")
#quants_IA <- funGetQuants(rawdata, "IA")
#quants_IL <- funGetQuants(rawdata, "IL")
# Build and export the quantile table one state at a time (the commented
# states above were presumably run in an earlier session).
quants_MI <- funGetQuants(rawdata, "MI")
quants_MI %>% write_csv("create_shiny_data/quants/MI_ccbio-quants.csv")
quants_MN <- funGetQuants(rawdata, "MN")
quants_MN %>% write_csv("create_shiny_data/quants/MN_ccbio-quants.csv")
quants_OH <- funGetQuants(rawdata, "OH")
quants_OH %>% write_csv("create_shiny_data/quants/OH_ccbio-quants.csv")
quants_WI <- funGetQuants(rawdata, "WI")
quants_WI %>% write_csv("create_shiny_data/quants/WI_ccbio-quants.csv")
|
0f6c07c88c18fe12146288bfb8cac88da41e77dc
|
830b84d4e05be365884585babebc75f206f3f72d
|
/man/plotTZprofile.Rd
|
65a9ca3028544f6c6ae1927a25a25e49358f8c51
|
[] |
no_license
|
galuardi/analyzepsat
|
439c1e95e767bd0499536d0697ef02e036887579
|
bf1332bf5adb4eb91144de1e7c5c0a47c651ad0b
|
refs/heads/master
| 2021-01-17T07:44:22.023727
| 2017-12-08T21:29:16
| 2017-12-08T21:29:16
| 32,556,813
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,220
|
rd
|
plotTZprofile.Rd
|
\name{plotTZprofile}
\Rdversion{1.1}
\alias{plotTZprofile}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plots the depth and temperature using the depth time series colored by temperature.
}
\description{
Plots the depth and temperature using the depth time series colored by temperature.
This uses temperature only where depth measurements were recorded.
}
\usage{
plotTZprofile(tzdata, zlim = NULL, pch = 21, cex = 1.2, font = 1, cex.lab = 1.2, cbrks = 33, ylab = "Depth (m)", cex.axis = 1, MWT = T)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{tzdata}{
Object of type returned from \code{\link{MWTextract}} containing a 'T' and 'Z' component.
}
\item{zlim}{
If desired this will restrict the depth limits for the plot. This may be useful if there are one or two spurious depth measurements.
}
\item{cbrks}{
Number of color breaks for temperature
}
\item{legend}{
Plots an SST legend
}
\item{MWT}{
Indicates if this data frame is of class psat
}
\item{}{
All other plot inputs are standard plot commands \code{\link{par}}
}
}
\details{
}
\value{
Returns a time series style plot
}
\references{
}
\author{
Benjamin Galuardi
}
\note{
}
\seealso{
}
|
f27cdaf8175977eaa8a5456ceb0c8a2c7643d94b
|
a247d7852235013f4733e28da4827bf4ef85de98
|
/data-table.r
|
f2f8703f571cafdf4b7e29ce8feb5f1700377d1a
|
[
"MIT"
] |
permissive
|
FiaDot/R-usage
|
da06f1f8d57594dde7404d8cf7890a469da8d721
|
3ad5975499ba463fcdd40f8ab2e7bb1670b5ce33
|
refs/heads/master
| 2016-09-06T13:22:19.373022
| 2015-09-13T05:47:27
| 2015-09-13T05:47:27
| 42,385,237
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 261
|
r
|
data-table.r
|
# Smoke test: load one per-file statistics CSV into a data.table and draw
# the default pairwise plot of its columns.
library(data.table)
library(ggplot2)
library(corrplot)
# install.packages("corrplot")
# install.packages("data.table")

# Work from the data directory; echo it to confirm.
setwd("D:/data")
getwd()

# read the result file, wrap it as a data.table, and plot
filename <- "1_result.csv_stat.csv"
d <- read.csv(filename)
dt <- data.table(d)
plot(dt)
|
047c1d693e44d0fb9724f21f94c206b15c7431b6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/df2json/tests/test-all.R
|
ad81726a82f01996c2b704c3dd186b3726bcfc71
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 42
|
r
|
test-all.R
|
# Run the full testthat suite for the df2json package.
library(testthat)
test_package("df2json")
|
bfbb9c9a90bd24cedf74bfdeddc9ddcd6471cd22
|
0cf9f5aa0978ac801eb1e4fa5401d6ddf394840f
|
/R/classes.R
|
291d6104fc248cdc2316bd0d63fd5eae462360f8
|
[
"MIT"
] |
permissive
|
slopp/caselaw
|
b4a72b08056060422c18d26c8df480bdc4f3f785
|
e1ce00be1ca03a553f31a51c6b8bc978e29f8d0a
|
refs/heads/master
| 2022-12-26T20:17:18.626234
| 2020-10-09T22:03:30
| 2020-10-09T22:03:30
| 276,238,923
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,240
|
r
|
classes.R
|
#' Client class for the case law API
#'
#' @name caselaw
#'
#' @section Usage:
#' \preformatted{
#' cl <- cl_client$new()
#' }
#'
#' @section Details:
#'
#' An R6 client wrapping httr GETs against the case law API. It handles the
#' API-specific pieces: token authorization, query-parameter construction
#' (including double-quoting of values containing whitespace), and cursor
#' pagination via each response's `next` URL.
#'
#' If the CASE_LAW_API_KEY environment variable is set at construction time
#' it is sent as an Authorization header on every request.
#'
#' @family R6 classes
#'
#' @export
cl_client <- R6::R6Class(
  "caselaw",
  public = list(
    api_key = NULL,
    base = NULL,
    version = NULL,

    # Read the API key from the environment and remember the endpoint pieces.
    initialize = function(base = "https://api.case.law",
                          version = "v1"){
      self$api_key <- Sys.getenv("CASE_LAW_API_KEY")
      if (self$api_key == "") {
        warning("No CASE_LAW_API_KEY detected, will not be able to access some cases")
      }
      self$base <- base
      self$version <- version
    },

    # Turn an HTTP error response into an R error, echoing the parsed body
    # as a message first so the API's own diagnostics are visible.
    raise_error = function(res) {
      if (httr::http_error(res)) {
        msg <- sprintf(
          "%s request failed with %s",
          res$request$url,
          httr::http_status(res)$message
        )
        message(capture.output(str(httr::content(res))))
        stop(msg)
      }
    },

    # Authorization header when a key is configured, otherwise NULL
    # (httr::GET simply ignores a NULL argument).
    add_auth = function() {
      if (self$api_key != "") {
        httr::add_headers(Authorization = paste0("Token ", self$api_key))
      } else {
        NULL
      }
    },

    # Append /?key=value&key=value... to `path` and URL-encode the result.
    # `params` is a named list; values containing whitespace are wrapped in
    # double quotes.
    add_url_params = function(path, params) {
      quoted <- lapply(params, function(v) {
        ifelse(grepl("\\s", v), paste0('"', v, '"'), v)
      })
      nm <- names(quoted)
      if (!is.null(quoted)) {
        # the first pair introduces the query string with /?
        path <- paste0(path, "/?", nm[1], "=", quoted[[1]])
        # any further pairs are joined with &
        if (length(quoted) > 1) {
          pairs <- sapply(2:length(quoted), function(k) {
            sprintf("%s=%s", nm[k], quoted[[k]])
          })
          path <- paste0(path, "&", paste(pairs, collapse = "&"))
        }
      }
      URLencode(path)
    },

    # Follow cursor pagination: GET the first page, then keep following the
    # response's `next` URL, accumulating every page's `results` element.
    # At most `limit` additional pages are fetched.
    GET_PAGES = function(path, writer = httr::write_memory(), parser = "parsed", limit = Inf) {
      collected <- list()
      page <- self$GET(path, writer, parser)
      collected <- append(collected, page$results)
      pages_followed <- 0
      while (!is.null(page$`next`) && pages_followed < limit) {
        page <- self$GET_URL(page$`next`, writer, parser)
        collected <- append(collected, page$results)
        pages_followed <- pages_followed + 1
      }
      collected
    },

    # GET a path relative to base/version.
    GET = function(path, writer = httr::write_memory(), parser = "parsed") {
      full_url <- paste0(self$base, "/", self$version, "/", path)
      self$GET_URL(url = full_url, writer = writer, parser = parser)
    },

    # GET an absolute URL, fail loudly on HTTP errors, and parse the body.
    GET_URL = function(url, writer = httr::write_memory(), parser = "parsed") {
      res <- self$GET_RESULT_URL(url = url, writer = writer)
      self$raise_error(res)
      httr::content(res, as = parser)
    },

    # Lowest-level request: raw httr::GET with the auth header attached.
    GET_RESULT_URL = function(url, writer = httr::write_memory()) {
      httr::GET(
        url,
        self$add_auth(),
        writer
      )
    }
  )
)
|
08697f62f18966b29318347254ddff664e10f925
|
680a5de85b7153b6e3e8ffb0f81cf3f72405aa34
|
/main.R
|
6642409bd25407e278956788e81b3ea768de3c6e
|
[] |
no_license
|
TheLeopards/Assignment5
|
29a08508c94b1a2ce58350281fd6d97f47a2b5dc
|
dd5af90699ee4765f7dbc4bc5b978c888644f4c6
|
refs/heads/master
| 2021-01-10T15:11:41.037474
| 2016-01-11T08:10:15
| 2016-01-11T08:10:15
| 49,411,639
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,925
|
r
|
main.R
|
# Author: TheLeopards (samantha Krawczyk, Georgios Anastasiou)
# 8th January 2016
# Pre-processing chain to assess change in NDVI over time
library(sp)
library(raster)
# Fetch the two Landsat archives from Dropbox.
# NOTE(review): download() is not a base R function (base is
# utils::download.file) — presumably a course-provided helper; confirm it is
# defined before this script runs.
download("https://www.dropbox.com/s/akb9oyye3ee92h3/LT51980241990098-SC20150107121947.tar.gz?dl=0", "landsat5")
download("https://www.dropbox.com/s/i1ylsft80ox6a32/LC81970242014109-SC20141230042441.tar.gz?dl=0", "landsat8")
# unziping files
untar('data/landsat5.tar.gz', exdir = "data/")
untar('data/landsat8.tar.gz', exdir = "data/")
# creating stacks for Landsat data
# NOTE(review): the index split relies on the alphabetical order of the
# extracted .tif files (1:9 Landsat 8, 10:24 Landsat 5) — verify.
list_ls <- list.files('data/', pattern = '*.tif', full.names = TRUE)
ls5 <- stack(list_ls[10:24])
ls8 <- stack(list_ls[1:9])
# writing to file (as signed 16-bit integers)
ls5_2f <- writeRaster(x=ls5, filename='data/Ls5.grd', datatype='INT2S', overwrite=TRUE)
ls8_2f <- writeRaster(x=ls8, filename='data/Ls8.grd', datatype='INT2S', overwrite=TRUE)
# ensuring both datasets have the same extent
ls5_int <- intersect(ls5_2f, ls8)
ls8_int <- intersect(ls8, ls5_2f)
# Extract cloud Mask rasterLayer (first layer of each stack)
fmask5 <- ls5_int[[1]]
fmask8 <- ls8_int[[1]]
# Remove fmask layer from the Landsat stack
ls5_NoCloudLayer <- dropLayer(ls5_int, 1)
ls8_NoCloudLayer <- dropLayer(ls8_int, 1)
# cloud2NA() is a project helper sourced from the repo
source("R/cloud2NA.R")
# Apply the function on the two raster objects using overlay
ls5_CloudFree <- overlay(x = ls5_NoCloudLayer, y = fmask5, fun = cloud2NA)
ls8_CloudFree <- overlay(x = ls8_NoCloudLayer, y = fmask8, fun = cloud2NA)
# stacking only Red and NIR for both landsats
# NOTE(review): both patterns start with 'LC', but the Landsat 5 archive's
# files begin "LT5..." — the first pattern likely should be 'LT.*band[34]';
# as written both stacks presumably draw from the Landsat 8 files. Verify.
list_ls5 <- list.files('data/', pattern = 'LC.*band[34].tif', full.names = TRUE)
list_ls8 <- list.files('data/', pattern = 'LC.*band[45].tif', full.names = TRUE)
stack5 <- stack(list_ls5)
stack8 <- stack(list_ls8)
# NDVI calculations via the project helper ndvOver()
source("R/ndvOver.R")
ndvi5 <- overlay(x=stack5[[1]], y=stack5[[2]], fun=ndvOver)
ndvi8 <- overlay(x=stack8[[1]], y=stack8[[2]], fun=ndvOver)
# NDVI change over 30 years
NDVI_dif <- ndvi8 - ndvi5
plot(NDVI_dif)
|
1b649e09887c52d9d035c02dc0451bc89644f66f
|
2aad105780d14d8b0a0764fb0e2d4975e4516a8e
|
/R/check_iris.R
|
de1a95f1744b669cd94c4d588d0085a5b75c0253
|
[] |
no_license
|
phileas-condemine/comparaison_distanciers
|
4d32d1372b79717204dc4b168e70d13dde074f87
|
762606702804c1246b91abcb2e89b833dbb48d9e
|
refs/heads/master
| 2023-07-16T20:48:06.687676
| 2021-08-27T07:59:36
| 2021-08-27T07:59:36
| 198,305,556
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 691
|
r
|
check_iris.R
|
# For each commune (LIBCOM), measure how much the share of women of
# childbearing age varies across its IRIS zones (INSEE 2015 census data),
# and export the communes sorted by that spread.
library(data.table)
library(readxl)
# IRIS-level population table; the data start after 5 header rows
pop_iris = read_excel("external_data/base-ic-evol-struct-pop-2015.xls",sheet = "IRIS",skip = 5)
pop_iris= data.table(pop_iris)
# keep populated IRIS only; P15_F1529 / P15_F3044 are presumably the female
# population aged 15-29 / 30-44 in 2015 — TODO confirm against INSEE docs
pop_iris=pop_iris[P15_POP>0,c("P15_F1529","P15_F3044","P15_POP","LIBCOM","IRIS")]
pop_iris[,prop_femme_procreation := (P15_F1529 + P15_F3044)/P15_POP]
# per commune: min/max of that proportion over its IRIS, IRIS count, total pop
ecart_distr = pop_iris[,.(prop_min = min(prop_femme_procreation),
prop_max = max(prop_femme_procreation),
nb_IRIS = .N,pop_tot = sum(P15_POP)),by="LIBCOM"]
# communes with a single IRIS have no spread to measure — drop them
ecart_distr = ecart_distr[nb_IRIS >1]
# spread = ratio of the max to the min proportion, sorted descending
ecart_distr = ecart_distr[,ecart := prop_max/prop_min]
setorder(ecart_distr,-ecart)
fwrite(ecart_distr,"output/ecart_prop_femmes_1544_entre_IRIS_perCOM.csv")
|
c415c4af407832f7a925134d4314270cdd18b042
|
09c6c0f81c427d8ba8deac97ce32197a26a93441
|
/unit_2-linear-models-overview/01-overview.R
|
e8e781fc4d072dd1b4d6ebe25004e94176d38f6b
|
[] |
no_license
|
wjamesTMC/ds-education-linear_regression
|
5b62dc1107abc9c0fdc7fad19a988933de87f909
|
ce4e65ab07e17afecb75a6d054e403e859065155
|
refs/heads/master
| 2020-03-25T03:20:12.835090
| 2018-10-23T11:28:33
| 2018-10-23T11:28:33
| 143,336,211
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,076
|
r
|
01-overview.R
|
# --------------------------------------------------------------------------------
#
# Overview
#
# --------------------------------------------------------------------------------
# Linear Models Overview Bookmark this page In the Linear Models section, you
# will learn how to do linear regression.
#
# After completing this section, you will be able to:
#
# Use multivariate regression to adjust for confounders.
# Write linear models to
# Describe the relationship between two or more variables.
# Calculate the least squares estimates for a regression model
# using the lm function.
# Understand the differences between tibbles and data frames.
# Use the do function to bridge R functions and the tidyverse.
# Use the tidy, glance, and augment functions from the broom package.
# Apply linear regression to measurement error models.
# This section has four parts: Introduction to Linear Models, Least Squares
# Estimates, Tibbles, do, and broom, and Regression and Baseball. There are
# comprehension checks that follow most videos.
|
2bcaee5dfb085f7cee6d488449e7388af6050c11
|
3fa8041faede40a2f0481294a7aa8773bc6e16a9
|
/write.R
|
d76f4955ff121d5730c237eec21bfe0791e45488
|
[] |
no_license
|
samssr/daily
|
47619ee98d79e544f6f52739234f0b123dfcc4d2
|
aa997e4c91c106d76112ff94932c3a7db0d1ce1c
|
refs/heads/master
| 2020-07-05T07:18:43.726933
| 2020-02-20T15:58:39
| 2020-02-20T15:58:39
| 202,568,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 141
|
r
|
write.R
|
# Dump the current clipboard contents to ./api.txt.
# NOTE(review): readClipboard() is Windows-only (utils); stringr and dplyr
# are loaded but not used in the visible code.
library(stringr)
library(dplyr)
dat <- readClipboard()
# dat <- RCurl::base64Encode(dat,mode = "character")
write(dat,file = "./api.txt")
|
8637e9f516e67c4d4aa7ffa17811d5795710048a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/astrolibR/examples/planet_coords.Rd.R
|
fb70f7c37f44f059a83309088c217104c1a18c35
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 803
|
r
|
planet_coords.Rd.R
|
# Extracted example code for astrolibR's planet_coords() documentation.
library(astrolibR)
### Name: planet_coords
### Title: Calculate low precision Right Ascension and declination for the
### planets given a date
### Aliases: planet_coords
### Keywords: misc
### ** Examples
# Find the RA, Dec of Venus on 1992 Dec 20
# Result: RA = 21 05 2.66 Dec = -18 51 45.7
planet_coords(c(1992,12,20)) # compute for all planets
# NOTE(review): `ra` and `dec` are never assigned above — the example
# presumably intended to capture planet_coords()'s result first; as written
# the next line errors.
adstring(ra[2],dec[2],1) # Venus is second planet
# This position is 37" from the full DE406 ephemeris position of
# RA = 21 05 5.24 -18 51 43.1
# Plot the declination of Mars for every day in the year 2001
jd = jdcnv(2001,1,1,0) # get Julian date of midnight on Jan 1
out = planet_coords(jd+seq(0,365), planet='mars')
plot(jd+seq(0,365), out$dec, pch=20, xlab='Day of 2001', ylab='Declination of Mars (degrees)')
|
52ed127788ac02bdd61dc8bdf56697194aed491a
|
918a9b0c0f5fe928eaa42c5287f31e1562de596a
|
/scripts/viridis.R
|
353f65403c0358fe366b3013dd8eb22569a4727e
|
[] |
no_license
|
Luohuhu/STAT-240
|
47c372e47120d6a81cfa993769f18dd34ac32e64
|
09399f99ba42931c5f49ca2dda77f80e18dc1f1e
|
refs/heads/main
| 2023-08-21T23:12:52.577234
| 2021-10-11T02:05:04
| 2021-10-11T02:05:04
| 414,400,786
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 403
|
r
|
viridis.R
|
## reset color defaults
## Source https://data-se.netlify.com/2018/12/12/changing-the-default-color-scheme-in-ggplot2/
library(viridisLite)
#### continuous variables color and fill
options(ggplot2.continuous.colour = "viridis")
options(ggplot2.continuous.fill = "viridis")
#### use viridis for discrete scales
# NOTE(review): these assignments shadow ggplot2's scale_*_discrete by name,
# so they only take effect if this file is sourced into the environment
# where the plots are later built — confirm the intended scoping.
scale_colour_discrete = scale_colour_viridis_d
scale_fill_discrete = scale_fill_viridis_d
|
9f14ed5aaa3c8d25e9cd325f5dbd0f166fdcf8ed
|
ddbec4e6028dd4799c79dca91f60d2baea132d8e
|
/datastructure1.R
|
d1c33b007084f8a40ca37fa32a5d8ff4a255469c
|
[] |
no_license
|
swarnavanaskar/analytics_1
|
d5c271686442b6f066b3b4970ef558ac3d6a1395
|
6feef8c6580922a93277ebc8b9f94c859101600f
|
refs/heads/master
| 2020-04-02T16:33:19.937918
| 2018-10-28T09:25:41
| 2018-10-28T09:25:41
| 154,617,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,907
|
r
|
datastructure1.R
|
# Data Structures in R — teaching script; run line by line (Ctrl+Enter).
# NOTE: a few lines error on purpose to demonstrate R's subsetting rules,
# so run interactively rather than sourcing the whole file at once.

# Vectors -----
x=1:10 #create seq of nos from 1 to 10
x
(x1 <- 1:20)
(x1=1:30)
(x2=c(1,2,13,4,5))
class(x2)
(x3=letters[1:10])
class(x3)
LETTERS[1:26]
(x3b = c('a',"SWARNAVA","4"))
class(x3b)
# class(x4)  # BUG FIX: x4 was never defined, so this line always errored
#access elements
(x6 = seq(0,100,by=3))
methods(class='numeric')
?seq
#[1] 0 2 4 6 8 10
ls() #variables in my environment
x6
length(x6)
x6[20]
x6[3] # access 3rd element
x6[seq(1, length(x6),2)]
#[1] 4
x6[c(2, 4)] # access 2nd and 4th element
x6[-1] # access all but 1st element
x6[-c(1:10, 15:20)]
x6[c(2, -4)] # cannot mix positive and negative integers
#Error in x[c(2, -4)] : only 0's may be mixed with negative subscripts
x6[c(2.4, 3.54)] # real numbers are truncated to integers
x6[-c(1,5,20)]
x6
length(x6)
(x= rnorm(100, mean = 60, sd=10))
summary(x)
quantile(x)
quantile(x,seq(0,1, 0.1)) #decile
quantile(x, seq(0,1, 0.01))#percentile
fivenum(x)
boxplot(x)
abline(h=fivenum(x))
stem(x)
hist(x)
#sort, order
set.seed(246)
(x6 = sample(1:20)) # BUG FIX: stray trailing "b" removed (it was a parse error)
sort(x6)
#DataFrame----
#create Vectors to be combined into DF
(rollno = 1:30)
(sname = paste('student',1:30,sep=''))
(gender = sample(c('M','F'), size=30, replace=T, prob=c(.7,.3)))
table(gender)
prop.table(table(gender))
(marks = floor(rnorm(30,mean= 50,sd=10)))
(marks2 = ceiling(rnorm(30,40,5)))
(course = sample(c('BBA','MBA'), size=30, replace=T, prob=c(.5,.5)))
rollno; sname; gender
marks ; marks2; course
#create DF
df1= data.frame(rollno, sname, gender, marks, marks2, course, stringsAsFactors = F)
str(df1) #structure of DF
head(df1) #top 6 rows
head(df1,n=3) #top 3 rows
tail(df1) #last 6 rows
class(df1) # DF
summary(df1) #summary
df1$gender = factor(df1$gender)
df1$course = factor(df1$course)
str(df1)
summary(df1)
# BUG FIX: the original indexed a non-existent `df2`; df1 is the only data
# frame created above. Note the bare `marks`/`gender` used here are the
# free-standing vectors, not df1's columns.
df1[marks>50 & gender=='F', c('rollno', 'sname','gender', 'marks')]
df1[marks>50 & gender=='F', c(1,2)]
|
bb1213381aab61cd5e92f629de7b9de1e2c90c8c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MSnbase/examples/Chromatogram-class.Rd.R
|
558c1ffd800d8103d237c6eb49fbc3b59eb7608a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,487
|
r
|
Chromatogram-class.Rd.R
|
# Extracted example code for MSnbase's Chromatogram class documentation.
library(MSnbase)
### Name: Chromatogram-class
### Title: Representation of chromatographic MS data
### Aliases: Chromatogram-class Chromatogram aggregationFun
### show,Chromatogram-method rtime,Chromatogram-method
### intensity,Chromatogram-method mz,Chromatogram-method
### precursorMz,Chromatogram-method fromFile,Chromatogram-method
### length,Chromatogram-method as.data.frame,Chromatogram-method
### filterRt,Chromatogram-method clean,Chromatogram-method
### plot,Chromatogram,ANY-method msLevel,Chromatogram-method
### isEmpty,Chromatogram-method productMz,Chromatogram-method productMz
### bin,Chromatogram-method
### ** Examples
## Create a simple Chromatogram object.
ints <- abs(rnorm(100, sd = 100))
rts <- seq_len(length(ints))
chr <- Chromatogram(rtime = rts, intensity = ints)
chr
## Extract intensities
intensity(chr)
## Extract retention times
rtime(chr)
## Extract the mz range - is NA for the present example
mz(chr)
## plot the Chromatogram
plot(chr)
## Create a simple Chromatogram object based on random values.
chr <- Chromatogram(intensity = abs(rnorm(1000, mean = 2000, sd = 200)),
rtime = sort(abs(rnorm(1000, mean = 10, sd = 5))))
chr
## Get the intensities
head(intensity(chr))
## Get the retention time
head(rtime(chr))
## What is the retention time range of the object?
range(rtime(chr))
## Filter the chromatogram to keep only values between 4 and 10 seconds
chr2 <- filterRt(chr, rt = c(4, 10))
range(rtime(chr2))
|
9bd3e4f4da3349350cd32d7e04495a758484a302
|
fdecc167ccbd9246139243e037f1fe11e5a6c443
|
/man/read_lca_parameters_from_xls.Rd
|
a6405ab87a54ed7bb2fb089dbe70faa218ddb71a
|
[
"MIT"
] |
permissive
|
zlfccnu/kwb.lca
|
254b445e87e3aa2a152aaec5090ddd6a0267fee8
|
e15557fb20fc87d11cf654179e47334f11515fd0
|
refs/heads/master
| 2023-03-15T07:39:26.618806
| 2020-01-10T13:46:41
| 2020-01-10T13:46:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 533
|
rd
|
read_lca_parameters_from_xls.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_lca_parameters_from_xls.R
\name{read_lca_parameters_from_xls}
\alias{read_lca_parameters_from_xls}
\title{Read LCA Parameters from an Excel File}
\usage{
read_lca_parameters_from_xls(file, country = "de")
}
\arguments{
\item{file}{path to the Excel file to be read}
\item{country}{country code: "de" for German or "en" for English. Required
to convert the text values in the Value columns}
}
\description{
Read LCA Parameters from an Excel File
}
|
ad3900f0c3ff3fb18d61567ac6568071a8d09ccc
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/abd/examples/Pseudoscorpions.Rd.R
|
6a40519125d849f8e0331ada07cb50ccf778e24e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 304
|
r
|
Pseudoscorpions.Rd.R
|
# Extracted example code for the abd package's Pseudoscorpions dataset.
library(abd)
### Name: Pseudoscorpions
### Title: Multiple Mating in Pseudoscorpions
### Aliases: Pseudoscorpions
### Keywords: datasets
### ** Examples
str(Pseudoscorpions)
# NOTE(review): bwplot() comes from lattice and favstats() from mosaic —
# presumably attached as dependencies of abd; confirm.
bwplot(successful.broods ~ treatment, Pseudoscorpions)
aggregate(successful.broods ~ treatment, Pseudoscorpions, favstats)
|
44c8c0f78595e36bf5814a5b6a2493029bfc461c
|
92562f35e515ef901dc2623dbee39f7235cd8301
|
/R/corrected_z_test.R
|
b7ff0a6b220551ca023290774bbf5280ec42acac
|
[
"MIT"
] |
permissive
|
HorridTom/meancomppp
|
25faa19a79c61cd3c054d0d2689187f4b79ba852
|
93988af280e563394e9b2599d9a8414b78d17618
|
refs/heads/master
| 2020-09-14T17:03:46.673845
| 2019-11-21T17:44:12
| 2019-11-21T17:44:12
| 223,194,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,170
|
r
|
corrected_z_test.R
|
#' corrected_z_test
#'
#' Corrected z-test comparing group means of partially paired data (some
#' subjects observed in both groups, some in only one).
#'
#' @param data partially paired data in long format: a `group` column with
#'   values "pre" and "post", a `value` column, and one or more id columns
#'   by which `tidyr::pivot_wider()` matches paired observations
#' @param sig_level significance level for the hypothesis test
#'
#' @return list object containing in particular:
#' z_corr, the corrected z statistic for the hypothesis test
#' stat_sig, the result of the hypothesis test
#' @export
#'
#' @examples
#' corrected_z_test(data = simulate_pp_data())
corrected_z_test <- function(data, sig_level = 0.05) {
  # per-group sample means and variances (complete observations only)
  sample_stats <- data %>%
    dplyr::group_by(group) %>%
    dplyr::summarise(sample_mean = mean(value, na.rm = TRUE),
                     sample_variance = var(value, use = "complete.obs"))

  # one row per subject, with `pre` and `post` columns
  wide_data <- data %>%
    tidyr::pivot_wider(names_from = group, values_from = value)

  # paired subjects only: centre each column and form cross-products,
  # the ingredients of the paired-sample covariance S01
  paired_data <- wide_data %>%
    tidyr::drop_na() %>%
    dplyr::mutate(pre_centred = pre - mean(pre),
                  post_centred = post - mean(post),
                  prod = pre_centred*post_centred)

  # n paired subjects; n0/n1 subjects observed only in pre/post.
  # (%>% binds tighter than `-`, so `- n` applies to the piped nrow().)
  n <- nrow(paired_data)
  n0 <- wide_data %>% tidyr::drop_na(pre) %>% nrow() - n
  n1 <- wide_data %>% tidyr::drop_na(post) %>% nrow() - n

  # sample covariance between the paired measurements
  S01 <- paired_data %>%
    dplyr::summarise(S01 = (1/(n-1))*sum(prod)) %>%
    dplyr::pull(S01)

  # NOTE: `=` assignments in the original replaced with `<-` for consistency
  mean0 <- sample_stats %>% dplyr::filter(group == "pre") %>% dplyr::pull(sample_mean)
  mean1 <- sample_stats %>% dplyr::filter(group == "post") %>% dplyr::pull(sample_mean)
  var0 <- sample_stats %>% dplyr::filter(group == "pre") %>% dplyr::pull(sample_variance)
  var1 <- sample_stats %>% dplyr::filter(group == "post") %>% dplyr::pull(sample_variance)

  # corrected variance of the mean difference: the usual two-sample terms
  # minus a covariance correction for the n paired observations
  mean_diff <- mean0 - mean1
  var_corr <- (var0 / (n0 + n)) + (var1 / (n1 + n)) - (2*n*S01 / ((n0 + n)*(n1 + n)))
  z_corr <- mean_diff / sqrt(var_corr)

  # two-sided p-value against the standard normal
  pval <- 2*pnorm(-abs(z_corr))
  stat_sig <- pval < sig_level

  output <- list()
  output$data <- data
  output$n <- n
  output$n0 <- n0
  output$n1 <- n1
  output$sample_stats <- sample_stats
  output$S01 <- S01
  output$mean_diff <- mean_diff
  output$var_corr <- var_corr
  output$z_corr <- z_corr
  output$stat_sig <- stat_sig
  output$pval <- pval
  output
}
|
cfd633a4e5570c5b9588f52b3ffddf79410f6e74
|
ff2b418f76f82ecdd399951f9750fe3386f834f6
|
/week_5/project_1/TFIDF for project1.R
|
4c3f3646ff4aaf75d84c1a3b038041ff180b25c7
|
[] |
no_license
|
b03602023/1062CSX_project
|
545b636331e2038a2df578754f90fe27fecd204e
|
e42ed5911830b5c1641638998ad9d624f6db0f7c
|
refs/heads/master
| 2021-01-25T10:49:47.428765
| 2018-06-25T02:00:53
| 2018-06-25T02:00:53
| 123,371,347
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,365
|
r
|
TFIDF for project1.R
|
#------getpost-------
# Scrape posts from one Facebook page via the Graph API v2.10, one yearly
# window at a time, writing each page of results to a numbered .txt file.
# NOTE(review): rm(list=ls()) inside a script is an anti-pattern; the access
# token below is committed in source and should be revoked and moved to an
# environment variable.
rm(list=ls(all.names=TRUE))
library(httr)
prefex <- "https://graph.facebook.com/v2.10/"
token <- "EAACEdEose0cBABh9UHkbum4Bu2lcd7gZBilEFMofndeV8PNJXkWZCzfFg8GcaukItTvmE9EElr8rWKTyQpbc2DDEjgZAbW6Da0rTxBQ4MSzjSrzagkNT3xJWgnG9iV6ZA2A5ZCzETc3xZCRbZAKqZC0x9uohQTD41n3ZA4M8R2yuNmZBwKwc0tseCYDkmijWLfBL4ZD"
number <- 1 # fetch only one post per request
# 101501458062251287 is Eric Chu's page id
# 136845026417486 is Ko Wen-je's page id
# 175549807657 is the low-carbon-living blogger's page id
# restrict to posts between 2012-01-01 and 2018-04-07
target <- "10150145806225128/posts?limit="
control <- "&until=2018-04-07&since=2018-01-01"
# one until/since window per calendar year, back to 2012
control <- c(control, paste0("&until=",2017:2012,"-12-31","&since=",2017:2012,"-01-01"))
count=1
# Fetch every page of posts for one time window, writing each page to
# "<count>.txt"; returns the next unused file counter.
getpost <- function(control, count){
  attrs <- paste0(target, number, control,"&access_token=")
  url <- paste0(prefex, attrs, token)
  # use httr's GET/content explicitly (namespace-qualified)
  res <- httr::GET(url)
  data <- httr::content(res)
  # flatten the nested list in data$data into a matrix
  #groups= matrix(unlist(data$data))
  groups <- matrix(unlist(data$data)) # each groups becomes one document for the word cloud later
  # write this batch to its own file, numbered by count
  filename = paste0(count, ".txt")
  write.table(groups,filename)
  # advance to the next page
  after = data$paging$cursors$after # after : This is the cursor that points to the end of the page of data that has been returned.
  nextflg= data$paging$`next` # == nextflg= data$paging[[2]]
  # nextflg becomes NULL once the last page of data is reached
  while(class(nextflg) != "NULL"){
    count=count+1
    #print(count)
    #attrs = paste0(target, number, control, "&after=", after,"&access_token=")
    #url = paste0(prefex,attrs,token)
    # the commented code above is equivalent to following nextflg directly
    nurl = nextflg
    nres= httr::GET(nurl)
    ndata = httr::content(nres)
    ngroups= matrix(unlist(ndata$data))
    #p1=ndata[["data"]][[1]]$message
    #p1=ndata$data[[1]]$message
    ## tryCatch could be used here to record where the while loop stops
    after = ndata$paging$cursors$after
    nextflg = ndata$paging$`next` # ndata$paging[[2]]
    filename = paste0(count, ".txt") # file name
    write.table(ngroups,filename)
  }
  return(count+1)
}
# run every yearly window, carrying the file counter forward
for(i in 1:length(control)){
  count <- getpost(control[i], count)
  #print(paste0(count,'!!!'))
}
#-------clean data--------
# Build a corpus from the scraped .txt files, strip noise, tokenize the
# Chinese text with jiebaR, and rank terms by TF-IDF.
library(NLP)
library(tm)
library(jiebaRD)
library(jiebaR) # Chinese word segmentation
library(RColorBrewer)
library(wordcloud)
library(tmcn) #segmentCN
# all the numbered .txt files written by getpost() above
filenames <- list.files(getwd(), pattern="*.txt") #pattern: an optional regular expression. Only file names which match the regular expression will be returned.
files <- lapply(filenames, readLines) #Read some or all text lines from a connection.
docs <- Corpus(VectorSource(files)) #Representing and computing on corpora.
# helper: replace every match of `pattern` with a space
toSpace <- content_transformer(function(x, pattern) {
return (gsub(pattern, " ", x))
}
)
# drop write.table artifacts ("V1" header, row number "1") and newlines
docs <- tm_map(docs,toSpace,"V1")
docs <- tm_map(docs,toSpace,"\n")
docs <- tm_map(docs,toSpace, "1")
# remove upper/lower-case Latin letters and digits
docs <- tm_map(docs,toSpace, "[A-Za-z0-9]")
# remove punctuation
# remove digits and extra white space
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, stripWhitespace)
#------- word segmentation --------
mixseg = worker()
segment <- c("新北")
new_user_word(mixseg,segment) #Add user word
# tokenizer: segment each document with jiebaR (equivalent to mixseg[groups]).
# NOTE: `segment` above is also a character vector, but the call below still
# resolves to jiebaR's segment() because R skips non-function bindings when
# looking up a name used as a function.
jieba_tokenizer=function(d){
unlist(segment(d[[1]],mixseg))
}
seg = lapply(docs, jieba_tokenizer)
# back to documents
# word-frequency result:
library(Matrix) #nnzero
freqFrame = as.data.frame(table(unlist(seg)))
seg <- Corpus(VectorSource(seg))
tdm <- TermDocumentMatrix(seg, control = list(wordLengths = c(1,10)))
# can be viewed as a dense matrix with: as.matrix(tdm)
#-------caculate TF-IDF ----------
tf <- as.matrix(tdm)/apply(tdm, 2, sum) #term frequency: the number of words in every document
idf <- log10(ncol(tdm)/apply(tdm, 1, nnzero)) # inverse document frequency
tfidf <- tf*idf #TF-IDF
# View(head(tfidf)) #TF-IDF
# rank terms by their summed TF-IDF across documents
s_tfidf <- apply(tfidf, 1, sum)
s_tfidf <- s_tfidf[order(s_tfidf, decreasing = TRUE)]
#s_tfidf[s_tfidf>0.23]
tfidf <- as.data.frame(tfidf)
# top 10 terms whose summed TF-IDF exceeds 0.23
s_tfidf[s_tfidf>0.23][1:10]
|
878774de5c52d335abc0519163002eb55fc7cb93
|
5edf3ebc52f12c8b7ed4dbc1aa5f97a8e8929605
|
/models/openml_stock/classification_binaryClass/ca3055556ced97040c944c88d0b2f0ba/code.R
|
ac8978021d220bbf9b7e59400bf18baa550c5e44
|
[] |
no_license
|
lukaszbrzozowski/CaseStudies2019S
|
15507fa459f195d485dd8a6cef944a4c073a92b6
|
2e840b9ddcc2ba1784c8aba7f8d2e85f5e503232
|
refs/heads/master
| 2020-04-24T04:22:28.141582
| 2019-06-12T17:23:17
| 2019-06-12T17:23:17
| 171,700,054
| 1
| 0
| null | 2019-02-20T15:39:02
| 2019-02-20T15:39:02
| null |
UTF-8
|
R
| false
| false
| 3,156
|
r
|
code.R
|
# packages and RNG seed
set.seed(123, "L'Ecuyer")
library(jsonlite)
library(OpenML)
library(farff)
library(digest)
# load the dataset description and fetch the data from OpenML
dataset<-read_json("dataset.json",simplifyVector = TRUE)
preprocessing<-dataset$variables
dataset$source=="openml"
# NOTE(review): the comparison above is evaluated and discarded — it has
# no effect; presumably it was meant as a sanity check inside stopifnot().
# Extract the numeric OpenML dataset id from the trailing digits of the URL.
pattern<-regexec("\\d+$",dataset$url)
ID<-regmatches(dataset$url,pattern)
ID<-as.numeric(ID)
dane<-getOMLDataSet(ID)
train<-dane$data
# check which learners/packages are available
listLearners(check.packages = TRUE)
# build the mlr task and learner (logistic regression, probability output)
classif_task = makeClassifTask(id = "task", data = train, target =dane$target.features)
classif_learner<-makeLearner("classif.logreg",predict.type = "prob")
# metrics — classification: Acc, AUC, Specificity, Recall, Precision, F1; regression: MSE, RMSE, MAE, R2 | "f1", "acc" "auc" "tnr" "tpr" "ppv"| "mse" "mae" "rmse" "rsq"
Rcuda<-list(f1=f1, acc=acc, auc=auc, tnr=tnr, tpr=tpr ,ppv=ppv)
measures<-intersect(listMeasures(classif_task),c("f1", "acc", "auc", "tnr", "tpr" ,"ppv"))
# 5-fold cross-validation
cv <- makeResampleDesc("CV", iters = 5)
r <- resample(classif_learner, classif_task, cv,measures = Rcuda[measures])
r<-r$aggr
# strip the ".test.mean" suffix from the aggregated measure names
pattern<-regexec("\\.test\\.mean$",names(r))
regmatches(names(r),pattern)<-""
r<-as.list(r)
# start every metric at NA, then fill in the ones actually computed
Rcuda<-lapply(Rcuda,function(x){NA})
Rcuda[names(r)]<-r
# collect hyperparameters: defaults from the param set, overridden by
# explicitly set values
parametry<-getParamSet(classif_learner)
parametry<-parametry$pars
parametry<-lapply(parametry, FUN=function(x){ ifelse(is.null(x$default), NA, x$default)})
hiper<-getHyperPars(classif_learner)
parametry[names(hiper)]<-hiper
# hash the (task, learner) pair to obtain a stable model id
hash <- digest(list(classif_task,classif_learner))
hash
# assemble the JSON documents (model / task / audit)
modeldozapisu<-list(
id= hash,
added_by= "wernerolaf",
date= format.Date(Sys.Date(),"%d-%m-%Y") ,
library= "mlr",
model_name= "classif.logreg",
task_id=paste("classification_",dane$target.features,sep = ""),
dataset_id= dataset$id,
parameters=parametry,
preprocessing=dataset$variables
)
modeldozapisu<-toJSON(list(modeldozapisu),pretty = TRUE,auto_unbox = TRUE)
taskdozapisu<-list(id=paste("classification_",dane$target.features,sep = ""),added_by= "wernerolaf",
date= format.Date(Sys.Date(),"%d-%m-%Y") ,
dataset_id= dataset$id,type="classification",target=dane$target.features)
auditdozapisu<-list(id=paste("audit_",hash,sep = ""),
date= format.Date(Sys.Date(),"%d-%m-%Y"),added_by= "wernerolaf",
model_id=hash,task_id=paste("classification_",
dane$target.features,sep = ""),
dataset_id=dataset$id,performance=Rcuda)
taskdozapisu<-toJSON(list(taskdozapisu),pretty = TRUE,auto_unbox = TRUE)
auditdozapisu<-toJSON(list(auditdozapisu),pretty = TRUE,auto_unbox = TRUE)
# write everything to disk under <task_id>/<hash>/
task_id=paste("classification_",dane$target.features,sep = "")
dir.exists(task_id)
dir.create(task_id)
setwd(file.path(getwd(),task_id))
write(taskdozapisu,"task.json")
dir.create(hash)
setwd(file.path(getwd(),hash))
write(modeldozapisu,"model.json")
write(auditdozapisu,"audit.json")
# session info
sink("sessionInfo.txt")
sessionInfo()
sink()
|
0ce1df2caf1faad8afeff5b5f89d8bd71f09bd15
|
2ee316fe8a552c8fd9804200df2cf5a78561f4c6
|
/01_Data_Science--R Basics/02_Vectors&Sorting/Exercises/2. Sorting/Exercise 2 - Order.R
|
98ecdee6eaacd9fd975e9f03195216c11c57cebb
|
[] |
no_license
|
vishwasbasotra/Data-Science-Professional-Certificate-by-Harvard-University
|
67afe5f44b1f9988eb2809d64c15110ca046d112
|
b8483334628695ef6af95e9487e610befc212b3e
|
refs/heads/master
| 2022-09-26T02:25:04.911961
| 2020-06-07T21:02:22
| 2020-06-07T21:02:22
| 260,374,610
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
r
|
Exercise 2 - Order.R
|
# Access population from the dataset and store it in pop
pop <- murders$population

# Use the command order to find the vector of indexes that
# order 'pop' and store in object 'ord'.
# (The instructions were previously bare string literals, which R
# evaluates as expressions; they are now proper comments.)
ord <- order(pop)

# Find the index number of the entry with the smallest
# population size.
which.min(pop)
|
184962562d5d43a93ccf773d856c5b51e59162d3
|
16f75b6adf36d01a8acd35d0626a7ce234b3eb84
|
/script/clean_data/02_FIPScode.R
|
58148d0490f3ce6858b0467f4f49f346f84f0853
|
[] |
no_license
|
amikami102/rainfall_and_voterturnout
|
3375124bc7b0a143d6009f727e6b14289f30289f
|
dd16f909a41c7303dbbae310de7648ccbcf56880
|
refs/heads/master
| 2020-08-12T07:25:58.531750
| 2019-10-13T03:34:34
| 2019-10-13T03:34:34
| 214,252,274
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,578
|
r
|
02_FIPScode.R
|
####---------------------------
# This script creates a new column called
# "fips_code" for all cces data sets which records
# the 5-digit FIPS code of respondent's state and
# county of residence.
#--------------------------------
# We use 2010 Census to match county
# name to their FIPS code.
#------------------------------
# cces2006: v1004 gives the 5-digit FIPS code, but some
# entries are 4 digits long either because the state FIPS portion
# is missing the pre-pending 0 or because of error in data entry (one
# in Mississippi and another in Tennessee).
#------------------------------
# cces2008: V269 and V251 give pre-election county
# and state FIPS code respectively.
#-------------------------------
# cces2010: V277 gives pre-election 5-digit FIPS code
#-------------------------------
# cces2012, 2014, 2016: countyfips gives the pre-election
# 5-digit FIPS code
####-----------------------------
# Dependencies: rio (import/export), tidyverse, logging (loginfo),
# argparse (command-line parsing).
library(rio)
library(tidyverse)
library(logging)
library(argparse)
# load `counties00`, `counties10`, `states`
load("output/counties.Rdata")
# set up command line arguments
parser <- ArgumentParser(description = "Add 'fips_code' to CCES datasets.")
parser$add_argument('--cces', type = "character",
                    help = "Directory storing processed CCES datasets.",
                    default = "./output/cces_processed")
args <- parser$parse_args()
# define function to clean cces 2006
# v1002 = 2-letter state abbreviation, v1004 = 5-digit state+county FIPS.
clean_06 <- function(cces_df = cces2006){
  #------------------------------
  # Pad FIPS code for Alabama, Alaska, Arkansas, Arizona, California,
  # Colorado, Connecticut with pre-pending 0 (their state FIPS begins
  # with a 0 that was dropped on import).
  # There is one entry in Tennessee and Mississippi each that has a
  # nonsensical 4-digit value as its FIPS code. We recode these as NA.
  #------------------------------
  # BUG FIX: the length test must apply to the FIPS code (v1004), not to
  # the state abbreviation (v1002). str_length(v1002) < 5 is always TRUE
  # for a 2-letter abbreviation, so *every* TN/MS row was recoded to NA
  # instead of only the two malformed 4-digit entries.
  need_pad <- c("AL", "AK", "AR", "AZ", "CA", "CO", "CT")
  cces_df %>%
    mutate(fips_code = ifelse(v1002 %in% need_pad,
                              str_pad(v1004, width = 5,
                                      side = "left", pad = 0),
                              ifelse(v1002 %in% c("TN", "MS") & str_length(v1004) < 5,
                                     NA, v1004)))
}
# Clean CCES 2008: build the 5-digit FIPS by joining the state code
# (V251, trailing whitespace trimmed) with the county code (V269,
# zero-padded on the left to 3 digits).
clean_08 <- function(cces_df = cces2008){
  state_part <- str_trim(cces_df$V251, "right")
  county_part <- str_pad(cces_df$V269, width = 3, side = "left", pad = "0")
  mutate(cces_df, fips_code = paste0(state_part, county_part))
}
# Clean CCES 2010: V277 already holds the pre-election 5-digit FIPS code;
# store it as character in the standard `fips_code` column.
clean_10 <- function(cces_df = cces2010){
  mutate(cces_df, fips_code = as.character(V277))
}
# define functions to clean cces 2012, 2014, 2016
## `countyfips` has an 'AsIs' attribute that we don't need.
.unAsIs <- function(X) {
# This function reverses the effect of I() and
# removes the 'AsIs' attribute.
#----------------------
# X (variable name)
#----------------------
if(class(X)=="AsIs") {
class(X) <- class(X)[-match("AsIs", class(X))]
attr(X, "comment") <- NULL
}
return(X)
}
# Clean CCES 2012/2014/2016: copy `countyfips` (with its AsIs wrapper
# stripped) into the standard `fips_code` column.
clean_12 <- function(cces_df){
  mutate(cces_df, fips_code = .unAsIs(countyfips))
}
add_fips_code <- function(file, year){
  # Add "fips_code" column to the cces dataset, overwriting `file` in place.
  #-----------------
  # file (str, file path to cces*.rds)
  # year (str, election year; note that the 2012+ branch compares the
  #       string against a numeric sequence — %in% coerces both sides
  #       to character, so "2012" %in% seq(2012, 2018, 2) is TRUE)
  #-----------------
  # select the appropriate function for cleaning the dataset
  if(year == "2006"){
    cces_df <- import(file) %>% clean_06
  }
  if(year == "2008"){
    cces_df <- import(file) %>% clean_08
  }
  if(year == "2010"){
    cces_df <- import(file) %>% clean_10
  }
  if(year %in% seq(2012, 2018, by = 2)){
    cces_df <- import(file) %>% clean_12
  }
  # write the augmented dataset back to the same path and log progress
  export(cces_df, file)
  loginfo("'fips_code' added to %s", file)
}
# main
# NOTE: `main <- { ... }` is not a function definition — the braced block
# executes immediately and its last value (zip()'s status) is stored in
# `main`.
main <- {
  # All processed CCES files, one per even election year 2006-2018.
  rds_list <- list.files(args$cces,
                         full.names = TRUE,
                         pattern = "cces[0-9]{4}.rds$")
  # Name each path with its year so imap() passes (path, year) pairs.
  rds_list %>% set_names(seq(2006, 2018, by = 2)) %>%
    imap(~ add_fips_code(.x, .y))
  # save output of this script to zip file
  zip(zipfile = file.path(args$cces, "script02_output"),
      files = rds_list)
}
|
2ad3d111e4a2278d4562034584e137e3ad4d10da
|
8c20cb1afd621c732382ffe50a53b1a978010a42
|
/R/simulateAccumulationRate.R
|
92534dd14ea7cd7c070e286d822e6a6c33e7ce12
|
[] |
no_license
|
BlasBenito/virtualPollen
|
01daa3bec05c5caeefa2f52109df8a7df115c0d0
|
b33c7929ce802f3764fdcee39a911243984698ee
|
refs/heads/master
| 2022-02-28T17:42:46.520588
| 2022-02-11T17:46:27
| 2022-02-11T17:46:27
| 177,762,046
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,913
|
r
|
simulateAccumulationRate.R
|
#' Simulates a virtual sediment accumulation rate.
#'
#' @description Generates a virtual sediment accumulation rate to be applied to the results of \code{\link{simulatePopulation}}.
#'
#'
#' @usage simulateAccumulationRate(
#' seed=50,
#' time=1:1000,
#' output.min=10,
#' output.max=40,
#' direction=1,
#' plot=TRUE
#' )
#'
#' @param seed integer, seed to be used by \code{\link{set.seed}} to configure the state of the pseudo-random number generator. It defines the shape of the curve.
#' @param time vector of time values (ideally the same used to generate the simulations). \strong{Important}: the \code{time} column goes from "left to right", meaning that oldest samples have the lowest values of age/time, and viceversa.
#' @param output.min numeric, in years per centimetre, minimum sediment accumulation rate (10 by default).
#' @param output.max numeric, in years per centimetre, maximum sediment accumulation rate (40 bu default).
#' @param direction integer, values 1 or -1, to invert the resulting accumulation rate.
#' @param plot boolean, plots output accumulation rate if \code{TRUE}.
#'
#' @details The accumulation rate curve is generated through a random walk smoothed by a GAM model. The value of the \code{seed} argument changes the shape of the curve, but the user has no more control than trying different values to achieve a curve closer to the desired one. If \code{plot} is set to \code{TRUE}, the accumulation rate curve is printed on screen, but not exported to pdf.
#'
#' @author Blas M. Benito <blasbenito@gmail.com>
#'
#' @return A dataframe like \code{\link{accumulationRate}}, with the following columns:
#' \itemize{
#' \item \emph{time}: numeric, time or age of the given case.
#' \item \emph{accumulation.rate}: numeric, in years per centimetre, simulated accumulation rate.
#' \item \emph{grouping}: integer, grouping variable to aggregate together (with \code{\link{aggregateSimulation}}) samples deposited in the same centimetre according \emph{accumulation.rate}.
#' }
#'
#' @seealso \code{\link{simulatePopulation}}, \code{\link{aggregateSimulation}}
#'
#' @examples
#'
#'acc.rate <- simulateAccumulationRate(
#' seed = 50,
#' time = 1:1000,
#' output.min = 10,
#' output.max = 40,
#' direction = 1,
#' plot = TRUE
#' )
#'
#'str(acc.rate)
#'
#' @export
simulateAccumulationRate <- function(seed = 50,
                                time = 1:1000,
                                output.min = 10,
                                output.max = 40,
                                direction = 1,
                                plot = TRUE
){
  #setting random seed for repeatibility
  set.seed(seed)
  #generating a random walk of +/-0.1 steps, one per time unit
  accumulation.rate <- cumsum(sample(c(-0.1, 0, 0.1), max(time), TRUE))
  if(direction == -1){
    accumulation.rate = rev(accumulation.rate)
  }
  #fitting a gam to the data and predicting a smoothed accumulation rate curve
  temp.data <- data.frame(accumulation.rate, time)
  temp.gam <- gam(accumulation.rate ~ s(time, k = 10), data = temp.data)
  accumulation.rate <- predict(temp.gam, type = "response")
  #scaling it between given bounds (rounded to integers); rescaleVector is
  #a package-internal helper
  accumulation.rate <- rescaleVector(x = as.vector(accumulation.rate), new.min = output.min, new.max = output.max, integer = TRUE)
  accumulation.rate <- as.vector(accumulation.rate)
  #plotting data (printed to the active device, never exported)
  temp.df <- data.frame(time, accumulation.rate)
  if(plot == TRUE){
    temp.plot <- ggplot(data = temp.df, aes(x = time, y = accumulation.rate)) +
      geom_line(color = viridis(10)[3], size = 0.5) +
      geom_ribbon(aes(ymin = 0, ymax = accumulation.rate), fill = viridis(10)[1], alpha = 0.3) +
      xlab("Time") +
      ylab("Acc. rate") +
      scale_y_continuous(breaks = seq(0, output.max, by = 10)) +
      scale_x_continuous(breaks = seq(0, max(time), by = max(time)/5)) +
      cowplot::theme_cowplot() +
      theme(legend.position = "none",
            panel.background = element_blank())
    print(temp.plot)
  }
  #generating a grouping variable (consecutive numbers with same value are put in separated groups)
  #applying rle to identify runs of consecutive equal rates
  accumulation.rate.rle <- rle(accumulation.rate)
  accumulation.rate.rle <- data.frame(value = accumulation.rate.rle$values, length = accumulation.rate.rle$lengths)
  #using rle as guide to build the groups: each group spans `value` samples
  #(i.e. one centimetre of sediment deposited at `value` years/cm)
  accumulation.rate.groups <- vector()
  start.group <- 0
  # NOTE(review): `value` and `length` shadow base::length inside the loop;
  # also, when round(length/value, 0) is 0, 1:0 yields c(1, 0) and can
  # produce out-of-order group ids — seq_len() would be safer, but changing
  # it would alter the simulated grouping for existing seeds.
  for(i in 1:nrow(accumulation.rate.rle)){
    value <- accumulation.rate.rle[i, "value"]
    length <- accumulation.rate.rle[i, "length"]
    times <- start.group + (1:round(length/value, 0))
    accumulation.rate.groups <- c(accumulation.rate.groups, rep(times, each = value))
    start.group <- max(times)
  }
  # trim the grouping vector to the simulation length and assemble output
  accumulation.rate.groups <- accumulation.rate.groups[1:max(time)]
  output <- data.frame(time = time, accumulation.rate = accumulation.rate, grouping = accumulation.rate.groups)
  return(output)
} #end of function
|
5f30545c060b550d81fbbebacae467759dd34b28
|
3243f82998ccb54bf721435e5957c35632e1091b
|
/cachematrix.R
|
212ad0e8fe1f231a88b351a3c69405a22c002604
|
[] |
no_license
|
Daisybeth/datasciencecoursera
|
f3b1e61b61ba08d4da63f5d3e823e5bd7aff5718
|
14ca871687ed93d1387f9dc16cb321c48b307155
|
refs/heads/master
| 2020-12-02T08:54:10.785979
| 2020-01-09T16:17:50
| 2020-01-09T16:17:50
| 230,952,438
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,479
|
r
|
cachematrix.R
|
## These functions compute the inverse of a given matrix and cache the
## result so it can be retrieved later without recomputation.
# Build a matrix wrapper whose closures share the matrix `x` and a cached
# inverse; returns the four accessor functions in a named list.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  # Replace the stored matrix and drop the now-stale cached inverse.
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a freshly computed inverse in the cache.
  setsolve <- function(value) inv <<- value
  # Return the cached inverse (NULL on a cache miss).
  getsolve <- function() inv
  # The list element names form the object's public interface.
  list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix():
## on a cache hit the stored inverse is returned (with a message); on a
## miss it is computed with solve(), cached, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix, remember it, hand it back.
    inv <- solve(x$get(), ...)
    x$setsolve(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
c7c4844aa74d2552f7907cb5ee48cea36072cf8c
|
c3c9f11298fb747f840e2e4ec0397992db25214d
|
/R/lspartition_illustration.R
|
c05b93dae9e56a05bd826c368ef9025d1b7fdead
|
[] |
no_license
|
lnsongxf/lspartition
|
b11ef2779cabc28dde6a73dfa64c3ddc987b13d2
|
fd62c3861e41c0c6e6a55ef9042ef2f3cf8e29f6
|
refs/heads/master
| 2023-03-09T03:14:41.560698
| 2021-02-28T13:59:40
| 2021-02-28T13:59:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,883
|
r
|
lspartition_illustration.R
|
################################################################################
# lspartition: illustration file
# Authors: M. D. Cattaneo, M. Farrell and Y. Feng
# Last update: 07-AUG-2019
################################################################################
# NOTE(review): rm(list=ls()) wipes the user's workspace; also note the
# script uses T for TRUE and names `data`/`diff` that shadow base
# functions — acceptable in a replication script, but fragile.
rm(list=ls(all=TRUE))
library(TeachingDemos); library(lspartition); library(ggplot2)
set.seed(1234)
#######################################################
## SET =TRUE TO GENERATE OUTPUTS
do.output = TRUE
#######################################################
## Install NPPACKAGE Package
if(do.output) txtStart("output/lspartition_1.txt", results = FALSE)
install.packages("lspartition", dependencies = TRUE)
library(lspartition)
if(do.output) txtStop()
## read data
if(do.output) txtStart("output/lspartition_2.txt")
data <- read.csv("bikesharing.csv", header = TRUE)
summary(data)
if(do.output) txtStop()
## outcome, covariate, grouping variable
if(do.output) txtStart("output/lspartition_3.txt")
y <- data$count
x <- data$atemp
g <- data$workingday
if(do.output) txtStop()
#### kappa (number of knots) selection
if(do.output) txtStart("output/lspartition_4.txt")
summary(lspkselect(y, x, kselect="imse-rot", subset=(g==1)))
if(do.output) txtStop()
if(do.output) txtStart("output/lspartition_5.txt")
summary(lspkselect(y, x, kselect="imse-dpi", subset=(g==1)))
if(do.output) txtStop()
# NOTE(review): `result = FALSE` relies on partial matching of txtStart's
# `results` argument — spelling it out would be clearer.
if(do.output) txtStart("output/lspartition_6.txt", result = FALSE)
summary(lspkselect(y, x, kselect="imse-dpi", ktype="qua", subset=(g==1)))
if(do.output) txtStop()
## pointwise inference
if(do.output) txtStart("output/lspartition_7.txt")
est_workday_bc1 <- lsprobust(y, x, neval=20, bc= "bc1", nknot=8, subset = (g==1))
est_workday_bc3 <- lsprobust(y, x, neval=20, bc= "bc3", nknot=8, bnknot=10, subset = (g==1))
summary(est_workday_bc1)
if(do.output) txtStop()
if(do.output) txtStart("output/lspartition_8.txt")
lsprobust.plot(est_workday_bc1, xlabel="Temperature", ylabel="Number of Rentals", legendGroups = "Working Days") + theme(text=element_text(size=17), legend.position=c(.15,.9))
ggsave("output/pointwise1.pdf", width=6.8, height=5.5)
lsprobust.plot(est_workday_bc3, xlabel="Temperature", ylabel="Number of Rentals") + theme(text=element_text(size=17), legend.position="none")
ggsave("output/pointwise2.pdf", width=6.8, height=5.5)
if(do.output) txtStop()
## uniform inference: numerator matrix
if(do.output) txtStart("output/lspartition_9.txt")
est_workday_bc1 <- lsprobust(y, x, bc = "bc1", nknot = 4, uni.method = "pl", uni.ngrid = 100, uni.out = T, subset = (g==1))
round(est_workday_bc1$uni.output$t.num.pl[1:5,],3)
if(do.output) txtStop()
## uniform inference: plug-in method
if(do.output) txtStart("output/lspartition_10.txt")
est_workday_bc1 <- lsprobust(y, x, neval=20, bc= "bc1", uni.method="pl", nknot=8, subset = (g==1), band = T)
est_workday_bc1$sup.cval
if(do.output) txtStop()
if(do.output) txtStart("output/lspartition_11.txt")
lsprobust.plot(est_workday_bc1, CS="all", xlabel="Temperature", ylabel="Number of Rentals", legendGroups = "Working Days") + theme(text=element_text(size=17), legend.position=c(.15,.9))
ggsave("output/uniform1.pdf", width=6.8, height=5.5)
if(do.output) txtStop()
## uniform inference: wild-bootstrap method
if(do.output) txtStart("output/lspartition_12.txt")
est_workday_bc3 <- lsprobust(y, x, neval=20, bc= "bc3", nknot=8, bnknot=10, uni.method="wb", subset = (g==1), band = T)
est_workday_bc3$sup.cval
lsprobust.plot(est_workday_bc3, CS="all", xlabel="Temperature", ylabel="Number of Rentals", legendGroups = "Working Days") + theme(text=element_text(size=17), legend.position=c(.15,.9))
ggsave("output/uniform2.pdf", width=6.8, height=5.5)
if(do.output) txtStop()
#### Two groups: pointwise estimates for working vs non-working days
if(do.output) txtStart("output/lspartition_13.txt")
est_workday <- lsprobust(y, x, neval=20, bc= "bc3", nknot=8, subset = (g==1))
est_nworkday <- lsprobust(y, x, neval=20, bc= "bc3", nknot=8, subset = (g==0))
lsprobust.plot(est_workday, est_nworkday, legendGroups=c("Working Days", "Nonworking Days"), xlabel="Temperature", ylabel="Number of Rentals", lty=c(1,2)) + theme(text=element_text(size=17), legend.position=c(.2,0.85))
ggsave("output/diff1.pdf", width=6.8, height=5.5)
if(do.output) txtStop()
## Two groups: difference via the linear combination R = (-1, 1)
if(do.output) txtStart("output/lspartition_14.txt")
diff <- lsplincom(y, x, data$workingday, R=c(-1,1), band=T, cb.method="pl")
summary(diff)
if(do.output) txtStop()
if(do.output) txtStart("output/lspartition_15.txt")
lsprobust.plot(diff, CS="all", xlabel="Temperature", ylabel="Number of Rentals", legendGroups="Difference between Working and Other Days") + theme(text=element_text(size=17), legend.position=c(.36,.2))
ggsave("output/diff2.pdf", width=6.8, height=5.5)
if(do.output) txtStop()
## Two groups: diff, smoother fit (cubic, m = 3)
if(do.output) txtStart("output/lspartition_16.txt")
diff <- lsplincom(y, x, data$workingday, R=c(-1,1), band=T, cb.method="pl", m=3)
lsprobust.plot(diff, CS="all", xlabel="Temperature", ylabel="Number of Rentals") + theme(text=element_text(size=17), legend.position="none")
ggsave("output/diff3.pdf", width=6.8, height=5.5)
if(do.output) txtStop()
###################################################################
# The following section shows how to manually depict the estimated
# curve, confidence region, and true function in the same plot.
# It is NOT reported in the paper.
###################################################################
# Linear spline fit using lsprobust()
est_workday_bc1 <- lsprobust(y, x, neval=20, bc= "bc1", uni.method="pl", nknot=8, subset = (g==1), band = T)
xeval <- est_workday_bc1$Estimate[,"X1"] # evaluation points
yhat <- est_workday_bc1$Estimate[,"tau.cl"] # fitted value, uncorrected
yhat.bc <- est_workday_bc1$Estimate[,"tau.bc"] # fitted value, bias corrected
se <- est_workday_bc1$Estimate[,"se.rb"] # standard errors, bias corrected
cval <- est_workday_bc1$sup.cval # critical value for confidence band
# A global quartic polynomial fit, treated as the true function
polyfit <- lm(y~x+I(x^2)+I(x^3)+I(x^4), subset=(g==1))$coefficients
y.true <- polyfit[1]+xeval*polyfit[2]+xeval^2*polyfit[3]+xeval^3*polyfit[4]+xeval^4*polyfit[5]
##############################
# Plotting using R base graphs
# True function
plot(xeval, y.true, type="l", col="purple")
# fitted values
lines(xeval, yhat, lty=3)
# confidence intervals
segments(xeval, yhat.bc-1.96*se, xeval, yhat.bc+1.96*se, col="darkgrey")
# confidence band
lines(xeval, yhat.bc-cval*se, lty=2, col="navy")
lines(xeval, yhat.bc+cval*se, lty=2, col="navy")
#############################
# Alternatively, adding the true function after lsprobust.plot()
fig <- lsprobust.plot(est_workday_bc1, CS="all", xlabel="Temperature", ylabel="Number of Rentals")+theme(text=element_text(size=17), legend.position="none")
fig <- fig +geom_line(data=data.frame(x=xeval, y=y.true), aes(x=xeval, y=y.true), col="purple", lty=2)
|
ef37041ec8367d2d36b33328e2d7de0a30efaa62
|
5d8481c35b70c7a898a3a1a3a4fc0841cc0301c5
|
/BacklogMonitoring.R
|
b3c8a5ec2a290917ccf0e11ddbd209e118761d91
|
[] |
no_license
|
robert-krasinski/VelocityReporting
|
638e0c6a3cc7da9d38595bcc86ef7d1a35628194
|
366e6ee684de881c46b9ee99cb5b1f38a9309b3f
|
refs/heads/master
| 2020-06-15T09:52:20.366061
| 2017-03-24T14:09:34
| 2017-03-24T14:09:34
| 75,303,699
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,092
|
r
|
BacklogMonitoring.R
|
# first try Windows CP1252, although that's almost surely not supported on Mac:
Sys.setlocale("LC_ALL", "UTF-8") # Make sure not to omit the `"LC_ALL",` first argument, it will fail.
#Sys.setlocale("LC_ALL", "UTF-8") # the name might need to be 'CP1252'
# next try IS08859-1(/'latin1'), this works for me:
#Sys.setlocale("LC_ALL", "pt_PT.ISO8859-1")
# Try "pt_PT.UTF-8" too...
# in your program, make sure the Sys.setlocale worked, sprinkle this assertion in your code before attempting to read.csv:
#stopifnot(Sys.getlocale('LC_CTYPE') == "pt_PT.ISO8859-1")
library(xlsx)
library(treemap)
library(data.tree)
# NOTE(review): rm(list = ls()) wipes the caller's workspace — risky in
# anything but a standalone script.
rm(list=ls(all=TRUE))
# JIRA project keys whose backlogs are charted at the bottom of the script.
projects <- c('VEL', 'VBS', 'VIN')
# Return the path of the lexicographically greatest file under ./data/
# whose name matches `filePattern` (export file names embed timestamps,
# so the largest name is the most recent one). Yields "./data/NA" when
# nothing matches, as before.
getLatestFile <- function(filePattern)
{
  matching <- sort(list.files(path = "./data/", pattern = filePattern),
                   decreasing = TRUE)
  paste0("./data/", matching[1])
}
# Load the most recent CSV exports from ./data/.
# NOTE(review): `head=TRUE` relies on partial matching of read.csv's
# `header` argument throughout this section.
latestFile <- getLatestFile("linkedVxt.*.csv")
linkedVXT <- read.csv(
  file= latestFile,
  head=TRUE,sep=",", dec=".", stringsAsFactors=FALSE)
# keep only links whose target is a VXT issue
linkedVXT <- linkedVXT[startsWith(linkedVXT$linkedKey, 'VXT'),]
latestFile <- getLatestFile("VelocityIssues.*.csv")
issues <- read.csv(
  file= latestFile,
  head=TRUE,sep=",", dec=".", stringsAsFactors=FALSE)
# drop tracking/metrics columns not needed for backlog reporting
issues <- subset(issues, select=-c(id, sprint, updated, priority,
                                   severity, component, created,
                                   reporter, devOwner,
                                   workTimeToComplete, timeToComplete,
                                   movedToComplete, aggregatetimeoriginalestimate,
                                   timeoriginalestimate, transitions, remainingEstimate))
latestFile <- getLatestFile("VXTAndRelated.*.csv")
vxtAndRelated <- read.csv(
  file= latestFile,
  head=TRUE,sep=",", dec=".", stringsAsFactors=FALSE)
vxtAndRelated <- subset(vxtAndRelated, select=-c(id, sprint, updated, priority,
                                                 severity, component, created,
                                                 reporter, devOwner,
                                                 workTimeToComplete, timeToComplete,
                                                 movedToComplete, aggregatetimeoriginalestimate,
                                                 timeoriginalestimate, transitions, remainingEstimate,
                                                 codeReviewToDev, testsToDev))
# slashes in summaries would break the pathString-based tree later on
vxtAndRelated$summary <- gsub("/", "", vxtAndRelated$summary)
#remove vxts with not applicable versions
vxtAndRelated <- vxtAndRelated[!(vxtAndRelated$fixVersion %in% c('0.2 ','0.3 ', 'POC ',
                                                                 'Post ITH Go-Live 1.4 ', 'Post ITH Go-Live 1.3 ',
                                                                 'Post ITH Go-Live 1.1 ', 'Post ITH Go-Live 1.2 ',
                                                                 'Post ITH Go-Live ', '2.0 ', '3.0 ', 'Activiti ',
                                                                 '') & vxtAndRelated$project == 'VXT'),]
#do not display rejected issues
vxtAndRelated <- vxtAndRelated[vxtAndRelated$status != 'Rejected',]
#vxtAndRelated <- vxtAndRelated[vxtAndRelated$key == 'VWF-1699',]
#View(vxtAndRelated)
#stop()
latestFile <- getLatestFile("RelatedIssues.*.csv")
issueLinks <- read.csv(
  file= latestFile,
  head=TRUE,sep=",", dec=".", stringsAsFactors=FALSE)
#View(issueLinks)
#stop()
# open VXT issues, ordered by fix version, used as tree roots below
vxtIssues <- vxtAndRelated[vxtAndRelated$project == 'VXT',]
vxtIssues <- vxtIssues[!vxtIssues$status %in% c('Done', 'Rejected'),]
vxtIssues <- vxtIssues[order(vxtIssues$fixVersion),]
#View(vxtIssues)
#stop()
#epics <- vxtEpic[vxtEpic$project != 'VXT',]
#relatedIssues <- vxtAndRelated
#epics <- epics[epics$key == 'VBS-1545',]
#relatedIssues <- relatedIssues[!epics$status %in% c('Done', 'Rejected'),]
#relatedIssues <- relatedIssues[relatedIssues$key == 'II-778',]
#View(relatedIssues)
#stop()
source('./sprintsLib.R')
sprints <- loadSprints()
# NOTE(review): the three lines below duplicate getLatestFile() inline.
sprintIssueFiles <- list.files(path = "./data/", pattern = "SprintIssue.*.csv")
sprintIssueFiles <- sort(sprintIssueFiles, decreasing = TRUE)
latestSprintIssueFile <- paste("./data/", sprintIssueFiles[1], sep = "")
print(latestSprintIssueFile)
sprintIssues <- read.csv(
  file= latestSprintIssueFile,
  head=TRUE,sep=",", dec=".", stringsAsFactors=FALSE)
# Build a data.tree of the current-version VXT backlog: each VXT issue,
# its directly linked issues ("child1"), and their linked issues
# ("child2"), keyed into a slash-separated pathString.
# Reads vxtIssues, issueLinks and vxtAndRelated from the enclosing scope.
createBacklogNodes3 <- function()
{
  # Drop link rows pointing back into VXT or involving out-of-scope
  # projects (VMCM, ITH) to avoid cycles/noise in the tree.
  issueLinksFiltered <- issueLinks[!grepl('VXT',issueLinks$linkedKey),]
  issueLinksFiltered <- issueLinksFiltered[!grepl('VMCM',issueLinksFiltered$key),]
  issueLinksFiltered <- issueLinksFiltered[!grepl('VMCM',issueLinksFiltered$linkedKey),]
  issueLinksFiltered <- issueLinksFiltered[!grepl('ITH',issueLinksFiltered$linkedKey),]
  issueLinksFiltered <- issueLinksFiltered[!grepl('ITH',issueLinksFiltered$key),]
  #View(issueLinksFiltered)
  #stop()
  # Level 1: attach each VXT issue's direct links...
  backlog <- merge(vxtIssues, issueLinksFiltered, by.x ="key", by.y = "key", all.x = TRUE, suffixes = c('.vxt', '.child1'))
  #View(backlog)
  #stop()
  #vxtAndRelatedTmp <- vxtAndRelated[vxtAndRelated$key == 'VEL-1541',]
  #View(vxtAndRelatedTmp)
  #stop()
  # ...and pull in the linked issues' own attributes.
  backlog <- merge(backlog, vxtAndRelated, by.x = 'linkedKey', by.y = 'key', all.x = TRUE, suffixes = c('.vxt', '.child1'))
  backlog$key.child1 <- backlog$linkedKey
  # Level 2: repeat one level deeper from each child1 issue.
  backlog <- merge(backlog, issueLinksFiltered, by.x ="key.child1", by.y = "key", all.x = TRUE, suffixes = c('.child1', '.child2'))
  #backlogTmp <- backlog[backlog$key == 'VXT-190',]
  #issueLinksFilteredTmp <- issueLinksFiltered[issueLinksFiltered$key == 'VDO-198',]
  #View(issueLinksFilteredTmp)
  #stop()
  backlog <- merge(backlog, vxtAndRelated, by.x = 'linkedKey.child2', by.y = 'key', all.x = TRUE, suffixes = c('.child1', '.child2'))
  # The last merge leaves child2 columns unsuffixed; normalise the names.
  colnames(backlog)[which(names(backlog) == "minorVersion")] <- "minorVersion.child2"
  colnames(backlog)[which(names(backlog) == "linkedKey.child2")] <- "key.child2"
  colnames(backlog)[which(names(backlog) == "fixVersion")] <- "fixVersion.child2"
  colnames(backlog)[which(names(backlog) == "type")] <- "type.child2"
  colnames(backlog)[which(names(backlog) == "status")] <- "status.child2"
  colnames(backlog)[which(names(backlog) == "project")] <- "project.child2"
  colnames(backlog)[which(names(backlog) == "summary")] <- "summary.child2"
  #backlog$minorVersion.child2 <- backlog$minorVersion
  #backlog$key.child2 <- backlog$
  #backlog$key.child1
  #backlogTmp <- subset(backlog, select=c(linkedkey.child1))
  #View(backlog)
  #stop()
  #View(backlog)
  #stop()
  #according to this so question. apostrophes can break plot
  #http://stackoverflow.com/questions/40401045/large-data-tree-causes-plot-to-error
  backlog$summaryCleaned.vxt <- gsub("['\"/%-,_]", " ", backlog$summary.vxt)
  backlog$summaryCleaned.child1 <- gsub("['\"/%-,_]", " ", backlog$summary.child1)
  #backlog$summaryCleaned.vxt <- gsub("/", " ", backlog$summaryCleaned.vxt)
  #backlog$summaryCleaned.child1 <- gsub("/", " ", backlog$summaryCleaned.child1)
  backlog$summaryCleaned.child2 <- gsub("['\"/%-,_]", " ", backlog$summary.child2)
  #backlog$summaryCleaned.child2 <- gsub("/", " ", backlog$summaryCleaned.child2)
  #backlog$summaryCleaned.child1 <- gsub("'", " ", backlog$summary.child1)
  #View(backlog)
  backlog <- backlog[order(backlog$fixVersion.vxt),]
  # data.tree builds the hierarchy from this slash-delimited path:
  # root / version / VXT issue / child1 version / child1 / child2 version / child2
  backlog$pathString <- paste("VXT",
                              backlog$fixVersion.vxt,
                              paste(backlog$key, backlog$summaryCleaned.vxt, backlog$status.vxt, sep = ", "),
                              backlog$fixVersion.child1,
                              paste(backlog$key.child1, backlog$type.child1, backlog$summaryCleaned.child1, backlog$status.child1, sep = ", "),
                              backlog$fixVersion.child2,
                              paste(backlog$key.child2, backlog$type.child2, backlog$summaryCleaned.child2, backlog$status.child2, sep = ", "),
                              sep = "/")
  #backlog$pathString <- gsub("['\"/%-,_]", " ", backlog$pathString)
  backlog$pathString <- gsub("NA", "", backlog$pathString)
  #backlog <- head(backlog,1)
  # Persist the flat tree table for inspection.
  write.xlsx(backlog, sheetName = "data", append = FALSE,
             "./data/backlogTree.xlsx")
  #options(error=stop)
  #View(backlog)
  #stop()
  # Restrict the plotted tree to the current release.
  backlog <- backlog[backlog$fixVersion.vxt == '1.1 CURRENT ',]
  #backlog <- backlog[backlog$fixVersion.vxt %in% c('1.1a ', '1.1b ', '1.1c ', '1.1d ', '1.1e ', '1.1f '),]
  #View(backlog)
  #stop()
  #stop()
  # Last expression: the data.tree node is the function's return value.
  backlogTree <- as.Node(backlog)
}
# Shape for a backlog-tree node: epics render as ellipses, every other
# label as underlined text.
GetNodeShape <- function(node){
  shape <- "underline"
  if (grepl("Epic", node$name)) shape <- "ellipse"
  shape
}
# Colour for a backlog-tree node based on the status text embedded in its
# label: red = not started, green = finished, orange = in flight,
# black = unrecognised. Statuses are matched in the same priority order
# as before (first match wins).
GetNodeColor <- function(node){
  palette <- c(
    "Awaiting Prioritisation" = "red",
    "Idea"                    = "red",
    "Backlog"                 = "red",
    "Tech. Scoping"           = "red",
    "Refinement"              = "red",
    "Completed"               = "green",
    "Done"                    = "green",
    "Awaiting Review"         = "green",
    "In Progress"             = "orange",
    "In Development"          = "orange",
    "In Testing"              = "orange",
    "In Code review"          = "orange",
    "Ready to Test"           = "orange"
  )
  for (status in names(palette)) {
    if (grepl(status, node$name)) return(palette[[status]])
  }
  "black"
}
# Build the backlog tree (see createBacklogNodes3) and render it
# left-to-right, with node shape/colour driven by issue type and status.
plotVersionTree <- function(){
  #9
  backlogTree <- createBacklogNodes3()
  # Dump the tree as text before rendering (limit raised from default 100).
  print(backlogTree, limit = 1000)
  SetGraphStyle(backlogTree, rankdir = "LR")
  SetNodeStyle(backlogTree, fontname = 'helvetica', shape = GetNodeShape, color = GetNodeColor)
  plot(backlogTree)
}
#plotVersionTree()
#warnings()
#stop()
# Working set: all issues still open (drop finished/rejected/frozen states);
# Sub-tasks are excluded because they are tracked under their parents.
backlogIssues <- issues[!issues$status %in% c('Completed', 'Awaiting Review', 'Rejected', 'Idea', 'Frozen'),]
backlogIssues <- backlogIssues[backlogIssues$type != 'Sub-task',]
#View(backlogIssues)
#stop()
#backlogIssues <- backlogIssues[backlogIssues$fixVersion == '1.0 - ITH Live',]
# Left-join the VXT link table so every backlog issue carries its linked
# VXT key (linkedKey is NA when no link exists).
backlogIssues <- merge(backlogIssues, linkedVXT, by.x = 'key', by.y = 'key',
all.x = TRUE, suffixes = c('.backlog', '.vxt'))
# Classify each issue: Bugs get their own bucket, everything else is split
# by whether a linked VXT issue was found.
backlogIssues$islinkedVXT <- ifelse(backlogIssues$type == 'Bug', 'Bug',
ifelse(!is.na(backlogIssues$linkedKey), 'linked VXT', 'not linked VXT'))
# Helper column of ones so aggregate(..., FUN = sum) produces group counts.
backlogIssues$count <- 1
backlogIssuesAggr <- aggregate( x=cbind(backlogIssues$count),
by=list(backlogIssues$project,
backlogIssues$islinkedVXT),
FUN = sum)
colnames(backlogIssuesAggr )[1] <- "project"
colnames(backlogIssuesAggr )[2] <- "isLinked"
#colnames(backlogIssuesAggr )[3] <- "type"
colnames(backlogIssuesAggr )[3] <- "count"
# Fold the count into the label so each pie slice is self-describing.
backlogIssuesAggr$isLinked <- paste(backlogIssuesAggr$isLinked, ":" ,
backlogIssuesAggr$count)
#View(backlogIssuesAggr)
#stop()
# One pie chart per project showing the Bug / linked / not-linked split.
for (currentProject in projects) {
backlogIssuesAggrPerProject <- backlogIssuesAggr[backlogIssuesAggr$project == currentProject,]
plot <- pie(backlogIssuesAggrPerProject$count,
labels = backlogIssuesAggrPerProject$isLinked,
col = c("red", "green", "orange"),
main=paste("Issues in backlog for project:", currentProject, "in all versions."))
print(plot)
}
#stop()
# colnames(backlogIssuesAggr )[1] <- "project"
# colnames(backlogIssuesAggr )[2] <- "linkedVXT"
# colnames(backlogIssuesAggr )[3] <- "issuesCount"
# backlogIssuesAggr$notLinked <- backlogIssuesAggr$issuesCount - backlogIssuesAggr$linkedVXT
#
#
#stop()
# Collapse the many workflow states into four coarse backlog buckets:
# Idea, Refinement (incl. tech refinement / estimation), Ready, In Progress.
backlogIssues$backlogStatus <- ifelse(backlogIssues$status == 'Idea', "Idea",
ifelse(backlogIssues$status %in% c('Refinement', 'Tech Refinement', 'Estimation'), 'Refinement',
ifelse(backlogIssues$status == 'Backlog', 'Ready', 'In Progress')))
#View(backlogIssues)
#stop()
# Count issues per project and coarse backlog status.
backlogIssuesAggrStatus <- aggregate( x=cbind(backlogIssues$count),
by=list(backlogIssues$project,
backlogIssues$backlogStatus),
FUN = sum)
colnames(backlogIssuesAggrStatus )[1] <- "project"
colnames(backlogIssuesAggrStatus )[2] <- "backlogStatus"
#colnames(backlogIssuesAggr )[3] <- "type"
colnames(backlogIssuesAggrStatus )[3] <- "count"
# Build "status : n" labels for the pie slices.
backlogIssuesAggrStatus$backlogStatusLabel <- paste(backlogIssuesAggrStatus$backlogStatus, ":" ,
backlogIssuesAggrStatus$count)
#View(backlogIssuesAggrStatus)
#stop()
# One pie chart per project showing the coarse status split.
for (currentProject in projects) {
backlogIssuesAggrStatusPerProject <- backlogIssuesAggrStatus[backlogIssuesAggrStatus$project == currentProject,]
plot <- pie(backlogIssuesAggrStatusPerProject$count,
labels = backlogIssuesAggrStatusPerProject$backlogStatusLabel,
#col = c("orange", "blue",),
main=paste("Issues in backlog for project:", currentProject, "in all versions"))
print(plot)
}
#----------------------------------------------------------------------------------------------------------
#visualise future sprints completeness
# Attach backlog details to the sprint membership table, drop rejected
# issues, then join the sprint metadata itself (full outer join keeps
# sprints that currently have no issues).
sprintsWithIssues <- merge(sprintIssues, backlogIssues, by="key", all.x = TRUE)
sprintsWithIssues <- sprintsWithIssues[sprintsWithIssues$status != 'Rejected',]
#temp <- sprintsWithIssues[sprintsWithIssues$sprintId == '330',]
#View(temp)
#stop()
sprintsWithIssues <- merge(sprintsWithIssues, sprints, by.x ="sprintId", by.y = "id", all.x = TRUE, all.y = TRUE)
# Keep only sprints that have not started yet, and only rows that actually
# carry an issue (the outer join above can produce issue-less sprint rows).
futureSprintsIssues <- sprintsWithIssues[sprintsWithIssues$state == 'future',]
futureSprintsIssues <- futureSprintsIssues[!is.na(futureSprintsIssues$key),]
#View(futureSprintsIssues)
#stop()
# Issue counts per project, sprint and coarse backlog status.
backlogIssuesAgrPerSprint <- aggregate( x=cbind(futureSprintsIssues$count),
by=list(futureSprintsIssues$project, futureSprintsIssues$name,
futureSprintsIssues$backlogStatus),
FUN = sum)
colnames(backlogIssuesAgrPerSprint )[1] <- "project"
colnames(backlogIssuesAgrPerSprint )[2] <- "sprintName"
#colnames(backlogIssuesAggr )[3] <- "type"
colnames(backlogIssuesAgrPerSprint )[3] <- "status"
colnames(backlogIssuesAgrPerSprint )[4] <- "count"
# Stacked bar chart per project: issues per future sprint, split by status.
for(currentProject in projects)
{
backlogIssuesAgrPerSprintCurrProject <- backlogIssuesAgrPerSprint[backlogIssuesAgrPerSprint$project == currentProject,]
plot <- ggplot(data = backlogIssuesAgrPerSprintCurrProject,
aes(x = backlogIssuesAgrPerSprintCurrProject$sprintName,
y = backlogIssuesAgrPerSprintCurrProject$count,
fill = backlogIssuesAgrPerSprintCurrProject$status,
label = backlogIssuesAgrPerSprintCurrProject$count)) +
geom_bar(stat="identity") +
ggtitle(paste("Future sprints status for project:", currentProject)) +
#scale_fill_manual( values = c("yellow", "orange") ) +
ylab("Number of issues") +
xlab("Sprint name") +
labs(fill = "Status") +
geom_text(size = 3, position = position_stack(vjust = 0.5)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(plot)
}
#View(backlogIssuesAgrPerSprint)
#stop()
#----------------------------------------------------------------------------------------------------------
# Same future-sprint breakdown as above, but split by VXT-link status
# (Bug / linked VXT / not linked VXT) instead of workflow status.
futureSprintsAggr <- aggregate( x=cbind(futureSprintsIssues$count),
by=list(futureSprintsIssues$project, futureSprintsIssues$name,
futureSprintsIssues$islinkedVXT),
FUN = sum)
colnames(futureSprintsAggr )[1] <- "project"
colnames(futureSprintsAggr )[2] <- "sprintName"
#colnames(backlogIssuesAggr )[3] <- "type"
colnames(futureSprintsAggr )[3] <- "linkStatus"
colnames(futureSprintsAggr )[4] <- "count"
# Stacked bar chart per project: issues per future sprint, split by link status.
for(currentProject in projects)
{
futureSprintsAggrCurrProject <- futureSprintsAggr[futureSprintsAggr$project == currentProject,]
#View(futureSprintsAggrCurrProject)
plot <- ggplot(data = futureSprintsAggrCurrProject,
aes(x = futureSprintsAggrCurrProject$sprintName,
y = futureSprintsAggrCurrProject$count,
fill = futureSprintsAggrCurrProject$linkStatus,
label = futureSprintsAggrCurrProject$count)) +
geom_bar(stat="identity") +
ggtitle(paste("Future sprints status for project:", currentProject)) +
#scale_fill_manual( values = c("yellow", "orange") ) +
ylab("Number of issues") +
xlab("Sprint name") +
labs(fill = "Status") +
geom_text(size = 3, position = position_stack(vjust = 0.5)) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
print(plot)
}
#View(futureSprintsAggr)
#----------------------------------------------------------------------------------------------------------
# Persist the enriched backlog table for downstream use.
write.xlsx(backlogIssues, sheetName = "issues", append = FALSE,
"./data/backlog.xlsx")
|
463de633e3e3c6a5c4ead172cb32e08b28e791ef
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query09_falsequ_1344/query09_falsequ_1344.R
|
8610efa4744d9c9d4e2815be0c537cda0944d915
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 67
|
r
|
query09_falsequ_1344.R
|
fa0c3c2faf9b4fcdef98677986e24322 query09_falsequ_1344.qdimacs 17 17
|
3beb3d61c31cb14e3ee3c90bf77672ea38529efc
|
a2863ae7988e85fec7f63341d9d80c9139ab0f70
|
/IncredibleHack/server.R
|
0c122dbbb78cf0b14b4b8912e65c1005a047d963
|
[] |
no_license
|
draja54/IncredibleHack
|
91416f309747c1e8fd2594b8242d2bb53b090dd2
|
3f12004bce3950f362204b1faa6a4a1b86963fbb
|
refs/heads/master
| 2021-01-05T01:12:36.425338
| 2020-02-16T04:04:27
| 2020-02-16T04:04:27
| 240,825,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,033
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(DT)
# Define server logic required to draw a histogram
# Server logic: renders the sample data set as an interactive DT table and,
# for the selected row, prints predictors x1..x5, a weighted score built
# from the user-supplied weights input$x1..input$x5, and a loan decision.
shinyServer(function(input, output) {
  # Load the sample data once per session.
  sampleData <- read.csv("~/IncredibleHack/Data/data_set.csv")
  # Interactive table: single-row selection, per-column filters, dark header.
  # BUG FIX: the original call had a stray double comma after `sampleData`,
  # passing an empty (missing) second positional argument to DT::datatable().
  output$mytable <- DT::renderDataTable({
    DT::datatable(sampleData, selection = 'single', filter = "top", options = list(
      initComplete = JS(
        "function(settings, json) {",
        "$(this.api().table().header()).css({'background-color': '#000', 'color': '#fff'});",
        "}"),
      orderClasses = TRUE, searchHighlight = TRUE))
  })
  # Print the selected row's predictors, the weighted sum, and the decision.
  output$result <- renderPrint({
    s <- input$mytable_rows_selected
    if (length(s)) {
      row <- sampleData[s, ]
      # Echo each predictor of the selected row ("x1: <value>\n", ...).
      for (nm in paste0("x", 1:5)) {
        cat(nm, ": ", sep = "")
        cat(row[[nm]])
        cat('\n')
      }
      # Weighted sum of the row's predictors with the user-chosen weights.
      res <- sum(vapply(paste0("x", 1:5),
                        function(nm) input[[nm]] * row[[nm]],
                        numeric(1)))
      cat(res)
      cat('\n')
      cat("Result: ")
      # Grant the loan only when the score clears the chosen threshold.
      if (res > input$threshold)
        cat("Loan!")
      else
        cat("No Loan")
    }
  })
})
|
410f709adcd131491c5d5d2858bff8bd57bb681a
|
7275a7bfd93be8d6fa94d74d76d75e05378881f6
|
/tests/testthat/test-plots.R
|
e34bca0a4ea5ed4fbc39978758645981f988b926
|
[] |
no_license
|
sizespectrum/mizerExperimental
|
2a6b4c1ab39b36d3bf7de95ecd80ed5c3e7936fc
|
204fc144dc36833842eaad931e90f9d66924fab4
|
refs/heads/master
| 2023-05-10T22:10:02.631431
| 2023-05-03T12:11:33
| 2023-05-03T12:11:33
| 243,489,553
| 1
| 5
| null | 2023-03-30T07:49:01
| 2020-02-27T10:13:48
|
R
|
UTF-8
|
R
| false
| false
| 2,655
|
r
|
test-plots.R
|
# Initialisation ----------------
# Shared mizer fixtures for the plotting tests below.
species_params <- NS_species_params_gears
# Make species names numeric because that created problems in the past
species_params$species <- 1:(nrow(species_params))
species_params$pred_kernel_type <- "truncated_lognormal"
params <- newMultispeciesParams(species_params, inter, no_w = 30,
n = 2/3, p = 0.7, lambda = 2.8 - 2/3)
# Two short simulations: one fished (effort = 1) and one unfished baseline.
sim <- project(params, effort = 1, t_max = 3, dt = 1, t_save = 1)
sim0 <- project(params, effort = 0, t_max = 3, dt = 1, t_save = 1)
species <- c(11, 10)
# Mark some species as background
params_bkgrd <- params
params_bkgrd@A[1:3] <- NA
# params object with single species
sp_single <- data.frame(species = 1, w_max = 1000, h = 30)
params_single <- newMultispeciesParams(sp_single, no_w = 30)
# Make some data frame for plotDataFrame
sampleDf <- plotBiomass(sim, return_data = TRUE)
# Need to use vdiffr conditionally
# Wrapper that skips the snapshot comparison when vdiffr is not installed
# (e.g. on CRAN) instead of erroring.
expect_doppelganger <- function(title, fig, ...) {
testthat::skip_if_not_installed("vdiffr")
vdiffr::expect_doppelganger(title, fig, ...)
}
# plots have not changed ----
# Visual regression tests: each plot is compared against a stored snapshot.
test_that("plots have not changed", {
p <- plotDataFrame(sampleDf, params)
expect_doppelganger("Plot Data Frame", p)
# the next line only needed until NS_params is upgraded
params <- setColours(NS_params, c(Fishing = "red"))
p <- plotDeath(params, species = "Haddock")
expect_doppelganger("Plot Death", p)
p <- plotResourcePred(NS_params)
expect_doppelganger("Plot Resource Pred", p)
p <- plotResourceLevel(NS_params)
expect_doppelganger("Plot Resource", p)
p <- plotEnergyBudget(NS_params, species = "Haddock")
expect_doppelganger("Plot Energy Budget", p)
p <- plotYieldVsSize(NS_params, species = "Haddock")
expect_doppelganger("Plot Yield vs Size", p)
})
# plotly functions do not throw error
# Smoke tests only: expect_error(..., NA) asserts that NO error is raised.
test_that("plotly functions do not throw error", {
expect_error(plotlyDeath(params, species = species), NA)
expect_error(plotlyResourcePred(params), NA)
expect_error(plotlyEnergyBudget(params, species = species), NA)
expect_error(plotlyYieldVsSize(params, species = species), NA)
})
# testing the plot outputs
# Shape checks on the data frames returned with return_data = TRUE.
test_that("return_data is identical",{
expect_equal(dim(plotDeath(sim, species = species, return_data = TRUE)), c(784,4))
expect_equal(dim(plotResourcePred(sim, return_data = TRUE)), c(612,3))
expect_equal(dim(plotResourceLevel(sim, return_data = TRUE)), c(51,3))
expect_equal(dim(plotEnergyBudget(sim, species = species, return_data = TRUE)[[1]]), c(224,4))
expect_equal(dim(plotYieldVsSize(sim, species = species, return_data = TRUE)[[1]]), c(43,4))
}
)
|
e4c347bfb47af2a2239f22c24132155d030c4c73
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/meteor/inst/testfiles/E_Penman/libFuzzer_E_Penman/E_Penman_valgrind_files/1612738392-test.R
|
ea7e79740cef1cf23dbae1fd07a938edaaa45a87
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 481
|
r
|
1612738392-test.R
|
# Auto-generated libFuzzer/valgrind reproduction case for meteor:::E_Penman.
# All arguments except Rs are zero-length vectors; Rs mixes extreme doubles,
# NaN and a denormal to probe edge-case handling. Do not edit the literals.
testlist <- list(Rext = numeric(0), Rs = c(-4.99215858773676e+304, -2.97403382465903e+284, -2.63554871998898e-82, NaN, 5.72778080503264e+250, NaN, NaN, 4.6918049798495e-317, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Z = numeric(0), alpha = numeric(0), atmp = numeric(0), relh = numeric(0), temp = numeric(0), u = numeric(0))
# Invoke the internal Penman evapotranspiration routine with the fuzzed args.
result <- do.call(meteor:::E_Penman,testlist)
# Print the structure of whatever comes back (for regression comparison).
str(result)
|
e0d2a8a5f6ce8963e874462936d20fdba3ef230c
|
c1938a2d3461a22b3601798396c27f097655fb62
|
/H2O/examples/GLRM.R
|
e90e9380b1f377ddcee9a75342b176860a93600c
|
[] |
no_license
|
a-ghorbani/notebooks
|
9feead587fc5c8e6c5e9543ce7f892ff3e93152e
|
86a636330a74fbcb82f2c69ec1479695edf45b60
|
refs/heads/master
| 2020-03-26T15:50:16.235013
| 2017-04-22T18:05:27
| 2017-04-22T18:05:27
| 46,682,743
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,592
|
r
|
GLRM.R
|
#=============================================================
# Load required packages
#=============================================================
require(h2o)
require(lattice)
h2o.init()
#port = 54324,
# username = "aghorbani",
# password = Sys.getenv("h2oPass"),
# startH2O = FALSE)
#h2o.removeAll()
#=============================================================
# Initiate problem parameters
#=============================================================
k <- 10 # Low rank
n1 <- 40 # number of numerical columns
n2 <- 30 # number of ordinal columns
n3 <- 30 # number of binary columns
n <- n1+n2+n3 # number of columns
m <- 100 # number of rows
#=============================================================
# Initiate Low rank matrix
#=============================================================
# X is m x k: the "true" low-dimensional representation of each row.
X <- rnorm(m*k)
dim(X) <- c(m,k)
#=============================================================
# Initiate archetype matrix
#=============================================================
# Y is k x n: the archetypes mapping the k latent dims to n columns.
Y <- rnorm(k*n)
dim(Y) <- c(k,n)
#=============================================================
# High dimentional data (actual data)
#=============================================================
# data = X %*% Y has rank at most k by construction, so GLRM with the same
# k should be able to reconstruct it well.
data <- X %*% Y
data <- as.data.frame(data)
c.num <- c(1:n1) # numerical columns indices
c.ord <- c((n1+1):(n1+n2)) # ordinal columns indices
c.bin <- c((n1+n2+1):n) # binary columns indices
#=============================================================
# Convert to Ordinal
# 1,2 ... 7
#=============================================================
# Rescale the ordinal block to [1, 7], round to integers, then make factors.
tmp <- data[,c.ord]
tmp <- round((tmp - min(tmp)) / (max(tmp) - min(tmp)) * 6 + 1)
data[,c.ord] <- tmp
data[,c.ord] <- as.data.frame(lapply(data[,c.ord], as.factor))
#=============================================================
# Convert to Boolean
# 0, 1
#=============================================================
# sign() maps negatives to 0 and positives to 1 after the shift/scale.
# NOTE(review): an exact zero would map to 0.5 — assumed not to occur here.
data[,c.bin] <- ( sign(data[,c.bin]) + 1 ) / 2
#=============================================================
# Make part of data missing
#=============================================================
# Knock out a rectangular block of cells so GLRM has to impute them.
r.na <- c(40:50)
c.na <- c((n1-3):(n-10))
data[r.na,c.na] <- NA
#=============================================================
# Upload data into H2O
#=============================================================
data_df <- as.h2o(data, destination_frame = "data_df")
#=============================================================
# Fit GLRM model
#=============================================================
# Quadratic loss for numeric columns, Categorical for factors, and Hinge
# loss overridden on the binary block. loss_by_col_idx is 0-based, hence
# the "- 1" on the R (1-based) column indices.
glrm.fit <- h2o.glrm(
training_frame = data_df,
validation_frame = data_df,
k = k,
ignore_const_cols = FALSE,
loss = "Quadratic",
multi_loss = "Categorical",
loss_by_col = c(rep("Hinge",n3)),
loss_by_col_idx = c(c.bin)-1,
regularization_x = "Quadratic",
regularization_y = "Quadratic",
gamma_x = 0.01,
gamma_y = 0.01
)
#=============================================================
# h2o.predict will reconstructs data
#=============================================================
glrm.reconst <- h2o.predict(glrm.fit, data_df)
#=============================================================
# Plot original vs Prediction
#=============================================================
# Three heat maps side by side: the original data, the learned low-rank
# representation (the X factor), and the reconstruction X.Y.
p1 <- levelplot(
x = t(data),
xlab = "",
ylab = "",
main = "Original data",
colorkey = list(at=seq(from=-10,to=10,length=11)),
at = seq(from=-10,to=10,length=11),
scales = list(draw = FALSE),
col.regions = rainbow(11))
p2 <- levelplot(
x = t(as.data.frame(h2o.getFrame(glrm.fit@model$representation_name))),
xlab = "",
ylab = "",
main = "Low dim. rep.",
colorkey = list(at=seq(from=-10,to=10,length=11)),
at = seq(from=-10,to=10,length=11),
scales = list(draw = FALSE),
col.regions = rainbow(11))
p3 <- levelplot(
x = t(as.data.frame(glrm.reconst)),
xlab = "",
ylab = "",
main = "Reconst. (from Low-Ranked X.Y)",
colorkey = list(at=seq(from=-10,to=10,length=11)),
at = seq(from=-10,to=10,length=11),
scales = list(draw = FALSE),
col.regions = rainbow(11))
# Lay the three panels out left-to-right in one device.
print(p1, position = c(0, 0, 0.42, 1), more = TRUE)
print(p2, position = c(0.42, 0, 0.58, 1), more = TRUE)
print(p3, position = c(0.58, 0, 1, 1))
|
6b72492d334d9d1dea9cb1fee55125f5a7c5d4d9
|
864735bee686c07a8549d689af9f3fa162e33c6a
|
/R/memo.R
|
0cc106a1c92c68373a123e13886af6def898cea5
|
[] |
no_license
|
emraher/eertemplates
|
7ab61d6a6a6ecc77ee5f9b6a46ce143d289cf7dd
|
5f7eba2d33d9a5b4b09b7526d42be220e2253bd0
|
refs/heads/master
| 2023-09-02T19:29:16.340844
| 2021-11-15T14:10:36
| 2021-11-15T14:10:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,312
|
r
|
memo.R
|
#' Steve's Memo Template
#'
#' A template for memos. For more information, see here:
#' <http://svmiller.com/blog/2019/06/r-markdown-memo-template/>.
#'
#' # About YAML header fields
#'
#' This section documents some of the YAML fields to know
#' for this template.
#'
#'
#' | FIELD | DESCRIPTION |
#' | ------ | ----------- |
#' | `from` | name of the author |
#' | `to` | the intended recipient |
#' | `subject` | the title of the memo |
#' | `date` | the date of the memo |
#' | `memorandum` | logical. If `TRUE`, includes MEMORANDUM on top of the memo |
#' | `graphics` | logical. If `TRUE`, allows for graphic logo on memo |
#' | `width` | adjustable with for logo, if `graphics: TRUE` |
#' | `logoposition` | position of logo, if `graphics: TRUE` |
#' | `logo` | file for logo, if `graphics: TRUE` |
#'
#' @inheritParams rmarkdown::pdf_document
#' @param ... Arguments to [`rmarkdown::pdf_document`].
#' @md
#' @export
memo <- function(...) {
  # Locate the memo LaTeX template shipped inside the stevetemplates package.
  template_path <- system.file("rmarkdown", "templates", "memo",
                               "resources", "template.tex",
                               package = "stevetemplates")
  # Delegate to rmarkdown's PDF output format, forwarding any user options.
  rmarkdown::pdf_document(template = template_path, ...)
}
#' @rdname memo
#' @export
templ_memo <- function() {
  # Resolve the on-disk location of the memo template and echo it.
  # system.file() returns "" when the package is not installed.
  path <- system.file("rmarkdown", "templates", "memo",
                      "resources", "template.tex",
                      package = "stevetemplates")
  print(path)
}
|
d94ffd09ad608eac3ee0a5cfa2ca2ff083fb6952
|
b819df606b40063a4ab0542d8f830546ab5523c2
|
/run_analysis.R
|
e92d5a4b9be460ea43d6e2981f04b590d9aca59c
|
[] |
no_license
|
datadogmd/human-activity-project
|
3186d45338aaae4743c6b2af4fe0e690fa54a095
|
05b5f2858f2f44848f87116cf102948e28f1d881
|
refs/heads/master
| 2016-09-03T07:13:47.083308
| 2015-08-24T16:38:35
| 2015-08-24T16:38:35
| 41,204,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,273
|
r
|
run_analysis.R
|
# Clear all variables.
# NOTE(review): rm(list = ls()) wipes the caller's workspace when sourced;
# kept as-is, but consider dropping it for reusability.
rm(list = ls())
# Load libraries.
library("dplyr")
# run_analysis.R expects the data to be unzipped in a folder named "data"
# located in the working directory.
datafolder <- "./data/getdata_projectfiles_UCI HAR Dataset/UCI HAR Dataset"
# Read data.
# features: column names for the X data; activity: code -> name lookup;
# x/y/s: measurements, activity codes, and subject ids respectively.
features <- read.table(paste(datafolder, "/features.txt", sep = ""))
activity <- read.table(paste(datafolder, "/activity_labels.txt", sep = ""))
test.x <- read.table(paste(datafolder, "/test/X_test.txt", sep = ""))
test.y <- read.table(paste(datafolder, "/test/Y_test.txt", sep = ""))
test.s <- read.table(paste(datafolder, "/test/subject_test.txt", sep = ""))
train.x <- read.table(paste(datafolder, "/train/X_train.txt", sep = ""))
train.y <- read.table(paste(datafolder, "/train/Y_train.txt", sep = ""))
train.s <- read.table(paste(datafolder, "/train/subject_train.txt", sep = ""))
# Join train and test datasets using rbind().
data.s <- rbind(train.s, test.s)
data.x <- rbind(train.x, test.x)
data.y <- rbind(train.y, test.y)
# Assign column names.
colnames(data.s) <- "subject"
colnames(data.x) <- features[, 2]
colnames(data.y) <- "code"
colnames(activity) <- c("code", "activity")
# Assign activity name to activity in y data using merge(). The merged data has to be re-ordered
# by index to retrieve the correct order.
# NOTE(review): row(data.y) yields an n x 1 matrix of row numbers here;
# seq_len(nrow(data.y)) would be the clearer equivalent.
data.y$index <- row(data.y)
data.y.names <- merge(data.y, activity, by.x = "code")
# Join the x and y data using cbind(). Rather than create an ordered copy of the merged data frame,
# embed the order() operation directly in cbind.
data.df <- cbind(data.s, data.y.names[order(data.y.names$index),], data.x)
# Select columns that have mean or std in the column name.
mean.data.df = subset(data.df, select = c(subject, activity, grep("mean",tolower(colnames(data.df))), grep("std", tolower(colnames(data.df)))))
# Output data to text file.
write.table(mean.data.df, file = "./meandata.txt", row.names = FALSE)
# Calculate variable mean by subject and activity.
meansummary <- mean.data.df %>%
group_by(subject, activity) %>%
summarise_each(funs(mean))
# Output summary data to text file.
write.table(meansummary, file = "./meansummary.txt", row.names = FALSE)
|
50fda27fe550ca97bec692d117308f55dc1b82b1
|
fd9f793260f6d9f61a90cec7fc1b40e30cbd4ad2
|
/ch02/03.확률과 확률통계/190620(2) - 이산확률-이항분포,정규분포예제.R
|
862e62fafb7008486ac1c703a2af025b2cc4f85d
|
[] |
no_license
|
kwonhyoseong/R_Statistics
|
1e8f574aebe7495893a6b3f218cbf4ad91ee7d3e
|
264d1847de5132092ce7f97bb134917376ab907e
|
refs/heads/master
| 2022-02-19T07:09:39.132115
| 2019-06-28T08:33:11
| 2019-06-28T08:33:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,270
|
r
|
190620(2) - 이산확률-이항분포,정규분포예제.R
|
# 3. Probability and probability distributions
install.packages('prob')
library(prob)
# Sample space for tossing one / two coins
tosscoin(1)
tosscoin(2)
# makespace = T also attaches the probability of each outcome
tosscoin(2, makespace = T)
# All outcomes of rolling one die
rolldie(1)
# Unordered draws (combinations) of two balls from an urn of 1:3
urnsamples(1:3, size = 2)
# Sampling with replacement: draw, put back, draw again
urnsamples(1:3, size = 2,replace = T)
# 5 C 2
urnsamples(c(rep('R',3), rep('B',2)),size=2)
# 3-2, mean and expected value of a random variable
x <- c(0, 1, 2)
px <- c(1/4, 2/4, 1/4)
EX <- sum( x * px )
EX
# Expected value: sum of each x value times its probability
x2 <- x^2
x2
EX2 <- sum(x2 * px); EX2 # expected value of x squared
VARX <- EX2 - EX^2 ; VARX
# Probability distributions
n <- 6
p <- 1/3
x <- 0:n
dbinom(2, size = n, prob = p)
dbinom(4, size = n, prob = p)
# 1~6 일 때의 값을 구한다.
px <- dbinom(x, size = n, prob = p) ; px
plot(x, px, type = 's',xlab = '성공회수', ylab='확률(P[X=x])',
main='B(6,1/3)')
# ggplot으로 만들기
df <- data.frame(x,px)
df
ggplot(df, aes(x=x, y=px)) + geom_bar(stat='identity')
#
plot(x, px, type = 's',xlab = '성공회수', ylab='확률(P[X=x])',
main='B(6,1/3)', lwd=10, col = 'red')
#ggplot으로 만들기
df <- data.frame(x,px)
df
ggplot(df, aes(x=x, y=px,fill=x)) + geom_line(stat='identity')
# nomal 디스크립션을 이렇게 수식으로 써볼수 있다.
pbinom(2, n, p)
pbinom(4, n, p)
pbinom(4, n, p) -pbinom(2, n, p)
dbinom(3, n, p) +dbinom(4, n, p)
#q바이넘
qbinom(0.1, n, p)
# 누적확률의 0.1이 됐을때의 확룰
qbinom(0.5, n, p)
#랜덤함수
set.seed(1234) ; rbinom(10, n, p)
#예제 3-4, R의 분포함수를 이용한 기댓값과 분산
n <- 6
p <- 1/3
x <- 0:n
px <- dbinom(x, size = n, prob = p)
(ex <- sum(x * px)) # ( 결과값까지 나온다.)
ex2 <- sum(x^2 *px)
(varx <- ex2 - ex^2)
n * p # 이항 분포의 기댓값 : np
n * p * (1-p) #
# Normal distribution N(170, 6^2): evaluate the density over mean +/- 3 sd
# and draw it as a smooth curve.
options(digits=3)
mu <- 170   # mean
sigma <- 6  # standard deviation
ll <- mu - 3*sigma  # lower plot limit
ul <- mu + 3*sigma  # upper plot limit
x <- seq(ll, ul, by=0.01)
nd <- dnorm(x, mean= mu, sd= sigma)
# BUG FIX: the original used type='1' (digit one), which is not a valid
# plot type and makes plot() error; a line plot needs type='l' (letter ell).
plot(x, nd, type='l', xlab='x', ylab='P(X=x)', lwd=2, col='red')
##############
|
84a72d78bfa520ff8d777faaa7fd586e796da241
|
e9308a298ecc021917931f0e86d2788d2ab4ccc1
|
/man/summary.Rd
|
f0dda78d3ef062814e8e4af9d50ef76f3e9645fa
|
[] |
no_license
|
jcfaria/ScottKnott
|
51f9c2d77d64793f16ec45673ffe0d322b3accdd
|
85f053e0f848d49735418e09948db8970cd82e65
|
refs/heads/master
| 2021-06-03T14:18:46.738234
| 2020-10-15T23:55:15
| 2020-10-15T23:55:15
| 4,248,644
| 1
| 3
| null | 2020-10-03T01:29:57
| 2012-05-07T10:56:07
|
R
|
ISO-8859-1
|
R
| false
| false
| 1,040
|
rd
|
summary.Rd
|
\name{summary}
\alias{summary.SK}
\title{
Summary Method for SK Objects
}
\description{
Returns (and prints) a summary list for \code{SK} objects.
}
\usage{
\method{summary}{SK}(object,
\dots)
}
\arguments{
\item{object}{A given object of the class \code{SK}.}
\item{\dots}{Potential further arguments (required by generic).}
}
\author{
José Cláudio Faria (\email{joseclaudio.faria@gmail.com})\cr
Enio Jelihovschi (\email{eniojelihovs@gmail.com})\cr
Ivan Bezerra Allaman (\email{ivanalaman@gmail.com})
}
\references{
Chambers, J.M. and Hastie, T.J. (1992) \emph{Statistical Models in S}.
Wadsworth and Brooks/Cole.
}
\seealso{
\code{\link{SK}}
}
\examples{
##
## Examples: Completely Randomized Design (CRD)
## More details: demo(package='SK')
##
data(CRD2)
## From: formula
sk1 <- with(CRD2,
SK(y ~ x,
data=dfm,
which='x',
id.trim=5))
summary(sk1)
}
\keyword{package}
|
009c4a8af5e7644aa933d8012b77ba6180180821
|
2b285112c5a30b07803898a3d1933344c6d03546
|
/eda_ts.R
|
a5f219384c47934731472dce9390357cf7ff9e7b
|
[] |
no_license
|
wikimedia-research/Discovery-WDQS-Usage-Explore
|
48cc65f8bf477c3982d589961322dee07640dbbf
|
6a3b53d055646d394e34f71c74ca87eab4af2fa8
|
refs/heads/master
| 2020-12-06T17:05:28.628222
| 2016-10-04T20:52:59
| 2016-10-04T20:52:59
| 67,155,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,070
|
r
|
eda_ts.R
|
webrequest_by_country <- readr::read_rds("data/webrequest_by_country.rds")
user_by_country <- readr::read_rds("data/user_by_country.rds")
user_by_os <- readr::read_rds("data/user_by_os.rds")
user_by_browser <- readr::read_rds("data/user_by_browser.rds")
user_by_device <- readr::read_rds("data/user_by_device.rds")
user_by_agent_type <- readr::read_rds("data/user_by_agent_type.rds")
webrequest_by_referer_class <- readr::read_rds("data/webrequest_by_referer_class.rds")
md_query_per_user <- readr::read_rds("data/md_query_per_user.rds")
md_1byte_size <- readr::read_rds("data/md_1byte_size.rds")
library(dplyr)
library(ggplot2)
library(magrittr)
import::from(ggthemes, theme_tufte)
theme_set(theme_tufte(base_family = "Gill Sans", base_size = 18))
webrequest_by_country$n_user_query <- as.numeric(webrequest_by_country$n_user_query)
webrequest_by_country$dt %<>% lubridate::ymd()
user_by_country$n_spider <- as.numeric(user_by_country$n_spider)
user_by_country$dt %<>% lubridate::ymd()
temp <- webrequest_by_country %>%
mutate(n_user_query = ifelse(is.na(n_user_query), 0, n_user_query)) %>%
group_by(dt) %>%
summarise(all_query=sum(n_query),user_query=sum(n_user_query)) %>%
mutate(spider_query=all_query-user_query) %>%
tidyr::gather("type", "n", 2:4)
{ggplot(temp, aes(x=dt, y=n, colour=type)) +
geom_line() +
geom_segment(data = filter(temp, type=="all_query", dt == temp$dt[which.max(n)]),
aes(y = 0, yend=n, x = dt, xend = dt),
color = "black", linetype = "dashed") +
geom_text(data = filter(temp, type=="all_query", dt == temp$dt[which.max(n)]),
aes(label = paste0(dt,", ",n," total queries")),
hjust = "right", vjust = "top", color = "black", nudge_x = -0.1, nudge_y = -0.05, size=7) +
scale_x_date(name = "Date") +
scale_y_continuous(labels=polloi::compress, name = "Number of Queries") +
scale_color_discrete(name="User Type",
breaks=c("all_query", "spider_query", "user_query"),
labels=c("All", "Known Automata", "User")) +
ggtitle("Number of WDQS Queries", subtitle="July 1st - August 29th")} %>%
ggsave("all_query_ts.png", ., path = "figures", width = 10, height = 10, units = "in", dpi = 300)
{user_by_country %>%
mutate(n_spider = ifelse(is.na(n_spider), 0, n_spider)) %>%
group_by(dt) %>%
summarise(all_user=sum(n_user),spider=sum(n_spider)) %>%
mutate(user=all_user-spider) %>%
tidyr::gather("agent_type", "n", 2:4) %>%
ggplot(aes(x=dt, y=n, colour=agent_type)) +
geom_line() +
scale_x_date(name = "Date") +
scale_y_continuous(name = "Number of Users (IP+UA)") +
scale_color_discrete(name="User Type",
breaks=c("all_user", "spider", "user"),
labels=c("All", "Known Automata", "User")) +
ggtitle("Number of WDQS Users", subtitle="July 1st - August 29th")} %>%
ggsave("all_user_ts.png", ., path = "figures", width = 10, height = 10, units = "in", dpi = 300)
# median no. query by user ts
md_query_per_user$dt %<>% lubridate::ymd()
{ggplot(md_query_per_user, aes(x=dt, y=median_n_query_per_user)) +
geom_line() +
scale_x_date(name = "Date") +
scale_y_continuous(name = "Median Number of Queries per User (IP+UA)") +
ggtitle("Median Number of Queries per User", subtitle="July 1st - August 29th")} %>%
ggsave("md_query_per_user_ts.png", ., path = "figures", width = 10, height = 10, units = "in", dpi = 300)
# median time first byte ts
md_1byte_size$dt %<>% lubridate::ymd()
{ggplot(md_1byte_size, aes(x=dt, y=median_time_firstbyte)) +
geom_line() +
scale_x_date(name = "Date") +
scale_y_continuous(name = "Median Time to First Byte (Second)") +
ggtitle("Median Time to First Byte", subtitle="July 1st - August 29th")} %>%
ggsave("median_time_firstbyte_ts.png", ., path = "figures", width = 10, height = 10, units = "in", dpi = 300)
# median response size ts
{ggplot(md_1byte_size, aes(x=dt, y=median_response_size)) +
geom_line() +
scale_x_date(name = "Date") +
scale_y_continuous(name = "Median Response Size") +
ggtitle("Median Response Size", subtitle="July 1st - August 29th")} %>%
ggsave("median_response_size_ts.png", ., path = "figures", width = 10, height = 10, units = "in", dpi = 300)
# top country ts
# Top 10 countries by total query volume over the whole period.
top_query_country <- webrequest_by_country %>%
group_by(country) %>%
summarise(n_query=sum(n_query)) %>%
arrange(desc(n_query)) %>%
top_n(10, n_query) %>%
{.$country}
# Daily query totals restricted to those countries.
temp <- webrequest_by_country %>%
filter(country %in% top_query_country) %>%
group_by(dt,country) %>%
summarise(all_query=sum(n_query))
# Raw daily lines, annotating the single largest country/day spike.
query_country_ts_p1 <- ggplot(temp, aes(x=dt, y=all_query, colour=country)) +
geom_line() +
geom_segment(data = filter(temp, country==temp$country[which.max(temp$all_query)], dt == temp$dt[which.max(temp$all_query)]),
aes(y = 0, yend=all_query, x = dt, xend = dt),
color = "black", linetype = "dashed") +
geom_text(data = filter(temp, country==temp$country[which.max(temp$all_query)], dt == temp$dt[which.max(temp$all_query)]),
aes(label = paste0(dt,", ",all_query," queries in ", country)),
hjust = "right", vjust = "top", color = "black", nudge_x = -0.1, nudge_y = -0.05) +
scale_x_date(name = "Date") +
scale_y_continuous(breaks=c(1,2e5,4e5,6e5),labels=polloi::compress, name = "Number of Queries") +
ggtitle("Top 10 Countries by Number of WDQS Queries", subtitle="July 1st - August 29th")
# Companion panel: loess-smoothed trends on a log scale.
query_country_ts_p2 <- ggplot(temp, aes(x=dt, y=all_query, colour=country)) +
#geom_smooth(se = FALSE, method = "gam", formula = y ~ s(x, k = 9)) +
geom_smooth(se = FALSE, method = "loess", span = 0.3) +
scale_x_date(name = "Date") +
scale_y_log10(labels=polloi::compress, name = "Number of Queries") +
ggtitle("Top 10 Countries by Number of WDQS Queries", subtitle="July 1st - August 29th, Smoothed")
query_country_ts_p <- plot_grid(plotlist = list(query_country_ts_p1, query_country_ts_p2), ncol = 2)
ggsave("query_country_ts.png", query_country_ts_p, path = "figures", width = 15, height = 5, units = "in", dpi = 300)
# Same pair of panels for distinct users (IP+UA) instead of queries.
top_user_country <- user_by_country %>%
group_by(country) %>%
summarise(n_user=sum(n_user)) %>%
arrange(desc(n_user)) %>%
top_n(10, n_user) %>%
{.$country}
temp <- user_by_country %>%
filter(country %in% top_user_country) %>%
group_by(dt,country) %>%
summarise(all_user=sum(n_user))
user_country_ts_p1 <- ggplot(temp, aes(x=dt, y=all_user, colour=country)) +
geom_line() +
geom_segment(data = filter(temp, country==temp$country[which.max(temp$all_user)], dt == temp$dt[which.max(temp$all_user)]),
aes(y = 0, yend=all_user, x = dt, xend = dt),
color = "black", linetype = "dashed") +
geom_text(data = filter(temp, country==temp$country[which.max(temp$all_user)], dt == temp$dt[which.max(temp$all_user)]),
aes(label = paste0(dt,", ",all_user," users in ", country)),
hjust = "right", vjust = "top", color = "black", nudge_x = -0.1, nudge_y = -0.05) +
scale_x_date(name = "Date") +
scale_y_continuous(name = "Number of Users (IP+UA)") +
ggtitle("Top 10 Countries by Number of WDQS Users", subtitle="July 1st - August 29th")
user_country_ts_p2 <- ggplot(temp, aes(x=dt, y=all_user, colour=country)) +
geom_smooth(se = FALSE, method = "gam", formula = y ~ s(x, k = 50)) +
# geom_smooth(se = FALSE, method = "loess", span = 0.3) +
scale_x_date(name = "Date") +
scale_y_continuous(name = "Number of Users (IP+UA)") +
ggtitle("Top 10 Countries by Number of WDQS Users", subtitle="July 1st - August 29th, Smoothed")
user_country_ts_p <- plot_grid(plotlist = list(user_country_ts_p1, user_country_ts_p2), ncol = 2)
ggsave("user_country_ts.png", user_country_ts_p, path = "figures", width = 14, height = 5, units = "in", dpi = 300)
# Exclude US spider 0816-0819
temp <- webrequest_by_country %>%
mutate(n_user_query = ifelse(is.na(n_user_query), 0, n_user_query)) %>%
mutate(n_query = ifelse(webrequest_by_country$country=="United States" & webrequest_by_country$dt %in% seq(as.Date("2016-08-16"), as.Date("2016-08-19"), "day"),
n_user_query, n_query)) %>%
group_by(dt) %>%
summarise(all_query=sum(n_query),user_query=sum(n_user_query)) %>%
mutate(spider_query=all_query-user_query) %>%
tidyr::gather("type", "n", 2:4)
{ggplot(temp, aes(x=dt, y=n, colour=type)) +
geom_line() +
geom_segment(data = filter(temp, type=="all_query", dt == temp$dt[which.max(n)]),
aes(y = 0, yend=n, x = dt, xend = dt),
color = "black", linetype = "dashed") +
geom_text(data = filter(temp, type=="all_query", dt == temp$dt[which.max(n)]),
aes(label = paste0(dt,", ",n," total queries")),
hjust = "right", vjust = "top", color = "black", nudge_x = -0.1, nudge_y = -0.05, size=7) +
scale_x_date(name = "Date") +
scale_y_continuous(breaks=c(1, seq(2e5,6e5,2e5)), labels=polloi::compress, name = "Number of Queries") +
scale_color_discrete(name="User Type",
breaks=c("all_query", "spider_query", "user_query"),
labels=c("All", "Known Automata", "User")) +
ggtitle("Number of WDQS Queries", subtitle="July 1st - August 29th, Excluding US Known Automata from Aug 16-19")} %>%
ggsave("all_query_ecl_us_spider0816_ts.png", ., path = "figures", width = 10, height = 10, units = "in", dpi = 300)
# BFAST on query
library(bfast)
source("seasonal.R")
query_ts <- webrequest_by_country %>%
mutate(n_user_query = ifelse(is.na(n_user_query), 0, n_user_query)) %>%
mutate(n_query = ifelse(webrequest_by_country$country=="United States" & webrequest_by_country$dt %in% seq(as.Date("2016-08-16"), as.Date("2016-08-19"), "day"),
n_user_query, n_query)) %>%
group_by(dt) %>%
summarise(all_query=sum(n_query),user_query=sum(n_user_query)) %>%
{.$all_query} %>%
ts(frequency=7)
bpfit <- bfast(query_ts, h=.25, season="harmonic", max.iter=100)
bpfit
png("figures/adjust_query_decompose.png",width = 10, height = 10, units = "in", res = 300)
# plot(bpfit, type="components", main="BFAST Decomposition: Adjusted Number of Queries")
out <- bpfit$output[[2]]
ft <- cbind(seasonal = out$St, trend = out$Tt, remainder = out$Nt)
tsp(ft) <- tsp(bpfit$Yt)
ft <- list(time.series = ft)
seasonal(ft, out, main = "BFAST Decomposition: Adjusted Number of Queries")
dev.off()
# BFAST on user
user_ts <- user_by_country %>%
mutate(n_spider = ifelse(is.na(n_spider), 0, n_spider)) %>%
group_by(dt) %>%
summarise(all_user=sum(n_user),spider=sum(n_spider)) %>%
{.$all_user} %>%
ts(frequency=7)
bpfit <- bfast(user_ts, h=.25, season="harmonic", max.iter=100)
bpfit
png("figures/user_decompose.png",width = 10, height = 10, units = "in", res = 300)
# plot(bpfit, type="components", main="BFAST Decomposition: The Number of Users")
out <- bpfit$output[[1]]
ft <- cbind(seasonal = out$St, trend = out$Tt, remainder = out$Nt)
tsp(ft) <- tsp(bpfit$Yt)
ft <- list(time.series = ft)
seasonal(ft, out, main = "BFAST Decomposition: The Number of Users")
dev.off()
|
7317527b29b503c0fce4b9eb52f6e19d9764d13c
|
688185e8e8df9b6e3c4a31fc2d43064f460665f1
|
/R/rbind.trackdata.R
|
f4a95b2ff51525513586a81ffc57303b02fe4ded
|
[] |
no_license
|
IPS-LMU/emuR
|
4b084971c56e4fed9032e40999eeeacfeb4896e8
|
eb703f23c8295c76952aa786d149c67a7b2df9b2
|
refs/heads/master
| 2023-06-09T03:51:37.328416
| 2023-05-26T11:17:13
| 2023-05-26T11:17:13
| 21,941,175
| 17
| 22
| null | 2023-05-29T12:35:55
| 2014-07-17T12:32:58
|
R
|
UTF-8
|
R
| false
| false
| 2,289
|
r
|
rbind.trackdata.R
|
##' A method of the generic function rbind for objects of class trackdata
##'
##' Different track data objects from one segment list are bound by combining
##' the $data columns of the track data object by rows. Track data objects
##' are created by \code{\link{get_trackdata}}.
##'
##' All track data objects have to be track data of the same segment list.
##' Thus $index and $ftime values have to be identically for all track data
##' objects. The number of columns of the track data objects must match. Thus
##' a track data object of more than one formant and single columned F0 track
##' data object can not be rbind()ed.
##'
##' @aliases rbind.trackdata rbind
##' @param \dots track data objects
##' @return A track data object with the same $index and $ftime values of the
##' source track data objects and with $data that includes all columns of
##' $data of the source track data objects.
##' @author Jonathan Harrington
##' @seealso \code{\link{rbind}} \code{\link{cbind.trackdata}}
##' \code{\link{trackdata}} \code{\link{get_trackdata}}
##' @keywords methods
##' @examples
##'
##' data(vowlax)
##'
##' #segment list vowlax - first segment only
##' vowlax[1,]
##'
##' #F0 track data object for vowlax - first segment only
##' vowlax.fund[1]
##'
##' #rms track data object for vowlax - first segment only
##' vowlax.rms[1]
##'
##' #now combine both track data objects
##' fund.rms.lax = rbind(vowlax.fund[1:10,], vowlax.rms[1:10,])
##'
##' #the combined track data object
##' #The first ten rows in $data keep vowlax.fund data, the 11th to last row keeps vowlax.rms data
##' fund.rms.lax
##'
##'
##'
##' @export
"rbind.trackdata" <- function (...)
{
  mat <- NULL
  # Stack the components of every supplied trackdata object.  Single-column
  # track data may be stored as a plain vector, hence the is.matrix() branch
  # (rbind for matrices, c() for vectors).
  for (j in list(...)) {
    if (is.matrix(j$data))
      mat$data <- rbind(mat$data, j$data)
    else mat$data <- c(mat$data, j$data)
    mat$index <- rbind(mat$index, j$index)
    if (!is.null(j$ftime))
      mat$ftime <- rbind(mat$ftime, j$ftime)
  }
  # Recompute $index so that each segment's [left, right] range addresses the
  # rows of the newly concatenated $data: segment lengths are preserved, but
  # start/end positions are shifted to run consecutively.
  diffinds <- mat$index[, 2] - mat$index[, 1] + 1
  right <- cumsum(diffinds)
  left <- right - (diffinds - 1)
  mat$index <- cbind(left, right)
  # NOTE(review): the original code branched on `version$major >= 5`, an
  # S-PLUS-era compatibility relic; on any R version both branches assign the
  # same class, so a plain class<- assignment is equivalent and clearer.
  class(mat) <- "trackdata"
  mat
}
|
3350ed70493c67612cf14f1dc7ae542a86b653d6
|
b080803cad0a97516391eae2744f1c89fd0ea523
|
/examples/process-agg/scripts/plot-time.R
|
248fa7aac7af9a9acc2b2bfa176199db5770eb84
|
[
"MIT"
] |
permissive
|
arosch/duckdb
|
3f26e54c0f0c49535f08dfa496a2680c3806eae9
|
c383e9446d0fe73f1001fc9c323dfc13a903c78c
|
refs/heads/master
| 2020-08-17T00:58:15.913545
| 2020-05-27T13:23:58
| 2020-05-27T13:23:58
| 215,583,242
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,314
|
r
|
plot-time.R
|
# Plot aggregate-benchmark execution times (StringAgg vs ArrayAgg vs ShaAgg)
# as two stacked facetted panels: the full input range on top and a zoomed-in
# view of the small-input region below, sharing one legend.
library(ggplot2)
library(gridExtra)
library(egg)

# command line arguments: <data file> <output file>
args = commandArgs(trailingOnly=TRUE)
if (length(args) != 2) {
  stop("Rscript efficiency-plot.R <data file> <output file>", call.=FALSE)
}

# import csv data (columns used: tuples, time.sec, approach, query)
dataFile <- args[1]
df <- read.table(dataFile, header=TRUE, sep=",", numerals="warn.loss")

# Shared base plot; both panels add points plus a per-approach mean line.
pplot <- ggplot(data=df, aes(x=tuples, y=time.sec, group=approach, color=approach, shape = approach))

# Top panel: full range up to 600M tuples; legend suppressed (drawn once below).
bp <- pplot + geom_point() +
  facet_wrap(vars(query), scales="fixed") +
  # FIX: `fun.y` was deprecated in ggplot2 3.3.0 in favour of `fun`.
  stat_summary(fun = mean, geom = "line") +
  labs (x="Number of Tuples in million", y="Execution Time") +
  theme_bw() +
  scale_x_continuous(breaks = c(0, 200000000, 400000000, 600000000), labels = c("0", "200", "400", "600")) +
  scale_y_continuous(breaks = c(0, 5, 10, 15), labels = c("0s", "5s", "10s", "15s")) +
  scale_color_manual(breaks = c("StringAgg", "ArrayAgg", "ShaAgg"), values=c("#619CFF", "#F8766D", "#00BA38")) +
  scale_shape_manual(breaks = c("StringAgg", "ArrayAgg", "ShaAgg"), values = c(16, 17, 15)) +
  coord_cartesian(xlim = c(100000, 600000000), ylim = c(0, 15)) +
  theme(axis.title.x=element_blank(), strip.background =element_rect(fill="white"), legend.position = "none")

# Bottom panel: zoom to <= 100M tuples / <= 1.25s; carries the shared legend.
sp <- pplot + geom_point() +
  facet_wrap(vars(query), scales="fixed") +
  stat_summary(fun = mean, geom = "line") +
  labs (x="Number of Tuples in million", y="Execution Time") +
  theme_bw() +
  scale_x_continuous(breaks = c(0, 25000000, 50000000, 75000000, 100000000), labels = c("0", "25", "50", "75", "100")) +
  scale_y_continuous(breaks = c(0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2), labels = c("0s", "0.2s", "0.4s", "0.6s", "0.8s", "1.0s", "1.2s")) +
  scale_color_manual(breaks = c("StringAgg", "ArrayAgg", "ShaAgg"), values=c("#619CFF", "#F8766D", "#00BA38")) +
  scale_shape_manual(breaks = c("StringAgg", "ArrayAgg", "ShaAgg"), values = c(16, 17, 15)) +
  coord_cartesian(ylim = c(0,1.25), xlim = c(0, 100000000)) +
  theme(strip.text.x = element_blank(), legend.title = element_blank(), legend.position = "bottom")

# Combine panels with egg::ggarrange so facet panels line up exactly.
p <- ggarrange(bp, sp, widths = c(5,5), heights = c(2, 2))

# save to output file
ofile <- args[2]
ggsave(ofile, width=4.5, height=4.5, dpi=300, p)
|
4f402d3d2d40ece16ac18804869d71aa045ddaca
|
224807bcc64ee023d59db89da1ff436a2aa44ba8
|
/tests/testthat/test-cell_info.R
|
f6fba7688e98dfbe0ef17f9f278b996d07777954
|
[] |
no_license
|
sdcTools/sdcTable
|
cf963624c44510e8c77c6b4ba83fe064a84c168c
|
ade7328a1c73b3fa2d7f17b725ad193389d0bde6
|
refs/heads/master
| 2023-09-03T14:59:24.853963
| 2023-08-16T06:27:56
| 2023-08-16T06:27:56
| 61,604,088
| 7
| 5
| null | 2019-10-04T10:14:39
| 2016-06-21T05:14:13
|
R
|
UTF-8
|
R
| false
| false
| 1,268
|
r
|
test-cell_info.R
|
context("test cell_info()")

test_that("cellInfo works", {
  skip_on_cran()
  # Build the standard sdcTable test problem with primary suppressions applied;
  # exactly one cell starts out with sdcStatus "u" (primary unsafe).
  sdc <- sdc_testproblem(with_supps = TRUE)
  expect_is(sdc, "sdcProblem")
  expect_equal(sum(sdc@problemInstance@sdcStatus == "u"), 1)

  # check correct input
  # BUG FIX: the original called cell_info(p) / cell_info(p, 1) with an
  # undefined object `p`, so expect_error() passed merely because of
  # "object 'p' not found" rather than because cell_info() validated its
  # arguments.  Use the real problem object so the input checks themselves
  # are exercised (missing/invalid `specs` should error).
  expect_error(cell_info(sdc))
  expect_error(cell_info(sdc, 1))

  # vector input: a single named character vector identifies one cell
  specs_vec <- c(region = "D", gender = "male")
  res <- cell_info(sdc, specs = specs_vec)
  expect_identical(nrow(res), 1L)
  expect_identical(res$id, 14L)
  expect_identical(res$strID, "0401")
  expect_identical(res$region, "D")
  expect_identical(res$gender, "male")
  expect_identical(res$freq, 11)
  expect_identical(res$val, 366)
  expect_identical(res$sdcStatus, "s")

  # data.frame input: one row per requested cell, order preserved
  specs_df <- data.frame(
    region = c("A", "D", "A"),
    gender = c("male", "female", "female")
  )
  res <- cell_info(sdc, specs = specs_df)
  expect_identical(nrow(res), 3L)
  expect_identical(res$id, as.integer(c(5, 15, 6)))
  expect_identical(res$sdcStatus, c("s", "s", "u"))

  # protect the table; the first cell becomes a secondary suppression ("x")
  sdc_safe <- protectTable(sdc, method = "SIMPLEHEURISTIC")
  res <- cell_info(sdc_safe, specs = specs_df)
  expect_identical(nrow(res), 3L)
  expect_identical(res$id, as.integer(c(5, 15, 6)))
  expect_identical(res$sdcStatus, c("x", "s", "u"))
})
|
d9e82a7bb977ebff8348c2b32d5a1e8f8f16f623
|
4128746ae0e5bdc33e21844ca4b1c617231aae55
|
/lib/states/wa/seattle.R
|
4a3f2d11c8995762a53cffd3ac27a3c8c19aa39e
|
[] |
no_license
|
stanford-policylab/opp
|
b71f80888110035318d860114c7572fe00a4664f
|
fdb3b9f8ea9fd85a5b0fb9dcde19021bcf2c8231
|
refs/heads/master
| 2023-05-26T03:04:16.760645
| 2023-05-17T20:23:46
| 2023-05-17T20:23:46
| 103,974,255
| 85
| 53
| null | 2023-05-17T20:27:01
| 2017-09-18T18:09:02
|
HTML
|
UTF-8
|
R
| false
| false
| 3,580
|
r
|
seattle.R
|
source("common.R")

# VALIDATION: [YELLOW] The Seattle PD doesn't appear to put out Annual Reports
# or statistics on all traffic stops, but the numbers seem reasonable given the
# population. Unfortunately, a lot of relevant demographic data appears to be
# missing.

# NOTE: The Seattle PD has a smaller dataset focused only on Terry stops here:
# https://www.seattle.gov/police/information-and-data/terry-stops

# Load the yearly raw Seattle files, attach human-readable descriptions for
# the call-type codes, and bundle the result with any loading problems.
load_raw <- function(raw_data_dir, n_max) {
  # Read the per-year raw files and normalize column names.
  yearly <- load_years(raw_data_dir, n_max)
  colnames(yearly$data) <- make_ergonomic(colnames(yearly$data))
  # Build a code -> description lookup from types.csv.
  type_tbl <- load_single_file(raw_data_dir, "types.csv")
  tr_type <- translator_from_tbl(type_tbl$data, "type_code", "translation")
  # Attach the translated description alongside the raw type code.
  translated <- mutate(
    yearly$data,
    type_description = tr_type[type]
  )
  # Combine data with loading problems from both inputs.
  bundle_raw(translated, c(yearly$loading_problems, type_tbl$loading_problems))
}
# Standardize the raw Seattle stop data: classify stops as pedestrian vs
# vehicular, parse officer/subject fields, derive outcomes from the
# disposition text, and extract vehicle attributes from the free-text
# vehicle description.
clean <- function(d, helpers) {
  vehicle <- helpers$load_json("vehicle.json")
  # Call-type keywords treated as evidence of a pedestrian (non-traffic) stop
  # when the violation text alone is ambiguous.
  ped_pattern <- paste(c(
    "DISTURBANCE",
    "FOOT",
    "HARAS",
    "MISCHIEF",
    "NARCOTIC",
    "NOISE",
    "PROPERTY",
    "PROSTITUT",
    "SEX",
    "SHOTS",
    "WELFARE"
  ), collapse = "|")
  # NOTE: pri in original dataset means 'priority'
  d$data %>%
    # NOTE: when rin is null, almost every column is null, so filter out
    filter(
      !is.na(rin)
    ) %>%
    # Geocode the address and join shapefile-derived fields (project helpers).
    helpers$add_lat_lng(
      "address"
    ) %>%
    helpers$add_shapefiles_data(
    ) %>%
    rename(
      location = address,
      violation = mir_description,
      precinct = first_prec,
      disposition = disposition_description
    ) %>%
    # poss_race_sex packs race and sex as two single characters; split at
    # position 1 into one-character race and sex codes.
    separate_cols(
      poss_race_sex = c("subject_race", "subject_sex"),
      sep = 1
    ) %>%
    # Space-separated composite fields: "date time", "id name", "last first".
    separate_cols(
      date_time = c("date", "time"),
      officer_no_1 = c("officer_id", "officer_name"),
      officer_name = c("officer_last_name", "officer_first_name"),
      sep = " "
    ) %>%
    mutate(
      # A stop is pedestrian if the violation says so outright, or if a
      # PURSUIT/MISCELLANEOUS violation has a call-type description matching
      # the pedestrian keyword pattern; everything else is vehicular.
      type = if_else(
        str_detect(violation, "PEDESTRIAN")
        | (str_detect(violation, "PURSUIT")
           & !is.na(type_description)
           & str_detect(type_description, ped_pattern))
        | (str_detect(violation, "MISCELLANEOUS")
           & !is.na(type_description)
           & str_detect(type_description, ped_pattern)),
        "pedestrian",
        "vehicular"
      ),
      date = parse_date(date, "%Y/%m/%d"),
      # Strip trailing commas left over from the "LAST, FIRST" split.
      officer_last_name = str_replace_all(officer_last_name, ",", ""),
      officer_first_name = str_replace_all(officer_first_name, ",", ""),
      officer_first_name = str_trim(officer_first_name),
      # officer_no is not unique, so combine with last name to get hash
      officer_id_hash = simple_map(
        str_c(officer_id, officer_last_name),
        simple_hash
      ),
      subject_race = tr_race[subject_race],
      subject_sex = tr_sex[subject_sex],
      subject_dob = parse_date(subj_dob, "%Y%m%d"),
      # Disposition codes: leading "A" marks an arrest; text search for the
      # citation/warning outcomes.
      arrest_made = str_sub(disposition, 1, 1) == "A",
      # NOTE: includes criminal and non-criminal citations
      citation_issued = str_detect(disposition, "CITATION"),
      warning_issued = str_detect(disposition, "WARN"),
      # Most-severe outcome wins: arrest > citation > warning.
      outcome = first_of(
        arrest = arrest_made,
        citation = citation_issued,
        warning = warning_issued
      ),
      # NOTE(review): `veh` and `vehcile` look like two raw-source spellings
      # of the same vehicle-description column; coalesce picks whichever is
      # populated.  Do not "fix" the spelling — it matches the raw data.
      v = coalesce(veh, vehcile),
      # Pull vehicle attributes out of the free-text description by matching
      # against the reference lists in vehicle.json.
      vehicle_color = str_extract(v, str_c(vehicle$colors, collapse = "|")),
      vehicle_make = str_extract(v, str_c(vehicle$makes, collapse = "|")),
      vehicle_model = str_extract(v, str_c(vehicle$models, collapse = "|")),
      vehicle_year = str_extract(v, "\\d{4}"),
      vehicle_registration_state =
        str_extract(v, str_c(valid_states, collapse = "|"))
    ) %>%
    rename(
      raw_type_description = type_description,
      raw_vehicle_description = v
    ) %>%
    standardize(d$metadata)
}
|
cb4b2124d8719d3a2886c702bcc567ea9a9a906b
|
c24949a902e18b051d7e4ed6cf348e06a1ee138a
|
/rmagi/tests/testthat/test_z02_HMC-noODE-shortened.R
|
572e1c46ee50c8fb1bd50ac5d67443459cbe4dff
|
[] |
no_license
|
yyang97/magi
|
2025786815a887b71ac551557fcf5770117caaaa
|
9a6585bb9623782a22a4fc7aba36a788fdae7655
|
refs/heads/master
| 2023-03-31T08:03:16.407736
| 2021-04-10T00:51:02
| 2021-04-10T00:51:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,436
|
r
|
test_z02_HMC-noODE-shortened.R
|
testthat::context("test run HMC-noODE")
### Required variables
### - fn.sim with nobs rows (noisy V and R in cols 1 & 2, using sigma = 0.1)
### - VRtrue with 401 rows (V and R true)
library(magi)
VRtrue <- read.csv(system.file("testdata/FN.csv", package="magi"))
# Known-good hyperparameters (phi) for each kernel type, used as the "truth"
# the sampler should recover.
phitrue <- list(
  compact1 = c(2.618, 6.381, 0.152, 9.636),
  rbf = c(0.838, 0.307, 0.202, 0.653),
  matern = c(2.04, 1.313, 0.793, 3.101),
  periodicMatern = c(2.04, 1.313, 9, 0.793, 3.101, 9)
)
nobs <- 41
# NOTE(review): seeding from Sys.time() makes the kernel choice below
# non-deterministic between test runs — presumably intentional to rotate
# kernel coverage, but worth confirming.
set.seed(Sys.time())
kerneltype <- sample(c("compact1","rbf","matern"),1)
pram.true <- list(abc=c(0.2, 0.2, 3),
                  phi=phitrue[[kerneltype]])
#noise level
noise <- 0.01
pram.true$sigma <- noise
fn.true <- VRtrue
fn.true$time <- seq(0,20,0.05)
fn.sim <- fn.true
# Simulate noisy observations (fixed seed so the data are reproducible even
# though the kernel choice above is not), then thin to nobs points.
set.seed(123)
fn.sim[,1:2] <- fn.sim[,1:2]+rnorm(length(unlist(fn.sim[,1:2])), sd=noise)
fn.sim <- fn.sim[seq(1,nrow(fn.sim), length=nobs),]
tvec.nobs <- fn.sim$time
# Pairwise signed time differences, their absolute values and squares —
# inputs for the covariance computations below.
foo <- outer(tvec.nobs, t(tvec.nobs),'-')[,1,]
r <- abs(foo)
r2 <- r^2
signr <- -sign(foo)
n.iter <- 50 # number of HMC iterations
phisig <- matrix(NA,n.iter,length(phitrue[[kerneltype]])+1) # phi and sigma
# Negative marginal log-likelihood and its gradient in (phi, sigma), for
# optimisation; NaN observations are dropped.
fn <- function(par) -phisigllikC( par, data.matrix(fn.sim[!is.nan(fn.sim[,1]),1:2]), r, kerneltype)$value
gr <- function(par) -as.vector(phisigllikC( par, data.matrix(fn.sim[!is.nan(fn.sim[,1]),1:2]), r, kerneltype)$grad)
# Maximum (marginal) likelihood fit as the HMC starting point.
marlikmap <- optim(rep(1,5), fn, gr, method="L-BFGS-B", lower = 0.0001)
marlikmap$par
c(pram.true$phi, pram.true$sigma)
-marlikmap$value
loglikAtTruth <- phisigllikC( c(pram.true$phi, pram.true$sigma), data.matrix(fn.sim[!is.nan(fn.sim[,1]),1:2]), r, kerneltype)$value
test_that("maximum likelihood should be higher than value at true parameter",{
  expect_gt(-marlikmap$value, loglikAtTruth)
})
phisig[1,] <- marlikmap$par
##### Reference values (truth)
lower_b <- c( 0, 0, 0, 0, 0 )
upper_b <- c( Inf, Inf, Inf, Inf, Inf)
full_llik <- c()
full_llik[1] <- phisigllikC( phisig[1,], data.matrix(fn.sim[!is.nan(fn.sim[,1]),1:2]), r, kerneltype)$value
accepts <- 0
paccepts <- 0
yobs <- data.matrix(fn.sim[,1:2])
# Per-kernel lower bound on the HMC step size (currently identical for all
# three kernels, but kept separate for easy tuning).
if(kerneltype=="matern"){
  stepLow <- 0.01
}else if(kerneltype=="rbf"){
  stepLow <- 0.01
}else if(kerneltype=="compact1"){
  stepLow <- 0.01
}
# HMC sampling of (phi, sigma): 200 leapfrog steps per iteration with a
# randomly jittered step size.
for (t in 2:n.iter) {
  foo <- phisigSample(data.matrix(fn.sim[,1:2]), r, phisig[t-1,],
                      rep(runif(1,stepLow,2*stepLow),5), 200, T, kerneltype)
  phisig[t,] <- foo$final
  accepts <- accepts + foo$acc
  full_llik[t] <- foo$lpr
}
burnin <- n.iter/2
## Best sampled
id.best <- which.max(full_llik)
# Posterior summaries after discarding burn-in.
startphi <- apply(phisig[-(1:burnin),1:4], 2, mean)
startsigma <- mean(phisig[-(1:burnin),5])
sigLow <- quantile(phisig[-(1:burnin),5], 0.001)
sigHigh <- quantile(phisig[-(1:burnin),5], 0.999)
# Repackage the chains into the structure expected by getMeanCurve() and
# summary.post.noODE(): vphi/rphi are the V- and R-component hyperparameters.
gpfit <- list(sigma=phisig[,5],
              rphi=phisig[,3:4],
              vphi=phisig[,1:2],
              lp__=full_llik,
              lglik=full_llik)
plotx <- seq(0,20,0.1)
gpfit$vtrue <- getMeanCurve(fn.sim$time, fn.sim$Vtrue, plotx,
                            gpfit$vphi, sigma.mat=gpfit$sigma, kerneltype)
gpfit$rtrue <- getMeanCurve(fn.sim$time, fn.sim$Rtrue, plotx,
                            gpfit$rphi, sigma.mat=gpfit$sigma, kerneltype)
# Posterior summary plot (written to a PDF); uses an unexported magi helper.
post.noODE <- magi:::summary.post.noODE(paste0("C-GPfit-",noise,"-",kerneltype,".pdf"),
                                        fn.true, fn.sim, gpfit, pram.true, plotx)
startX <- c(post.noODE$init.epost$vtrue, post.noODE$init.epost$rtrue)
# Log-likelihood (without the ODE term) at the estimated posterior mean ...
logliknoODEOutEpost <- logliknoODE( cbind(post.noODE$init.epost$vtrue, post.noODE$init.epost$rtrue),
                                    calCov(post.noODE$init.epost$vphi, r, signr, kerneltype=kerneltype),
                                    calCov(post.noODE$init.epost$rphi, r, signr, kerneltype=kerneltype),
                                    post.noODE$init.epost$sigma,
                                    data.matrix(fn.sim[!is.nan(fn.sim[,1]),1:2]))
# ... and at the true curves/parameters, for comparison.
logliknoODEOutTrue <- logliknoODE( data.matrix(fn.true[seq(1,nrow(fn.true), length=nobs), 1:2]),
                                   calCov(pram.true$phi[1:2], r, signr, kerneltype=kerneltype),
                                   calCov(pram.true$phi[3:4], r, signr, kerneltype=kerneltype),
                                   pram.true$sigma,
                                   data.matrix(fn.sim[!is.nan(fn.sim[,1]),1:2]))
# FIXME the rbf seems to have very full small likelihood
# but variance of rbf kernel is definitely correct
logliknoODEOutEpost
logliknoODEOutTrue
|
ff3bc18db903e822068394edfe0668a4280483f1
|
72504818c6707235d7902cda47929c4d3a362f1e
|
/man/box_fresh_auth.Rd
|
bfa5d7216c23f8b12698f17c9f22f6a7d1b69c93
|
[
"MIT"
] |
permissive
|
ijlyttle/boxr
|
4a933f67a0a885d646d07c858c61f1f4844b5c40
|
bc2f867855a1fbbbe0b4e75b59250ba197053b9a
|
refs/heads/master
| 2021-01-12T10:55:15.304085
| 2019-03-29T00:05:38
| 2019-03-29T00:05:38
| 72,751,933
| 0
| 0
| null | 2016-11-03T14:11:35
| 2016-11-03T14:11:34
| null |
UTF-8
|
R
| false
| true
| 685
|
rd
|
box_fresh_auth.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/boxr_auth.R
\name{box_fresh_auth}
\alias{box_fresh_auth}
\title{Obtain a fresh Box token}
\usage{
box_fresh_auth(cache = "~/.boxr-oauth", ...)
}
\arguments{
\item{cache}{Passed to \code{cache} in \code{\link[=httr]{httr()}}.}
\item{...}{Passed to \code{\link[=box_auth]{box_auth()}}}
}
\description{
Very simply, deletes the old token file before trying to re-authorise. This
is often the solution to authorisation problems raised by users!
}
\seealso{
\code{\link[=box_auth]{box_auth()}} for the usual method of authorisation, and
\code{\link[=box_auth_on_attach]{box_auth_on_attach()}} for a lazy one.
}
|
a99324e51fd51779519093771d872c1b652c1f39
|
a99d389911551bf880a692d1142809bc8ef5d7d8
|
/R/SetSortOrder.R
|
c5860e11f7fff62dbaf183e655a9f5aa19256102
|
[
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
jfisher-usgs/RSurvey
|
97357051cee074e0ad75163225a924adebf6925d
|
52b9597a512eb672ded04cd665a001d83096737e
|
refs/heads/master
| 2020-12-22T21:11:59.449895
| 2020-06-30T18:02:43
| 2020-06-30T18:02:43
| 1,533,926
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,183
|
r
|
SetSortOrder.R
|
#' GUI: Sort Order
#'
#' A graphical user interface (\acronym{GUI}) for specifying the variable used to sort the data set.
#'
#' @param col.ids character.
#'   Vector of variable names
#' @param sort.on integer.
#'   Index for the variable used to sort the data set.
#' @param parent tkwin.
#'   \acronym{GUI} parent window
#'
#' @return Returns an object of integer class that specifies the index of the variable used to sort the data set.
#'   Attributes for this object include:
#'   \code{decreasing}, a logical value indicating if the sort order is increasing or decreasing; and
#'   \code{na.last}, a logical value for controlling the treatment of \code{NA}s during sorting.
#'   If true, missing values in the data are put last; otherwise, they are put first;
#'   if \code{NA}, they are removed.
#'
#' @author J.C. Fisher, U.S. Geological Survey, Idaho Water Science Center
#'
#' @seealso \code{\link{order}}
#'
#' @keywords misc
#'
#' @import tcltk
#'
#' @export
#'
#' @examples
#' \dontrun{
#'   col.ids <- c("Variable1", "Variable2", "Variable3")
#'   sort.on <- 2
#'   attr(sort.on, "decreasing") <- TRUE
#'   attr(sort.on, "na.last") <- FALSE
#'   SetSortOrder(col.ids, sort.on)
#' }
#'
SetSortOrder <- function(col.ids, sort.on=NULL, parent=NULL) {

  # Read the widget state back into a `sort.on` object (with decreasing /
  # na.last attributes) and signal the dialog to close.
  SaveSortOrder <- function() {
    col.id <- as.character(tclvalue(col.id.var))
    decreasing <- as.logical(as.integer(tclvalue(decreasing.var)))
    na.last <- as.integer(tclvalue(na.last.var))
    # radio values: 1 = place last, 0 = place first, 2 = remove (NA)
    na.last <- if (na.last %in% 0:1) as.logical(na.last) else NA
    if (col.id == "") {
      sort.on <- NULL
    } else {
      sort.on <- which(col.ids == col.id)
      attr(sort.on, "decreasing") <- decreasing
      attr(sort.on, "na.last") <- na.last
    }
    rtn <<- sort.on
    tclvalue(tt.done.var) <- 1
  }

  # initialize return value (Cancel/close returns the input unchanged)
  rtn <- sort.on

  # assign variables linked to tk widgets
  col.id.var <- tclVar()
  decreasing.var <- tclVar(0)
  na.last.var <- tclVar(1)
  tt.done.var <- tclVar(0)

  # restore widget state from a previously saved sort order
  idx <- 0L
  # BUG FIX: the original condition tested `idx %in% seq_along(col.ids)` with
  # idx fixed at 0L, which is always FALSE, so a previously saved sort order
  # was never restored into the dialog.  Test `sort.on` itself.
  if (is.integer(sort.on) && sort.on %in% seq_along(col.ids)) {
    idx <- as.integer(sort.on)
    decreasing <- attr(sort.on, "decreasing")
    # Store 0/1 (not TRUE/FALSE) so the value matches the radiobutton values
    # used by SaveSortOrder()'s as.integer() round-trip.
    if (!is.null(decreasing)) tclvalue(decreasing.var) <- as.integer(decreasing)
    na.last <- attr(sort.on, "na.last")
    if (!is.null(na.last)) {
      if (is.logical(na.last)) tclvalue(na.last.var) <- as.integer(na.last)
      if (is.na(na.last)) tclvalue(na.last.var) <- 2
    }
  }

  # open gui
  tclServiceMode(FALSE)
  tt <- tktoplevel()
  if (!is.null(parent)) {
    tkwm.transient(tt, parent)
    # place the dialog slightly offset from the parent window
    geo <- unlist(strsplit(as.character(tkwm.geometry(parent)), "\\+"))
    geo <- as.integer(geo[2:3]) + 25
    tkwm.geometry(tt, sprintf("+%s+%s", geo[1], geo[2]))
  }
  tktitle(tt) <- "Sort Order"
  tkwm.resizable(tt, 1, 0)

  # frame 0: OK / Cancel / Help buttons
  f0 <- tkframe(tt, relief="flat")
  f0.but.2 <- ttkbutton(f0, width=12, text="OK", command=SaveSortOrder)
  f0.but.3 <- ttkbutton(f0, width=12, text="Cancel",
                        command=function() tclvalue(tt.done.var) <- 1)
  f0.but.4 <- ttkbutton(f0, width=12, text="Help",
                        command=function() {
                          print(utils::help("SetSortOrder", package="RSurvey"))
                        })
  tkgrid("x", f0.but.2, f0.but.3, f0.but.4, pady=c(15, 10), padx=c(4, 0))
  tkgrid.columnconfigure(f0, 0, weight=1)
  tkgrid.configure(f0.but.4, padx=c(4, 10))
  tkpack(f0, fill="x", side="bottom", anchor="e")

  # frame 1: variable selector plus order / NA-handling radio buttons
  f1 <- ttkframe(tt, relief="flat")
  f1.lab.1.1 <- tklabel(f1, text="Variable to sort on")
  vals <- c("", col.ids)
  # a single (empty) choice must be braced so Tcl treats it as one element
  if (length(vals) == 1) vals <- paste0("{", vals, "}")
  f1.box.1.2 <- ttkcombobox(f1, state="readonly", textvariable=col.id.var, values=vals)
  tcl(f1.box.1.2, "current", idx)
  f1.lab.2.2 <- ttklabel(f1, text="Order")
  f1.rad.2.3 <- ttkradiobutton(f1, variable=decreasing.var, value=FALSE,
                               text="increasing", width=10)
  f1.rad.3.3 <- ttkradiobutton(f1, variable=decreasing.var, value=TRUE,
                               text="decreasing", width=10)
  f1.lab.2.4 <- ttklabel(f1, text="NAs")
  f1.rad.2.5 <- ttkradiobutton(f1, variable=na.last.var, value=1, text="place last")
  f1.rad.3.5 <- ttkradiobutton(f1, variable=na.last.var, value=0, text="place first")
  f1.rad.4.5 <- ttkradiobutton(f1, variable=na.last.var, value=2, text="remove")
  tkgrid(f1.lab.1.1, f1.box.1.2, pady=c(15, 5))
  tkgrid("x", f1.lab.2.2, f1.rad.2.3, f1.lab.2.4, f1.rad.2.5, "x")
  tkgrid("x", "x", f1.rad.3.3, "x", f1.rad.3.5, "x")
  tkgrid("x", "x", "x", "x", f1.rad.4.5, "x")
  tkgrid.configure(f1.box.1.2, sticky="ew", columnspan=6)
  tkgrid.configure(f1.lab.2.2, padx=c(0, 4))
  tkgrid.configure(f1.lab.2.4, padx=c(20, 4))
  tkgrid.configure(f1.rad.2.5, f1.rad.3.5, f1.rad.4.5, sticky="w")
  tkgrid.columnconfigure(f1, 6, weight=1, minsize=0)
  tkpack(f1, fill="x", padx=10)

  # bind events
  tclServiceMode(TRUE)
  tkbind(tt, "<Destroy>", function() tclvalue(tt.done.var) <- 1)

  # gui control: block until OK/Cancel/close, then tear down and return
  tkfocus(tt)
  tkgrab(tt)
  tkwait.variable(tt.done.var)
  tclServiceMode(FALSE)
  tkgrab.release(tt)
  tkdestroy(tt)
  tclServiceMode(TRUE)
  return(rtn)
}
|
1fbb23fc73b568ae69aacc6244dbd49f5878dadc
|
8884de5c13cb902326178a5bcb45d5bc3b77bca9
|
/ui.R
|
f8c69b0c57fa81f037dbb1fc7e14c72afe8336ed
|
[] |
no_license
|
gonzalor/Coursera-DataProducts-Shiny
|
ebf91b241ba9dc862a174cac79d0135684b49034
|
4be3a3cb999cbf8da8278a6165cdc10aa586ba9a
|
refs/heads/master
| 2021-01-10T10:53:51.270299
| 2015-09-26T14:39:37
| 2015-09-26T14:39:37
| 43,144,089
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,614
|
r
|
ui.R
|
# Shiny application - User Interface
#
# Dashboard UI for a water/sewage company's issue-tracker data: a Plot tab
# (configurable boxplot), a Prediction tab (resolution-time estimate), a
# filterable Data Table tab, and a Help tab.
#
# NOTE(review): `zones` and `types` are expected to be defined globally
# (e.g. in global.R) before this UI is evaluated — confirm at deploy time.
library(shiny)
library(leaflet)
suppressPackageStartupMessages(library(ggplot2))

# Choices for drop-downs (display label = internal value)
varsXAxis <- c(
  "Types" = "types",
  "Zones" = "zones"
)
varsYAxis <- c(
  "Count" = "count",
  "Resolution Days" = "resolution"
)

shinyUI(
  navbarPage("Dashboard", id="nav",
    tabPanel("Plot",
      pageWithSidebar(
        # Issues By Type
        headerPanel("Resolved Issues in 2014"),
        # Configuration
        sidebarPanel(
          # X Axis Selection
          selectInput("xaxis", "X Axis", varsXAxis),
          # Y Axis Selection
          selectInput("yaxis", "Y Axis", varsYAxis),
          # Choose Zones (empty selection value "" means no filter)
          selectInput("zones", "Zones", c("All Zones"="", structure(zones, names=zones)), multiple=TRUE),
          # Choose Types
          selectInput("types", "Types", c("All Types"="", structure(types, names=types)), multiple=TRUE),
          # Type of Scale
          checkboxInput("chkByTypeLogScale", "Logarithmic Scale")
        ),
        # Show a plot of the generated distribution
        mainPanel(
          plotOutput("issuesByType")
        )
      )
    ),
    tabPanel("Prediction",
      pageWithSidebar(
        # Predict Resolution time
        headerPanel("Resolution Time in Days Prediction"),
        # Configuration
        sidebarPanel(
          # Choose Zone (single selection; "" placeholder prompts the user)
          selectInput("predZone", "Zone", c("Choose Zone..."="", structure(zones, names=zones)), multiple=FALSE),
          # Choose Type
          selectInput("predType", "Type", c("Choose Issue Type.."="", structure(types, names=types)), multiple=FALSE)
        ),
        # Show a plot of the generated distribution
        mainPanel(
          verbatimTextOutput("prediction")
        )
      )
    ),
    tabPanel("Data Table",
      # Filter row: zone/type selectors plus a min/max resolution-days range
      fluidRow(
        column(2, selectInput("tblzones", "Zones", c("All Zones"="", structure(zones, names=zones)), multiple=TRUE)),
        column(2,selectInput("tbltypes", "Types", c("All Types"="", structure(types, names=types)), multiple=TRUE)),
        column(2, numericInput("tblMinDays", "Min Days", min=0, max=100, value=0)),
        column(2, numericInput("tblMaxDays", "Max Days", min=0, max=100, value=100))
      ),
      hr(),
      DT::dataTableOutput("table")
    ),
    tabPanel("Help",
      h2("About the Project"),
      p("This application is part of the Course Project for the Coursera Developing Data Products Course (devdataprod-032). It intends to show a simple dashboard for a company that serves drinking water and process sewer liquids."),
      h3("The Data"),
      p("The sample data was taken form the issue tracker system of the company and anonimized for the purposes of this work. The data.frame has the following structure"),
      verbatimTextOutput("datasummary"),
      h2("How to use the application"),
      p("The application consists of different tabs, each of one demostrates an example application."),
      h3("Plot"),
      p("This panel allows the user to configure a boxplot chart by selecting the X and Y axis. Also, the data frame can be filtered by type and/or zone. Because for some comparissons there are boxplots near zero, it can be selected a logarithmic scale for de Y axis."),
      p("The main purpose of this panel is to play with different configurations and see how the graphic responds."),
      h3("Prediction"),
      p("It allows to predict the time (in days) expected for the issue resolution based on the type and zone. The prediction is made using a linear regression."),
      h3("Data Table"),
      p("This tab allows to browse the data frame in detail filtering by type, zone and/or time of resolution."),
      h3("Help"),
      p("This page.")
    )
  )
)
|
500a8121f4254335b624af1243862d70376e5e2e
|
6b57ed4964727602b75250c5dbcfa10653cf2ee0
|
/tests/test_contestMD.R
|
0a4ceca20ca7129f114d2e962c6a05d87cc1af28
|
[] |
no_license
|
runehaubo/lmerTestR
|
278b4b1a4f99387cc0216193a5236cae6e6a4f0c
|
35dc5885205d709cdc395b369b08ca2b7273cb78
|
refs/heads/master
| 2021-06-05T16:38:46.427782
| 2020-10-23T06:59:55
| 2020-10-23T06:59:55
| 117,861,877
| 47
| 10
| null | 2020-10-19T13:52:50
| 2018-01-17T16:26:02
|
HTML
|
UTF-8
|
R
| false
| false
| 4,190
|
r
|
test_contestMD.R
|
# test_contestMD.R
library(lmerTest)
# WRE says "using if(requireNamespace("pkgname")) is preferred, if possible."
# even in tests:
assertError <- function(expr, ...)
if(requireNamespace("tools")) tools::assertError(expr, ...) else invisible()
assertWarning <- function(expr, ...)
if(requireNamespace("tools")) tools::assertWarning(expr, ...) else invisible()
# Kenward-Roger only available with pbkrtest and only then validated in R >= 3.3.3
# (faulty results for R < 3.3.3 may be due to unstated dependencies in pbkrtest)
has_pbkrtest <- requireNamespace("pbkrtest", quietly = TRUE) && getRversion() >= "3.3.3"
data("sleepstudy", package="lme4")
####################################
## Tests of contestMD
####################################
# Quadratic model in Days: three fixed-effect coefficients, so the contrast
# matrices (L) below have 3 columns.
fm <- lmer(Reaction ~ Days + I(Days^2) + (1|Subject) + (0+Days|Subject),
           sleepstudy)
# Basic tests:
L <- diag(3L)
contestMD(fm, L)
# Tests of ddf arg:
contestMD(fm, L, ddf="Sat")
if(has_pbkrtest)
  contestMD(fm, L, ddf="Kenward-Roger")
assertError(contestMD(fm, L, ddf="sat")) # Invalid ddf arg.
# Tests of simple 2-df test:
# A 2-row L should produce a single F test with NumDF == 2.
(ans <- contestMD(fm, L[2:3, ], ddf="Sat"))
stopifnot(nrow(ans) == 1L,
          ans$NumDF == 2L)
if(has_pbkrtest) {
  (ans <- contestMD(fm, L[2:3, ], ddf="Kenward-Roger"))
  stopifnot(nrow(ans) == 1L,
            ans$NumDF == 2L)
}
# Tests of simple 1-df test:
(ans <- contestMD(fm, L[3, , drop=FALSE], ddf="Sat"))
stopifnot(nrow(ans) == 1L,
          ans$NumDF == 1L)
if(has_pbkrtest) {
  (ans <- contestMD(fm, L[3, , drop=FALSE], ddf="Kenward-Roger"))
  stopifnot(nrow(ans) == 1L,
            ans$NumDF == 1L)
}
# Test of vector input:
(ans <- contestMD(fm, L[3, ], ddf="Sat")) # OK since length(L[3, ]) == length(fixef(fm))
stopifnot(nrow(ans) == 1L,
          ans$NumDF == 1L)
assertError(contestMD(fm, c(1, 0))) # L is too short
assertError(contestMD(fm, c(1, 0, 1, 1))) # L is too long
# Test of list input:
assertError(contestMD(fm, list(L[3, , drop=FALSE]), ddf="Sat")) # Need L to be a matrix
# zero-row L's are allowed (if ncol(L) is correct):
ans1 <- contestMD(fm, L[0, , drop=FALSE], ddf="Sat")
stopifnot(nrow(ans1) == 0L)
if(has_pbkrtest) {
  ans2 <- contestMD(fm, L[0, , drop=FALSE], ddf="Kenward-Roger")
  stopifnot(nrow(ans2) == 0L)
}
# Test wrong ncol(L):
assertError(contestMD(fm, L[2:3, 2:3])) # need ncol(L) == length(fixef(fm))
# row-rank deficient L are allowed:
# (row 3 = row 1 - row 2, so the rank -- and hence NumDF -- is 2, not 3)
L <- rbind(c(1, 0, 1),
           c(0, 1, 0),
           c(1, -1, 1))
ans <- contestMD(fm, L)
stopifnot(nrow(L) == 3L,
          qr(L)$rank == 2,
          ans$NumDF == 2)
if(has_pbkrtest) {
  ans_KR <- contestMD(fm, L, ddf="Kenward-Roger")
  stopifnot(ans_KR$NumDF == 2)
}
# Test of 0-length beta
# (model with no fixed effects at all: contestMD should return an empty table)
fm1 <- lmer(Reaction ~ 0 + (1|Subject) + (0+Days|Subject),
            sleepstudy)
stopifnot(length(fixef(fm1)) == 0L)
L <- numeric(0L)
(ans <- contestMD(fm1, L))
stopifnot(nrow(ans) == 0L)
L <- matrix(numeric(0L), ncol=0L)
(ans <- contestMD(fm1, L))
stopifnot(nrow(ans) == 0L)
## rhs argument:
data("cake", package="lme4")
model <- lmer(angle ~ recipe * temp + (1|recipe:replicate), cake)
(L <- diag(length(fixef(model)))[2:3, ])
(an <- anova(model, type="marginal"))
# With rhs = 0 the contrast test reproduces the marginal anova row; a
# non-zero rhs moves the null away from the estimates, so F must increase.
ct <- contestMD(model, L, rhs = 0)
ct2 <- contestMD(model, L, rhs = c(2, 2))
stopifnot(
  isTRUE(all.equal(ct[1, ], an[1, ], check.attributes=FALSE, tolerance=1e-6)),
  ct[, "F value"] < ct2[, "F value"]
)
L2 <- rbind(L, L[1, ] + L[2, ]) # rank deficient!
contestMD(model, L2, rhs = c(0, 0, 0)) # no warning
assertWarning(contestMD(model, L2, rhs = c(2, 2, 2))) # warning since L2 is rank def.
if(has_pbkrtest)
  assertWarning(contestMD(model, L2, rhs = c(2, 2, 2), ddf="Kenward-Roger"))
fm <- lmer(Reaction ~ Days + (Days|Subject), sleepstudy)
contestMD(fm, L=cbind(0, 1))
contestMD(fm, L=cbind(0, 1), rhs=10)
if(has_pbkrtest) {
  contestMD(fm, L=cbind(0, 1), ddf="Kenward-Roger")
  contestMD(fm, L=cbind(0, 1), ddf="Kenward-Roger", rhs=10)
}
## Test 'lmerMod' method:
# (model fitted with lme4::lmer directly, not lmerTest's wrapper)
fm <- lme4::lmer(Reaction ~ Days + (Days|Subject), sleepstudy)
contestMD(fm, L=cbind(0, 1))
contestMD(fm, L=cbind(0, 1), rhs=10)
if(has_pbkrtest) {
  contestMD(fm, L=cbind(0, 1), ddf="Kenward-Roger")
  contestMD(fm, L=cbind(0, 1), ddf="Kenward-Roger", rhs=10)
}
|
f3782c69af8dda01ac34ba23596c67c7b651e1ee
|
9e0f6cb11b63bf93ba817f13750f12c9dd44ed28
|
/fmle_bb.r
|
dbbaecd60e48f0061ec83e1d98490338f7978f54
|
[] |
no_license
|
slarge/fmle_bb
|
192a56be7def4866fc10805e0c712ceceb9483f3
|
2fcd1539f1217bd1f678cbeea1ecfd52f8052e41
|
refs/heads/master
| 2021-05-04T03:11:53.785924
| 2016-10-25T09:26:34
| 2016-10-25T09:26:34
| 71,358,700
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,706
|
r
|
fmle_bb.r
|
# fmle_bb() {{{
# S4 generic for the blackbox-style maximum-likelihood fit; methods
# dispatch on the class of (object, start).
setGeneric('fmle_bb', function(object, start, ...)
  standardGeneric('fmle_bb'))
# FLPar 'start': convert the first iteration of the FLPar into a named list
# of starting values and delegate to the list-based method.
setMethod("fmle_bb",
  signature(object = "FLModel", start = "FLPar"),
  function(object, start, method = "Nelder-Mead", fixed = list(),
           control = list(trace = 1),
           lower = rep(-Inf, dim(params(object))[2]),
           upper = rep(Inf, dim(params(object))[2]), ...) {
    # Only iteration 1 of the FLPar is used for the starting values
    init <- as.list(FLCore::iter(start, 1))
    names(init) <- dimnames(start)$params
    fmle_bb(object, init, method, fixed, control, lower, upper, ...)
  }
)
# fmle {{{
# FLSR-specific fit: delegates to the FLModel method and then recomputes
# residuals according to the stock-recruit error structure.
setMethod("fmle_bb", signature(object="FLSR", start="ANY"),
  function(object, start, ...)
  {
    # Fit with the generic FLModel method first
    res <- callNextMethod()
    # AR1 models
    # NOTE(review): assumes the second dimension of rec() indexes time,
    # so rec(res)[,-n] is the one-step lag of rec(res)[,-1] -- confirm.
    if('rho' %in% dimnames(params(object))$params)
    {
      n <- dim(rec(res))[2]
      rho <- c(params(res)['rho',])
      # First residual of an AR1 process is undefined: reset all to NA, then
      # fill positions 2..n with the innovation (obs - fitted) minus
      # rho * lagged (obs - fitted).
      residuals(res) <- as.numeric(NA)
      residuals(res)[,-1] <- (rec(res)[,-1] - rho*rec(res)[,-n] - fitted(res)[,-1] +
        rho*fitted(res)[,-n])
    }
    # lognormal models
    else if(object@logerror)
      residuals(res) <- log(rec(res)) - log(fitted(res))
    return(res)
  }
) # }}}
# Maximum-likelihood fit of an FLModel across iterations, with the option of
# running the per-iteration optimisations in parallel through foreach/doParallel.
#
# Arguments mirror FLCore::fmle, plus:
#   seq.iter   - fit each iteration of 'object' separately (TRUE) or once (FALSE)
#   preconvert - NOTE(review): currently unused (the lapply(data, c) step is
#                commented out below); kept for interface compatibility
#   inParallel - register a doParallel backend (TRUE) or run sequentially (FALSE)
setMethod('fmle_bb',
  signature(object='FLModel', start='ANY'),
  function(object, start, method='Nelder-Mead', fixed=list(),
    control=list(trace=1), lower=rep(-Inf, dim(params(object))[1]),
    upper=rep(Inf, dim(params(object))[1]), seq.iter=TRUE, preconvert = FALSE,
    inParallel = TRUE, ...)
  {
    ## Figure out what should be pre-processed locally and how to export to BB ##
    # (commented-out development scratch code kept below)
    # rm(list = ls())
    # data(ple4)
    # ple4SR<-as.FLSR(ple4)
    # #### Specifying the stock recruitment relationship and error model
    # model(ple4SR)<-bevholt()
    # ple4SR <- propagate(ple4SR, iter = 1000)
    # object <- ple4SR
    # method='Nelder-Mead'
    # fixed=list()
    # seq.iter=TRUE
    # control=list(trace=1)
    # library(doParallel)
    # library(FLCore)
    # #
    # TODO Check with FL
    args <- list(...)
    call <- sys.call(1)
    logl <- object@logl
    # get parameter names by matching elements in param slot
    parnm <- names(formals(logl))[names(formals(logl))%in%
      dimnames(object@params)$param]
    # get fixed parameter names
    fixnm <- names(fixed)
    # fixed must match params
    if(any(!fixnm %in% parnm)) {
      stop("some named arguments in 'fixed' are not arguments to the
      supplied log-likelihood")
    }
    # HACK! clean up fixed list if elements are named vectors
    fixed <- lapply(fixed, function(x){ names(x) <- NULL; x})
    # create list of input data
    # get FLQuant slots' names
    datanm <- getSlotNamesClass(object, 'FLArray')
    # Include FLQuants contents too
    # FIX: was 'for (i in length(flqs))', which visited only the last slot
    # (and indexed with 0 when there were none); iterate over every slot.
    flqs <- getSlotNamesClass(object, 'FLQuants')
    for (i in seq_along(flqs)) {
      datanm <- c(datanm, names(slot(object, flqs[i])))
    }
    datanm <- c(datanm, getSlotNamesClass(object, 'numeric'))
    # get those in formals of logl
    datanm <- datanm[datanm%in%names(formals(logl))]
    # limits: only honoured by the box-constrained optimisers
    if(method %in% c('L-BFGS-B', 'Brent'))
    {
      if(missing(lower) && !is.null(lower(object)))
        # if is(lower, function)
        lower <- lower(object)[match(parnm, names(fixed), nomatch=0)==0]
      if(missing(upper) && !is.null(upper(object)))
        upper <- upper(object)[match(parnm, names(fixed), nomatch=0)==0]
    } else
    {
      lower <- -Inf
      upper <- Inf
    }
    # gradient function for optim(), negated because optim() minimises
    # NOTE(review): 'gr' and 'loglfoo' read 'data' and 'it' by scoping; these
    # are assigned inside the foreach body -- verify they resolve on the
    # doParallel (PSOCK) workers as intended.
    if(!is.null(body(object@gr)))
    {
      gr <- function(par)
      {
        pars <- as.list(par)
        names(pars) <- names(start)
        pars[fixnm] <- lapply(fixed, FLCore::iter, it)
        return(-1*(do.call(object@gr, args=c(pars, data))))
      }
    } else
      gr <- NULL
    # create logl function (negated log-likelihood for minimisation)
    loglfoo <- function(par) {
      pars <- as.list(par)
      names(pars) <- names(start)
      pars[fixnm] <- lapply(fixed, FLCore::iter, it)
      return(-1*(do.call(logl, args=c(pars, data))))
    }
    # input data: gather every slot/covar element named in the logl formals
    alldata <- list()
    # slots
    for(i in datanm[!datanm %in% names(covar(object))]) {
      alldata[[i]] <- slot(object, i)
    }
    if(length(covar(object)) > 0) {
      for (i in datanm[datanm%in%names(covar(object))]) {
        alldata[[i]] <- covar(object)[[i]]
      }
    }
    # add dimnames if used
    dimna <- dimnames(slot(object, datanm[1]))[names(slot(object, datanm[1]))%in%
      all.vars(object@model)]
    if(length(dimna) > 0)
    {
      # get them in the right shape
      dimdat <- lapply(dimna, function(x)
      {
        out <- slot(object, datanm[1])
        out[] <- as.numeric(x)
        return(out)
      })
      alldata <- c(alldata, dimdat)
    }
    # iterations: how many independent fits are required?
    if(seq.iter)
    {
      iterReps <- dims(object)$iter
      # iters in fixed
      if(length(fixnm) >= 1)
      {
        fiter <- unlist(lapply(fixed, length))
        if(!all(fiter == 1))
        {
          fiter <- fiter[fiter > 1]
          # all iters in fixed are equal?
          if(any(fiter/fiter[1] != 1))
            stop("objects in fixed have different number of iters")
          # are iter in object 1 and fixiter > 1? use fixiter
          # FIX: 'fiter' can have length > 1 here (all elements equal, checked
          # above); use its first element so the condition is scalar (required
          # for '&&' and valid under R >= 4.2) and 'iterReps' stays scalar.
          if(iterReps == 1 && fiter[1] > 1)
            iterReps <- fiter[1]
          # are they different and > 1? STOP
          else if(fiter[1] > 1 && fiter[1] != iterReps)
            stop("different iters in fixed and object")
        }
      }
    } else {
      iterReps <- 1
    }
    # logLik placeholder, one entry per iteration
    logLik <- rep(NA, iterReps)
    class(logLik) <- 'logLik'
    attr(logLik, 'df') <- length(parnm) - length(fixed)
    object@logLik <- logLik
    # Correct FLPar, fitted and residuals
    if(iterReps > dim(object@params)[length(dim(object@params))])
    {
      params(object) <- FLPar(iter=iterReps, params=dimnames(object@params)$params)
    }
    fitted(object) <- propagate(fitted(object), iterReps)
    residuals(object) <- propagate(residuals(object), iterReps)
    # vcov
    object@vcov <- array(NA, dim=c(rep(length(parnm)-length(fixed),2), iterReps),
      dimnames=list(parnm[!parnm%in%names(fixed)],parnm[!parnm%in%names(fixed)],
      iter=1:iterReps))
    object@hessian <- object@vcov
    # for (it in 1:iterReps) {
    # data
    # if(seq.iter){
    #  data <- lapply(alldata, FLCore::iter, it)
    # } else {
    #  data <- alldata
    # }
    # do preconversion of data objects
    # if(preconvert) {
    #  data <- lapply(data, c)
    # }
    #
    # start values: one named list of starting values per iteration
    if(missing(start)) {
      # add call to @initial
      if(is.function(object@initial)) {
        start <- lapply(seq_len(iterReps), function(x) as(do.call(object@initial,
                                             args = lapply(alldata,
                                                           FLCore::iter,
                                                           x)[names(formals(object@initial))]),
                                             'list'))
        # start <- as(do.call(object@initial,
        #                     args = data[names(formals(object@initial))]),
        #             'list')
      } else {
        start <- formals(logl)[names(formals(logl))%in%parnm]
      }
    } else {
      # HACK! clean up fixed list if elements are named vectors
      start <- lapply(start, function(x){ names(x) <- NULL; x})
    }
    # MAKE SURE ThiS STILL WORKS
    if(!is.null(fixnm)){
      start[fixnm] <- NULL
    }
    if(any(!names(start) %in% parnm)) {
      stop("some named arguments in 'start' are not arguments to the
      supplied log-likelihood")
    }
    ## START PARALLEL STUFF ##
    # doParallel/foreach leave their registration in a package-private
    # environment; clear it so later calls start from a clean slate.
    unregister <- function() {
      env <- foreach:::.foreachGlobals
      rm(list=ls(name=env), pos=env)
    } # close unregister function
    if (inParallel) {
      # FIX: guard against machines with <= 2 cores, where detectCores() - 2
      # would ask makeCluster() for 0 or negative workers.
      detectedCores <- max(1L, parallel::detectCores() - 2L)
      cl <- parallel::makeCluster(detectedCores)
      # NOTE(review): the cluster object is passed as 'cores';
      # registerDoParallel(cl) is the documented form -- confirm intent.
      doParallel::registerDoParallel(cores = cl)
    } else {
      # Register a sequential backend
      foreach::registerDoSEQ()
    }
    # One optim() run per iteration, dispatched through the registered backend
    out <- foreach(it = 1:iterReps,
                   # .export = c("alldata", "start", "parnm", "loglfoo", "gr"),
                   .packages = c("FLCore")) %dopar% {
      data <- lapply(alldata, FLCore::iter, it)
      startit <- start[[it]][order(match(names(start[[it]]), parnm))]
      # add small number to start if 0
      startit <- lapply(startit, function(x) if(x == 0) x/100000 else x)
      if(is.null(startit)) {
        stop("No starting values provided and no initial function available")
      }
      # TODO protect environment
      out <- do.call('optim', c(list(par = unlist(startit),
                                     fn=loglfoo,
                                     # parnm = parnm,
                                     method=method,
                                     hessian=TRUE,
                                     control=control,
                                     lower=lower,
                                     upper=upper,
                                     gr=gr)))
      out$iter <- it
      # warning if convergence is not 0, and do not load results
      if(out$convergence != 0) {
        warning("optimizer could not achieve convergence")
      }
      return(out)
    } # Close FOREACH
    # tear the backend down again
    if (inParallel) {
      parallel::stopCluster(cl = cl)
    } else {
      foreach::registerDoSEQ()
    }
    unregister()
    stopImplicitCluster()
    # output: copy each per-iteration optim() result back into 'object'
    for(ir in seq_len(iterReps)) {
      # place out$par in right iter dim
      FLCore::iter(object@params[names(start[[ir]]),], ir) <- out[[ir]]$par
      # fixed
      if(length(fixed) > 0) {
        FLCore::iter(object@params, ir)[fixnm,] <- unlist(lapply(fixed,
                                                                 FLCore::iter,
                                                                 ir))
      }
      # TODO make details list of lists if iter > 1?
      # FLCore::iter(object@details, ir) <- list(call="call",
      #                                          value=out[[ir]]$value,
      #                                          count=out[[ir]]$counts,
      #                                          convergence=out[[ir]]$convergence,
      #                                          message=out[[ir]]$message)
      object@details <- list(call=call, value=out[[ir]]$value,
                             count=out[[ir]]$counts,
                             convergence=out[[ir]]$convergence,
                             message=out[[ir]]$message)
      # vcov & hessian: invert the hessian; fall back to an empty matrix / 0
      coef <- out[[ir]]$par
      object@vcov[,,ir] <-
        if (length(coef))
        {
          if(det(out[[ir]]$hessian) != 0)
          {
            tmphess <- try(solve(out[[ir]]$hessian), silent=TRUE)
            # FIX: test failure with inherits() rather than class() comparison
            if(inherits(tmphess, 'try-error'))
            {
              matrix(numeric(0), length(coef), length(coef), dimnames=list(names(coef),
                names(coef)))
            } else
              tmphess
          } else
            0
        } else
          0
      object@hessian[,,ir] <- -out[[ir]]$hessian
      # logLik
      object@logLik[ir] <- -out[[ir]]$value
      attr(object@logLik, 'nobs') <- length(lapply(alldata, FLCore::iter, ir)[[1]])
      # fitted & residuals
      FLCore::iter(fitted(object), ir) <- predict(FLCore::iter(object, ir))
      FLCore::iter(residuals(object), ir) <- FLCore::iter(slot(object,
        as.list(object@model)[[2]]), ir) - FLCore::iter(fitted(object), ir)
      # force dimnames[1:5] in 'fitted' and 'residuals' to match
      dimnames(fitted(object))[1:5] <- dimnames(do.call(as.character(as.list(object@model)[2]),
        list(object)))[1:5]
      dimnames(residuals(object)) <- dimnames(fitted(object))
    } # CLOSE output loop ir
    # return object
    return(object)
  }
) # }}}
|
bf87b0e45ce189d824da0cd4bcb426bd9932e94f
|
5ed86a18aac40468d7516f94583cbd06ea03dc47
|
/R/subset_run.R
|
f0176960013420ebcad004cae63b89d9fd89c880
|
[
"MIT"
] |
permissive
|
diazrenata/cvlt
|
22e8a758ff7098a042712ed4e21936af7980c2ca
|
e90c762361d2ec427210e207a82d4b82cdc5fbcb
|
refs/heads/main
| 2023-04-13T11:01:07.547451
| 2021-07-07T15:08:50
| 2021-07-07T15:08:50
| 363,262,562
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,878
|
r
|
subset_run.R
|
#' Run LDATS on a single dataset subset
#'
#' This function runs on a single subset (e.g. the dataset with timestep 1 as the test timestep). Run `fit_ldats_crossval` to run this function on every subset.
#'
#' First, fits an LDA to the *full* (not subsetted) dataset. Then splits the matrix of topic proportions (`gamma` matrix) for that LDA into training/test subsets to match the subset. (The LDA is fit to the full dataset, because LDAs fit to different subsets cannot be recombined in a logical way).
#'
#' Then fits a TS model to the *subsetted* `gamma` matrix, with the specified number of iterations & changepoints.
#'
#' Then extracts from that TS model the predicted abundances (multinomial probability distribution of species abundances) for each timestep. Because of the Bayesian component of the changepoint model, there is a matrix of predicted abundances per timestep *for every draw from the posterior*, so `nit` matrices. Then calculates the loglikelihood of the test timestep given these predicted probabilities. There are `nit` estimates of the loglikelihood.
#'
#' Returns the subsetted dataset item list provided, with the following appendend: The LDA, TS, and abundance probabilities (if `return_full = TRUE`), or as NULL otherwise; the vector of loglikelihoods for the test timestep for each iteration; a list `model_info` with the model specifications `(k, seed, cpts, nit)`
#'
#'
#'
#' @param subsetted_dataset_item Result of subset_data_one, list with elements `$full`, `$train`, `$test`, `$test_timestep`
#' @param k integer Number of topics for the LDA model.
#' @param lda_seed integer Seed for running LDA model. Only use even numbers (odd numbers duplicate adjacent evens).
#' @param cpts integer How many changepoints for ts?
#' @param nit integer How many iterations? (draws from posterior)
#' @param return_full logical Whether to return fitted model objects and abundance probabilities in addition to logliks. Can be useful for diagnostics, but hogs memory. Default FALSE.
#' @param cpt_seed integer what seed to use for the cpt model. If NULL (default) randomly draws one and records it as part of the model_info
#'
#' @return list. subsetted_dataset_item with the following appended: If `return_full`, fitted_lda; fitted_ts; abund_probabilities, otherwise NULL; test_logliks, model_info
#' @export
#'
#' @importFrom LDATS TS_on_LDA TS_control
ldats_subset_one <- function(subsetted_dataset_item,
                             k,
                             lda_seed,
                             cpts,
                             nit,
                             return_full = FALSE,
                             cpt_seed = NULL) {

  # Fit the LDA to the FULL dataset (LDAs fit to subsets cannot be
  # recombined): k > 0 fits a k-topic model, k == 0 uses the means model.
  if(k > 0) {
    # Fit LDA with `k` topics and `seed` to the FULL abundance timeseries
    fitted_lda <- LDA_set_user_seeds(
      document_term_table = subsetted_dataset_item$full$abundance,
      topics = k,
      seed = lda_seed)[[1]]
  } else if (k == 0) {
    fitted_lda <- fit_means_lda(subsetted_dataset_item, lda_seed)
  } else {
    # FIX: a negative k previously fell through both branches and later
    # surfaced as an obscure "object 'fitted_lda' not found" error.
    stop("`k` must be a non-negative integer", call. = FALSE)
  }

  # Subset the gammas and loglikelihoods for that LDA to match the train/test split for this subset
  subsetted_lda <- subset_lda(fitted_lda, subsetted_dataset_item)

  # Draw a changepoint-model seed when the caller did not supply one; it is
  # recorded in `model_info` below so the fit can be reproduced.
  if(is.null(cpt_seed)) {
    cpt_seed <- sample.int(100000000, size = 1)
  }

  # Fit TS model with `cpts` and `nit` to the subsetted gammas
  fitted_ts <- LDATS::TS_on_LDA(subsetted_lda,
                                document_covariate_table = as.data.frame(subsetted_dataset_item$train$covariates),
                                timename = "year",
                                formulas = ~1,
                                nchangepoints = cpts,
                                control = LDATS::TS_control(nit = nit, seed = cpt_seed))[[1]]

  # Extract predicted multinomial predictions for all years and all draws from posterior
  abund_probabilities <- get_abund_probabilities(
    subsetted_dataset_item,
    subsetted_lda,
    fitted_ts
  )

  # Calculate loglikelihood of test timestep for each draw from the posterior
  test_logliks <- get_test_loglik(
    subsetted_dataset_item,
    abund_probabilities
  )

  # Keep the heavyweight fitted objects only on request; otherwise NULL them
  # out so large cross-validation runs do not exhaust memory.
  if(return_full) {
    subsetted_dataset_item$fitted_lda <- subsetted_lda
    subsetted_dataset_item$fitted_ts <- fitted_ts
    subsetted_dataset_item$abund_probabilities <- abund_probabilities
  } else {
    subsetted_dataset_item$fitted_lda <- NULL
    subsetted_dataset_item$fitted_ts <- NULL
    subsetted_dataset_item$abund_probabilities <- NULL
  }

  subsetted_dataset_item$test_logliks <- test_logliks

  # Record the full model specification alongside the results
  subsetted_dataset_item$model_info <- data.frame(
    k = k,
    lda_seed = lda_seed,
    cpts = cpts,
    cpt_seed = cpt_seed,
    nit = nit,
    test_step = subsetted_dataset_item$test_timestep,
    test_year = subsetted_dataset_item$test$covariates$year[[1]],
    mean_test_loglik = mean(subsetted_dataset_item$test_logliks))

  return(subsetted_dataset_item)
}
|
2c737fd84c2d24ee883a80cad8759be6d3193f8e
|
6502ceccc2bed9aa21f2f1fa02abd59f8a2b9e07
|
/R/hello.R
|
07fe2acd437c02e0ba43445737addecc605b76ac
|
[] |
no_license
|
zahrakhoshmanesh/happyR
|
14b6fe8f16eedfbb35d1bed381a99ca8ab68e633
|
6dad6a939de6b3f10e8cc6e7a8368a0ee5671c3c
|
refs/heads/master
| 2020-04-30T13:19:13.595524
| 2019-03-21T04:08:37
| 2019-03-21T04:08:37
| 176,853,622
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 161
|
r
|
hello.R
|
#' Print a greeting for a name.
#'
#' @param s A character string: the name to greet.
#' @export
#' @return A character string of the form "hello <s>".
#' @examples
#' hello("Sam")
hello <- function(s){
  # FIX (docs): @param previously documented a non-existent argument `x`,
  # and the example string was unterminated (`hello("Sam)`).
  sprintf("hello %s", s)
}
|
22abfb5f1a3bb7e626df76926fbfe13e627e03fd
|
8fb171e8e1827efd6b615fe2ce41f126a1fb4564
|
/man/markov_expanded.Rd
|
63c0fdb970cb7d19729cda8f3500f40d4f0426fc
|
[
"MIT"
] |
permissive
|
HealthEconomicsHackathon/hermes6
|
745b9e1b85c0e22c5f8cff7f728cbfbe8e480d0b
|
2ee4db4e99d6c02dcc3800f95737c2914bf0b0f5
|
refs/heads/master
| 2020-09-05T04:12:46.566159
| 2020-04-07T14:16:33
| 2020-04-07T14:16:33
| 219,979,028
| 4
| 1
|
NOASSERTION
| 2020-04-07T14:16:34
| 2019-11-06T11:15:45
|
R
|
UTF-8
|
R
| false
| true
| 340
|
rd
|
markov_expanded.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/markov_expanded_probabilistic.R
\name{markov_expanded}
\alias{markov_expanded}
\title{Reduced dimensions in markov smoking probabilistic model}
\usage{
markov_expanded()
}
\value{
Output
}
\description{
Reduced dimensions in markov smoking probabilistic model
}
|
7ed16d96f1f53070590f59748ad888519cf5eae1
|
d5fbf5023653a02e38a090e6365f100f078716c8
|
/Predictions.R
|
c239a662ccc2e8c9e080884cb9cbd98be1497b6c
|
[] |
no_license
|
graceedriggs/Fake-News
|
d0c42d2acaf7344bc6327a60f15eba56468fa39d
|
956a1cdbc796434357c867f1880e3a6ec9d939e7
|
refs/heads/main
| 2023-01-08T16:51:41.607886
| 2020-11-05T04:44:17
| 2020-11-05T04:44:17
| 310,192,244
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,436
|
r
|
Predictions.R
|
##
## Predictions for Fake News
##

## Libraries
library(tidyverse)
library(caret)

# Pre-cleaned train/test splits produced by the cleaning script
clean.train <- read_csv("./CleanTrain.csv")
clean.test <- read_csv("./CleanTest.csv")
clean.fakenews <- read_csv("./CleanFakeNews.csv")

# How many missing values remain after cleaning?
sum(is.na(clean.train))

## replace the NAs with 0
clean.train[is.na(clean.train)] <- 0
clean.train$isFake <- as.factor(clean.train$isFake)

### XGB TREE PREDICTIONS
# Single xgbTree fit (fixed tuning grid) with 5-fold CV on the training data
gbmod <- train(form=as.factor(isFake)~.,
               data=clean.train %>% select(-Id),
               method="xgbTree",
               trControl=trainControl(method="cv",
                                      number=5),
               tuneGrid = expand.grid(nrounds=100, # Boosting Iterations
                                      max_depth=3, #Max Tree Depth
                                      eta=0.3, #(Shrinkage)
                                      gamma=1,
                                      colsample_bytree=1,# (Subsample Ratio of Columns)
                                      min_child_weight=1,# (Minimum Sum of Instance Weight)
                                      subsample=1)# (Subsample Percentage)0)
)

preds <- predict(gbmod, newdata=clean.test)
predframe <- data.frame(id=clean.test$Id, label=preds)

#predict
# FIX: this line previously referenced `rf_default` (never defined in this
# script; the fitted model is `gbmod`) and `clean.test$id` (the column is `Id`).
predictions <- data.frame(id=clean.test$Id, label=(predict(gbmod, newdata=clean.test)))

# NOTE(review): absolute, user-specific output path -- consider a relative path.
write.csv(predframe,"/Users/graceedriggs/Documents/STAT 495/Fake-News/GD_XGB_Predictions.csv", row.names = FALSE)
|
185786aba25b3b2a2d8f80af4afc5bfb64d1a465
|
8e80765a6b757263315ffb8e42718fbb1359c924
|
/ch04/vector_matrix.R
|
8b6d5aac08b000365d044d3a396c2a5359de6ace
|
[] |
no_license
|
sycho0311/R-Basic
|
5c0c5499535b454f5399b99c8558356acfb56eb4
|
39ba67a34a0812faef7ef7a2e9b9ff15b9bd7cb9
|
refs/heads/master
| 2020-04-01T17:35:17.387095
| 2018-10-17T10:24:12
| 2018-10-17T10:24:12
| 147,784,310
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 503
|
r
|
vector_matrix.R
|
# Build a 4 x 3 score matrix, filled column-first (four values per column).
score <- matrix(c(80, 60, 90, 70, 70, 50, 100, 80, 95, 70, 95, 80), nrow = 4)
# a matrix having 4 rows
score[2, 2] <- score[2, 2] + 20
print(score)
kor <- score[, 1] # fetching the data of a single column only (drops to a vector)
print(kor)
kor <- score[, 1, drop=FALSE] # when you want the data as a matrix rather than a vector
print(score[c(3,4), c(1, 2)]) # select rows with a vector, select columns with a vector
# row 3 col 1 and row 3 col 2 form one row; row 4 col 1 and row 4 col 2 form the next row
|
1ebbf4cf472c578c3789b29c9057f6f3bc4efe8c
|
14bbda859b6b252c1793b3b5d5e293ad6aed2ce4
|
/SupportingScripts/CS2FeatureSelection.R
|
bf469dea141d8fbb4c23b204d58f87cfec878941
|
[] |
no_license
|
NicoleABartholow/MSDS6306CaseStudy2
|
5bbd515d626df0262a3d334c64dfb45c14a61552
|
a10188ae5abc977b8481a6dcbdf273e86296bd97
|
refs/heads/master
| 2020-04-09T00:17:27.043235
| 2018-12-10T06:32:19
| 2018-12-10T06:32:19
| 159,860,641
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,635
|
r
|
CS2FeatureSelection.R
|
#Predictions for model using Test dataset
# NOTE(review): despite its name, DFTrainFS is built from DFTest here, and the
# RFE below both trains and evaluates on the test split -- confirm intent.
TestAttrition <- DFTest$Attrition
DFTrainFS<- subset(DFTest, select=-c(Attrition ))
# Recursive feature elimination using the naive-Bayes helper functions
ctrl <- rfeControl(functions = nbFuncs)
rfe1 <- rfe(x=DFTrainFS, y=TestAttrition, sizes = 2^(2:27), maxit = 10, metric = "Accuracy" , rfeControl = ctrl, testx = DFTrainFS, testy = TestAttrition)
# Recorded output from a previous run:
# Variables Accuracy  Kappa AccuracySD KappaSD Selected
#         4   0.7807 0.3022    0.04505  0.1072
#         8   0.7627 0.3067    0.05760  0.1157
#        16   0.7847 0.3222    0.04490  0.1066
#        30   0.8291 0.3769    0.04663  0.1252        *
# The top 5 variables (out of 30):
#   MonthlyIncome, TotalWorkingYears, JobLevel, Age, YearsAtCompany
#Feature Selection Analysis
#```{r Feature selection }
# Candidate feature frames for RFE: hand-picked columns from the
# train (DFTrainRel) and validation (DFValRel) relabelled data frames.
DFTrainFS <- cbind.data.frame(DFTrainRel$ID, DFTrainRel$BusinessTravel, DFTrainRel$Department, DFTrainRel$Education, DFTrainRel$EducationField, DFTrainRel$EnvironmentSatisfaction, DFTrainRel$Gender, DFTrainRel$HourlyRate, DFTrainRel$JobInvolvement, DFTrainRel$JobLevel, DFTrainRel$JobRole, DFTrainRel$JobSatisfaction, DFTrainRel$MaritalStatus, DFTrainRel$MonthlyRate, DFTrainRel$NumCompaniesWorked, DFTrainRel$OverTime, DFTrainRel$PercentSalaryHike, DFTrainRel$PerformanceRating, DFTrainRel$RelationshipSatisfaction, DFTrainRel$StockOptionLevel, DFTrainRel$TotalWorkingYears, DFTrainRel$TrainingTimesLastYear, DFTrainRel$WorkLifeBalance, DFTrainRel$YearsAtCompany, DFTrainRel$YearsSinceLastPromotion, DFTrainRel$YearsWithCurrManager, DFTrainRel$YearsInCurrentRole )
DFValFS <- cbind.data.frame(DFValRel$ID, DFValRel$BusinessTravel, DFValRel$Department, DFValRel$Education, DFValRel$EducationField, DFValRel$EnvironmentSatisfaction, DFValRel$Gender, DFValRel$HourlyRate, DFValRel$JobInvolvement, DFValRel$JobLevel, DFValRel$JobRole, DFValRel$JobSatisfaction, DFValRel$MaritalStatus, DFValRel$MonthlyRate, DFValRel$NumCompaniesWorked, DFValRel$OverTime, DFValRel$PercentSalaryHike, DFValRel$PerformanceRating, DFValRel$RelationshipSatisfaction, DFValRel$StockOptionLevel, DFValRel$TotalWorkingYears, DFValRel$TrainingTimesLastYear, DFValRel$WorkLifeBalance, DFValRel$YearsAtCompany, DFValRel$YearsSinceLastPromotion, DFValRel$YearsWithCurrManager, DFValRel$YearsInCurrentRole )
#TrainAttrition <- as.integer(DFTrainRel$Attrition)-1
#TestAttrition <- as.integer(DFValRel$Attrition)-1
TrainAttrition <- DFTrainRel$Attrition
TestAttrition <- DFValRel$Attrition
# First pass: naive-Bayes RFE over the full candidate set
ctrl <- rfeControl(functions = nbFuncs)
rfe1 <- rfe(x=DFTrainFS, y=TrainAttrition, sizes = c(2,5), maxit = 10, metric = "Accuracy" , rfeControl = ctrl, testx = DFValFS, testy = TestAttrition)
#top5 - DFTrainRel$OverTime, DFTrainRel$TotalWorkingYears, DFTrainRel$YearsAtCompany, DFTrainRel$MaritalStatus, DFTrainRel$YearsInCurrentRole
#Variables Accuracy  Kappa AccuracySD KappaSD Selected
#        4   0.8282 0.1955    0.02308 0.07625
#        8   0.8326 0.2846    0.02641 0.04974
#       16   0.8509 0.2403    0.01554 0.06693        *
#       27   0.8498 0.1529    0.01489 0.08675
#Remove some correlated variables - Monthly Income, Total Working Years, Performance rating, Years since last propotion, yeasr with current manager
# Second pass: same RFE on the reduced (decorrelated) candidate set
DFTrainFS <- cbind.data.frame(DFTrainRel$ID, DFTrainRel$BusinessTravel, DFTrainRel$Education, DFTrainRel$EducationField, DFTrainRel$EnvironmentSatisfaction, DFTrainRel$Gender, DFTrainRel$HourlyRate, DFTrainRel$JobInvolvement, DFTrainRel$JobLevel, DFTrainRel$JobRole, DFTrainRel$JobSatisfaction, DFTrainRel$MaritalStatus, DFTrainRel$MonthlyRate, DFTrainRel$NumCompaniesWorked, DFTrainRel$OverTime, DFTrainRel$PercentSalaryHike, DFTrainRel$RelationshipSatisfaction, DFTrainRel$StockOptionLevel, DFTrainRel$TrainingTimesLastYear, DFTrainRel$WorkLifeBalance, DFTrainRel$YearsInCurrentRole )
DFValFS <- cbind.data.frame(DFValRel$ID, DFValRel$BusinessTravel, DFValRel$Education, DFValRel$EducationField, DFValRel$EnvironmentSatisfaction, DFValRel$Gender, DFValRel$HourlyRate, DFValRel$JobInvolvement, DFValRel$JobLevel, DFValRel$JobRole, DFValRel$JobSatisfaction, DFValRel$MaritalStatus, DFValRel$MonthlyRate, DFValRel$NumCompaniesWorked, DFValRel$OverTime, DFValRel$PercentSalaryHike, DFValRel$RelationshipSatisfaction, DFValRel$StockOptionLevel, DFValRel$TrainingTimesLastYear, DFValRel$WorkLifeBalance, DFValRel$YearsInCurrentRole )
rfe2 <- rfe(x=DFTrainFS, y=TrainAttrition, sizes = 2^(2:27), maxit = 10, metric = "Accuracy" , rfeControl = ctrl, testx = DFValFS, testy = TestAttrition)
rfe2
# Recorded outputs from previous runs of the second pass:
#top5 - DFTrainRel$OverTime, DFTrainRel$YearsAtCompany, DFTrainRel$StockOptionLevel, DFTrainRel$YearsInCurrentRole, DFTrainRel$MaritalStatus
#Variables Accuracy  Kappa AccuracySD KappaSD Selected
#        2   0.8309 0.0356    0.02096 0.06053
#        5   0.8255 0.2204    0.02494 0.08264
#       25   0.8499 0.1189    0.01379 0.05350
#top5 - DFTrainRel$OverTime, DFTrainRel$YearsAtCompany, DFTrainRel$YearsInCurrentRole, DFTrainRel$YearsWithCurrManager, DFTrainRel$JobLevel
#Variables Accuracy  Kappa AccuracySD KappaSD Selected
#        4   0.8239 0.1511    0.02436 0.05541
#        8   0.8421 0.2606    0.01742 0.06804
#       16   0.8532 0.1969    0.01763 0.06772        *
#       24   0.8491 0.1407    0.01734 0.05961
#top5 - DFTrainRel$OverTime, DFTrainRel$YearsAtCompany, DFTrainRel$StockOptionLevel, DFTrainRel$MaritalStatus, DFTrainRel$YearsInCurrentRole
#        4   0.8387 0.1787    0.01494 0.07049
#        8   0.8541 0.2321    0.01106 0.06567
#       16   0.8548 0.1483    0.01251 0.06738        *
#       22   0.8509 0.1069    0.01230 0.05773
#top5 - DFTrainRel$OverTime, DFTrainRel$StockOptionLevel, DFTrainRel$MaritalStatus, DFTrainRel$YearsInCurrentRole, DFTrainRel$JobLevel
#        4   0.8455 0.1613    0.01242 0.09275
#        8   0.8560 0.1868    0.01384 0.09583        *
#       16   0.8496 0.1024    0.01515 0.06421
#       21   0.8478 0.0809    0.01430 0.05013
#```
#```{r rfe}
###Code used to identify 5 best parameters to use when applying Naive Bayes. Take time and processing power.
#####THIS IS THE BEST PART
# FIX: removed a stray trailing '1' after the closing parenthesis that made
# this call a syntax error.
a <- rfe(x=DFTrainFS, y=TrainAttrition, sizes = c(2,5), maxit = 10, metric = "Accuracy" , rfeControl = ctrl, testx = DFValFS)
# FIX: rfe() stores its per-size metrics in the 'results' element;
# 'a.results' was never defined.
a$results
#top5 - DFTrainRel$OverTime, DFTrainRel$TotalWorkingYears, DFTrainRel$YearsAtCompany, DFTrainRel$StockOptionLevel, DFTrainRel$MaritalStatus
#Variables Accuracy  Kappa AccuracySD KappaSD Selected
#        4   0.8339 0.2035    0.01486 0.07075
#        8   0.8353 0.2942    0.01807 0.06181
#       16   0.8609 0.2873    0.01107 0.06562        *
#       27   0.8553 0.1725    0.01195 0.06204
# FIX: commented out a stray R Markdown code fence (not valid R syntax):
#```
|
4714f3b4864257dadf72cd83eb44961a1901ed9c
|
1a4c806a0745850e725040bca561fa109c0d5019
|
/stat/peptide/ResidueMaster.R
|
6c85bc7ccec4683cbaebed351c8af5b00ec3ee12
|
[
"Apache-2.0"
] |
permissive
|
tipplerow/jam
|
9037ae5aefd6f38315855c0f893eb8fa9d842bf3
|
464b78819f064a248425b4703dd96fa4f337ea67
|
refs/heads/master
| 2021-05-09T05:19:08.698810
| 2021-02-12T17:01:29
| 2021-02-12T17:01:29
| 119,304,231
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 219
|
r
|
ResidueMaster.R
|
# Build the path to the residue master file: $JAM_HOME/data/residue_master.csv.
# When JAM_HOME is unset, Sys.getenv() yields "" and the result is the
# relative-looking path "/data/residue_master.csv".
ResidueMaster.find <- function() {
    jam.home <- Sys.getenv("JAM_HOME", names = FALSE)
    file.path(jam.home, "data", "residue_master.csv")
}
# Read the residue master table from disk; strip.white trims any padding
# around CSV fields.
ResidueMaster.load <- function() {
    master.file <- ResidueMaster.find()
    read.csv(master.file, strip.white = TRUE)
}
|
c4c3da7f45d791c24abc2c5dd66902c7906eff61
|
5589f0f59c12712e6765850fcff0fb46028287e7
|
/man/dirichlet.mle.Rd
|
2f078a6040d6c28c5057c170641e6dc52e296a84
|
[
"MIT"
] |
permissive
|
bcbritt/brittnu
|
e93ef06fef2d1db7b11637b59a80573d5fda3c56
|
17e3b4dc11f3c03ddeef7b3cdadde350811904e2
|
refs/heads/master
| 2023-05-14T19:40:29.449482
| 2021-03-31T06:27:15
| 2021-03-31T06:27:15
| 352,232,783
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,818
|
rd
|
dirichlet.mle.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/brittnu.R
\name{dirichlet.mle}
\alias{dirichlet.mle}
\title{Dirichlet.MLE}
\usage{
dirichlet.mle(
x,
weights = NULL,
eps = 10^(-5),
convcrit = 1e-05,
maxit = 1000,
oldfac = 0.3,
verbose = FALSE
)
}
\arguments{
\item{x}{A list with each entry being a single cross-validation iteration;
each list entry should be either the output from a
\code{\link[topicmodels]{LDA}} function call or a 2D double vector with
distributions as the rows and each category within each distribution as the
columns (such that each row in the vector sums to 1)}
\item{weights}{A numeric vector used to calibrate the initial estimates of
concentration parameters}
\item{eps}{A numeric value used as a tolerance parameter to prevent
logarithms of zero}
\item{convcrit}{A numeric value indicating the threshold for convergence used
to estimate concentration parameters}
\item{maxit}{A numeric value indicating the maximum number of iterations used
to estimate concentration parameters}
\item{oldfac}{A numeric value between 0 and 1 used as the convergence
acceleration factor}
\item{verbose}{A boolean value indicating whether progress updates should be
provided}
}
\value{
A list of the estimated concentration parameters, the sum of those
estimated concentration parameters, and the ratio between each estimated
concentration parameter and the sum of those parameters
}
\description{
This helper function estimates the concentration parameters of
the Dirichlet distribution underlying multiple deviates, with no
restrictions placed on the values of those concentration parameters. This
function is heavily based on \code{\link[sirt]{dirichlet.mle}}, with
modifications to avoid potential singularities in the estimation procedure.
}
|
f4d9961f7791fd4addb0a03c9ae398dcd4464850
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/gsw/examples/gsw_pot_enthalpy_ice_freezing_first_derivatives.Rd.R
|
fd2f838b297c46712a84447907e8d9b754545b77
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 758
|
r
|
gsw_pot_enthalpy_ice_freezing_first_derivatives.Rd.R
|
library(gsw)
### Name: gsw_pot_enthalpy_ice_freezing_first_derivatives
### Title: First Derivatives of Potential Enthalpy
### Aliases: gsw_pot_enthalpy_ice_freezing_first_derivatives
### ** Examples
# Test inputs: SA and p vectors (presumably Absolute Salinity in g/kg and
# sea pressure in dbar, per gsw conventions -- TODO confirm).
SA <- c(34.7118, 34.8915, 35.0256, 34.8472, 34.7366, 34.7324)
p <- c( 10, 50, 125, 250, 600, 1000)
# Derivatives of potential enthalpy of ice at freezing with respect to SA and p.
r <- gsw_pot_enthalpy_ice_freezing_first_derivatives(SA, p)
# Compare against reference values; results are rescaled to order 1 so the
# default expect_equal() tolerance is meaningful.
expect_equal(r$pot_enthalpy_ice_freezing_SA/1e2,
c(-1.183484968590718, -1.184125268891200, -1.184619267864844,
-1.184026131143674, -1.183727706650925, -1.183814873741961))
expect_equal(r$pot_enthalpy_ice_freezing_p/1e-3,
c(-0.202880939983260, -0.203087335312542, -0.203473018454630,
-0.204112435106666, -0.205889571619502, -0.207895691215823))
|
d5edb6c40c1d26a3822b2e17717455f681a0c4b4
|
2a71a1e4b6d183da7d6bbf3adf76955fc1d4254e
|
/ROS/KidIQ/kidiq.R
|
4c13f87456b5d9d49740fc837c8c34cdc1796ec6
|
[] |
no_license
|
lizzieinclass/oeb201
|
fc3f8babb2b63ece6208cf954c5e68572ffe8369
|
3d09ca890642cb052721aba174c5b938ef621ed9
|
refs/heads/master
| 2021-01-23T12:55:26.435075
| 2017-11-29T05:29:24
| 2017-11-29T05:29:24
| 102,660,185
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,603
|
r
|
kidiq.R
|
# Kid IQ regression examples (NLSY data): classical and Bayesian fits of
# child test score on maternal predictors, plus base-graphics and ggplot2
# versions of each figure.
# setwd("~/AndrewFiles/books/regression.and.other.stories/examples/KidIQ")
library("arm")
library("rstanarm")
options(mc.cores = parallel::detectCores())
library("ggplot2")
library("bayesplot")
theme_set(bayesplot::theme_default(base_family = "sans"))
## code for nlsy kid iq data for Chapter 3
library("foreign")
# NOTE(review): "RIS/KidIQ/kidiq.dta" may be a typo for "ROS/KidIQ/..."
# (this script lives under ROS/KidIQ) -- verify the path before running.
kidiq <- read.dta(file="RIS/KidIQ/kidiq.dta")
### fitting and summarizing regressions in R
fit_3 <- lm(kid_score ~ mom_hs + mom_iq, data=kidiq)
display(fit_3)
print(fit_3)
summary(fit_3)
# Bayesian counterpart of fit_3 via rstanarm.
stan_fit_3 <- stan_glm(kid_score ~ mom_hs + mom_iq, data=kidiq)
print(stan_fit_3)
### graphical displays of data and fitted models
fit_2 <- lm(kid_score ~ mom_iq, data=kidiq)
plot(kidiq$mom_iq, kidiq$kid_score, xlab="Mother IQ score", ylab="Child test score")
abline(coef(fit_2)[1], coef(fit_2)[2])
# alternately
curve(cbind(1,x) %*% coef(fit_2), add=TRUE)
# ggplot version
ggplot(kidiq, aes(mom_iq, kid_score)) +
  geom_point() +
  geom_abline(
    intercept = coef(fit_2)[1],
    slope = coef(fit_2)[2]
  ) +
  labs(
    x = "Mother IQ score",
    y = "Child test score"
  )
### two fitted regression lines
## model with no interaction
fit_3 <- lm(kid_score ~ mom_hs + mom_iq, data=kidiq)
colors <- ifelse(kidiq$mom_hs==1, "black", "gray")
plot(kidiq$mom_iq, kidiq$kid_score,
  xlab="Mother IQ score", ylab="Child test score", col=colors, pch=20)
b_hat <- coef(fit_3)
# Two parallel lines: intercept shifts by the mom_hs coefficient.
abline(b_hat[1] + b_hat[2], b_hat[3], col="black")
abline(b_hat[1], b_hat[3], col="gray")
# ggplot version
ggplot(kidiq, aes(mom_iq, kid_score)) +
  geom_point(aes(color = factor(mom_hs)), show.legend = FALSE) +
  geom_abline(
    intercept = c(coef(fit_3)[1], coef(fit_3)[1] + coef(fit_3)[2]),
    slope = coef(fit_3)[3],
    color = c("gray", "black")
  ) +
  scale_color_manual(values = c("gray", "black")) +
  labs(
    x = "Mother IQ score",
    y = "Child test score"
  )
### two fitted regression lines:
## model with interaction
fit_4 <- lm(kid_score ~ mom_hs + mom_iq + mom_hs:mom_iq, data=kidiq)
colors <- ifelse(kidiq$mom_hs==1, "black", "gray")
plot(kidiq$mom_iq, kidiq$kid_score,
  xlab="Mother IQ score", ylab="Child test score", col=colors, pch=20)
b_hat <- coef(fit_4)
# With the interaction both intercept and slope differ between groups.
abline(b_hat[1] + b_hat[2], b_hat[3] + b_hat[4], col="black")
abline(b_hat[1], b_hat[3], col="gray")
# ggplot version
ggplot(kidiq, aes(mom_iq, kid_score)) +
  geom_point(aes(color = factor(mom_hs)), show.legend = FALSE) +
  geom_abline(
    intercept = c(coef(fit_4)[1], sum(coef(fit_4)[1:2])),
    slope = c(coef(fit_4)[3], sum(coef(fit_4)[3:4])),
    color = c("gray", "black")
  ) +
  scale_color_manual(values = c("gray", "black")) +
  labs(
    x = "Mother IQ score",
    y = "Child test score"
  )
## displaying uncertainty in the fitted regression
stan_fit_2 <- stan_glm(kid_score ~ mom_iq, data = kidiq)
print(stan_fit_2)
sims_2 <- as.matrix(stan_fit_2)
n_sims_2 <- nrow(sims_2)
# Overlay 10 random posterior draws (gray) plus the point estimate (black).
subset <- sample(n_sims_2, 10)
plot(kidiq$mom_iq, kidiq$kid_score,
  xlab="Mother IQ score", ylab="Child test score")
for (i in subset){
  abline(sims_2[i,1], sims_2[i,2], col="gray")
}
abline(coef(stan_fit_2)[1], coef(stan_fit_2)[2], col="black")
# ggplot version
ggplot(kidiq, aes(mom_iq, kid_score)) +
  geom_point() +
  geom_abline(
    intercept = sims_2[subset, 1],
    slope = sims_2[subset, 2],
    color = "gray",
    size = 0.25
  ) +
  geom_abline(
    intercept = coef(stan_fit_2)[1],
    slope = coef(stan_fit_2)[2],
    size = 0.75
  ) +
  labs(
    x = "Mother IQ score",
    y = "Child test score"
  )
## 2 plots
stan_fit_3 <- stan_glm(kid_score ~ mom_hs + mom_iq, data=kidiq)
sims_3 <- as.matrix(stan_fit_3)
n_sims_3 <- nrow(sims_3)
pdf("kidiq.betasim2.pdf", height=3.5, width=9)
par(mar=c(3,3,1,3), mgp=c(1.7, .5, 0), tck=-.01)
par(mfrow=c(1,2))
# Left panel: score vs mother IQ, holding mom_hs at its mean.
plot(kidiq$mom_iq, kidiq$kid_score, xlab="Mother IQ score", ylab="Child test score", bty="l", pch=20, xaxt="n", yaxt="n")
axis(1, seq(80, 140, 20))
axis(2, seq(20, 140, 40))
mom_hs_bar <- mean(kidiq$mom_hs)
# NOTE(review): samples indices from n_sims_2 but indexes into sims_3 below;
# this is only safe if both fits produced the same number of draws -- verify
# (n_sims_3 is computed above but never used).
subset <- sample(n_sims_2, 10)
for (i in subset){
  curve(cbind(1, mom_hs_bar, x) %*% sims_3[i,1:3], lwd=.5,
    col="gray", add=TRUE)
}
curve(cbind(1, mom_hs_bar, x) %*% coef(stan_fit_3), col="black", add=TRUE)
# Right panel: score vs mom_hs (jittered 0/1), holding mom_iq at its mean.
jitt <- runif(nrow(kidiq), -.03, .03)
plot(kidiq$mom_hs + jitt, kidiq$kid_score, xlab="Mother completed high school", ylab="Child test score", bty="l", pch=20, xaxt="n", yaxt="n")
axis(1, c(0,1))
axis(2, seq(20, 140, 40))
mom_iq_bar <- mean(kidiq$mom_iq)
for (i in subset){
  curve(cbind(1, x, mom_iq_bar) %*% sims_3[i,1:3], lwd=.5,
    col="gray", add=TRUE)
}
curve(cbind(1, x, mom_iq_bar) %*% coef(stan_fit_3), col="black", add=TRUE)
dev.off()
|
25d63f3c6cf54a3ca76fb75d41d3396e5f9aa277
|
5a10a60157e344bcdea3b2a5af1ef150cd5864e3
|
/R/hct_method_MC_corr.R
|
8b563c94d7feaf99bee9b5efe4cbecf8a79f3fc1
|
[] |
no_license
|
cran/HDDesign
|
7d7044a1d3ae5f2e87bb2d24cbf2811be147cc29
|
5ea3d93332514d337fcbf151be677e19072e6b05
|
refs/heads/master
| 2020-04-02T04:11:11.704074
| 2016-06-11T09:37:25
| 2016-06-11T09:37:25
| 60,896,538
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 318
|
r
|
hct_method_MC_corr.R
|
# Average the results of `nrep` Monte Carlo replicates of
# hct_method_single_MC_corr(), forwarding all tuning arguments unchanged.
#
# When each replicate returns a scalar (ss = FALSE), sapply() yields a
# vector and the overall mean is returned; when each replicate returns a
# vector (ss = TRUE), sapply() yields a matrix with one column per
# replicate and the element-wise (row) means are returned.
hct_method_MC_corr <-
function(mu0, p, m, n, hct, alpha0, nrep, p1=0.5, ss=FALSE, ntest, pcorr, chol.rho,sampling.p)
{
  # seq_len() is safe for nrep = 0, unlike 1:nrep which would give c(1, 0).
  temp <- sapply(seq_len(nrep), hct_method_single_MC_corr, mu0, p, m, n, hct,
                 alpha0, p1, ss, ntest, pcorr, chol.rho, sampling.p)
  if (!ss) {
    mean(temp)
  } else {
    # Vectorized equivalent of apply(temp, 1, mean).
    rowMeans(temp)
  }
}
|
75c95116edda4a057ab81ebe904efe0f6f09d745
|
1380a6e8ffa98cea38e39a006e79cd364463dab4
|
/R/pcpn_iso_active.R
|
3b5d6ea29391bf350dc7d3f5b4ee20ee340cf2d0
|
[] |
no_license
|
cvoter/CSLSiso
|
7ec1227c474e9691b3ac5733b6871fa77513c142
|
d86f068a9b5b0ce35e098ef84c08929bec9d14b0
|
refs/heads/master
| 2021-05-20T09:05:07.354843
| 2020-03-24T22:29:16
| 2020-03-24T22:29:16
| 206,828,195
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 403
|
r
|
pcpn_iso_active.R
|
#' Precipitation Collection Dates
#'
#' Date intervals when precipitation collector at Hancock agricultural research
#' station was actively deployed for isotope measurements.
#'
#' @docType data
#'
#' @usage data(pcpn_iso_active)
#'
#' @format A lubridate interval with time periods when precipitation collector
#' was actively deployed at Hancock Agricultural research station.
#'
"pcpn_iso_active"
|
0415c54ddac31b7ff46702ab9f831798dc8d7754
|
3305747fa8c8da01422c30328e17c9354111d0e7
|
/R/rdbGetEntries.R
|
871ecd99af369a5b41e7032c45e6f5caa3e1a62e
|
[] |
no_license
|
jjcurtin/StudySupport
|
53c312a4d73eb79fe28f56a5e77e2d48c0a56d8f
|
60024bb750543eba67374c47c213763ae741dfc0
|
refs/heads/main
| 2023-04-24T09:09:09.844913
| 2021-05-06T02:38:31
| 2021-05-06T02:38:31
| 364,763,735
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 181
|
r
|
rdbGetEntries.R
|
# Return the rows of the study database `d` that match a phone number.
#
# Args:
#   d:     a data.frame with (at least) a `Phone` column.
#   Phone: a single phone value to look up; if NULL (the default) an
#          empty data.frame is returned.
# Returns: the matching subset of `d`, or an empty data.frame.
rdbGetEntries= function(d = NULL, Phone = NULL)
{
  #return by phone
  if (!is.null(Phone)) {
    # which() drops NA comparisons, so records with a missing Phone are
    # excluded instead of appearing as all-NA rows (the old
    # `d[d$Phone == Phone, ]` produced junk rows for NA entries).
    dE <- d[which(d$Phone == Phone), ]
  } else {
    dE <- data.frame(NULL)
  }
  return(dE)
}
|
6db00c928a95899d30ae991f59b53a22a749b4b6
|
82b8d888be750c9a5c37664b19bc6d4dee2ea17c
|
/R/Modules_R/ms1_peaks.r
|
9ebdb1bfd0bccb3eb1fc7070f4afbc26ee55abcb
|
[
"MIT"
] |
permissive
|
zmahnoor14/MAW
|
2d775cbc0aaebe47a1f244172a571f39a683ab02
|
2da7fbaab0e2e4ebbff3f36d631336690fb318b0
|
refs/heads/main
| 2023-08-03T09:30:16.118159
| 2023-08-01T14:15:02
| 2023-08-01T14:15:02
| 438,345,970
| 19
| 5
| null | 2023-04-19T21:32:40
| 2021-12-14T17:42:08
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 7,545
|
r
|
ms1_peaks.r
|
#! /usr/bin/Rscript
#' @title Extract isotopic peaks and save as txt files
#'
#' @description
#'
#' This function takes the isotopic peak annotations performed by CAMERA
#' and generates the MS1 or isotopic peak lists for each precursor m/z
#' @param x is the result csv file from the function - ms2peaks
#'
#' @param y is the combined camera results in csv format
#'
#' @param result_dir result directory for the current MS2 mzml file
#'
#' @param input_dir where all the input files are and the QC/ folder is
#'
#' @param QCfile either TRUE or FALSE (depending on whether the user has used QC or CAMERA)
#'
#' @return
#'
#' a csv file containing all features and isotopic peak paths
#' isotopic peaks saved txt file for each precursor m/z
#'
#' @author Mahnoor Zulfiqar
#'
#' @examples
#'
#' ms1_peaks(x = /usr/project/file1/insilico/MS2DATA.csv',
#' y = "/usr/project/QC/combinedCam.csv",
#' result_dir = "/usr/project/file1",
#' input_dir = "/usr/project/"
#' QC = TRUE)
# ---------- Preparations ----------
# Load libraries
library("stringr")
library("dplyr")
# ---------- Arguments and user variables ----------
# Positional command-line arguments, in the order expected by ms1_peaks():
#   1: x          path to the MS2 feature csv (output of the ms2peaks step)
#   2: y          path to the combined CAMERA results csv
#   3: result_dir result directory for the current MS2 mzml file
#   4: input_dir  directory holding all input files (and the QC/ folder)
#   5: QCfile     "TRUE"/"FALSE" -- whether QC/CAMERA results are available
args <- commandArgs(trailingOnly=TRUE)
#print(args)
x <- as.character(args[1])
y <- as.character(args[2])
result_dir <- as.character(args[3])
input_dir <- as.character(args[4])
QCfile <- as.logical(args[5])
# ---------- ms1_peaks ----------
# Extract isotopic peaks for each pre_mz
# The input is x = first_list (from ms2peaks function) and y = camera results
# For each MS2 precursor feature in `x`, find the CAMERA rows in `y` whose
# m/z and retention-time windows contain the precursor, write the matched
# MS1/isotope peak list to a txt file, and record its path in a new
# `ms1Peaks` column of `x` (also written out as .../insilico/MS1DATA.csv).
ms1_peaks <- function(x, y, result_dir, input_dir, QCfile){
  # store the ms1_peak list path here
  ms1Peaks <- c()
  x = read.csv(x)
  if (QCfile){
    dir_name <- paste(input_dir, str_remove(paste(result_dir, "/insilico/peakfiles_ms1", sep =""), "./"), sep = "")
    # create a new directory to store all the peak list txt files
    if (!file.exists(dir_name)){
      dir.create(dir_name, recursive = TRUE)
    }
    # read the CAMERA results
    y = read.csv(y)
    # for all indices in the ms2 features table
    for (i in 1:nrow(x)){
      #store the indices of CAMERA that have same mz and rt as the ms2 features table
      store_c <- c()
      # for all indices in CAMERA Results
      for (j in 1:nrow(y)){
        # if mz and rt from ms2 features are within the range of CAMERA mz and rt
        if (x[i, 'premz'] <= y[j, "mzmax"] && y[j, "mzmin"] <= x[i, 'premz'] && x[i, 'rtmed'] <= y[j, "rtmax"] && y[j, "rtmin"] <= x[i, 'rtmed']){
          store_c <- c(store_c, j)
        }
      }
      # indices with same pre m/z and same rt
      # (store_c may be NULL, giving a 0-row data frame -- handled below)
      df_y <- y[store_c, ]
      df_y <- as.data.frame(df_y)
      #if there was only one index
      if (nrow(df_y) == 1){
        # if there was no isotope annotation for that one index
        if (is.na(df_y[1, "istops"])){
          mz <- df_y[1, "mz"] # save mz
          int <- df_y[1, "into"] # save intensity
          no_isotop <- cbind(mz, int) # save as table
          name_file <- paste(dir_name, "/ms1_peaks_", x[i, 'premz'], "_no_isotopes.txt", sep = "") # save name of the peaklist
          write.table(no_isotop, name_file, row.names = FALSE, col.names = FALSE) # save peak list
          # store the path relative to input_dir so it is portable
          name_file1 <- str_replace(name_file, input_dir, "./")
          ms1Peaks <- c(ms1Peaks, name_file1) # add the path of the peak list to a list
        }
        # if there was an isotope annotation: collect the whole isotope
        # group (all peaks sharing this istops id from the same file)
        else{
          df_x <- y[which(y[, "file_origin"] ==df_y[1, "file_origin"]), ] # extract camera results from one file origin
          df_x <- df_x[which(df_x[, 'istops'] == df_y[1, 'istops']), ] # extract only certain isotope annotation group
          mz <- df_x[, "mz"] # save mz
          int <- df_x[, "into"] # save intensity
          no_isotop <- cbind(mz, int) # save as table
          name_file <- paste(dir_name, "/ms1_peaksISOTOPE_", x[i, 'premz'], "_isotopeNum_", df_x[1, "istops"], ".txt", sep = "")
          write.table(no_isotop, name_file, row.names = FALSE, col.names = FALSE)
          name_file1 <- str_replace(name_file, input_dir, "./")
          ms1Peaks <- c(ms1Peaks, name_file1)
        }
      }
      # if there are more indices for df_y
      else if(nrow(df_y) > 1){
        # if all enteries have no isotope annotation
        if(all(is.na(df_y[, 'istops']))){
          df_z <- df_y[which(df_y[,"into"] == max(df_y[,"into"])), ] # extract the ms1 peak with highest intensity
          mz <- df_z[1, "mz"] # save mz
          int <- df_z[1, "into"] # save intensity
          no_isotop <- cbind(mz, int) # save as table
          name_file <- paste(dir_name, "/ms1_peaks_", x[i, 'premz'], "_no_isotopes.txt", sep = "") # save name of the peaklist
          write.table(no_isotop, name_file, row.names = FALSE, col.names = FALSE) # save peak list
          name_file1 <- str_replace(name_file, input_dir, "./")
          ms1Peaks <- c(ms1Peaks, name_file1) # add the path of the peak list to a list
        }
        # if not all isotope annotations are NA: use the most intense
        # annotated match, then collect its whole isotope group
        else if (!(all(is.na(df_y[, 'istops'])))){
          df_y <- df_y[!is.na(df_y$'istops'),] # Remove the NA isotope annotations
          df_z <- df_y[which(df_y[,"into"] == max(df_y[,"into"])), ] # Select the MS1 peak with highest intensity
          df_z1 <- y[which(y[, "file_origin"] == df_z[1, "file_origin"]), ] # extract camera results from one file origin
          df_z1 <- df_z1[which(df_z1[, 'istops'] == df_z[1, 'istops']), ] # extract only certain isotope annotation group
          mz <- df_z1[, "mz"] # save mz
          int <- df_z1[, "into"] # save intensity
          no_isotop <- cbind(mz, int) # save as table
          name_file <- paste(dir_name, "/ms1_peaksISOTOPE_", x[i, 'premz'], "_isotopeNum_", df_z1[1, 'istops'],".txt", sep = "") # save name of the peaklist
          write.table(no_isotop, name_file, row.names = FALSE, col.names = FALSE) # save peak list
          name_file1 <- str_replace(name_file, input_dir, "./")
          ms1Peaks <- c(ms1Peaks, name_file1) # add the path of the peak list to a list
        }
      }
      else if (nrow(df_y)==0){
        # no CAMERA match for this precursor: record a placeholder string
        ms1Peaks <- c(ms1Peaks, 'no ms1 peaks in QC')
      }
    }
    # one ms1Peaks entry per feature row; bind and persist alongside x
    second_list <- data.frame(cbind(x, ms1Peaks))
    write.csv(second_list, file = paste(input_dir, str_remove(paste(result_dir,'/insilico/MS1DATA.csv', sep = ""), "./"), sep =""))
    return(second_list)
  }
  else{
    # no QC/CAMERA data available: the single placeholder string is
    # recycled across all rows of x by cbind()
    ms1Peaks <- c(ms1Peaks, 'no ms1 peaks in QC')
    second_list <- data.frame(cbind(x, ms1Peaks))
    write.csv(second_list, file = paste(input_dir, str_remove(paste(result_dir,'/insilico/MS1DATA.csv', sep = ""), "./"), sep =""))
    return(second_list)
  }
}
# Usage
ms1_peaks(x, y, result_dir, input_dir, QCfile)
|
7bedce4c50925d73fcce3649307394b5d4f7ae02
|
0c68982b31105dc858b460b43be01fea8919467f
|
/plotting/plot_filled_assigned_sperm.R
|
4f36f2a1d28f85b18d6f4154ce907c24b21ad057
|
[] |
no_license
|
mccoy-lab/transmission-distortion
|
003a3ee0391d8b38f15647cbcc784e00550666d2
|
d30bde24ca6d385036b88527c66a4a7d6261f8a5
|
refs/heads/master
| 2022-12-14T18:13:18.169290
| 2022-07-26T19:38:01
| 2022-07-26T19:38:01
| 280,891,155
| 1
| 0
| null | 2023-08-21T16:34:57
| 2020-07-19T15:11:56
|
R
|
UTF-8
|
R
| false
| false
| 1,448
|
r
|
plot_filled_assigned_sperm.R
|
# Plot randomly sampled sperm, after assigning stretches to each haplotype
# filled_sperm is the output of other steps in assign_haplotypes
# (`filled_sperm` and `positions` must already exist in the workspace).
outcome_new_5 <- as.data.frame(filled_sperm)
# Keep 20 randomly chosen cells (columns) for plotting.
outcome_new_5 <- outcome_new_5[,sample(1:ncol(outcome_new_5),20, replace=FALSE)]
rownames(outcome_new_5) <- positions
# write filled sperm to its own file for further use
# NOTE(review): both writes save the same 20-column subset; the first
# filename suggests it should hold the full (unsampled) matrix -- verify.
write.csv(outcome_new_5,"/Users/saracarioscia/mccoy-lab/transmission-distortion/raw_data_for_tests/NC17chr6_filledsperm.csv", row.names = TRUE)
write.csv(outcome_new_5,"/Users/saracarioscia/mccoy-lab/transmission-distortion/raw_data_for_tests/NC17chr6_filledsperm_20sample.csv", row.names = TRUE)
# reformat filled_sperm
# Long format: one row per (location, cell) pair with its haplotype call.
outcome_new6 <- outcome_new_5 %>%
  tibble::rownames_to_column('location') %>%
  transform(location = as.numeric(location)) %>%
  pivot_longer(-'location', names_to = 'chromosome', values_to = 'haplotype')
# grab the cell IDs of the chromosomes randomly chosen above
inferred_cell_IDs <- unique(outcome_new6$chromosome)
library(ggplot2)
# Plot whole length of chromosome
outcome_new6 %>% ggplot(aes(x=location, y = chromosome)) +
  geom_point(aes(color=haplotype)) +
  ggtitle("donor 17 chromosome 6") +
  theme_minimal()
# plot only the region of the chromosome with the peak
outcome_new6 %>% ggplot(aes(x=location, y = chromosome)) +
  geom_point(aes(color=haplotype)) +
  xlim(158135000,170612671) +
  ggtitle("donor 17 chromosome 6") +
  theme_minimal()
|
f2739bda0bd636a1cd50077b21d21afa574c15ba
|
e0b530f1d389c1de35175643d306eb4be64445f4
|
/googlegmailv1.auto/R/gmail_functions.R
|
29f97bf4654eba67e2c3ed81b30f42f54639bf2d
|
[
"MIT"
] |
permissive
|
Phippsy/autoGoogleAPI
|
3ce645c2432b8ace85c51c2eb932e1b064bbd54a
|
d44f004cb60ce52a0c94b978b637479b5c3c9f5e
|
refs/heads/master
| 2021-01-17T09:23:17.926887
| 2017-03-05T17:41:16
| 2017-03-05T17:41:16
| 83,983,685
| 0
| 0
| null | 2017-03-05T16:12:06
| 2017-03-05T16:12:06
| null |
UTF-8
|
R
| false
| false
| 4,495
|
r
|
gmail_functions.R
|
#' Gmail API
#' Access Gmail mailboxes including sending user email.
#'
#' Auto-generated code by googleAuthR::gar_create_api_skeleton
#' at 2016-09-04 00:01:53
#' filename: /Users/mark/dev/R/autoGoogleAPI/googlegmailv1.auto/R/gmail_functions.R
#' api_json: api_json
#'
#' @details
#' Authentication scopes used are:
#' \itemize{
#' \item https://mail.google.com/
#' \item https://www.googleapis.com/auth/gmail.compose
#' \item https://www.googleapis.com/auth/gmail.insert
#' \item https://www.googleapis.com/auth/gmail.labels
#' \item https://www.googleapis.com/auth/gmail.modify
#' \item https://www.googleapis.com/auth/gmail.readonly
#' \item https://www.googleapis.com/auth/gmail.send
#' \item https://www.googleapis.com/auth/gmail.settings.basic
#' \item https://www.googleapis.com/auth/gmail.settings.sharing
#' }
#'
#' @docType package
#' @name gmail_googleAuthR
#'
NULL
## NULL
#' Gets the current user's Gmail profile.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/gmail/api/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://mail.google.com/
#' \item https://www.googleapis.com/auth/gmail.compose
#' \item https://www.googleapis.com/auth/gmail.modify
#' \item https://www.googleapis.com/auth/gmail.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://mail.google.com/, https://www.googleapis.com/auth/gmail.compose, https://www.googleapis.com/auth/gmail.modify, https://www.googleapis.com/auth/gmail.readonly)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param userId The user's email address
#' @importFrom googleAuthR gar_api_generator
#' @export
users.getProfile <- function(userId) {
  # gmail.users.getProfile: GET the profile resource for this mailbox.
  profile_url <- sprintf("https://www.googleapis.com/gmail/v1/users/%s/profile",
                         userId)
  request <- gar_api_generator(profile_url, "GET",
                               data_parse_function = function(x) x)
  request()
}
#' Stop receiving push notifications for the given user mailbox.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/gmail/api/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://mail.google.com/
#' \item https://www.googleapis.com/auth/gmail.modify
#' \item https://www.googleapis.com/auth/gmail.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://mail.google.com/, https://www.googleapis.com/auth/gmail.modify, https://www.googleapis.com/auth/gmail.readonly)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param userId The user's email address
#' @importFrom googleAuthR gar_api_generator
#' @export
users.stop <- function(userId) {
  # gmail.users.stop: POST to halt push notifications for this mailbox.
  stop_url <- sprintf("https://www.googleapis.com/gmail/v1/users/%s/stop", userId)
  gar_api_generator(stop_url, "POST", data_parse_function = function(x) x)()
}
#' Set up or update a push notification watch on the given user mailbox.
#'
#' Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
#'
#' @seealso \href{https://developers.google.com/gmail/api/}{Google Documentation}
#'
#' @details
#' Authentication scopes used by this function are:
#' \itemize{
#' \item https://mail.google.com/
#' \item https://www.googleapis.com/auth/gmail.modify
#' \item https://www.googleapis.com/auth/gmail.readonly
#' }
#'
#' Set \code{options(googleAuthR.scopes.selected = c(https://mail.google.com/, https://www.googleapis.com/auth/gmail.modify, https://www.googleapis.com/auth/gmail.readonly)}
#' Then run \code{googleAuthR::gar_auth()} to authenticate.
#' See \code{\link[googleAuthR]{gar_auth}} for details.
#'
#' @param WatchRequest The \link{WatchRequest} object to pass to this method
#' @param userId The user's email address
#' @importFrom googleAuthR gar_api_generator
#' @family WatchRequest functions
#' @export
users.watch <- function(WatchRequest, userId) {
  # gmail.users.watch: POST a WatchRequest to (re)establish push notifications.
  watch_url <- sprintf("https://www.googleapis.com/gmail/v1/users/%s/watch", userId)
  request <- gar_api_generator(watch_url, "POST",
                               data_parse_function = function(x) x)
  # The body must be a googleAuthR-generated WatchRequest object.
  stopifnot(inherits(WatchRequest, "gar_WatchRequest"))
  request(the_body = WatchRequest)
}
|
dbaf6967c7520d94e5abe245d74cd2a121e967e6
|
86c10d6aa4f2cef5168375b766b78466278b37f1
|
/R/summary_xi.R
|
34127bcf798c1b2c2d241d166be1edd6f2632b4a
|
[] |
no_license
|
pjoao266/bayesbr
|
30ccf36b27cade956d2fe9f6cf0547e6114d5a4f
|
88ebd3837ca83fd9c0ce979b233871ddef0379b4
|
refs/heads/master
| 2023-06-15T18:25:50.092487
| 2021-07-16T19:24:53
| 2021-07-16T19:24:53
| 276,712,638
| 2
| 0
| null | 2021-07-16T19:24:54
| 2020-07-02T17:55:00
|
R
|
UTF-8
|
R
| false
| false
| 1,574
|
r
|
summary_xi.R
|
#'@title Coefficients for xis
#'@aliases summary_xi
#'@name summary_xi
#'@description A function that uses posterior distribution values of the model and calculates the estimates for the xi parameter.
#'@usage summary_xi(x,prob=0.95)
#'@param x an object of the class \emph{bayesbr}, containing the list returned from the \code{\link{bayesbr}} function.
#'@param prob a probability containing the credibility index for the HPD interval for the coefficients of the covariates.
#'@return A list containing the estimates for the xi parameter; this list contains the following items:
#'\describe{
#'\item{table}{a table with the means, medians, standard deviations and the Highest Posterior Density (HPD) Interval,}
#'\item{xis}{a vector containing the estimated coefficients.}}
#'@seealso \code{\link{summary_delta}},\code{\link{values}},\code{\link{summary.bayesbr}}
# Summarise the posterior draws of each xi[i]: mean, median, sd and the
# HPD interval at credibility level `prob`.
#
# Args:
#   x:    a bayesbr fit; draws are read from x$info$samples$xi, which is
#         a list keyed "xi[1]", "xi[2]", ... with x$info$n entries.
#   prob: credibility level for the HPD interval (default 0.95).
# Returns: list(table = summary matrix, xis = named vector of means).
summary_xi = function(x,prob=0.95){
  xis <- x$info$samples$xi
  n <- x$info$n
  # Preallocate instead of growing `table` with rbind() inside the loop.
  table <- matrix(NA_real_, nrow = n, ncol = 5)
  coeff <- numeric(n)
  for (i in seq_len(n)) {
    xi <- xis[[paste0('xi[', i, ']')]]
    mean_t <- round(mean(xi), 5)
    coeff[i] <- mean_t
    median_t <- round(median(xi), 5)
    sd_t <- round(sd(xi), 5)
    # Highest Posterior Density interval via coda on the raw draws.
    xi_mcmc <- as.mcmc(c(xi))
    hpd <- HPDinterval(xi_mcmc, prob = prob)
    table[i, ] <- c(mean_t, median_t, sd_t, round(hpd[1:2], 5))
  }
  colnames(table) <- c("Mean", "Median", "Std. Dev.", "HPD_inf", "HPD_sup")
  rownames(table) <- paste0("xi ", seq_len(n))
  names(coeff) <- paste0("xi ", seq_len(n))
  # NOTE: the point-estimate vector is returned under the name `xis`
  # (not `coeff`), matching existing callers.
  list <- list(table = table, xis = coeff)
  return(list)
}
|
ddef0676f0d34c265a311e3d8adaf234464e7675
|
6a48e67720f48380c37a7c8cae41a6e2bcc56636
|
/factorial.R
|
98ae4ace9e3c184b77421c7f75872e9bb572906f
|
[] |
no_license
|
SHIVAMBASIA/R-Lab-Tasks
|
299778de7c90263cd8095bb8629d363ad87b7b29
|
3129da1d257b15c83b3e7634c24362b1a8cabdbf
|
refs/heads/master
| 2020-08-05T06:16:24.228635
| 2019-10-02T19:46:28
| 2019-10-02T19:46:28
| 212,426,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 105
|
r
|
factorial.R
|
# Read an integer from the user and print its factorial.
a <- readline("enter number")
a <- as.integer(a)
fact <- 1
# seq_len(a) is empty when a == 0, so 0! correctly prints 1.
# (The original `1:a` produced c(1, 0) for a = 0 and printed 0.)
for (i in seq_len(a)) {
  fact <- fact * i
}
print(fact)
|
ca25728782acc663b6e79a6a305bf3713ed8e8e5
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/kergp/examples/covMat-methods.Rd.R
|
6b775a85c9c8f0abad8a513bc832a4aba258665d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 759
|
r
|
covMat-methods.Rd.R
|
library(kergp)
### Name: covMat-methods
### Title: Covariance Matrix for a Covariance Kernel Object
### Aliases: covMat,covMan-method covMat,covTS-method
### ** Examples
# Build a covTS kernel over three named inputs, with constant ("cst")
# range/shape dependence shared across dimensions.
myCov <- covTS(inputs = c("Temp", "Humid", "Press"),
  kernel = "k1PowExp",
  dep = c(range = "cst", shape = "cst"),
  value = c(shape = 1.8, range = 1.1))
n <- 100; X <- matrix(runif(n*3), nrow = n, ncol = 3)
# covMat() requires column names matching the kernel's input names, so
# this first call is expected to fail -- hence the try().
try(C1 <- covMat(myCov, X)) ## bad colnames
colnames(X) <- inputNames(myCov)
C2 <- covMat(myCov, X)
Xnew <- matrix(runif(n * 3), nrow = n, ncol = 3)
colnames(Xnew) <- inputNames(myCov)
# Cross-covariance between the design X and new points Xnew.
C2 <- covMat(myCov, X, Xnew)
## check with the same matrix in 'X' and 'Xnew'
CMM <- covMat(myCov, X, X)
CM <- covMat(myCov, X)
# Should be (numerically) zero: one- and two-argument forms agree.
max(abs(CM - CMM))
|
731d7a25774b7825f5cf22a9ff88affba162af99
|
1b53071b8198a4705ca11b63c444f02763eedbd6
|
/Rfun/cdntc.R
|
d9f725fefab952021ccc5fe3458f104461dad945
|
[] |
no_license
|
tadhg-moore/MyLake_R
|
bbd33c02ddea3725b338acfd72865717d610afca
|
3641a82ad882c4965e7f9aeb4aff70160d2edc0e
|
refs/heads/master
| 2020-12-11T10:45:15.619867
| 2020-01-14T11:37:43
| 2020-01-14T11:37:43
| 233,827,050
| 0
| 0
| null | 2020-01-14T11:33:24
| 2020-01-14T11:33:23
| null |
UTF-8
|
R
| false
| false
| 610
|
r
|
cdntc.R
|
# Iteratively estimate the neutral drag coefficient and 10 m wind speed
# from wind speed `sp` measured at height `z`, with air temperature `Ta`.
# Relies on globals defined elsewhere in the project: Charnock_alpha,
# R_roughness, g, kappa -- none are defined in this file.
cdntc<-function(sp,z,Ta){
  #CHECK: simplified - works well for 1-dim input
  tol<-.00001 # iteration endpoint
  # Viscosity of air as a cubic polynomial in Ta
  # (Ta presumably in degrees Celsius -- TODO confirm).
  visc <- 1.326e-5*(1 + 6.542e-3*Ta + 8.301e-6*Ta^2 - 4.84e-9*Ta^3)
  # remove any sp==0 to prevent division by zero
  sp[which(sp==0)]<-.1
  # initial guess
  ustaro<-array(0,length(sp))
  ustarn<-.036*sp
  iter<-0  # iteration counter (never returned; useful when debugging)
  # iterate to find z0 and ustar
  # NOTE(review): with vector input the while() condition has length > 1,
  # which errors in R >= 4.2 -- consistent with the "1-dim input" caveat.
  while(abs(ustarn-ustaro)>tol){
    ustaro<-ustarn
    # Roughness length: Charnock term (alpha*u*^2/g) plus viscous term.
    z0<-Charnock_alpha*ustaro^2/g + R_roughness*visc/ustaro
    # Log-profile update of the friction velocity from the wind at height z.
    ustarn<-sp*(kappa/log(z/z0))
    iter<-iter+1
  }
  # Drag coefficient referenced to 10 m, and the equivalent 10 m wind speed.
  sqrcd<-kappa/log((10)/z0)
  cd<-sqrcd^2
  u10=ustarn/sqrcd
  # Returned as an unnamed two-element list: [[1]] = cd, [[2]] = u10.
  return(list(cd,u10))
}
|
8c85ace2762864f5aaa4f12a3a5d53bf8f158d1c
|
74bf5107855b1695a1ac7d128c4df7b485e9c429
|
/R/checkSize_remeas.R
|
e4f126102afb93d12e51f5b52cffb123d402108d
|
[
"Apache-2.0"
] |
permissive
|
bcgov/FAIBBase
|
6b3b284e78257006c282c530ab01f897cc33e4c7
|
1c15295015a448e9ebd2aebec469cbf2b58e88b8
|
refs/heads/master
| 2023-08-03T11:05:59.717645
| 2023-07-19T16:57:09
| 2023-07-19T16:57:09
| 195,133,665
| 0
| 0
|
Apache-2.0
| 2020-02-12T17:59:58
| 2019-07-03T22:24:25
|
R
|
UTF-8
|
R
| false
| false
| 7,732
|
r
|
checkSize_remeas.R
|
#' Check the size change of a remeasured subject
#'
#' @description This function is to check the size change for a remeasured subject.
#'
#' @param subjectID character, Specifies subject ID, such as a tree id.
#' @param measTime numeric, Measurement number with bigger value indicates a later measurement.
#' @param size numeric, Measurement of an attribute.
#' @param change character, Change direction either from \code{increase} or \code{decrease}.
#' Default is \code{increase}.
#' @param maxChangeRate numeric, It determines the maximum change rate. If the change rate from previous
#' to current measurement exceeds the maximum change rate, then the pass of current
#' measurement will be flagged as \code{FALSE}. If missing, this term is set
#' as \code{NULL}.
#' @param toleranceMethod character, Method to allow acceptable measurement error in an opposite direction
#' of change argument.
#' It must be either \code{both} (break both absolute and relative tolerance),
#' \code{either} (break either absolute or relative tolerance),
#' \code{absolute} (break absolute tolerance only),
#' or \code{relative} (break relative tolerance only).
#' Default is \code{both}.
#' @param toleranceAbs numeric, Absolute tolerance value (exclusive) to allow measurement error.
#' It must be a non-negative value.
#' If the change is \code{increase}, the change from current measurement to
#' last measurement will be compared to the negative tolerance value,
#' and vice versa. Default is \code{0} for zero tolerance.
#' @param toleranceRel numeric, Relative tolerance value (exclusive) to allow measurement error.
#' It must be a non-negative value.
#' If the change is \code{increase}, the change from current measurement to
#' last measurement will be compared to the negative tolerance value,
#' and vice versa. Default is \code{0} for zero tolerance.
#' @return A data table that contains pass information. TRUE indicates pass, while FALSE indicates
#' failure.
#' @importFrom data.table ':=' data.table copy
#' @importFrom fpCompare %<=% %<<% %>=%
#' @author Yong Luo
#' @export
#' @rdname checkSize_remeas
checkSize_remeas <- function(subjectID,
                             measTime,
                             size,
                             change = "increase",
                             maxChangeRate = NULL,
                             toleranceMethod = "both",
                             toleranceAbs = 0,
                             toleranceRel = 0){
  # --- argument validation ---------------------------------------------------
  if(!change %in% c("increase", "decrease")){
    stop("change must be correctly defined from increase or decrease.")
  }
  if(!toleranceMethod %in% c("both", "either", "absolute", "relative")){
    # Bug fix: the message previously omitted the valid option "either".
    stop("tolerance must be correctly defined from both, either, absolute, or relative.")
  }
  # Only the tolerance value(s) actually used by the chosen method must be
  # non-negative.
  if(toleranceMethod %in% c("both", "either")){
    if(toleranceAbs %<<% 0 | toleranceRel %<<% 0){
      stop("tolerance value must be defined as a non-negative value.")
    }
  } else if(toleranceMethod == "absolute"){
    if(toleranceAbs %<<% 0){
      stop("tolerance value must be defined as a non-negative value.")
    }
  } else {
    if(toleranceRel %<<% 0){
      stop("tolerance value must be defined as a non-negative value.")
    }
  }
  # NULL maxChangeRate means "no rate limit".
  if(is.null(maxChangeRate)){
    maxChangeRate <- Inf
  }
  thedata <- data.table(subjectID = subjectID,
                        measTime = measTime,
                        size = size)
  orgdata <- data.table::copy(thedata)
  if(nrow(thedata) != nrow(unique(thedata, by = c("subjectID", "measTime")))){
    stop("Multiple sizes were recorded for one subject/measurement, please check the duplicates.")
  }
  # --- pair each observation with the subject's next observation -------------
  thedata <- thedata[order(subjectID, measTime),]
  thedata[, obsid := seq_along(measTime), by = "subjectID"]
  thedata[, lastobs := max(obsid), by = "subjectID"]
  # shift() runs over the whole sorted table, so a subject's LAST row picks
  # up the next subject's first values -- those rows are dropped just below,
  # which keeps the pairing within-subject.
  thedata[, ':='(Fin_size = shift(size, n = 1L, fill = NA, type = "lead"),
                 Fin_measTime = shift(measTime, n = 1L, fill = NA, type = "lead"))]
  thedata <- thedata[obsid != lastobs,]
  thedata[, ':='(size_dif_abs = Fin_size - size,
                 size_dif_rel = (Fin_size - size)/size,
                 size_changeRate = (Fin_size - size)/(Fin_measTime - measTime))]
  # --- flag failures ---------------------------------------------------------
  thedata[, ':='(pass = TRUE, reason = as.character(NA), memo = as.character(NA))]
  thedata[is.na(size_dif_abs) & is.na(Fin_size),
          ':='(pass = FALSE, reason = "missing size")]
  if(change == "increase"){
    # A decrease beyond the tolerance(s) is flagged.
    if(toleranceMethod == "both"){
      thedata[size_dif_abs %<=% (-toleranceAbs) &
                size_dif_rel %<=% (-toleranceRel),
              ':='(pass = FALSE, reason = "break both tolerance",
                   memo = paste0("tol_abs: ", size_dif_abs,
                                 ". tol_rel: ", round(size_dif_rel, 2)))]
    } else if(toleranceMethod == "either"){
      thedata[size_dif_abs %<=% (-toleranceAbs) |
                size_dif_rel %<=% (-toleranceRel),
              ':='(pass = FALSE, reason = "break either tolerance",
                   memo = paste0("tol_abs: ", size_dif_abs,
                                 ". tol_rel: ", round(size_dif_rel, 2)))]
    } else if(toleranceMethod == "absolute"){
      thedata[size_dif_abs %<=% (-toleranceAbs),
              ':='(pass = FALSE, reason = "break absolute tolerance",
                   memo = paste0("tol_abs: ", size_dif_abs))]
    } else {
      thedata[size_dif_rel %<=% (-toleranceRel),
              ':='(pass = FALSE, reason = "break relative tolerance",
                   memo = paste0("tol_rel: ", round(size_dif_rel, 2)))]
    }
    thedata[size_changeRate %>>% maxChangeRate,
            ':='(pass = FALSE, reason = "abnormal change rate",
                 memo = paste0("change rate: ", round(size_changeRate, 2)))]
  } else if(change == "decrease"){
    # Mirror image: an increase beyond the tolerance(s) is flagged.
    if(toleranceMethod == "both"){
      thedata[size_dif_abs %>=% (toleranceAbs) &
                size_dif_rel %>=% (toleranceRel),
              ':='(pass = FALSE, reason = "break both tolerance",
                   memo = paste0("tol_abs: ", size_dif_abs,
                                 ". tol_rel: ", round(size_dif_rel, 2)))]
    } else if(toleranceMethod == "either"){
      thedata[size_dif_abs %>=% (toleranceAbs) |
                size_dif_rel %>=% (toleranceRel),
              ':='(pass = FALSE, reason = "break either tolerance",
                   memo = paste0("tol_abs: ", size_dif_abs,
                                 ". tol_rel: ", round(size_dif_rel, 2)))]
    } else if(toleranceMethod == "absolute"){
      thedata[size_dif_abs %>=% (toleranceAbs),
              ':='(pass = FALSE, reason = "break absolute tolerance",
                   memo = paste0("tol_abs: ", size_dif_abs))]
    } else {
      # Consistency fix: every other branch rounds the relative difference
      # in the memo; this one previously did not.
      thedata[size_dif_rel %>=% (toleranceRel),
              ':='(pass = FALSE, reason = "break relative tolerance",
                   memo = paste0("tol_rel: ", round(size_dif_rel, 2)))]
    }
    thedata[size_changeRate %<<% -maxChangeRate,
            ':='(pass = FALSE, reason = "abnormal change rate",
                 memo = paste0("change rate: ", round(size_changeRate, 2)))]
  }
  # --- attach the verdicts of each interval to its LATER measurement --------
  thedata[, measTime := Fin_measTime]
  orgdata <- merge(orgdata, thedata[,.(subjectID, measTime, pass, reason, memo)],
                   by = c("subjectID", "measTime"),
                   all.x = TRUE)
  return(orgdata)
}
|
341caa1dc71ffba3ffa97d84fca7e55169db7daa
|
2e0b18721959cf04addbc1b9f07188b5ce352ebc
|
/man/module_metadata.Rd
|
64482edbaeb211855d85dd470913f62c07251873
|
[
"MIT"
] |
permissive
|
aclemen1/modulr
|
cde8ed13e708d8207362006c0dc38f4cc81edb65
|
0162dde8a7281380b82d2446841520f3299f87df
|
refs/heads/master
| 2023-05-25T19:41:37.659195
| 2023-05-18T10:42:45
| 2023-05-18T10:43:37
| 41,587,794
| 9
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,405
|
rd
|
module_metadata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/modulr.R
\name{module_metadata}
\alias{module_metadata}
\alias{.__name__}
\alias{.__file__}
\alias{.__version__}
\alias{.__namespace__}
\alias{.__initials__}
\alias{.__final__}
\alias{.__path__}
\title{Module Metadata.}
\usage{
.__name__
.__version__
.__namespace__
.__initials__
.__final__
.__file__
.__path__
}
\value{
The name of the current module scope.
}
\description{
Access module metadata.
}
\details{
When modulr loads a module file, it assigns the module's name to
\code{.__name__}. A module file can discover whether or not it is running
in the main scope by checking if \code{.__name__} has value
\code{"__main__"}. This allows a common idiom for conditionally executing
code in a module file when it is run as a script (see example). It is
mainly useful when one wants to write a module file which can be executed
directly as a script and alternatively declared as a dependency and used by
other modules.
}
\section{Warning}{
Do not assign to any metadata in the workspace, because
this will always mask the object of the same name in \code{package:modulr}.
}
\examples{
# script.R
"script" \%provides\% { cat("Hello World\\n"); print(.__name__) }
if (.__name__ == "__main__") make()
# EOF
\dontrun{source("script.R")}
make("script")
}
\seealso{
\code{\link{define}} and \code{\link{make}}.
}
|
3fdb73f3e1b471330a1465f632ebb03c04c63523
|
bb1fc4854812f2efe4931ca3c0d791317309e425
|
/scripts/older_scripts/getFipsForPoints.R
|
4f1dc451c599dfd382ea66ec5bff57fdec1bcff2
|
[
"Apache-2.0"
] |
permissive
|
dlab-berkeley/Geocoding-in-R
|
890e491d84808e29d07897508dc44f2bd9a3f646
|
40a0369f3b29a5874394ffafd793edc7012144ea
|
refs/heads/master
| 2023-03-06T07:54:41.997542
| 2021-02-18T17:42:22
| 2021-02-18T17:42:22
| 47,520,653
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,466
|
r
|
getFipsForPoints.R
|
## ############################################################################
#
# Joining Census FIPS codes to points via point in polygon overlay
#
# Author: Patty Frontiera pfrontiera@berkeley.edu
# Date: 2014_04_15
# Last updated: 2014_04_17
#
# Purpose:
## This script takes as input a point file and a file of Census Tiger data
## performs a spatial overlay of the two
## and adds the Census FIPS code to the point file attribute table
## Then writes the output to CSV
##
# #ASSUMPTION: input point data has fields "latitude" and "longitude"
##
## This same practice can be done much faster in ArcGIS or PostGIS
## but this method is fine for < 1 million records
##
## This approach will work on an offline server provided
## the data, R code and R libraries reside on the server.
##
## THIS IS SAMPLE CODE - you will need to make changes!
## ############################################################################
# clean start - remove any objects in the environment
# NOTE(review): rm(list = ls()) wipes the user's whole workspace whenever this
# script is sourced; acceptable for a standalone batch script, dangerous if
# ever sourced from an interactive session.
rm(list = ls())
#LOAD LIBS
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() (used again further down) would fail fast instead.
require(rgdal)
require(R.utils)
# ########################################################################################################
# USER SUPPLIED VALUES
# ########################################################################################################
working_dir <- "/Users/pattyf/geocoding/temp"
point_file <- "/Users/pattyf/geocoding/test_address_points.csv"
# You can download some sample point data from this url:
## https://gist.githubusercontent.com/pattyf/9091aca4d536e983beea/raw/65b4ca99a215b65cdd7c2406dfbac9749eb897f6/test_address_points
point_file_delimiter <- "|" # I prefer this delimiter to a comma as address components often contain commas
point_file_crs <- "4326" # These points use geographic coordinates with the WGS84 datum
# WGS 84 - coordinate reference system (crs) used by most GPS / Google maps etc
## AKA - spatial reference system or map projection or coordinate system
## See spatialreference.org - http://spatialreference.org/ref/epsg/4326/
# HEY: IMPORTANT
#ASSUMPTION: input point data has fields "latitude" and "longitude"
## Census block data - must point to file on your computer
#census_file <- '/Users/pattyf/Gisdata/Census/tabblock2010_06_pophu/tabblock2010_06_pophu.shp'
## Census blockgroup data - must point to file on your computer
#census_file <- '/Users/pattyf/Gisdata/Census/tl_2014_06_tract/tl_2014_06_bg.shp'
## Census tract data - must point to file on your computer
census_file <- '/Users/pattyf/Gisdata/Census/tl_2014_06_tract/tl_2014_06_tract.shp'
# CA block-level census data were downloaded from the census website, url below:
## http://www2.census.gov/geo/tiger/TIGER2014/TABBLOCK/tl_2014_06_tabblock10.zip
## Could automate the download but adds unneeded complexity to this script
## THIS IS BIG FILE = 415MB or so
## You can download a smaller file by downloading larger census geographies
### eg tracts level data
## http://www2.census.gov/geo/tiger/TIGER2014/TRACT/tl_2014_06_tract.zip
### or block group level data
### http://www2.census.gov/geo/tiger/TIGER2014/BG/tl_2014_06_bg.zip
## However, if you intersect points with the block level data
## you get a FIPS CODE that includes the state, county, tract, blockgroup and block id
## You need to change this file if not doing CA
## or if you want to change the input remote census data file, eg to smaller file like tracts
## See http://www2.census.gov for details
## Note there are several vintages (year versions) for each census products. For
## tracts, block groups, and blocks these don't change between census - there are only improvements/corrections
## If you are interested in comparisons over time (eg 2000 - 2010 census) get the harmonized data from NHGIS
#census_layer <- 'tabblock2010_06_pophu' # The layer is the name of the feature layer within the file
# For shapefiles it is the same as the prefix of the shapefile
#census_layer <- 'tl_2014_06_bg' #census blockgroup level data
census_layer <- 'tl_2014_06_tract' #census tract data
census_crs <- '4269' # US Census Tigerline data use geographic coordintes with the NAD83 datum
# The EPSG code for which is 4269
# See http://spatialreference.org/ref/sr-org/4269/ for details.
# NOTE(review): "geograhpy" is misspelled but used consistently everywhere in
# this script, so renaming it would have to be done in every reference at once.
census_geograhpy_type = "tracts" # one of tracts, blocks, or blockgroups
output_crs <- '4326' #WGS84
## USE '3310' for CA Teale Albers - See http://spatialreference.org/ref/epsg/3310/
## Used for CA state-wide data processing (metric calculations)
## If the output CRS does not match the census CRS we will
## transform the data before saving to new file as last step
out_csv_file <-"point_data_withfips.csv" # The name of the output csv file
# Will be written to working_dir if full path not specified
out_shapefile_prefix <- "point_data_withfips"
out_shapefile_directory <- "." # The period indicates the current working dir.
# You can specify another directory as needed
debug <- 1 # We are just testing this script if debug is 1. If running for real, set this to 0
# When debug is 1 we only read in first 50 records from point file
# ########################################################################################################
# Load needed libraries
library(sp)
library(rgdal)
library("R.utils") # for file utils, like zip and unzipping files
# Set working directory for input and output where full path not given
setwd(working_dir)
# Read in point data
## In this exampe we have geocoded addresses - 355,054 addresses all in alameda county (would prefer a state sample)
## Format of these address data points in input file:
## Inaddress|street_address|street_name|latitude|country_code|fips_county|country_name|country_code3|longitude|region|locality|street_number|confidence|
if (debug == 1) {
  # When debug is 1 only read in first 50 records from point file.
  # Bug fix: the original had `stringsAsFactors=,` (an empty argument);
  # spell out FALSE, matching the non-debug branch.
  point_data <- read.table(point_file, sep = point_file_delimiter, header = TRUE,
                           stringsAsFactors = FALSE, nrow = 50)
} else {
  point_data <- read.table(point_file, sep = point_file_delimiter, header = TRUE,
                           stringsAsFactors = FALSE)
}
# Convert data frame to a SpatialPointsDataFrame object
# (written as an explicit replacement assignment; the original's
# `coordinates(point_data) =~ ...` parsed the same way but read ambiguously)
coordinates(point_data) <- ~ longitude + latitude
# Specify the CRS of the input point data
proj4string(point_data) <- CRS(paste0("+init=epsg:", point_file_crs))
# Read the census block data into R
census_polys <- readOGR(census_file, census_layer)
# Specify the CRS of the input census data
proj4string(census_polys) <- CRS(paste0("+init=epsg:", census_crs)) # define the projection
# CRS of both layers must match!
## If they do not then the point data should be transformed
## as it is much easier operation on points than polygons
if (point_file_crs != census_crs) {
  point_data <- spTransform(point_data, CRS(paste0("+init=epsg:", census_crs)))
}
#
# Spatial Intersection
## Get fips code for each address point
## Block-level data carry the id in BLOCKID10; tract and blockgroup data use GEOID
ptm <- proc.time() # Time this operation to get a sense of how it will scale to more points
if (census_geograhpy_type == "blocks") {
  point_data$fips_code <- over(point_data, census_polys)$BLOCKID10
}
if (census_geograhpy_type == "tracts") {
  point_data$fips_code <- over(point_data, census_polys)$GEOID
}
if (census_geograhpy_type == "blockgroups") {
  point_data$fips_code <- over(point_data, census_polys)$GEOID
}
print(proc.time() - ptm)
# ###############################################
# Notes on output from testing
# ###############################################
## It took 18 minutes to intersect ~350,000 address points
## with census block-level data
##
## user  system elapsed
## 1049.953  18.078 1072.092
##
# How long does this operation take in ArcGIS?
## on our geocoding server it took only
## 2 minutes using spatial intersect operation.
# ###############################################
# ###############################################
# Transform the data before saving if needed
# ###############################################
if (output_crs != census_crs) {
  point_data <- spTransform(point_data, CRS(paste0("+init=epsg:", output_crs)))
}
# ###############################################
# Save output to local files
# ###############################################
#
## as csv
write.csv(point_data@data, out_csv_file, row.names = FALSE)
#
## as shapefile
### note that field/col names longer than 8 characters will be truncated!
# Bug fix: the layer argument was the string literal "out_shapefile_prefix"
# instead of the variable, so the shapefile was always named after the
# variable's NAME rather than its value.
writeOGR(point_data, out_shapefile_directory, out_shapefile_prefix,
         driver = "ESRI Shapefile", overwrite_layer = TRUE)
|
4faedc2cff646058b07fd4259cb2bd25ff25522e
|
8ad686537311f3432667d8d96502981d02245ae0
|
/helper/identifiers.R
|
3116168f840f11f651ae29277a2929cc58417253
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
HannesOberreiter/inat-austria-city-challenge-2021
|
dac033226d4f392e66e216b4cbe8a879d2e1ac5d
|
1c20e7e86a6eebb7b64158ad63028334b0f31a43
|
refs/heads/master
| 2023-08-28T12:58:02.255817
| 2021-10-23T10:01:59
| 2021-10-23T10:01:59
| 418,587,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,340
|
r
|
identifiers.R
|
# Add identifiers to our dataset from the iNaturalist API -----
library(here)
library(tidyverse)
library(httr)
library(jsonlite)
library(glue)
# Import Data ---------
data <- readxl::read_xlsx(
  "data/16-10-2021-raw.xlsx",
  col_types = "text",
  sheet = "edited"
) %>%
  mutate(
    latitude = as.numeric(latitude),
    longitude = as.numeric(longitude)
  )
data %>% glimpse()
# Fetch identifications in batches of up to 200 observation ids.
i <- 1
ix <- 0
testoffset <- 0 # set > 0 to run on a truncated dataset while testing
ident_list <- list()
while (i < nrow(data) - testoffset) {
  print(paste("start", i))
  ix <- ifelse((i + 200) < nrow(data) - testoffset, ix + 200, nrow(data) - testoffset)
  print(paste("end", ix))
  # fetching
  # NOTE(review): because `i <- ix` (not ix + 1) at the loop end, each batch
  # re-fetches the previous batch's last row; harmless since results are keyed
  # by observation id, but wasteful.
  id <- data$id[i:ix]
  url <- glue("https://api.inaturalist.org/v1/observations?id={paste(id, collapse =',')}&order=desc&order_by=created_at")
  fetch_url <- fromJSON(url)
  # Bug fix: this bind_rows() call was duplicated in the original.
  result <- bind_rows(fetch_url$results)
  ids <- as.character(result$id)
  ident <- fetch_url$results$identifications
  # Collapse each observation's identifier users into one comma-separated row.
  for (j in seq_along(ids)) {
    ident_list[[ids[[j]]]] <- tibble(
      id = ids[[j]],
      obs_id = paste(ident[[j]]$user$id, collapse = ","),
      obs_name = paste(ident[[j]]$user$login, collapse = ",")
    )
  }
  i <- ix
}
res <- bind_rows(ident_list) # %>%
#  mutate(id = as.integer(id)) %>%
#  arrange(id) %>%
#  glimpse()
res %>% write_excel_csv2(file = glue("{here()}/data/identifier.csv"))
|
cf9e66c90483f964bf89e87e0f299f465a157b56
|
22c625269e9878e3d028b56398d8a43c90b7bc16
|
/global.r
|
822479bd219f3d45562022a8dcedf6c4b459a74f
|
[] |
no_license
|
kblocksom/eForms_FishUpdates_NRSA
|
6c3b5efe381c78040a17b1d85224da3d175440d2
|
ce9f5c5fa166dbf965d3c36d3423027505643c49
|
refs/heads/master
| 2020-08-27T09:57:39.160458
| 2019-12-26T19:19:33
| 2019-12-26T19:21:29
| 217,325,514
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 619
|
r
|
global.r
|
# Shiny app bootstrap: ensure required packages are present, load them, then
# source project helpers and read the fish taxa lookup table.
list.of.packages <- c("shiny","Hmisc","dplyr","shinyjs","shinythemes","shinyBS","RJSONIO","DT","stringr","data.table","jsonlite","shinyalert","gtools")
# Packages in the required list that are not yet installed locally.
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
# NOTE(review): installing at app start-up needs network access and write
# permission on the library path -- confirm this is intended for deployment.
if(length(new.packages)) install.packages(new.packages)
lapply(list.of.packages,function(x){library(x,character.only=TRUE)})
# Project helper functions used by server/ui.
source('functions/eFormsParseJSON_basic.r')
source('functions/eFormsParseJSONtext.r')
source('functions/eFormsOrganizeData.r')
source('functions/parseFish.R')
# Reference taxa table; FINAL_NAME is converted to factor for selectize inputs.
fishTaxa <- readRDS("data/fishtaxa.rds") %>%
  mutate(FINAL_NAME=as.factor(FINAL_NAME))
|
78a126ab6dc0f57cebddbf9b6a4c595aacf6d0a9
|
07a74984bf59ce4486e1bcaefafb8ce692b50d5a
|
/data-raw/geojson.R
|
90c2cd3935febd1914291932bb93630e7be9a49a
|
[] |
no_license
|
SymbolixAU/mapdeck
|
c3bc3a61b8d8ade69b9b67fa69a00f9294281630
|
6138c6845e37ab3479e4ff65d9b0fff29e20f070
|
refs/heads/master
| 2023-09-03T22:34:43.418728
| 2023-08-24T22:14:59
| 2023-08-24T22:14:59
| 141,350,341
| 344
| 50
| null | 2023-08-09T22:22:59
| 2018-07-17T22:06:34
|
HTML
|
UTF-8
|
R
| false
| false
| 2,869
|
r
|
geojson.R
|
# data-raw script: build the package's `geojson` sample dataset from a local
# GTFS feed (tram routes + stops) merged with an existing geojson object.
library(sf)
# sf <- geojsonsf::geojson_sf(googleway::geo_melbourne)
# sf <- sf[, c( 'geometry')]
# sf$id <- 1:nrow(sf)
#
# #geojson <- geojsonsf::sf_geojson(sf)
# geojson <- geojsonsf::sf_geojson(sf)
#
# attr(geojson, 'class') <- 'json'
# usethis::use_data(geojson, overwrite = T)
library(data.table)
# Local GTFS extract; paths are machine-specific.
dt_shapes <- fread("~/Downloads/gtfs (3)/shapes.txt")
dt_trips <- fread("~/Downloads/gtfs (3)/trips.txt")
dt_routes <- fread("~/Downloads/gtfs (3)/routes.txt")
dt_stops <- fread("~/Downloads/gtfs (3)/stops.txt")
dt_stoptimes <- fread("~/Downloads/gtfs (3)/stop_times.txt")
## select one route_long_name
dt_routes <- dt_routes[ dt_routes[, .I[1], by = .(route_long_name) ]$V1 ]
# Join trips to routes, keeping only route_type 0 (trams in GTFS).
dt <- unique(dt_trips[, .(route_id, trip_id, shape_id, trip_headsign, direction_id)])[
unique(dt_routes[route_type %in% c(0), .(route_id, route_long_name, route_type)])
, on = "route_id"
, nomatch = 0
]
## pick just one trip
dt <- dt[ dt[, .I[1], by = .(route_id)]$V1 ]
# Attach the shape points (lon/lat/sequence) for each remaining trip.
dt <- dt[
unique(dt_shapes[, .(shape_id, lon = shape_pt_lon, lat = shape_pt_lat, sequence = shape_pt_sequence)])
, on = "shape_id"
, nomatch =0
, allow.cartesian = T
]
rm(dt_shapes, dt_trips, dt_routes)
## grab stops
dt_stops <- unique(dt[, .(trip_id)])[
dt_stoptimes[, .(stop_id, trip_id)]
, on = "trip_id"
, nomatch = 0
][
dt_stops
, on = "stop_id"
, nomatch = 0
]
dt_stops <- dt_stops[ dt_stops[, .I[1], by = stop_id]$V1 ]
# Build one POINT geometry per stop.
sf_stops <- dt_stops[
, {
geometry <- sf::st_point(x = c(stop_lon, stop_lat))
geometry <- sf::st_sfc(geometry)
geometry <- sf::st_sf(geometry = geometry)
}
, by = stop_id
]
sf_stops <- sf_stops[, 'geometry']
sf_stops <- sf::st_sf(sf_stops)
# Build one LINESTRING geometry per trip from the ordered shape points.
setorder(dt, trip_id, route_id, shape_id, direction_id, sequence)
sf <- dt[
, {
geometry <- sf::st_linestring(x = matrix(c(lon, lat), ncol = 2))
geometry <- sf::st_sfc(geometry)
geometry <- sf::st_sf(geometry = geometry)
}
, by = .(route_id, trip_id, shape_id, direction_id)
]
sf <- sf::st_as_sf(sf[, 'geometry'])
sf$id <- 1:nrow(sf)
sf_stops$id <- 1:nrow(sf_stops)
# NOTE(review): `geojson` is only created in the commented-out block at the
# top of this file -- presumably the package's existing data object is in
# scope here (e.g. via devtools::load_all()); confirm before running.
sf_geo <- geojsonsf::geojson_sf(geojson)
sf::st_crs(sf) <- sf::st_crs(sf_geo)
sf::st_crs(sf_stops) <- sf::st_crs(sf_geo)
sf_bind <- rbind(sf, sf_geo, sf_stops)
geojson <- geojsonsf::sf_geojson(sf_bind)
attr(geojson, 'class') <- 'json'
# NOTE(review): `key` (Mapbox token) is not defined anywhere in this script --
# it must exist in the author's environment; confirm before running.
mapdeck(
token = key
, style = "mapbox://styles/mapbox/dark-v9"
, pitch = 35
) %>%
mapdeck::add_geojson(
data = geojson
, layer_id = "geojson"
)
usethis::use_data(geojson, overwrite = T)
# Second pass: decorate the combined features with random colours/values and
# re-save, overwriting the dataset written just above.
sf <- geojsonsf::geojson_sf(geojson)
sf$fillColor <- sample(colourvalues::colour_values(1:5, palette = "viridis"), size = nrow(sf), replace = T)
sf$line_colour <- sample(colourvalues::colour_values(1:5, palette = "plasma"), size = nrow(sf), replace = T)
sf$random <- rnorm(n = nrow(sf))
geojson <- geojsonsf::sf_geojson(sf)
# attr(geojson, 'class') <- 'json'
usethis::use_data(geojson, overwrite = T)
|
d3725f6e38ee197989530e5930539d711a0873f5
|
b8cfc932bd048d70e2fb4b6fe1e15d285e4771a5
|
/man/StatisticsPhD.Rd
|
439e4e91d93c1f61eb277325820990c719ac11b2
|
[] |
no_license
|
rpruim/Lock5Data
|
3df5b5d1c1fc6e6dac9bc2453c16c8d18465bcf8
|
9e5b33f8800dc48c09b1e0c85dc2eefd3f967fe4
|
refs/heads/master
| 2016-09-10T15:25:39.616331
| 2014-03-04T14:50:16
| 2014-03-04T14:50:16
| 12,008,109
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,086
|
rd
|
StatisticsPhD.Rd
|
% Generated by roxygen2 (4.0.0): do not edit by hand
\docType{data}
\name{StatisticsPhD}
\alias{StatisticsPhD}
\title{Statistics PhD Programs}
\format{A data frame with 82 observations on the following 3 variables.
\itemize{
\item{\code{University}} {Name of the school}
\item{\code{Department}} {Type of department: \code{Biostatistics} or
\code{Statistics}}
\item{\code{FTGradEnrollment}} {Full time graduate
student enrollment} }}
\source{
The full list of the 82 Group IV departments was obtained at
\url{http://www.ams.org/profession/data/annual-survey/group_iv}. Data on
enrollment were obtained primarily from Assistantships and Graduate
Fellowships in the Mathematical Sciences, 2009, American Mathematical
Society.
}
\description{
Enrollments in Statistics PhD Programs
}
\details{
Graduate student enrollments in Statistics and Biostatistics departments in
2009. The list does not include combined departments of mathematics and
statistics and does not include departments that did not reply to the AMS
survey.
}
\examples{
data(StatisticsPhD)
}
\keyword{datasets}
|
7588772f8ee4a2184faab53165509f674b49eb5a
|
0226b6b2bca3405586901a9b8a0d2768a7c4a911
|
/R/stan_group_fit.R
|
35acafc6e17bf5df5bea496c499a7c0e367dfc3a
|
[] |
no_license
|
bgoodri/breathteststan
|
20d1d6787c084722e6662333a44bf57d6cf6bbea
|
6487bcc776c1360a5087cb78501f2ccd801c8578
|
refs/heads/master
| 2020-04-01T12:52:29.554806
| 2018-10-16T05:43:03
| 2018-10-16T05:43:03
| 153,227,629
| 0
| 0
| null | 2018-10-16T05:29:35
| 2018-10-16T05:29:34
| null |
UTF-8
|
R
| false
| false
| 6,511
|
r
|
stan_group_fit.R
|
#' @title Bayesian Stan fit to 13C Breath Data in Multiple Groups
#' @description Fits exponential beta curves to 13C breath test series
#' data using Bayesian Stan methods, by assuming fixed between group effects.
#' This model is overly parsimonious. Do not use it
#' unless you check the results carefully and understand why
#' fits can be very bad.
#'
#' @param data Data frame or tibble as created by \code{\link[breathtestcore]{cleanup_data}},
#' with mandatory columns \code{patient_id, group, minute} and \code{pdr}.
#' It is recommended to run all data through \code{\link[breathtestcore]{cleanup_data}} which
#' will insert dummy columns for \code{patient_id} and \code{minute} if the
#' data are distinct, and report an error if not. Since the Bayesian method
#' is stabilized by priors, it is possible to fit single curves.
#' @param dose Dose of acetate or octanoate. Currently, only one common dose
#' for all records is supported.
#' @param sample_minutes If mean sampling interval is < sampleMinutes, data are subsampled
#' using a spline algorithm
#' @param student_t_df When student_t_df < 10, the student distribution is used to
#' model the residuals. Recommended values to model typical outliers are from 3 to 6.
#' When student_t_df >= 10, the normal distribution is used.
#' @param chains Number of chains for Stan
#' @param iter Number of iterations for each Stan chain
#' @param model Name of model; use \code{names(stanmodels)} for other models.
#'
#'
#' @return A list of classes "breathteststangroupfit", "breathteststanfit" and "breathtestfit"
#' with elements
#' \itemize{
#'   \item {\code{coef} Estimated parameters as data frame in a key-value format with
#'    columns \code{patient_id, group, parameter, method} and \code{value}.
#'    Has an attribute AIC.}
#'   \item {\code{data} The effectively analyzed data. If density of points
#'    is too high, e.g. with BreathId devices, data are subsampled before fitting.}
#'   \item {\code{stan_fit} The Stan fit for use with \code{shinystan::launch_shiny}
#'    or extraction of chains. }
#' }
#' @seealso Base methods \code{coef, plot, print}; methods from package
#'  \code{broom: tidy, augment}.
#' @examples
#' \donttest{
#' library(breathtestcore)
#' library(dplyr)
#' data("usz_13c", package = "breathtestcore")
#' data = usz_13c %>%
#'   dplyr::filter( patient_id %in%
#'     c("norm_001", "norm_002", "norm_003", "norm_004",
#'       "pat_001", "pat_002","pat_003")) %>%
#'   breathtestcore::cleanup_data()
#' fit = stan_group_fit(data, chains = 1, iter = 100)
#' plot(fit) # calls plot.breathtestfit
#' coef(fit)
#' }
#'
#' @export
#'
stan_group_fit = function(data, dose = 100, sample_minutes = 15, student_t_df = 10,
                    chains = 2, iter = 1000, model = "breath_test_group_1") {
  if (length(unique(data$group)) < 2)
    stop("Use stan_fit if there is only one group")
  # Avoid notes on CRAN
  value = patient_id = group = minute = pdr = NULL
  stat = estimate = . = k = key = m = q_975 = NULL
  cm = comment(data)
  # Subsample dense series and index patients/groups as consecutive integers
  # for Stan.
  data = breathtestcore::subsample_data(data, sample_minutes) %>%
    mutate(
      pat_i = as.integer(as.factor(patient_id)),
      group_i = as.integer(as.factor(group))
    )
  n_pat = max(data$pat_i)
  n_group = max(data$group_i)
  data_list = list(
    n = nrow(data),
    n_pat = n_pat,
    n_group = n_group,
    # Bug fix: these two entries were hard-coded to 5 and 100, silently
    # ignoring the student_t_df and dose arguments passed by the caller.
    student_t_df = student_t_df,
    dose = dose,
    pat_i = data$pat_i,
    group_i = data$group_i,
    minute = data$minute,
    pdr = data$pdr)
  # Note: as.array is required to handle the case of n_pat = 1
  init = rep(list(list(
    m_pat_raw = as.array(rnorm(n_pat,0,2)),
    m_group = rnorm(n_group,0,.1),
    sigma_m_pat = abs(rnorm(1, 10,1)),
    mu_m = rnorm(1, 40, 2),
    k_pat_raw = as.array(rnorm(n_pat,0,.0001)),
    k_group = rnorm(n_group,0,.0001),
    sigma_k_pat = abs(rnorm(1, 0,.0001)),
    mu_k = rnorm(1, 40, 3),
    beta_pat_raw = as.array(rnorm(n_pat,0,.1)),
    beta_group = rnorm(n_group,0,.1),
    sigma_beta_pat = abs(rnorm(1, 0, .1)),
    mu_beta = rnorm(1, 2,0.1),
    sigma = abs(rnorm(1, 10, 1))
  )),chains)
  if (!exists("stanmodels"))
    stop("stanmodels not found")
  mod = stanmodels[[model]]
  if (is.null(mod))
    stop("stanmodels ", model, " not found")
  options(mc.cores = min(chains, max(parallel::detectCores()/2, 1)))
  # capture.output silences Stan's console chatter during sampling.
  capture.output({fit = suppressWarnings(
    rstan::sampling(mod, data = data_list, init = init,
                    control = list(adapt_delta = 0.9),
                    iter = iter, chains = chains)
  )})
  # Local extractor function: pull one parameter's chain, optionally a
  # single column i of a vector-valued parameter.
  ex = function(par, i = NA) {
    if (is.na(i)) {
      as.vector(rstan::extract(fit, permuted = TRUE, pars = par)[[par]])
    } else
    {
      rstan::extract(fit, permuted = TRUE, pars = par)[[par]][,i]
    }
  }
  # Per patient/group, reconstruct m, k, beta draws from population + group +
  # patient effects. (data_frame is deprecated in dplyr; kept for
  # compatibility with the package's import list.)
  coef_chain = data %>%
    select(-minute, -pdr) %>%
    distinct() %>%
    rowwise() %>%
    do (
      {
        data_frame(
          patient_id = .$patient_id,
          group = .$group,
          m = ex("mu_m") + ex("m_group",.$group_i) + ex("m_pat", .$pat_i),
          k = ex("mu_k") + ex("k_group",.$group_i) + ex("k_pat", .$pat_i),
          beta = ex("mu_beta") + ex("beta_group",.$group_i) + ex("beta_pat", .$pat_i)
        )
      }
    ) %>%
    ungroup() %>%
    mutate(
      t50_maes_ghoos = breathtestcore::t50_maes_ghoos(.),
      tlag_maes_ghoos = breathtestcore::tlag_maes_ghoos(.),
      t50_maes_ghoos_scintigraphy = breathtestcore::t50_maes_ghoos_scintigraphy(.),
      t50_bluck_coward = breathtestcore::t50_bluck_coward(.),
      tlag_bluck_coward = breathtestcore::tlag_bluck_coward(.)
    ) %>%
    rename(m_exp_beta = m, k_exp_beta = k, beta_exp_beta = beta) %>%
    tidyr::gather(key, value, -patient_id, -group) %>%
    na.omit() %>%
    ungroup()
  # Summarize draws into point estimates and credible-interval quantiles,
  # then reshape into the package's long key-value coefficient format.
  cf = coef_chain %>%
    group_by(patient_id, group, key) %>%
    summarize(
      estimate = mean(value),
      q_0275 = quantile(value, 0.0275),
      q_25 = quantile(value, 0.25),
      q_75 = quantile(value, 0.75),
      q_975 = quantile(value, 0.975)
    ) %>%
    ungroup() %>%
    mutate(
      parameter = stringr::str_match(key, "k|m|beta|t50|tlag")[,1],
      method = stringr::str_match(key, "maes_ghoos_scintigraphy|maes_ghoos|bluck_coward|exp_beta")[,1]
    ) %>%
    select(-key) %>%
    tidyr::gather(stat, value, estimate:q_975)
  ret = list(coef = cf, data = data, stan_fit = fit, coef_chain = coef_chain)
  class(ret) = c("breathteststangroupfit", "breathteststanfit", "breathtestfit")
  comment(ret) = cm # Recover comment
  ret
}
|
8f15724fa3f564d999ec98bbe9bd82472cd54d0e
|
b75bc9ec3507d55bfa8fc534958027948d8c342f
|
/tests/testthat/test-get_dictionary.R
|
d5276e176e3021c56be3b9aefbdbf8f19d76d187
|
[
"MIT"
] |
permissive
|
CDU-data-science-team/experienceAnalysis
|
456185ac3ab7570ba10ed50d35e34873cd101e2c
|
e54ff773f48436876abd638d0775725bf2ef00b3
|
refs/heads/main
| 2023-07-16T13:26:39.847924
| 2021-08-23T13:22:19
| 2021-08-23T13:22:19
| 346,350,668
| 0
| 0
|
NOASSERTION
| 2021-07-28T22:10:33
| 2021-03-10T12:35:11
|
R
|
UTF-8
|
R
| false
| false
| 219
|
r
|
test-get_dictionary.R
|
# Each supported sentiment lexicon should be retrieved as a non-empty table.
test_that("Dictionary retrieved", {
  for (lexicon in c("nrc", "afinn", "bing")) {
    expect_gt(nrow(get_dictionary(lexicon)), 0)
  }
})
|
dfbaf463c29287ec439d2c0517315b56c837fe85
|
62c7b65acf01ca4ceb49f3d22a4349fa6d62ca82
|
/R/save-flow.r
|
181644ca7dddefbd42153e6f4986d09ec411faf8
|
[] |
no_license
|
hrbrmstr/flowdockr
|
8114e11f1f2f67ece7b0c85f065e1451c7855aa9
|
9da2bd1b47ca060fe9cfff73102192467083fae4
|
refs/heads/master
| 2021-01-10T10:06:35.621486
| 2016-02-18T15:38:54
| 2016-02-18T15:38:54
| 51,620,601
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,106
|
r
|
save-flow.r
|
#' Save R objects to an RData file on Flowdock
#'
#' @param ... objects to store in the R data file
#' @param file filename (without extension) to use
#' @param flow parameterized flow name (i.e. "\code{this-flow}")
#' @param tags vector of tags for the flow
#' @param flowdock_api_key used to authorize you to Flowdoc. You should store this
#'   in \code{.Renviron} or some other moderately secure place. You can override
#'   the pick from the environment by passing it in here.
#' @return parsed call result (invisibly)
#' @export
save_flow <- function(...,
                      file="flowr",
                      flow=Sys.getenv("FLOWDOCK_DEFAULT_FLOW", NULL),
                      tags="",
                      flowdock_api_key=Sys.getenv("FLOWDOCK_PAT")) {
  # NOTE(review): Sys.getenv() with unset = NULL in the `flow` default may be
  # fragile (`unset` is documented as a string) -- confirm intended behaviour.

  # Force the C locale while serialising; restore the caller's locale on exit.
  loc <- Sys.getlocale('LC_CTYPE')
  Sys.setlocale('LC_CTYPE','C')
  on.exit(Sys.setlocale("LC_CTYPE", loc), add = TRUE)

  # Serialise the objects to a temporary .rda file; the tempfile is removed
  # on exit (success or error), so no explicit unlink is needed later.
  ftmp <- tempfile(file, fileext=".rda")
  save(..., file=ftmp)
  on.exit(unlink(ftmp), add = TRUE)

  # Upload the file, then fail loudly on a non-success HTTP status.
  # BUG FIX: stop_for_status() was previously called with no argument, which
  # errors unconditionally once the missing argument is forced. Pass the
  # upload result instead. (Assumes flow_file() returns an httr response-like
  # object accepted by stop_for_status() -- TODO confirm.)
  res <- flow_file(ftmp, flow, flowdock_api_key=flowdock_api_key)
  stop_for_status(res)

  invisible(res)
}
|
a33d47a254293ac3b65226ec5ca76e816dd9b909
|
5ea3ebf35a2059f50690e6eaed1918d5a6bc90f3
|
/man/long2mids.Rd
|
c22ce7c11bb1f65369dbd88df5ca6a9a54113cd2
|
[] |
no_license
|
cksun/mice
|
59ff1291ccaa305b882054da308a11bae4a4bdba
|
2bcb306418db3a4469388b659c934c8f68f991a3
|
refs/heads/master
| 2020-12-11T09:23:06.251006
| 2014-04-07T18:38:37
| 2014-04-07T18:38:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 341
|
rd
|
long2mids.Rd
|
\name{long2mids}
\alias{long2mids}
\title{Conversion of a imputed data set (long form) to a \code{mids} object}
\usage{
long2mids(x)
}
\arguments{
\item{x}{A multiply imputed data set in long format.}
}
\value{
An object of class \code{mids}
}
\description{
Conversion of a imputed data set (long form) to a
\code{mids} object
}
\author{
Stef van Buuren, 2012
}
\keyword{internal}
|
0f8864a4590900aed6a2b73bf3f313ed00e89895
|
03af0227dc9d978acdbd95a3437486ab10249704
|
/dump_versions.R
|
18ee998d1ca0c3d9621f24445df7d49ec204a9c2
|
[] |
no_license
|
pieterjongsma/circular-rjmcmc
|
dd68efaa5938410dfa442c3a91c484c12dcf156a
|
66c9d6a2f8783f6f40478af46cd4171d75ff3437
|
refs/heads/master
| 2021-08-23T15:31:11.745521
| 2017-12-05T12:46:25
| 2017-12-05T12:46:25
| 112,741,111
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 157
|
r
|
dump_versions.R
|
#! /usr/local/bin/Rscript --vanilla

# Attach the packages whose versions we want on record, then print the full
# session information (R version plus attached package versions) to stdout.
pkgs <- c(
  "gtools",    # for defmacro
  "circular",
  "xtable",
  "Rcpp",
  "stringr"
)
for (pkg in pkgs) {
  library(pkg, character.only = TRUE)
}
sessionInfo()
|
6a537546037e1644f91308ea3ec40a097230e3c1
|
e0810ebeed1b0e5488443943115de7bfe83e1228
|
/SIS/data_plotting/ode_comparsion.R
|
06706e9b6d589059488801392d490b0c1832dd6c
|
[] |
no_license
|
ksuchak1990/network-gillespie
|
eb9439bc9bfac7ad32f38d1f1f2aad50b69a392d
|
38b4801b533ee1e8afd01970b58a7c02289d8954
|
refs/heads/master
| 2021-01-20T20:48:45.385406
| 2018-05-23T15:28:46
| 2018-05-23T15:28:46
| 62,816,946
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,202
|
r
|
ode_comparsion.R
|
### Simulation data
# Number of Gillespie simulation realisations; used to normalise state
# counts into probabilities below.
R <- 2000
# Import data, make workable dataframe.
# Each row is an event: a time and the network state, with the state encoded
# as a binary string over the 3 nodes (decoded to an integer 0..7 below).
mydata <- as.data.frame(read.table("../output/output.txt"))
colnames(mydata) <- c("t", "state")
mydata$state <- strtoi(mydata$state,base = 2)
# Sort data by time
data_sorted <- mydata[order(mydata$t),]
# Find unique times
times_unique <- unique(data_sorted$t)
# Make data frame into which we can output our data:
# one row per unique time, one column per network state (s0..s7) holding the
# count (later probability) of realisations in that state.
df <- data.frame(matrix(nrow = length(times_unique),ncol = 9))
colnames(df) <- c("t", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7")
df$t <- times_unique
# Sorted data for which t == 0
myrows <- which(mydata$t == df[1, 1])
states_cur <- mydata[myrows, 2]
# Populate first row: tally how many realisations start in each state.
for (i in 0:7) {
  df[1, i+2] <- length(which(states_cur == i))
}
# Populate subsequent rows, accounting for changes: copy the previous row's
# counts, then for each event at this time move one realisation from its
# previous state to its current state.
# NOTE(review): `myrows - 1` assumes each event row in the raw file directly
# follows the same realisation's previous state -- confirm against the
# simulator's output format.
for (i in 2:length(times_unique)) {
  myrows <- which(mydata$t == df[i, 1])
  states_cur <- mydata[myrows, 2]
  states_prev <- mydata[myrows - 1, 2]
  df[i, 2:9] <- df[i-1, 2:9]
  for (j in 1:length(states_cur)) {
    df[i, states_cur[j]+2] <- df[i, states_cur[j]+2] + 1
    df[i, states_prev[j]+2] <- df[i, states_prev[j]+2] - 1
  }
}
# Scale for number of realisations (counts -> probabilities)
df[,2:9] <- df[2:9]/R
# Aggregate states by number of infected nodes (0..3), then weight each
# probability by that count so `tot` is the expected number infected.
df2 <- data.frame(matrix(nrow = length(times_unique),ncol = 6))
colnames(df2) <- c("t", "s0", "s1", "s2", "s3")
df2$t <- df$t
df2$s0 <- df$s0
df2$s1 <- df$s1 + df$s2 + df$s4
df2$s2 <- df$s3 + df$s5 + df$s6
df2$s3 <- df$s7
df2$s0 <- df2$s0 * 0
df2$s1 <- df2$s1 * 1
df2$s2 <- df2$s2 * 2
df2$s3 <- df2$s3 * 3
df2$tot <- df2$s0 + df2$s1 + df2$s2 + df2$s3
### Numerical solution of master equation
# Initial probability mass placed on each single-infected state (y2..y4).
th <- 1/3
library(deSolve)
#### SIS triangle network ####
# Right-hand side of the master equation for an SIS model on a triangle
# (3-node) network, in the form expected by deSolve::ode().
#   t     - time (unused; the dynamics are autonomous)
#   y     - length-8 vector of state probabilities
#   parms - named vector/list with rates `b` and `g`
# Returns a one-element list holding the 8 derivatives, as deSolve requires.
sistrinet <- function(t, y, parms) {
  b <- parms[["b"]]
  g <- parms[["g"]]
  dy <- numeric(8)
  dy[1] <- g * (y[2] + y[3] + y[4])
  dy[2] <- g * (y[5] + y[6]) - (2 * b + g) * y[2]
  dy[3] <- g * (y[5] + y[7]) - (2 * b + g) * y[3]
  dy[4] <- g * (y[6] + y[7]) - (2 * b + g) * y[4]
  dy[5] <- g * y[8] + b * (y[2] + y[3]) - 2 * (b + g) * y[5]
  dy[6] <- g * y[8] + b * (y[2] + y[4]) - 2 * (b + g) * y[6]
  dy[7] <- g * y[8] + b * (y[3] + y[4]) - 2 * (b + g) * y[7]
  dy[8] <- 2 * b * (y[5] + y[6] + y[7]) - 3 * g * y[8]
  list(dy)
}
# Initial condition: probability th on each single-infected state (y2..y4),
# matching the simulation's starting configuration.
yini <- c(y1 = 0, y2 = th, y3 = th, y4 = th, y5 = 0, y6 = 0, y7 = 0, y8 = 0)
parms <- c(b=0.1, g=0.1)
# Integrate the master equation over the same time span as the simulation.
times <- seq(from = 0, to = max(df$t), by = 0.01)
out <- ode (times = times, y = yini, func = sistrinet, parms = parms)
out <- as.data.frame(out)
# Aggregate the numerical solution by number of infected nodes, mirroring
# the df2 aggregation of the simulated data: weight each group's probability
# by its infected-node count so `tot` is the expected number infected.
out2 <- data.frame(matrix(nrow = length(out$time),ncol = 6))
colnames(out2) <- c("t", "s0", "s1", "s2", "s3", "tot")
out2$t <- out$time
out2$s0 <- out$y1
out2$s1 <- out$y2 + out$y3 + out$y4
out2$s2 <- out$y5 + out$y6 + out$y7
out2$s3 <- out$y8
out2$s0 <- out2$s0 * 0
out2$s1 <- out2$s1 * 1
out2$s2 <- out2$s2 * 2
out2$s3 <- out2$s3 * 3
out2$tot <- out2$s0 + out2$s1 + out2$s2 + out2$s3
# ---------------------------------------------------------------------------
# Plot simulated vs numerical (ODE) probabilities for each network state.
# The eight per-state plots were originally eight nearly identical blocks;
# they are generated here by one helper to remove the duplication. Output
# file names and plot contents are unchanged.
# ---------------------------------------------------------------------------
library(ggplot2)

# Render one state's comparison plot to a PNG file.
#   sim_col - column of `df` holding the simulated probability (e.g. "s0")
#   ode_col - column of `out` holding the numerical solution (e.g. "y1")
#   label   - bit-string label of the state, used in the output file name
plot_state <- function(sim_col, ode_col, label) {
  png(filename = paste0("./sis_trinet_", label, ".png"))
  on.exit(dev.off(), add = TRUE)
  p <- ggplot(df, aes_string(x = "t", y = sim_col)) +
    geom_point(aes(colour = "Simulation")) +
    geom_line(data = out,
              aes_string(x = "time", y = ode_col, colour = "'Numerical'")) +
    xlab("Time") +
    ylab("P(s)") +
    theme_bw()
  # ggplot objects only render implicitly at top level; inside a function or
  # loop an explicit print() is required to draw onto the device.
  print(p)
}

# State label -> (simulation column, ODE column), preserving the original
# one-plot-per-state pairings (note the 100 -> s4/y4 and 011 -> s3/y5 order).
state_specs <- list(
  c("000", "s0", "y1"),
  c("001", "s1", "y2"),
  c("010", "s2", "y3"),
  c("100", "s4", "y4"),
  c("011", "s3", "y5"),
  c("101", "s5", "y6"),
  c("110", "s6", "y7"),
  c("111", "s7", "y8")
)
for (spec in state_specs) {
  plot_state(sim_col = spec[2], ode_col = spec[3], label = spec[1])
}

# Expected number of infected nodes: simulation (df2) vs numerical (out2).
png(filename = "./sis_trinet_average.png")
print(
  ggplot(df2, aes(x = t, y = tot)) +
    geom_point(aes(colour = "Simulation")) +
    geom_line(data = out2, aes(x = t, y = tot, colour = "Numerical")) +
    xlab("Time") +
    ylab("P(s)") +
    theme_bw()
)
dev.off()

# png(filename = "./trinet.png")
# plot(out)
# dev.off()
# # Plot for each state
# for (i in 1:7) {
#   plot(df$t, df[,i+1])
# }
|
8efef42ecb3ff19ca4c0eaf54579d13378840e6d
|
81436a57285b11749ecd5b3048f96a4b44604d1a
|
/utils/models/MICE_imputation.R
|
621addf46dcadc46b8281c6163f7a018e856e656
|
[] |
no_license
|
ChayutWo/Nonparam-oridinal-nominal
|
3c8ee1492ca711c03f43fab126b148a7dd94c1db
|
a9f2517741b9a800d21e412f499b5af7d429010d
|
refs/heads/master
| 2023-05-04T07:32:11.854195
| 2021-05-20T03:12:51
| 2021-05-20T03:12:51
| 255,473,233
| 1
| 0
| null | 2020-06-08T19:08:39
| 2020-04-14T00:39:37
|
Python
|
UTF-8
|
R
| false
| false
| 498
|
r
|
MICE_imputation.R
|
library(mice)
# Perform missing-data imputation using the MICE (chained equations) approach.
#   df_observed   - data frame with missing entries to impute
#   n_imputations - number of completed (imputed) data sets to generate
# Returns a list of `n_imputations` completed data frames.
MICE_imputation <- function(df_observed, n_imputations){
  # Fit the imputation model once; mice() produces all m draws in one call.
  imputed_df <- mice(df_observed, m = n_imputations, print = FALSE)
  # Extract each completed data set. lapply() returns the same 1..m list as
  # the original loop without growing a list element by element.
  lapply(seq_len(n_imputations), function(i) complete(imputed_df, i))
}
|
c5ea357ed8207cee9055ec712cc565626e2384b4
|
f2da63de512183804290bfcabfa60eaca3649e05
|
/projects/miniCRAN/run_installpackages/code/run_installpackages.R
|
f5d14c432cf3c8e3d7e268576e0b07262798e199
|
[] |
no_license
|
paradisepilot/statistics
|
a94bb57ebe453d49c06815c523e8f633423cb68e
|
50daf644baca1f40253edf91083ed42d4c5f9342
|
refs/heads/master
| 2022-07-25T16:19:07.751886
| 2022-06-26T21:18:38
| 2022-06-26T21:18:38
| 5,012,656
| 0
| 2
| null | 2019-04-22T06:52:55
| 2012-07-13T01:11:42
|
HTML
|
UTF-8
|
R
| false
| false
| 4,898
|
r
|
run_installpackages.R
|
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# Command-line arguments: (1) code directory, (2) output directory,
# (3) path to a file listing the desired R packages, one name per line.
command.arguments <- commandArgs(trailingOnly = TRUE);
code.directory <- normalizePath(command.arguments[1]);
output.directory <- normalizePath(command.arguments[2]);
pkgs.desired.FILE <- normalizePath(command.arguments[3]);
setwd(output.directory);
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# Redirect standard output and messages/warnings to log files in the
# output directory; the sinks are closed at the bottom of the script.
fh.output <- file("log.output", open = "wt");
fh.message <- file("log.message", open = "wt");
sink(file = fh.message, type = "message");
sink(file = fh.output, type = "output" );
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# Record the start time for the elapsed-time report at the end of the log.
# NOTE(review): print("\n...") shows the literal characters backslash-n in
# the log rather than a newline; cat() would produce a real line break.
print("\n##### Sys.time()");
Sys.time();
start.proc.time <- proc.time();
###################################################
# Remove any user-level (/Users/...) entries from the library search path so
# that only system-level libraries are visible during installation.
default.libPaths <- setdiff(gsub(x=.libPaths(),pattern="^/Users/.+",replacement=""),c(""));
.libPaths(default.libPaths);
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# copy the file of desired packages to output directory
file.copy(
from = pkgs.desired.FILE,
to = "."
);
# read list of desired R packages
pkgs.desired <- read.table(
file = pkgs.desired.FILE,
header = FALSE,
stringsAsFactors = FALSE
)[,1];
# exclude packages already installed
pkgs.desired <- setdiff(
pkgs.desired,
as.character(installed.packages()[,"Package"])
);
# Record the remaining to-be-installed packages for auditing.
write.table(
file = "Rpackages-desired-minus-preinstalled.txt",
x = data.frame(package = sort(pkgs.desired)),
quote = FALSE,
row.names = FALSE,
col.names = FALSE
);
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# get URL of an active CRAN mirror: prefer the last listed Canadian mirror,
# fall back to the first mirror overall, and quit if none are reachable.
CRANmirrors <- getCRANmirrors();
CRANmirrors <- CRANmirrors[CRANmirrors[,"OK"]==1,];
caCRANmirrors <- CRANmirrors[CRANmirrors[,"CountryCode"]=="ca",c("Name","CountryCode","OK","URL")];
if (nrow(caCRANmirrors) > 0) {
myRepoURL <- caCRANmirrors[nrow(caCRANmirrors),"URL"];
} else if (nrow(CRANmirrors) > 0) {
myRepoURL <- CRANmirrors[1,"URL"];
} else {
q();
}
print(paste("\n##### myRepoURL",myRepoURL,sep=" = "));
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# assemble full path for R library to be built, versioned by the running
# R release, e.g. ./library/4.2.1/library
current.version <- paste0(R.Version()["major"],".",R.Version()["minor"]);
myLibrary <- file.path(".","library",current.version,"library");
if(!dir.exists(myLibrary)) { dir.create(path = myLibrary, recursive = TRUE); }
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# BiocManager is installed first (from CRAN) so it can be used below to
# install Bioconductor packages into the same library.
print("\n##### installation of BiocManager starts ...");
install.packages(
pkgs = c("BiocManager"),
lib = myLibrary,
repos = myRepoURL,
dependencies = TRUE # c("Depends", "Imports", "LinkingTo", "Suggests")
);
print("\n##### installation of BiocManager complete ...");
library(
package = "BiocManager",
character.only = TRUE,
lib.loc = myLibrary
);
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
print("\n##### installation of Bioconductor packages starts ...");
BiocManager::install(
pkgs = c("BiocVersion","BiocStyle","graph","Rgraphviz","ComplexHeatmap"),
lib = myLibrary,
dependencies = TRUE
);
print("\n##### installation of Bioconductor packages complete ...");
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# install desired R packages to
# user-specified library
print("\n##### installation of packages starts ...");
install.packages(
pkgs = pkgs.desired,
lib = myLibrary,
repos = myRepoURL,
dependencies = TRUE # c("Depends", "Imports", "LinkingTo", "Suggests")
);
print("\n##### installation of packages complete ...");
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# Write an inventory of everything now present in the new library.
my.colnames <- c("Package","Version","License","License_restricts_use","NeedsCompilation","Built");
DF.installed.packages <- as.data.frame(installed.packages(lib = myLibrary)[,my.colnames]);
write.table(
file = "Rpackages-newlyInstalled.txt",
x = DF.installed.packages,
sep = "\t",
quote = FALSE,
row.names = FALSE,
col.names = TRUE
);
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# Report the desired packages that still failed to install.
pkgs.notInstalled <- setdiff(
pkgs.desired,
as.character(installed.packages(lib = myLibrary)[,"Package"])
);
write.table(
file = "Rpackages-notInstalled.txt",
x = data.frame(package.notInstalled = sort(pkgs.notInstalled)),
quote = FALSE,
row.names = FALSE,
col.names = TRUE
);
###################################################
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
# Final diagnostics: warnings, session info, and total elapsed time.
print("\n##### warnings()")
warnings();
print("\n##### sessionInfo()")
sessionInfo();
### ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ###
print("\n##### Sys.time()");
Sys.time();
stop.proc.time <- proc.time();
print("\n##### start.proc.time() - stop.proc.time()");
stop.proc.time - start.proc.time;
# Restore stdout/message streams and close the log file handles.
sink(type = "output" );
sink(type = "message");
closeAllConnections();
|
e116d47ce4da6c628fc9ee5e60040c4f0f0a4b82
|
55e78caca6c071a161f4e9885a875de0bfb09378
|
/man/cvar-package.Rd
|
0997079dd0dbede2acba43765781e9264947974d
|
[] |
no_license
|
GeoBosh/cvar
|
d3c5b8697809302f9803b191923e474a69f49800
|
207fd77ccc804123cd31126b2ce6d79402611754
|
refs/heads/master
| 2022-11-24T11:55:31.576486
| 2022-11-09T21:52:26
| 2022-11-09T21:52:26
| 128,628,020
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,336
|
rd
|
cvar-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cvar-package.R
\docType{package}
\name{cvar-package}
\alias{cvar-package}
\alias{cvar}
\title{Compute Conditional Value-at-Risk and Value-at-Risk}
\description{
Compute expected shortfall (ES) and Value at Risk (VaR)
from a quantile function, distribution function, random number
generator or probability density function. ES is also known as
Conditional Value at Risk (CVaR). Virtually any continuous
distribution can be specified. The functions are vectorised over
the arguments. Some support for GARCH models is provided, as well.
}
\details{
There is a huge number of functions for computations with
distributions in core \R and in contributed packages. Pdf's,
cdf's, quantile functions and random number generators are
covered comprehensively. The coverage of expected shortfall is
more patchy but a large collection of distributions, including
functions for expected shortfall, is provided by
\insertCite{VaRES2013;textual}{cvar}.
\insertCite{PerformanceAnalytics2018;textual}{cvar} and
\insertCite{actuarJSS2008;textual}{cvar} provide packages
covering comprehensively various aspects of risk measurement,
including some functions for expected shortfall.
Package \pkg{cvar} is a small package with, essentially, two main
functions --- \code{ES} for computing the expected shortfall
and \code{VaR} for Value at Risk. The user specifies the
distribution by supplying one of the functions that define a
continuous distribution---currently this can be a quantile
function (qf), cumulative distribution function (cdf) or
probability density function (pdf). Virtually any continuous
distribution can be specified.
The functions are vectorised over the parameters of the
distributions, making bulk computations more convenient, for
example for forecasting or model evaluation.
The name of this package, "cvar", comes from \emph{Conditional
Value at Risk} (CVaR), which is an alternative term for
expected shortfall.
We chose to use the standard names \code{ES} and \code{VaR},
despite the possibility for name clashes with same named
functions in other packages, rather than invent possibly
difficult to remember alternatives. Just call the functions as
\code{cvar::ES} and \code{cvar::VaR} if necessary.
Location-scale transformations can be specified separately
from the other distribution parameters. This is useful when
such parameters are not provided directly by the distribution
at hand. The use of these parameters often leads to more
efficient computations and better numerical accuracy even if
the distribution has its own parameters for this purpose. Some
of the examples for \code{VaR} and \code{ES} illustrate this
for the Gaussian distribution.
Since VaR is a quantile, functions computing it for a given
distribution are convenience functions. \code{VaR} exported by
\pkg{cvar} could be attractive in certain workflows because of
its vectorised distribution parameters, the location-scale
transformation, and the possibility to compute it from cdf's
when quantile functions are not available.
Some support for GARCH models is provided, as well. It is
currently under development, see \code{\link{predict.garch1c1}}
for current functionality.
In practice, we may need to compute VaR associated with data. The distribution comes
from fitting a model. In the simplest case, we fit a distribution to the data,
assuming that the sample is i.i.d. For example, a normal distribution \eqn{N(\mu,
\sigma^2)} can be fitted using the sample mean and sample variance as estimates of the
unknown parameters \eqn{\mu} and \eqn{\sigma^2}, see section \sQuote{Examples}. For other
common distributions there are specialised functions to fit their parameters and if
not, general optimisation routines can be used. More sophisticated models may be used,
even time series models such as GARCH and mixture autoregressive models.
}
\examples{
## see the examples for ES(), VaR(), predict.garch1c1()
}
\references{
\insertAllCited{}
}
\seealso{
\code{\link{ES}},
\code{\link{VaR}}
}
\author{
Georgi N. Boshnakov
}
|
43589178c6130d2f53c56db136f4606fada248b5
|
c74b4096a66df8d61a85aeef35daf17012befe12
|
/Assignment 2/rprog_data_ProgAssignment3-data/best.R
|
388f47ecd79e9e47094a35cddd90e4802775306c
|
[] |
no_license
|
Amircrown/R
|
fe8e567584787e6febdce54add723a3b66c5d6ba
|
db75ac944a91dd482586fbecd7a60e631f695eaf
|
refs/heads/master
| 2021-01-13T02:05:58.582657
| 2015-04-20T00:50:27
| 2015-04-20T00:50:27
| 33,512,599
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 917
|
r
|
best.R
|
# This is from deleteme 2
# Return the name of the hospital in `state` with the lowest 30-day mortality
# rate for `outcome` ("heart attack", "heart failure" or "pneumonia"), based
# on outcome-of-care-measures.csv in the working directory. Hospitals with a
# "Not Available" rate are excluded; ties at the minimum rate are broken
# alphabetically by hospital name. Stops on an unknown outcome or state.
best <- function(state,outcome){
  data <- read.csv("outcome-of-care-measures.csv", colClasses ="character" )
  # Keep hospital name, state and the three 30-day mortality columns.
  data1 <- data[,c(2,7,11,17,23)]

  # Map the requested outcome to its column in data1; the unnamed final
  # argument of switch() acts as the default and rejects unknown outcomes.
  coloumn <- switch(outcome,
                    "heart attack"  = 3,
                    "heart failure" = 4,
                    "pneumonia"     = 5,
                    stop("invalid outcome"))

  data_state <- subset(data1, data1[,2]==state, c(1,2,coloumn))
  if (nrow(data_state)==0){
    stop("invalid state")
  }

  # Drop hospitals with no reported rate, then compare numerically.
  # (The previous na.rm argument to as.numeric() was not a real parameter
  # and was silently ignored, so removing it changes nothing.)
  data_state <- subset(data_state, data_state[,3]!="Not Available" )
  data_state[,3] <- as.numeric(data_state[,3])

  minim_val <- min(data_state[,3])
  minim_list <- subset(data_state, data_state[,3]== minim_val)
  # Alphabetically first hospital among those tied at the minimum.
  sort(minim_list[,1])[1]
}
|
fca68df1477be039ee87adf5b676f4112bc94141
|
c8fbccfe947a9643f0af1a4dc21ca8d090c06d3d
|
/01_dev-env.R
|
a4db984e6ee90ea01701515ba9883a11d7873074
|
[] |
no_license
|
rstats-wtf/wtf-dev-env
|
dd9cf2f32c788e2d8f59e87e1e59d06ca9288110
|
a6f4989a8dce6a2803e7db35ba349fb0139ae476
|
refs/heads/master
| 2020-12-19T18:13:18.829532
| 2020-01-23T15:53:03
| 2020-01-23T15:53:03
| 235,811,386
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 319
|
r
|
01_dev-env.R
|
# Make sure you have devtools and the BRRR package installed
install.packages("devtools")
# BRRR is installed from GitHub (not CRAN), hence install_github().
devtools::install_github("brooke-watson/BRRR")
# We will use has_devel to verify you have your system setup properly
# You will also need to turn on your speakers to hear the sounds
# has_devel() checks for a working development toolchain; skrrrahh(36)
# plays a sound as the audible "success" signal.
if (devtools::has_devel()) BRRR::skrrrahh(36)
|
90dbce7fc1118d7777f97d1566fd5665dac10aff
|
6a54a5e5d8ec493a37f63a8f2fb2223d11b4e3b3
|
/tests/testthat/test-src_dbi.R
|
24d21a6efa7ab119135b31ac1b7b7efe0097c205
|
[
"MIT"
] |
permissive
|
WbGuo96/dplyr
|
1b480906180de091d74942201a2ff4c12de4a38b
|
1382b65c8cd5002d3ceb80e092fec9739bdbf02a
|
refs/heads/master
| 2022-03-27T08:16:34.443466
| 2020-01-08T16:07:11
| 2020-01-08T16:07:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 216
|
r
|
test-src_dbi.R
|
# src_sqlite() must refuse a non-existent path unless asked to create it.
test_that("src_sqlite() errs if path does not exist", {
  skip_if_not_installed("dbplyr")
  expected_msg <- "`path` must already exist, unless `create` = TRUE"
  expect_error(src_sqlite(":memory:"), expected_msg, fixed = TRUE)
})
|
b4547506431965ddcf6313839fa9894dc003d5a8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/InfoTrad/examples/EA.Rd.R
|
779a47c5455f2ebb0fcba2342505cb0b37537195
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,273
|
r
|
EA.Rd.R
|
# Auto-extracted example code from the EA() help page of the InfoTrad package.
library(InfoTrad)
### Name: EA
### Title: Ersan and Alici (2016) Cluster analysis with the altered steps.
### Aliases: EA print.EA_class EA_class print
### ** Examples
# Sample Data: daily buy/sell order counts for 10 trading days.
# Buy Sell
#1 350 382
#2 250 500
#3 500 463
#4 552 550
#5 163 200
#6 345 323
#7 847 456
#8 923 342
#9 123 578
#10 349 455
Buy=c(350,250,500,552,163,345,847,923,123,349)
Sell=c(382,500,463,550,200,323,456,342,578,455)
data=cbind(Buy,Sell)
# Parameter estimates using the LK factorization of Lin and Ke (2011)
# with the modified clustering algorithm of Ersan and Alici (2016).
# Default factorization is set to be "LK"
result=EA(data)
print(result)
# Expected output (from the help page):
# Alpha: 0.9511418
# Delta: 0.2694005
# Mu: 76.7224
# Epsilon_b: 493.7045
# Epsilon_s: 377.4877
# Likelihood Value: 43973.71
# PIN: 0.07728924
# Parameter estimates using the EHO factorization of Easley et. al. (2010)
# with the modified clustering algorithm of Ersan and Alici (2016).
result=EA(data,likelihood="EHO")
print(result)
# Alpha: 0.9511418
# Delta: 0.2694005
# Mu: 76.7224
# Epsilon_b: 493.7045
# Epsilon_s: 377.4877
# Likelihood Value: 43973.71
# PIN: 0.07728924
|
277106a5aac2d87365e587a52105707622262e1e
|
0fbc58702c39addfa7949391d92533922dcf9d49
|
/inst/examples/plot-type-tikz.R
|
8cfc2ed2bca09bae309a9cb0deddca27abd8a65d
|
[] |
no_license
|
yihui/MSG
|
d3d353514464f962a0d987efd8cf32ed50ac901a
|
8693859ef41139a43e32aeec33ab2af700037f82
|
refs/heads/master
| 2021-11-29T08:12:02.820072
| 2021-08-15T17:14:36
| 2021-08-15T17:14:36
| 1,333,662
| 30
| 12
| null | 2021-08-15T17:14:37
| 2011-02-06T05:42:53
|
R
|
UTF-8
|
R
| false
| false
| 265
|
r
|
plot-type-tikz.R
|
# The nine plot styles (the "type" argument of plot())
# Lay out a 3x3 grid of panels with tight margins.
par(mfrow = c(3, 3), mar = c(2, 1, 3, 1))
for (i in c("p", "l", "b", "c", "o", "h", "s", "S", "n")) {
  # The title uses LaTeX \texttt markup (presumably rendered through a
  # TikZ/LaTeX graphics device, per the file name -- confirm).
  plot(c(1:5, 5:1), xlab = "", type = i, ylab = "", axes = FALSE,
       main = paste("\\texttt{type = \"", i, "\"}", sep = ""))
  box()
}
|
ff0dc627ba25cf567a9ecfe931017d294c8408b5
|
71c579f5f8ac72d7e3d21c77855b62690433c919
|
/Documents/Git/Targil3/Ex3_1.R
|
500e860f445d2e5e6d164ca36d26d4b259fd80fb
|
[] |
no_license
|
hilakr/Text-Mining
|
90ed7d2bcd5e89f29a1b9d693e19b1c2a7517498
|
d4778d60d999c2bc756b9e210d77dae677f610b8
|
refs/heads/master
| 2022-02-20T07:49:22.768320
| 2016-06-12T15:59:20
| 2016-06-12T15:59:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,326
|
r
|
Ex3_1.R
|
# Network analysis of the Grey's Anatomy edge list: centrality measures and
# three community-detection algorithms compared by modularity.
# NOTE(review): install.packages() at the top runs on every execution of the
# script; normally installation is a one-off step outside the script.
install.packages('igraph')
library(igraph)
require(igraph)
# Build an undirected graph from the edge-list CSV.
edges.data = read.csv('ga_edgelist.csv',header = T)
ga_edges = graph.data.frame(edges.data,directed = F)
summary(ga_edges)
V(ga_edges)$name
# Remove self-loops if any exist
ga_edges = simplify(ga_edges)
# Calculate betweenness; name of top vertex
ga_bet = betweenness(ga_edges)
ga_bet = sort(ga_bet,decreasing = T)
names(ga_bet[1])
# Calculate closeness; name of top vertex
ga_close = closeness(ga_edges)
ga_close = sort(ga_close, decreasing = T)
names(ga_close[1])
# Calculate eigenvector centrality; name of top vertex
ga_eigen = evcent(ga_edges)
ga_eigen = sort(ga_eigen$vector, decreasing = T)
names(ga_eigen[1])
# Find communities with Girvan-Newman (edge betweenness) community detection
fc = edge.betweenness.community(ga_edges)
# Check what the modularity is
fc$modularity
# Which partition is the best?
max(fc$modularity)
which.max(fc$modularity)
# Color nodes by partitions
memb = membership(fc)
plot(ga_edges, vertex.size=7, vertex.label=NA,
     vertex.color=memb, asp=FALSE)
# How many communities were found
max(levels(as.factor(memb)))
# What is the size of each community
summary(as.factor(memb))
# Find communities with the Multi-Level (Louvain) algorithm
# This function implements the multi-level modularity optimization algorithm for finding community structure.
ml = multilevel.community(ga_edges)
# Check what the modularity is
ml$modularity
# Which partition is the best?
max(ml$modularity)
which.max(ml$modularity)
# Color nodes by partitions
memb = membership(ml)
plot(ga_edges, vertex.size=7, vertex.label=NA,
     vertex.color=memb, asp=FALSE)
# How many communities were found
max(levels(as.factor(memb)))
# What is the size of each community
summary(as.factor(memb))
# Find communities with the label-propagation algorithm
# This is a fast, nearly linear time algorithm for detecting community structure in networks.
# It works by labeling the vertices with unique labels and then updating the labels by majority voting in the neighborhood of the vertex.
pl = label.propagation.community(ga_edges)
# Check what the modularity is
pl$modularity
# Which partition is the best?
max(pl$modularity)
which.max(pl$modularity)
# Color nodes by partitions
memb = membership(pl)
plot(ga_edges, vertex.size=8, vertex.label=NA,
     vertex.color=memb, asp=FALSE)
# How many communities were found
max(levels(as.factor(memb)))
# What is the size of each community
summary(as.factor(memb))
|
04ac514730dc7742a1a938b3bfc912aae543c60b
|
119eb210049745e82f058ab6cad13ccac892433c
|
/Script/week_6_assignment_RCH.R
|
ed662bda1a1ba20d6100c644c3a84eb2f29eb3de
|
[] |
no_license
|
gge-ucd/r-davis-in-class-rchiggins
|
52ec0a39f6b32d0a897d910fb76c9fa809b6ae30
|
a39f50e3f11cb058ba5402dadd5dcd46d3572aaa
|
refs/heads/master
| 2020-04-17T03:06:27.217967
| 2019-09-23T21:38:44
| 2019-09-23T21:38:44
| 166,166,768
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,291
|
r
|
week_6_assignment_RCH.R
|
#Week 6 assignment
library(tidyverse)
# Gapminder data: one row per country-year with lifeExp, gdpPercap, pop, etc.
gapminder <- read_csv("https://gge-ucd.github.io/R-DAVIS/data/gapminder.csv")
#1A Modify the following code to make a figure that shows how life expectancy has changed over time:
ggplot(gapminder, aes(x = gdpPercap, y = lifeExp)) +
geom_point()
# Answer: map year to x and colour points by continent.
ggplot(gapminder, aes(x = year, y = lifeExp, color = continent)) +
geom_point()
#1B Look at the following code. What do you think the scale_x_log10() line is doing? What do you think the geom_smooth() line is doing?
ggplot(gapminder, aes(x = gdpPercap, y = lifeExp)) +
geom_point(aes(color = continent), size = .25) +
scale_x_log10() +
geom_smooth(method = "lm", color = 'black', linetype = 'dashed') +
theme_bw()
#Including the scale_x_log10() makes the x axis a log scale so the points are spread out rather than clumped in one region
#The geom_smooth() line puts a line of best fit through the data points
#1C Challenge: Modify the above code to size the points in proportion to the population of the county. Hint: Are you translating data to a visual feature of the plot?
# Answer: move `size = pop` inside aes() so point size is mapped from data.
ggplot(gapminder, aes(x = gdpPercap, y = lifeExp)) +
geom_point(aes(color = continent, size = pop)) +
scale_x_log10() +
geom_smooth(method = "lm", color = 'black', linetype = 'dashed') +
theme_bw()
|
8feaee9e154b74a1a35f4132dd5584c3ee4881ae
|
b6d08af0a15af8bd5974218eecf45b58ea194289
|
/R_working_archive/pub_vf_analysis_regressions.R
|
1a791fc2f8ed31194754c8da680c226fcd6527ac
|
[] |
no_license
|
csjohns/pb-voter-turnout
|
dd9ecbb3c1d26529d86789f78b4f8a128caeca17
|
221dd06d6db0bde2e801d2a6f865575acec933d3
|
refs/heads/master
| 2021-05-23T05:01:35.410913
| 2021-02-24T06:33:23
| 2021-02-24T06:33:23
| 81,271,059
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,864
|
r
|
pub_vf_analysis_regressions.R
|
#################################################################################################################################################
###
### PB and voter turnout: Regression analyses from matched dataset
### Calls the pub_vf_matching.R script and the pub_balance_checking.R scripts
###
### Created by: Carolina Johnson
### Created date: 3/1/2018
###
#################################################################################################################################################
library(dplyr)
library(tidyr)
library(ggplot2)
library(lubridate)
library(stringr)
library(lme4)
library(margins)

### Creating/loading matched datasets
# source("pub_vf_matching.R")
# Loads the matched analysis data frame (vf_analysis) produced by the
# matching script above -- NOTE(review): confirm the object name stored
# in the .RData file.
load("vf_analysis.RData")

#### replicating transformation and first regressions with the matched data -------------------------------------------------------------------------------------
## process analysis df to pb_long df for analysis (creating wide pb table along the way)
source("create_pb_long.R")
pb_long <- create_pb_long(vf_analysis)

#### Set reference levels for factor variables
# Reference categories for model contrasts: Race = "W" (presumably
# white) and election_type = "g" (presumably general election) --
# confirm against the codebook.
pb_long <- pb_long %>%
  group_by() %>%   # group_by() with no arguments drops any existing grouping
  mutate(Race = relevel(as.factor(Race), ref = "W"),
         election_type = relevel(as.factor(election_type), ref = "g"))
#### Model explorations ---------------------------------------------------------------------------------------------------------------------------
#
# ## looking at a very basic linear regression predicting turnout
# bas_log <- lm(turned_out ~ pb + after_pb + as.factor(year) + election_type , data = pb_long)
# summary(bas_log)
#
# bas_log_all <- lm(turned_out ~ pb + after_pb + as.factor(year) + election_type +
# Female + Race + age + medhhinc + white + college + majmatch, data = pb_long)
# summary(bas_log_all) ## R-Squared = .31!
#
# ## Quick comparison of linear and logit models with covariates - this is mostly just to give a sense of the relative magnitude of effects in the two model approaches
# library(margins)
# covar_formula <- turned_out ~ pb + after_pb + as.factor(year) + election_type + Race + age + Female + medhhinc + college + white + majmatch
# covar_logit <- pb_long %>% glm(covar_formula, data = ., family = binomial())
# summary(covar_logit)
# dydx(pb_long, covar_logit, "after_pb", change = c(0,1))[[1]] %>% mean
# covar_lm <- lm(covar_formula, data = pb_long)
# summary(covar_lm)
# dydx(pb_long, covar_lm, "after_pb", change = c(0,1))[[1]] %>% mean
#
# ##### Trying with lmer getting random effects for individuals
#
# logit_lme_f <- turned_out ~ pb + after_pb + Race + as.factor(year) + election_type + age + medhhinc + white + college + majmatch + (1 | VANID)
# lme_logit <- glmer(logit_lme_f, data = pb_long, family = binomial(), nAGQ = 0)
# summary(lme_logit)
#
# ## Comparing inclusion of NYCDD random effects - fit is improved by including NYCDD
#
# logit_full_fm <- turned_out ~ pb + after_pb + Race + Female + as.factor(year) + election_type + age + medhhinc + white + college + majmatch + (1 | VANID) + (1|NYCCD)
# lme_full <- glmer(logit_full_fm, data = pb_long, family = binomial(), nAGQ = 0)
# summary(lme_full)
#
# AIC(lme_full)
# AIC(lme_logit)
# BIC(lme_full)
# BIC(lme_logit)
#
# AICcollege <- AIC(lme_full)
# BICcollege <- BIC(lme_full)
#
# dydx(pb_long, lme_full, "after_pb", change = c(0,1))[[1]] %>% mean
#
# ## testing not including college
# logit_full_fm_nocollege <- turned_out ~ pb + after_pb + as.factor(year) + election_type + Race + age + medhhinc + white + majmatch + (1 | VANID) + (1|NYCCD)
# lme_full_ncollege <- glmer(logit_full_fm_nocollege, data = pb_long, family = binomial(), nAGQ = 0)
# AIC(lme_full_ncollege)
# AIC(lme_full)
# BIC(lme_full_ncollege)
# BIC(lme_full)
#
# dydx(pb_long, lme_full_ncollege, "after_pb", change = c(0,1))[[1]] %>% mean
# ## all this points to keeping college in the analyis. Not sure why I originally dropped it...
#
# ## testing including non-linear effects for age and medhhinc (as suggested by plotting)
# logit_age2_form <- turned_out ~ pb + after_pb + Race + Female + as.factor(year) + election_type + age + I(age^2) + medhhinc + white + college + majmatch + (1 | VANID) + (1|NYCCD)
# logit_med2_form <- turned_out ~ pb + after_pb + Race + Female + as.factor(year) + election_type + age + I(medhhinc^2) + medhhinc + white + college + majmatch + (1 | VANID) + (1|NYCCD)
# logit_lmed_form <- turned_out ~ pb + after_pb + Race + Female + as.factor(year) + election_type + age + log(medhhinc) + white + college + majmatch + (1 | VANID) + (1|NYCCD)
#
# lme_age2 <- glmer(logit_age2_form, data = pb_long, family = binomial(), nAGQ = 0)
# lme_med2 <- glmer(logit_med2_form, data = pb_long, family = binomial(), nAGQ = 0)
# lme_lmed <- glmer(logit_lmed_form, data = pb_long, family = binomial(), nAGQ = 0)
#
# AIC(lme_full)
# AIC(lme_age2)
# AIC(lme_med2)
# AIC(lme_lmed)
#
# BIC(lme_full)
# BIC(lme_age2)
# BIC(lme_med2)
# BIC(lme_lmed)
#
# #incl white?:
# lme_nowhite_form <- turned_out ~ pb + after_pb + Race + Female + as.factor(year) + election_type + age + I(age^2) + medhhinc + college + majmatch + (1 | VANID) + (1|NYCCD)
# lme_nowhite <- glmer(lme_nowhite_form, data = pb_long, family = binomial(), nAGQ = 0)
#
# AIC(lme_age2)
# AIC(lme_nowhite)
# BIC(lme_age2)
# BIC(lme_nowhite)
#
# ## % white isn't contributing, much once majority race is included (esp. since matched on nonwhite)
# ## BIC and AIC agree that it does not improve the model
# table(pb_long$turned_out, fitted(lme_age2)>= .5)
# table(pb_long$turned_out == as.numeric(fitted(lme_age2)>= .5)) %>% prop.table() #---> 87% pcp
#
# table(pb_long$turned_out, fitted(lme_nowhite)>= .5)
# table(pb_long$turned_out == as.numeric(fitted(lme_nowhite)>= .5)) %>% prop.table() #---> 87% pcp
# ## Percent correctly predicted is basically the same for the two models.
#
# #incl gender?:
# lme_nosex_form <- turned_out ~ pb + after_pb + Race + as.factor(year) + election_type + age + I(age^2) + medhhinc + college + majmatch + (1 | VANID) + (1|NYCCD)
# lme_nosex <- glmer(lme_nosex_form, data = pb_long, family = binomial(), nAGQ = 0)
#
# AIC(lme_nowhite)
# AIC(lme_nosex)
# BIC(lme_nowhite)
# BIC(lme_nosex)
#
# ## Both AIC and BIC actually encourage omitting gender from the modle - however, it's not
# ## a huge difference and I want to be able to include gender in the the subgroup breakdowns,
# ## so should include it for comparability
#
# # Age at vote eligibility flag
#
# logit_elig_fm <- turned_out ~ pb + after_pb + Race + Female + as.factor(year) + election_type + age + I(age^2) + I(age_at_vote < 18) + medhhinc + college + majmatch + (1 | VANID) + (1|NYCCD)
# lme_elig <- glmer(logit_elig_fm, data = pb_long, family = binomial(), nAGQ = 0)
# summary(lme_elig)
#
# AIC(lme_age2)
# AIC(lme_elig)
# BIC(lme_age2)
# BIC(lme_elig)
#
# ## including flag for age at vote makes a huge improvement in model fit. Use it!
### Models and output for paper ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# All models below are difference-in-difference specifications: pb marks
# treated (PB) districts, after_pb the post-treatment period, with random
# intercepts for individual (VANID) and council district (NYCCD).
# nAGQ = 0 selects lme4's fastest (least accurate) estimation shortcut --
# see ?glmer for the accuracy/speed trade-off.

### LMER base model, no covars: ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
logit_minimal_form <- turned_out ~ pb + after_pb + as.factor(year) + election_type + (1| VANID) + (1|NYCCD)
lme_minimal <- glmer(logit_minimal_form, data = pb_long, family = binomial(), nAGQ = 0)

### LMER model only demographics + base ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Adds individual demographics, a quadratic in age, and a flag for
# voters younger than 18 at the time of the given election.
logit_demog_form <- turned_out ~ pb + after_pb + as.factor(year) + election_type + Race + Female + age + I(age^2) + I(age_at_vote < 18) + (1| VANID) + (1|NYCCD)
lme_demog <- glmer(logit_demog_form, data = pb_long, family = binomial(), nAGQ = 0)

### LMER model incl tract vars ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Adds census-tract covariates: % college educated and median household
# income (medhhinc_10k -- presumably in $10k units; confirm).
logit_tract_form <- turned_out ~ pb + after_pb + as.factor(year) + election_type + Race + Female + age + I(age^2) +
  I(age_at_vote < 18) + college_pct + medhhinc_10k +(1| VANID) + (1|NYCCD)
lme_tract <- glmer(logit_tract_form, data = pb_long, family = binomial(), nAGQ = 0)

### LMER model incl majmatch ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Adds majmatch (labelled "Majority Race" in the output table below).
lme_final_form <- turned_out ~ pb + after_pb + Race + Female + as.factor(year) + election_type + age + I(age^2) + I(age_at_vote < 18) + medhhinc_10k + college_pct + majmatch + (1 | VANID) + (1|NYCCD)
lme_final <- glmer(lme_final_form, data = pb_long, family = binomial(), nAGQ = 0)

### LMER model incl compet ---------------------------------------------------------------------------------------------------------------------------------------------------------------------
# Adds compet -- NOTE(review): presumably electoral competitiveness;
# confirm its coding.
lme_compet_form <- turned_out ~ pb + after_pb + Race + Female + compet + as.factor(year) + election_type + age + I(age^2) + I(age_at_vote < 18) + medhhinc_10k + college_pct + majmatch + (1 | VANID) + (1|NYCCD)
lme_compet <- glmer(lme_compet_form, data = pb_long, family = binomial(), nAGQ = 0)
### Table / effect output for paper. "mainregs.tex" ------------------------------------------------------------------------------------------------------------------------------------------------------

## calculating average effect from final model
# Average marginal effect of after_pb (0 -> 1) from the compet model,
# computed over the model's complete cases. Years 2011 and 2015 are
# excluded -- NOTE(review): presumably years without relevant elections;
# confirm.
meaneffect <- pb_long %>%
  filter(! year %in% c(2011,2015) )%>%
  simcf::extractdata(lme_compet_form,., na.rm = T) %>%
  margins::dydx(., lme_compet, "after_pb", change = c(0,1)) %>%
  .$dydx_after_pb %>%
  mean()

print(meaneffect)

# Persist all fitted models for downstream scripts.
all_models <- list(lme_minimal, lme_demog, lme_tract, lme_final, lme_compet)
save(all_models, file = "data/cleaned_R_results/mainresults.RData")

library(stargazer)
# LaTeX regression table for the paper (models 2-5; the minimal model is
# omitted). `order` selects and reorders coefficient rows by regex, and
# `covariate.labels` must line up one-to-one with that order -- keep the
# two vectors in sync. Year fixed effects are omitted from the body and
# flagged via omit/omit.labels.
stargazer(all_models[2:5], #type = "text",
          out = "Paper_text/Tables/mainregs.tex", label = "main_results",
          title = "Individual voter turnout difference-in-difference regression results: no interactions",
          column.labels = c("Minimal", "Demog.", "Tract", "Majority Match"),
          order = c("^pb$", "^after\\_pb$", "^election\\_typep$", "^election\\_typepp$",
                    "^RaceB$", "^RaceA$", "^RaceH$", "^RaceU$", "^Female$",
                    "^age$", "^I\\(age\\^2\\)$", "I\\(age\\_at\\_vote < 18\\)TRUE",
                    "^college\\_pct$", "^medhhinc\\_10k$", "^majmatchTRUE$"),
          covariate.labels = c("PB district", "After PB", "Primary election", "Pres. Primary",
                               "Black", "Asian", "Hispanic", "Race Unknown", "Female",
                               "Age in years", "Age\\textsuperscript{2}", "18+ at vote",
                               "\\% college educated", "Median HH income", "Majority Race"),
          dep.var.labels.include = FALSE, dep.var.caption = "",
          digit.separator = "",intercept.bottom = TRUE, no.space = TRUE,
          omit = c("year"), omit.labels = c("Year fixed effects?"),
          keep.stat = c("n", "aic", "bic", "n"),
          star.char = "*", star.cutoffs = 0.05,
          align = TRUE,
          notes = "\\parbox[t]{.85\\textwidth}{\\footnotesize \\textit{Note:} Difference-in-difference regression results from multilevel mixed effect logistic models of individual turnout in a given election, including random effects for individual and council districts. Standard errors reported in parentheses and statistical significance at $p<0.05$ indicated by $^{*}$.}",
          notes.label = "",
          notes.align = "l",
          notes.append = FALSE)
|
3bc47d0bca4a85b424b1acc5945e8043ac824727
|
b25823f5b05e403f791b2424729040fae485150f
|
/run_analysis.R
|
70382501c8a426db48354233268c03bb7124ccf3
|
[] |
no_license
|
ErazeX/Gettingandcleaningdata
|
275c5cd3930d21d21e8bf8ea7936b33bca0e3a62
|
a1e4caa5d8c9c2107872370b54a4c4c20ef23500
|
refs/heads/master
| 2021-01-15T23:45:42.909806
| 2014-05-25T19:38:06
| 2014-05-25T19:38:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,318
|
r
|
run_analysis.R
|
# Getting & Cleaning Data course project: merge the UCI HAR test/train
# sets, keep mean/std features, label activities, and write a tidy data
# set of per-subject, per-activity averages.

# Load raw test and train data (hard-coded local paths).
subject_test <- read.table("~/R working/UCI HAR Dataset/test/subject_test.txt", quote="\"")
X_test <- read.table("~/R working/UCI HAR Dataset/test/X_test.txt", quote="\"")
y_test <- read.table("~/R working/UCI HAR Dataset/test/y_test.txt", quote="\"")
subject_train <- read.table("~/R working/UCI HAR Dataset/train/subject_train.txt", quote="\"")
X_train <- read.table("~/R working/UCI HAR Dataset/train/X_train.txt", quote="\"")
y_train <- read.table("~/R working/UCI HAR Dataset/train/y_train.txt", quote="\"")

# Feature names: sanitize punctuation into dot-separated, R-friendly
# names. NOTE: order matters -- "()" must be removed before the single
# "(" / ")" replacements run.
features <- read.table("~/R working/UCI HAR Dataset/features.txt", quote="\"")
features[,2] <- gsub("\\(\\)","",as.matrix(features[,2]))
features[,2] <- gsub("-",".",as.matrix(features[,2]))
features[,2] <- gsub("\\(",".",as.matrix(features[,2]))
features[,2] <- gsub("\\)","",as.matrix(features[,2]))
features[,2] <- gsub(",",".",as.matrix(features[,2]))

# Apply descriptive column names to both sets.
names(subject_test)[1] <- "Subject.number"
names(y_test)[1] <- "Activity.number"
colnames(X_test) <- features[,2]
names(subject_train)[1] <- "Subject.number"
names(y_train)[1] <- "Activity.number"
colnames(X_train) <- features[,2]

# Combine subject, activity, and measurements; stack test on train.
all_merged <- rbind(cbind(subject_test,y_test,X_test),cbind(subject_train,y_train,X_train))

# Keep only std and mean columns, excluding the meanFreq features.
std_colnames <- colnames(all_merged)[grep("std",colnames(all_merged))]
mean_colnames <- colnames(all_merged)[grep("mean",colnames(all_merged))]
mean_col_excl <- mean_colnames[grep("meanFreq",mean_colnames)]
real_mean_colnames <- mean_colnames[mean_colnames %in% mean_col_excl==FALSE]
only_mean_and_std <- cbind(all_merged[std_colnames],all_merged[real_mean_colnames],all_merged["Activity.number"],all_merged["Subject.number"])

# Map activity codes 1-6 to descriptive labels.
only_mean_and_std$Activity.longname[only_mean_and_std$Activity.number==1] <- "Walking"
only_mean_and_std$Activity.longname[only_mean_and_std$Activity.number==2] <- "Walking Upstairs"
only_mean_and_std$Activity.longname[only_mean_and_std$Activity.number==3] <- "Walking Downstairs"
only_mean_and_std$Activity.longname[only_mean_and_std$Activity.number==4] <- "Sitting"
only_mean_and_std$Activity.longname[only_mean_and_std$Activity.number==5] <- "Standing"
only_mean_and_std$Activity.longname[only_mean_and_std$Activity.number==6] <- "Laying Down"

# Average every measurement by subject and activity; write the tidy set
# (CSV content despite the .txt extension).
tidy_table <- (aggregate(. ~ Subject.number + Activity.longname, data = only_mean_and_std, FUN = mean))
write.csv(file="Tidy Data Set.txt",x=tidy_table)
|
b0ace17596d7431ed5661ee781e92e204e39b3cf
|
066ff002724a0fa18db0c4f2921d2a121b027c3e
|
/scripts/r/old/create_graph.R
|
c89e1a052ad64047bc233279c65d32a35be64da5
|
[] |
no_license
|
wri/demographic-identifier
|
a361afabc98eb0e1faa2f49ef0ccce2558a89068
|
aa7228da11110375e46f63e648472bbec0c0551b
|
refs/heads/master
| 2021-08-16T18:02:40.220571
| 2020-04-01T13:22:50
| 2020-04-01T13:22:50
| 144,755,460
| 26
| 2
| null | 2019-05-10T13:38:19
| 2018-08-14T18:06:04
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 7,872
|
r
|
create_graph.R
|
library(tidyverse)
library(ggridges)

# Wikipedia-derived predictions of speaker race; recode the fine-grained
# ethnicity labels into broad categories.
figdata <- read.csv("master-scripts/output-wiki-pred-race.csv")
figdata$race <- as.character(figdata$race)
figdata$race[figdata$race == "GreaterEuropean,WestEuropean,Hispanic"] <- "Latino"
figdata$race[figdata$race %in% c("Asian,GreaterEastAsian,Japanese",
                                 "Asian,GreaterEastAsian,EastAsian")] <- "Asian"
figdata$race[figdata$race %in% c("GreaterEuropean,WestEuropean,Germanic",
                                 "GreaterEuropean,WestEuropean,Italian",
                                 "GreaterEuropean,WestEuropean,French",
                                 "GreaterEuropean,British",
                                 "GreaterEuropean,WestEuropean,Nordic",
                                 "GreaterEuropean,Jewish",
                                 "GreaterEuropean,EastEuropean"
)] <- "Caucasian"
# Rows whose name field is the literal "nan nan" get no race label.
figdata$race[figdata$X__name == "nan nan"] <- NA

# Mean age per race category.
# NOTE(review): `means` is computed but never used below.
means <- figdata %>%
  dplyr::group_by(race) %>%
  dplyr::summarise(age = mean(age, na.rm=T))

# Counts by age/gender/race for the pyramid figure.
# NOTE(review): `all_data` is not defined anywhere in this script (and
# this overwrites the figdata built above) -- presumably created in an
# interactive session; confirm before running end-to-end.
figdata <- all_data %>%
  dplyr::group_by(age, pred_gender, race) %>%
  dplyr::summarise(n=n())%>%
  as.data.frame()
figdata$n <- as.numeric(figdata$n)
figdata <- figdata[!is.na(figdata$race),]

# Back-to-back age pyramid: female counts to the right, male mirrored
# left (n * -1), faceted by race.
# NOTE(review): geom_col() does not accept a `stat` argument in current
# ggplot2 -- stat="identity" here may warn or error; verify.
p1 <- ggplot(data=figdata, aes(x=age, y=n, fill=pred_gender))+
  geom_col(stat="identity", data=figdata[figdata$pred_gender=="female",], width=1)+
  geom_col(stat="identity", data=figdata[figdata$pred_gender=="male",], aes(y=n*-1), width=1)+
  coord_flip()+
  theme_minimal()+
  scale_x_continuous(breaks=seq(10,70,10))+
  theme(panel.grid.minor.x=element_blank(),
        panel.grid.major.x=element_blank())+
  facet_wrap(.~race)
##### GENDER BY TOPIC #######

# Per-topic gender counts; combine some related topics into summed rows,
# then drop the originals (and several other rows) before plotting.
topic <- read.csv("data/processed/topic_gender.csv")
topic <- topic %>% arrange(desc(Percent.female))
topic$Topic <- as.character(topic$Topic)
topic[,2:6] <- lapply(topic[,2:6], function(x) as.numeric(x))

# Combined topic rows built by summing component-topic counts.
# NOTE(review): these hard-coded row indices depend on the arrange()
# above -- fragile if the input file changes.
topic[23,] <- c("Sustainable farming", NA, sum(topic[1:2,3]), sum(topic[1:2,4]), NA, sum(topic[1:2,6]))
topic[24,] <- c("Innovation & climate change", NA, sum(topic[c(3,8),3]), sum(topic[c(3,8),4]), NA, sum(topic[c(3,8),6]))
topic[25,] <- c("High-level panels", NA, sum(topic[c(16,17),3]), sum(topic[c(16,17),4]), NA, sum(topic[c(16,17),6]))
topic <- topic[-c(1,2,3,8,16,17, 9,10,11,12,14,15),]
topic[,2:6] <- lapply(topic[,2:6], function(x) as.numeric(x))

# Recompute percent female from the (possibly summed) counts.
topic$Percent.female <- topic$Female / (topic$Male + topic$Female)

#bolded <- rep("plain", 22)
#bolded[10] <- "bold"

# Horizontal bars of percent female by topic; the dashed line marks
# 43.4 -- NOTE(review): presumably the overall percent female; confirm.
ggplot(data=topic, aes(x=reorder(Topic, Percent.female), y=Percent.female*100))+
  geom_col(aes(alpha = Percent.female))+
  coord_flip()+
  geom_hline(yintercept=43.4, linetype = "dashed")+
  ggridges::theme_ridges(center_axis_labels = TRUE)+
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        #axis.text.y=element_text(face = bolded),
        legend.position = "none")+
  xlab("")+
  ylab("Percent female")
##### AGE BY TOPIC ######

age <- read.csv("data/processed/age_data.csv")

# Flag users above the 37.14 threshold.
# NOTE(review): `over` is never used below; 37.14 is presumably a mean
# age computed elsewhere -- confirm.
age$over <- "No"
age$over[age$age > 37.14] <- "Yes"

# Per-topic mean and median age (quantile 0.5), used to order the ridges.
age <- age %>%
  group_by(name) %>%
  mutate(age_mean = mean(age),
         quantile = quantile(age, 0.5))

# Bold the 11th y-axis topic label; the other 19-vector entries stay plain.
bolded <- rep("plain", 19)
bolded[11] <- "bold"

# Two-colour title built from overlaid grobs (red "overall").
# NOTE(review): textGrob()/gpar() come from the grid package, which is
# not attached in this script; also t1/t2 are not used in the plot below.
t1 <- textGrob(expression("Average age of Twitter users by topic and " * phantom("overall")),
               x = 0.5, y = 1.1, gp = gpar(col = "black"))
t2 <- textGrob(expression("Average age of Twitter users by topic and " * phantom("overall")),
               x = 0.5, y = 1.1, gp = gpar(col = "red"))

# Ridgeline density of age per topic, ordered by median age, with the
# per-topic median drawn dashed and a red reference line at 36.
ggplot(data = age, aes(x=age, y = reorder(name, quantile)))+
  stat_density_ridges(calc_ecdf = TRUE, quantile_lines = TRUE, quantiles = 2, linetype = "dashed", fill = "grey80")+
  geom_density_ridges(alpha=0)+
  geom_vline(xintercept=36, alpha = 0.7, color = "red", linetype = "dashed")+
  theme_ridges(center_axis_labels = TRUE)+
  theme(axis.text.y=element_text(face = bolded))+
  xlab("Age")+
  ylab("")+
  ggtitle("Average age of Twitter users by topic and overall")
#### RACE DATA #####

# Topic-cluster embeddings joined with predicted ethnicity/gender, then
# written back over the same .rds file.
# NOTE(review): the script mutates its own input on disk -- rerunning
# re-joins onto already-joined data; confirm intended.
dem <- readRDS("data/processed/ethn_topic_embeddings.rds")
# NOTE(review): dem[,] is just a full copy; dem_topics is never used again.
dem_topics <- dem[,]
ethn <- read.csv("data/processed/merged_age_gender_race.csv")
ethn <- ethn[!is.na(ethn$race) & !is.na(ethn$pred_gender),]
# Keep only columns 5 and 10 -- presumably the user name and race;
# confirm against the CSV schema.
ethn <- ethn[,c(5,10)]
dem$clust <- dem$clust$V1
### DEM: user.name, ETHN: user.name
dem <- left_join(dem, ethn)
saveRDS(dem, "data/processed/ethn_topic_embeddings.rds")

# Broad race recode (same mapping used for the Wikipedia figure above).
dem$race <- as.character(dem$race)
dem$race[dem$race == "GreaterEuropean,WestEuropean,Hispanic"] <- "Latino"
dem$race[dem$race %in% c("Asian,GreaterEastAsian,Japanese",
                         "Asian,GreaterEastAsian,EastAsian")] <- "Asian"
dem$race[dem$race %in% c("GreaterEuropean,WestEuropean,Germanic",
                         "GreaterEuropean,WestEuropean,Italian",
                         "GreaterEuropean,WestEuropean,French",
                         "GreaterEuropean,British",
                         "GreaterEuropean,WestEuropean,Nordic",
                         "GreaterEuropean,Jewish",
                         "GreaterEuropean,EastEuropean"
)] <- "Caucasian"
dem$race[dem$X__name == "nan nan"] <- NA
dem <- dem[!is.na(dem$race),]

# Share of each race within each cluster.
demographic_clust <- dem %>%
  group_by(race, clust) %>%
  summarise(n = n()) %>%
  na.omit() %>%
  ungroup() %>%
  group_by(clust) %>%
  mutate(n = n/sum(n))

# Each race's deviation (diff) from its cross-cluster average share.
demographic_clust <- demographic_clust %>%
  group_by(race) %>%
  mutate(avg = mean(n))%>%
  ungroup() %>%
  mutate(diff = n-avg)

# Interactive inspection used to pick the cluster ids below.
demographic_clust %>% filter(grepl("Caucasian", race)) %>% arrange(desc(diff)) %>% View()

# Cluster ids selected by hand from the View() above.
most_white <- c(85, 68, 72, 88, 161, 117, 142, 191, 18, 12)
least_white <- c(82, 108, 14, 150, 50, 107, 152, 99, 42, 16)

w_percent <- demographic_clust %>%
  filter(race == "Caucasian") %>%
  filter(clust %in% c(most_white, least_white))

# NOTE(review): all_topics (cluster -> topic label lookup) is not
# defined in this script; confirm where it comes from.
white_df <- data.frame(Cluster = c(most_white, least_white))
white_df <- left_join(white_df, all_topics, by = c("Cluster" = "Clust"))
white_df <- dplyr::inner_join(white_df, w_percent, by = c("Cluster" = "clust"))

# Bars of percent-white by topic for the selected clusters.
ggplot(data=white_df, aes(x=reorder(Topic, n), y=n*100))+
  geom_col(aes(alpha = n))+
  coord_flip()+
  ggridges::theme_ridges(center_axis_labels = TRUE)+
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        #axis.text.y=element_text(face = bolded),
        legend.position = "none")+
  xlab("")+
  ylab("Percent white")

#asian -- top/bottom 10 clusters by deviation of Asian share
most_asian <- demographic_clust %>% filter(grepl("Asian", race)) %>% group_by(clust) %>% summarise(n = sum(n), diff = sum(n) - sum(avg)) %>% ungroup() %>% arrange(desc(diff)) %>% top_n(10)
least_asian <- demographic_clust %>% filter(grepl("Asian", race)) %>% group_by(clust) %>% summarise(n = sum(n), diff = sum(n) - sum(avg)) %>% ungroup() %>% arrange(desc(diff)) %>% top_n(-10)
asian_df <- rbind(most_asian, least_asian)
asian_df <- left_join(asian_df, all_topics, by = c("clust" = "Clust"))

#latino -- same construction
most_latino <- demographic_clust %>% filter(grepl("Latino", race)) %>% group_by(clust) %>% summarise(n = sum(n), diff = sum(n) - sum(avg)) %>% ungroup() %>% arrange(desc(diff)) %>% top_n(10)
least_latino <- demographic_clust %>% filter(grepl("Latino", race)) %>% group_by(clust) %>% summarise(n = sum(n), diff = sum(n) - sum(avg)) %>% ungroup() %>% arrange(desc(diff)) %>% top_n(-10)
latino <- rbind(most_latino, least_latino)
latino <- left_join(latino, all_topics, by = c("clust" = "Clust"))

#black -- same construction
most_black <- demographic_clust %>% filter(grepl("African", race)) %>% group_by(clust) %>% summarise(n = sum(n), diff = sum(n) - sum(avg)) %>% ungroup() %>% arrange(desc(diff)) %>% top_n(10)
least_black <- demographic_clust %>% filter(grepl("African", race)) %>% group_by(clust) %>% summarise(n = sum(n), diff = sum(n) - sum(avg)) %>% ungroup() %>% arrange(desc(diff)) %>% top_n(-10)
black <- rbind(most_black, least_black)
black <- left_join(black, all_topics, by = c("clust" = "Clust"))
|
53510432d7b62e39d118cbd8aea15688d814ab95
|
2d628fbf686e4dbf9284cd213dc2f7ef0e0ba758
|
/plot3.R
|
2a09b773798f2bc9fa4d622564ef330fe2aadcef
|
[] |
no_license
|
ssendhil/ExData_Plotting1
|
dbdca063207520107f3ef8e297b43ecc67d30775
|
e0be507716fb7261cfb6dcd94f33adaf8c7e6f7a
|
refs/heads/master
| 2020-06-13T18:25:56.237191
| 2019-07-02T00:56:29
| 2019-07-02T00:56:29
| 194,748,502
| 0
| 0
| null | 2019-07-01T22:00:29
| 2019-07-01T22:00:29
| null |
UTF-8
|
R
| false
| false
| 1,017
|
r
|
plot3.R
|
# Exploratory Data Analysis course project, plot 3: household energy
# sub-metering over the two-day window 2007-02-01 .. 2007-02-02.
# NOTE(review): setwd() in a script is fragile -- the path breaks on any
# other machine; consider relative paths.
setwd("~/Desktop/Coursera/4_Exploratory_Data_Analysis/Project 1")

# "?" marks missing values in the source file.
plot3_data <- read.table("household_power_consumption.txt", header = T,
                         sep = ";", na.strings = "?")

# Fix dates (source format is day/month/year)
plot3_data$Date <- as.Date(plot3_data$Date, format = "%d/%m/%Y")

# Subset the data to the two days of interest
plot3_data_subset <- subset(plot3_data, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))

# Fix date & time: combine Date and Time into a POSIXct timestamp
plot3_data_subset$datetime <- as.POSIXct(paste(as.Date(plot3_data_subset$Date), plot3_data_subset$Time))

# Plot 3: three sub-metering series on one set of axes with a legend
plot(plot3_data_subset$Sub_metering_1 ~ plot3_data_subset$datetime,
     type = "l", ylab = "Energy sub metering", xlab = "")
lines(plot3_data_subset$Sub_metering_2 ~ plot3_data_subset$datetime, col = "Red")
lines(plot3_data_subset$Sub_metering_3 ~ plot3_data_subset$datetime, col = "Blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Copy the on-screen plot to a 480x480 PNG.
# NOTE(review): dev.copy() rescales the screen device's rendering; output
# can differ slightly from opening png() before plotting -- confirm.
dev.copy(png, file = "plot3.png", height = 480, width = 480)
dev.off()
|
9c0369912e20acfd7860b3b36467a6419930dc2e
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/gtfsrouter/tests/testthat.R
|
336a8901746b2072277b7255aef547710e171a81
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
testthat.R
|
# Standard testthat entry point: runs all unit tests under tests/testthat/
# for the gtfsrouter package (invoked by R CMD check).
library(testthat)
library(gtfsrouter)

test_check("gtfsrouter")
|
ab34bac0fdedbff8adc080a49addf188c2ce32ce
|
5e7733232f464b41fc94a3ba19fc2dc46a64ca8f
|
/bc.R
|
e8156f8b913675aedeb2c365c10d4fac0e1ac974
|
[] |
no_license
|
ryninho/functions
|
5e6d345d70ad4b1a8434fa5fbe5ea4917f3bc020
|
78c13a7e833bef62eafb52bcfd6207629ea9cf4b
|
refs/heads/master
| 2021-01-17T07:25:35.195697
| 2017-10-02T04:07:53
| 2017-10-02T04:07:53
| 20,424,099
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 422
|
r
|
bc.R
|
#' Calculate the Bhattacharyya coefficient of a pair of discrete distributions.
#'
#' Missing probabilities (\code{NA}) in either vector are treated as 0.
#'
#' https://en.wikipedia.org/wiki/Bhattacharyya_distance
#' @param p,q Numeric vectors of probabilities of equal length.
#' @return The Bhattacharyya coefficient of \code{p} and \code{q}.
#' @examples
#' bc(c(.5, .25, .25), c(1, 0, 0))
#' bc(c(.2, .6, .2), c(.4, .2, .4))
bc <- function(p, q) {
  # Guard against silent recycling: the coefficient is only meaningful
  # for two distributions over the same support, so mismatched lengths
  # are almost certainly caller error.
  stopifnot(is.numeric(p), is.numeric(q), length(p) == length(q))
  # NA probabilities contribute nothing to the overlap.
  p[is.na(p)] <- 0
  q[is.na(q)] <- 0
  sum(sqrt(p * q))
}
|
9a23161a8ac3bfebe0cd5c855e4554e4d1738fd0
|
51f38bc84a50b94f8e52eb368565fd521b17ac38
|
/sampleFromTau2Dist.R
|
2bd50c4c34eab361fb50841a61192d4e6bb9ba69
|
[] |
no_license
|
tlafarge/NICOB_app_public
|
ce1bc91c0538eed48ea889566ea03b0170385d77
|
0389374a0cbf7f43abb26fd8c3158bd7ebcacf37
|
refs/heads/master
| 2022-09-13T14:59:19.784305
| 2022-09-01T21:10:16
| 2022-09-01T21:10:16
| 235,769,104
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,310
|
r
|
sampleFromTau2Dist.R
|
######################################################################
##
## FILE : sampleFromTau2Dist.R
##
## AUTHOR : Amanda Koepke & Antonio Possolo
## MODIFICATION: 2016-Mar-31
##
## INPUT : y = Numeric vector with values measured by the labs
## sigma = Numeric vector with standard uncertainties
## associated with measured values
##
## OUTPUT: Returns one sample from the approximate distribution for tau^2,
## sampled by simulating tau^2_M from its approximate distribution, a
## location-shifted, scaled gamma (derived in Biggerstaff and Tweedie (1997))
## and then the simulated tau^2=max(0,tau^2_M).
######################################################################
sampleFromTau2Dist <- function(y, sigma) {

  ######################################################################
  ## One draw from the approximate distribution of the between-lab
  ## variance tau^2: tau^2_M is simulated from its location-shifted,
  ## scaled gamma approximation (Biggerstaff and Tweedie, 1997) and the
  ## returned draw is tau^2 = max(0, tau^2_M).
  ##
  ## y     : numeric vector of values measured by the labs
  ## sigma : numeric vector of associated standard uncertainties
  ##
  ## Changes vs. the original: removed dead code (unused top-level S3,
  ## unused S3 inside E_Q, unused tau2_hat_DL) and factored the
  ## duplicated zero-variance guard; the rgamma draw is unchanged.
  ######################################################################

  ## Data-specific quantities
  K <- length(y)      # number of labs
  w <- sigma^-2       # inverse-variance weights

  ## Sums of powers of the weights, S_r = sum(w^r)
  S_r <- function(w, r) {
    sum(w^r)
  }
  S1 <- S_r(w, 1)
  S2 <- S_r(w, 2)

  ## Scale factor linking Cochran's Q to tau^2_M
  c <- S1 - S2/S1

  ## Weighted mean and Cochran's Q statistic
  muhat <- sum(w*y)/sum(w)
  Q <- sum(w*(y - muhat)^2)

  ## Method-of-moments estimate of tau^2 (may be negative)
  if ((S1 - S2/S1) != 0)
    tau2_hat_M <- (Q - (K - 1))/(S1 - S2/S1)
  else
    tau2_hat_M <- 0

  ######################################################################
  ## Expected value and variance of Cochran's Q, equations (5) and (7)
  ## of Biggerstaff and Tweedie (1997).
  ######################################################################
  E_Q <- function(tau2, weight_vec) {
    S1 <- S_r(weight_vec, 1)
    S2 <- S_r(weight_vec, 2)
    K <- length(weight_vec)
    (K - 1) + (S1 - S2/S1)*tau2
  }
  Var_Q <- function(tau2, weight_vec) {
    S1 <- S_r(weight_vec, 1)
    S2 <- S_r(weight_vec, 2)
    S3 <- S_r(weight_vec, 3)
    K <- length(weight_vec)
    2*(K - 1) + 4*(S1 - S2/S1)*tau2 + 2*(S2 - 2*S3/S1 + S2^2/S1^2)*tau2^2
  }

  ######################################################################
  ## Rate (lambda) and shape (r) parameters of the gamma approximation
  ## to the distribution of Q. When all standard uncertainties are
  ## equal, Var_Q is 0 and the ratios are undefined; substituting a tiny
  ## variance (1e-10, same constant as the original code) keeps the
  ## computation finite and effectively makes the sampler return 0.
  ######################################################################
  safe_var <- function(tau2, data_w) {
    v <- Var_Q(tau2, weight_vec = data_w)
    if (v == 0) v <- 1e-10
    v
  }
  lambda_tau2 <- function(tau2, data_w) {
    E_Q(tau2, weight_vec = data_w)/safe_var(tau2, data_w)
  }
  r_tau2 <- function(tau2, data_w) {
    (E_Q(tau2, weight_vec = data_w))^2/safe_var(tau2, data_w)
  }

  ######################################################################
  ## Simulate tau^2_M from its approximate gamma distribution, then map
  ## to a draw from the approximate distribution of tau^2.
  ######################################################################
  r <- r_tau2(tau2_hat_M, w)
  lambda <- lambda_tau2(tau2_hat_M, w)
  transformed_tau2_M <- rgamma(n = 1, shape = r, scale = 1/lambda)
  if (c != 0)
    r_tau2_DL <- max(0, (transformed_tau2_M - (K - 1))/c)
  else
    r_tau2_DL <- 0

  return(r_tau2_DL)
}
|
1eceaa4cadf4d5421719fd32fd20cf2f5dd9f9fb
|
2805259234d195d4982709427e7301092c1cf951
|
/R/asn.R
|
3be691c03bf14bdd41624c7a6ad10d273cd2a373
|
[] |
no_license
|
hrbrmstr/cymruservices
|
d5d1c30290179225d24a30ef1f13550380479de3
|
6bdde182f4555b2a8e40f5d24aac210f9a185b9d
|
refs/heads/master
| 2021-01-17T13:07:16.606407
| 2018-09-16T17:05:03
| 2018-09-16T17:05:03
| 39,524,054
| 7
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,654
|
r
|
asn.R
|
# the documented, memoised versions of these are at the bottom
.bulk_origin <- function(ips, timeout=getOption("timeout")) {

  # Bulk IP-to-ASN origin lookup against Team Cymru's whois service.
  host <- "v4.whois.cymru.com"
  port <- 43

  # Result columns; also used to build the all-NA fallback frame.
  col_names <- c("as", "ip", "bgp_prefix", "cc", "registry", "allocated", "as_name")

  # All-NA fallback (one row per queried IP), returned whenever the
  # service cannot be reached or sends no data. The original duplicated
  # this construction at both failure points; factored out here.
  na_result <- function() {
    out <- as.data.frame(setNames(rep(list(rep(NA, length(ips))), length(col_names)),
                                  col_names))
    class(out) <- c("tbl_df", "tbl", "data.frame")
    out
  }

  # Compose the bulk query: one IP per line between begin/end markers;
  # "verbose" requests the full column set.
  cmd <- "begin\nverbose\n"
  ips_c <- paste(unlist(ips), collapse="\n")
  cmd <- sprintf("%s%s\nend\n", cmd, ips_c)

  # sock() is a project helper (not shown in this file); it appears to
  # return a list whose $result holds the open connection, or NULL on
  # failure -- confirm against its definition.
  con <- sock(host=host, port=port, blocking=TRUE, open="r+", timeout=timeout)
  if (is.null(con$result)) {
    message("Error opening connection to v4.whois.cymru.com")
    return(na_result())
  }
  con <- con$result

  # Post the query and collect the whole response.
  cat(cmd, file=con)
  response <- readLines(con)
  close(con)

  if (length(response) == 0) {
    message("Error reading from connection to v4.whois.cymru.com")
    return(na_result())
  }

  # Drop the header line, split the pipe-delimited fields, and trim
  # whitespace (trim_df() is a project helper).
  trim_df(
    read.csv(
      textConnection(tail(response, -1)),
      stringsAsFactors = FALSE,
      sep = "|",
      header = FALSE
    )
  ) -> response

  names(response) <- col_names
  # The service writes the literal string "NA" for unknown fields.
  response[(response == "NA")] <- NA

  class(response) <- c("tbl_df", "tbl", "data.frame")
  return(response)
}
.bulk_peer <- function(ips, timeout=getOption("timeout")) {

  # Bulk IP-to-BGP-peer lookup against Team Cymru's whois service.
  host <- "peer.whois.cymru.com"
  # host <- "v4-peer.whois.cymru.com"
  port <- 43

  # Result columns; also used to build the all-NA fallback frame.
  col_names <- c("peer_as", "ip", "bgp_prefix", "cc", "registry", "allocated", "peer_as_name")

  # All-NA fallback (one row per queried IP), returned whenever the
  # service cannot be reached or sends no data. The original duplicated
  # this construction at both failure points; factored out here.
  na_result <- function() {
    out <- as.data.frame(setNames(rep(list(rep(NA, length(ips))), length(col_names)),
                                  col_names))
    class(out) <- c("tbl_df", "tbl", "data.frame")
    out
  }

  # Compose the bulk query: one IP per line between begin/end markers;
  # "verbose" requests the full column set.
  cmd <- "begin\nverbose\n"
  ips_c <- paste(unlist(ips), collapse="\n")
  cmd <- sprintf("%s%s\nend\n", cmd, ips_c)

  # sock() is a project helper (not shown in this file); it appears to
  # return a list whose $result holds the open connection, or NULL on
  # failure -- confirm against its definition.
  con <- sock(host=host, port=port, blocking=TRUE, open="r+", timeout=timeout)
  if (is.null(con$result)) {
    # FIX: the original message named "v4-peer.whois.cymru.com" although
    # the host actually queried is "peer.whois.cymru.com"; report the
    # real host instead.
    message("Error opening connection to ", host)
    return(na_result())
  }
  con <- con$result

  # Post the query and collect the whole response.
  cat(cmd, file=con)
  response <- readLines(con)
  close(con)

  if (length(response) == 0) {
    message("Error reading from connection to ", host)
    return(na_result())
  }

  # Drop the header line, split the pipe-delimited fields, and trim
  # whitespace (trim_df() is a project helper).
  trim_df(
    read.csv(
      textConnection(tail(response, -1)),
      stringsAsFactors = FALSE,
      sep = "|",
      header = FALSE
    )
  ) -> response

  names(response) <- col_names
  # The service writes the literal string "NA" for unknown fields.
  response[(response == "NA")] <- NA

  class(response) <- c("tbl_df", "tbl", "data.frame")
  return(response)
}
#' Query Team Cymru's bulk whois service for ASN allocation data.
#'
#' @param asns vector (or list) of ASN ids, with or without the "AS" prefix
#' @param timeout numeric: socket timeout in seconds
#' @return data frame (tibble-classed) with columns as, cc, registry,
#'   allocated, as_name; all-NA rows if the service is unreachable or
#'   returns nothing.
.bulk_origin_asn <- function(asns, timeout=getOption("timeout")) {

  host <- "v4.whois.cymru.com"
  port <- 43

  # Result columns, shared by the success path and the all-NA error fallback
  cols <- c("as", "cc", "registry", "allocated", "as_name")

  # Build a one-row-per-input, all-NA tibble-classed frame for error returns
  na_result <- function() {
    out <- as.data.frame(matrix(NA, nrow = length(asns), ncol = length(cols)))
    names(out) <- cols
    class(out) <- c("tbl_df", "tbl", "data.frame")
    out
  }

  # The service expects "AS"-prefixed ids; add the prefix where missing
  ids <- ifelse(grepl("^AS", asns), asns, sprintf("AS%s", asns))
  cmd <- sprintf("begin\nverbose\n%s\nend\n",
                 paste(unlist(ids), collapse="\n"))

  # Open a raw whois (TCP/43) connection and post the query
  con <- sock(host=host, port=port, blocking=TRUE, open="r+", timeout=timeout)
  if (is.null(con$result)) {
    message("Error opening connection to v4.whois.cymru.com")
    return(na_result())
  }

  con <- con$result
  cat(cmd, file=con)
  response <- readLines(con)
  close(con)

  if (length(response) == 0) {
    message("Error reading from connection to v4.whois.cymru.com")
    return(na_result())
  }

  # Drop the banner line, then parse the pipe-delimited fields;
  # read.csv(text=) avoids leaking an unclosed textConnection()
  response <- trim_df(
    read.csv(text = tail(response, -1),
             stringsAsFactors = FALSE,
             sep = "|",
             header = FALSE)
  )
  names(response) <- cols
  # the service uses the literal string "NA" for unknown fields
  response[(response == "NA")] <- NA
  class(response) <- c("tbl_df", "tbl", "data.frame")
  return(response)
}
#' Retrieves BGP Origin ASN info for a list of IPv4 addresses
#'
#' @param ips vector of IPv4 address (character - dotted-decimal)
#' @param timeout numeric: the timeout (in seconds) to be used for this connection.
#' Beware that some OSes may treat very large values as zero: however the
#' POSIX standard requires values up to 31 days to be supported.
#' @return data frame of BGP Origin ASN lookup results
#' \itemize{
#' \item \code{as} - AS #
#' \item \code{ip} - IPv4 (passed in)
#' \item \code{bgp_prefix} - BGP CIDR
#' \item \code{cc} - Country code
#' \item \code{registry} - Registry it falls under
#' \item \code{allocated} - date it was allocated
#' \item \code{as_name} - AS name
#' }
#' If a socket connection cannot be made (i.e. a network problem on your
#' end or a service/network problem on their end), all columns will be
#' \code{NA}.
#' @note The Team Cymru's service is NOT a GeoIP service! Do not use this
#' function for that as your results will not be accurate.
#' Data is updated every 4 hours. Also,
#' A direct connection to TCP Port 43 (WHOIS) is required for most of these
#' API functions to work properly.
#' @seealso \url{http://www.team-cymru.org/IP-ASN-mapping.html}
#' @export
#' @examples \dontrun{
#' bulk_origin(c("68.22.187.5", "207.229.165.18", "198.6.1.65"))
#' }
bulk_origin <- memoise::memoise(.bulk_origin)
#' Retrieves BGP Peer ASN info for a list of IPv4 addresses
#'
#' @param ips vector of IPv4 address (character - dotted-decimal)
#' @param timeout numeric: the timeout (in seconds) to be used for this connection.
#' Beware that some OSes may treat very large values as zero: however the
#' POSIX standard requires values up to 31 days to be supported.
#' @return data frame of BGP Peer ASN lookup results
#' \itemize{
#' \item \code{peer_as} - peer AS #
#' \item \code{ip} - IPv4 (passed in)
#' \item \code{bgp_prefix} - BGP CIDR block
#' \item \code{cc} - Country code
#' \item \code{registry} - Registry it falls under
#' \item \code{allocated} - date allocated
#' \item \code{peer_as_name} - peer name
#' }
#' If a socket connection cannot be made (i.e. a network problem on your
#' end or a service/network problem on their end), all columns will be
#' \code{NA}.
#' @note The Team Cymru's service is NOT a GeoIP service! Do not use this
#' function for that as your results will not be accurate.
#' Data is updated every 4 hours. Also,
#' A direct connection to TCP Port 43 (WHOIS) is required for most of these
#' API functions to work properly.
#' @seealso \url{http://www.team-cymru.org/IP-ASN-mapping.html}
#' @export
#' @examples \dontrun{
#' bulk_peer(c("68.22.187.5", "207.229.165.18", "198.6.1.65"))
#' }
bulk_peer <- memoise::memoise(.bulk_peer)
#' Retrieves BGP Origin ASN info for a list of ASN ids
#'
#' @param asns character vector of ASN ids, with or without the "AS" prefix
#' @param timeout numeric: the timeout (in seconds) to be used for this connection.
#' Beware that some OSes may treat very large values as zero: however the
#' POSIX standard requires values up to 31 days to be supported.
#' @return data frame of BGP Origin ASN lookup results
#' \itemize{
#' \item \code{as} - AS #
#' \item \code{cc} - Country code
#' \item \code{registry} - registry it falls under
#' \item \code{allocated} - when it was allocated
#' \item \code{as_name} - name associated with the allocation
#' }
#' If a socket connection cannot be made (i.e. a network problem on your
#' end or a service/network problem on their end), all columns will be
#' \code{NA}.
#' @note The Team Cymru's service is NOT a GeoIP service! Do not use this
#' function for that as your results will not be accurate.
#' Data is updated every 4 hours. Also,
#' A direct connection to TCP Port 43 (WHOIS) is required for most of these
#' API functions to work properly.
#' @seealso \url{http://www.team-cymru.org/IP-ASN-mapping.html}
#' @export
#' @examples \dontrun{
#' bulk_origin_asn(c(22822, 1273, 2381, 2603, 2914, 3257, 3356, 11164,
#'                   174, 286, 1299, 2914, 3257, 3356, 3549, 22822))
#' }
bulk_origin_asn <- memoise::memoise(.bulk_origin_asn)
|
a5e10d5c5a8280befcd900415ebda08957b1db8f
|
0c97842a4b2465063c91dc82748f6376e0bb0f9e
|
/man/dict_all_geocomponent_2010.Rd
|
eb25671d6bb6774dee42cf7e5177b97106a45d17
|
[] |
no_license
|
raivtash/totalcensus
|
dcc2b11a03273b8a66af0b0417187c903cbc2195
|
a9542819a58081e7978c0f8b99c02b32d0d1040b
|
refs/heads/master
| 2020-04-10T10:59:27.234397
| 2018-12-08T15:13:37
| 2018-12-08T15:13:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 824
|
rd
|
dict_all_geocomponent_2010.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_acs.R, R/data_decennial.R
\docType{data}
\name{dict_all_geocomponent_2010}
\alias{dict_all_geocomponent_2010}
\title{List of all geographic components, 2010 version}
\format{A data.table with 114 rows and 2 variables:
\describe{
\item{code}{code for the geocomponent, such as "01" and "M3"}
\item{geo_component}{description of the geographic component}
}}
\source{
2010 Census Summary File 1
\href{https://www.census.gov/prod/cen2010/doc/sf1.pdf}{technical documentation}
page 6-15
}
\usage{
dict_all_geocomponent_2010
}
\description{
List of all geographic components, 2010 version
This dataset contains all available geographic components and codes.
}
\keyword{datasets}
|
fa666656370fa3606bbbcef0bdd2fe1e31752211
|
ab06f734214a5d90724da9389932a653845079cd
|
/run_analysis.R
|
9595ca3b9b0a82fd8ee31f2e8e4ad1331fa9deba
|
[] |
no_license
|
jlabeaga/coursera_clean_data
|
3eb4cdbb8dcc106079415bc43059dd9db7689a9c
|
e2c7153e2d6ee4b089cb4a97df848019293b53db
|
refs/heads/master
| 2021-01-10T09:50:02.517398
| 2016-02-27T11:34:18
| 2016-02-27T11:34:18
| 52,613,893
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,049
|
r
|
run_analysis.R
|
# run_analysis.R — builds a tidy summary of the UCI HAR smartphone dataset.
# Expects the unzipped dataset layout (train/, test/, features.txt,
# activity_labels.txt) in the working directory; writes tidy_data.txt.
library(plyr)

# ===============================================================================
# Step 1:
# Merges the training set (read from X_train.txt) and the test set (read from
# X_test.txt) to create one data set.
# ===============================================================================

# read train data
measured_train <- read.table("train/X_train.txt")
activity_train <- read.table("train/y_train.txt")
subject_train <- read.table("train/subject_train.txt")

# read test data
measured_test <- read.table("test/X_test.txt")
activity_test <- read.table("test/y_test.txt")
subject_test <- read.table("test/subject_test.txt")

# stack train on top of test for measurements, activities and subjects
measured_data <- rbind(measured_train, measured_test)
activity_data <- rbind(activity_train, activity_test)
subject_data <- rbind(subject_train, subject_test)

# ===============================================================================
# Step 2
# Extracts only the measurements on the mean and standard deviation for
# each measurement (only the features named with the "mean()" or "std()"
# particles) and remove the rest of measurements.
# ===============================================================================

features <- read.table("features.txt")

# indexes of the feature columns whose names contain mean() or std()
mean_and_std_features <- grep("-(mean|std)\\(\\)", features[, 2])

# keep only the desired columns and label them with the feature names
measured_data <- measured_data[, mean_and_std_features]
names(measured_data) <- features[mean_and_std_features, 2]

# ===============================================================================
# Step 3:
# Adds descriptive activity names to name the activity for each observation.
# ===============================================================================

activities <- read.table("activity_labels.txt")

# replace numeric activity codes with their text labels
activity_data[, 1] <- activities[activity_data[, 1], 2]
names(activity_data) <- "activity"

# ===============================================================================
# Step 4
# Appropriately labels the data set with descriptive variable names.
# ===============================================================================

names(subject_data) <- "subject"

# bind all the columns in one data frame
all_data <- cbind(measured_data, activity_data, subject_data)

# ===============================================================================
# Step 5
# From the data set in step 4, creates a second, independent tidy data set with
# the average of each variable for each activity and each subject.
# ===============================================================================

# average the 66 measurement columns, grouped by subject and activity
tidy_data <- ddply(all_data, .(subject, activity), function(x) colMeans(x[, 1:66]))

# save the tidy data set; use the full argument name "row.names"
# (the original "row.name" relied on partial argument matching)
write.table(tidy_data, "tidy_data.txt", row.names = FALSE)
|
308e7d72c86317a1b654df53c54100120d563100
|
c267b09529ad20ba29c23b9922cbb94ce5b2e2d8
|
/man/createGroupFromInput.Rd
|
f886b8b5a32504d516f116d28497fdab6bf9623e
|
[] |
no_license
|
IlyaFinkelshteyn/psichomics
|
7f004cffcdabf028ef3094b3a6a6b290fb377cd4
|
a7f2db7954f6962ff7a487369c7b367a1d999daa
|
refs/heads/master
| 2021-01-11T06:08:04.688256
| 2016-09-24T01:40:53
| 2016-09-24T01:40:53
| 69,070,120
| 0
| 0
| null | 2016-09-24T00:37:37
| 2016-09-24T00:37:36
| null |
UTF-8
|
R
| false
| true
| 604
|
rd
|
createGroupFromInput.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/groups.R
\name{createGroupFromInput}
\alias{createGroupFromInput}
\title{Set new groups according to the user input}
\usage{
createGroupFromInput(session, input, output, dataset, datasetName)
}
\arguments{
\item{session}{Shiny session}
\item{input}{Shiny input}
\item{output}{Shiny output}
\item{dataset}{Data frame or matrix: dataset of interest}
\item{datasetName}{Character: name of the dataset}
}
\value{
Matrix with the group names and respective indexes
}
\description{
Set new groups according to the user input
}
|
561cca0705692f9d7976f319ae489f7f0a916ec2
|
ae7f71577a5d730d7712ba6aa196d770827c28f1
|
/ngs/R/mergeRData.R
|
c9c9ff884087a071ce0fef90876284d9c287fbb9
|
[] |
no_license
|
plijnzaad/phtools
|
e688b036e72734512159dc1cede1305394910284
|
650edbc855c9772662cda34f67fae15b070437e5
|
refs/heads/master
| 2023-04-07T03:37:06.355512
| 2023-03-23T12:23:15
| 2023-03-23T12:23:15
| 46,286,525
| 5
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,190
|
r
|
mergeRData.R
|
#!/usr/bin/env Rscript
## merge objects found in an .RData file into a big RData file.
## Command-line driver: loads each input .rda into its own environment,
## selects the objects whose names match --regexp, and concatenates the
## same-named objects across all files with c() before saving to --out.
warning("Running on host ", Sys.getenv("HOSTNAME"), "\n")
library(parseArgs, verbose=FALSE)
# Emit the usage text (via warning() so it reaches stderr/logs even when
# the script continues or aborts afterwards)
usage <- function()warning("Usage mergeRData [--regexp '^.*$' ] --out totals.rda *.rda
Merges objects inside RData files using the c() operator. Typically used on data that was
previously produced from split input.
Options:
 --regexp: comma-separated list of perl regular expressions to select object names
 --out: name of the output file (typically ending in .rda, and lacking the part
   that distinguishes the differnt inputs)
Note: if the resulting *.RData contains a *list* whose elements are of type X (say GRanges)
rather than one big GRanges, it means that during the merging library X was not loaded.
The solution is to add more ``library(therelevantpackage)'' statements to the top of this
script.
")
# Parse the command line; the non-option trailing arguments (the input
# .rda files) land in args$.rest because of .allow.rest=TRUE
args <- parseArgs(.overview=usage,
                  out=NA,
                  regexp='.*',
                  dryrun=FALSE,
                  ## preeval='',
                  .allow.rest=TRUE)
rda.files <- args$.rest
if(is.null(rda.files)) {
  warning("No input arguments\n")
  usage()
  stop()
}
library(rtracklayer, verbose=FALSE) # this should force loading of most of the relevant bioc. libs
# --regexp may contain several patterns separated by "," or ";"
regexps <- unlist(strsplit(args$regexp, "[,;]"))
expected <-NULL #based on contents of first file; fixed after the first load
final <- new.env()  # accumulator environment holding the merged objects
## Return the subset of `names` (objects found in `file`) matched by at
## least one of the perl regular expressions in `regexps`.  Aborts when the
## patterns overlap (some object selected twice) or when nothing matches.
check.names <- function(file, names, regexps) {
  found <- NULL
  for (re in regexps)
    found <- c(found, names[grep(re, names, perl=TRUE)])
  if (any(duplicated(found))) {
    ## collapse= (not sep=, which is a no-op on a single vector) joins the
    ## duplicated names into one readable string
    dups <- paste(found[duplicated(found)], collapse="; ")
    stop("file ", file, ": regular expressions resulted in duplicated object names, ",
         dups, ". Be more specific\n")
  }
  if (is.null(found) || length(found) == 0)
    stop("In file ", file, ", no objects matching any of '", regexps, "' was found.")
  found
}
## Abort when the object names found in `file` differ (as a set) from the
## reference set established by the first input file; silent otherwise.
check.sets <- function(file, expected, found) {
  if (!setequal(expected, found)) {
    ## collapse the setdiff vectors so multiple names don't run together
    stop("File ", file, ": expected, not found: ",
         paste(setdiff(expected, found), collapse = ", "),
         "\nFound, not expected: ",
         paste(setdiff(found, expected), collapse = ", "), "\n")
  }
}
## Merge loop: the first file fixes the reference set of object names and
## seeds the accumulator; every later file must contain exactly the same
## set, and its objects are appended with c() to the accumulated versions.
for (file in rda.files) {
  env <- new.env()
  ## NOTE: the original wrapped this in tryCatch(..., silent=FALSE), but
  ## `silent` is a try() argument, not a tryCatch() handler; errors simply
  ## propagate, which aborts the run on an unreadable file (as intended).
  load(file = file, envir = env)
  contents <- ls(envir = env)
  names <- check.names(file, contents, regexps)
  if (is.null(expected)) {
    expected <- names  # reference set, checked against all later files
    if (args$dryrun) {
      rest <- setdiff(contents, names)
      stop("Would try to merge the following objects in each file: ",
           paste(names, collapse = ", "), "\nand ignore: ",
           paste(rest, collapse = ", "), "\n")
    }
    for (name in names) {
      assign(name, get(name, envir = env), envir = final)
    }
  } else {
    check.sets(file, expected, names)
    for (name in names) {
      obj <- c(get(name, envir = final), get(name, envir = env)) # the actual merging
      dummy <- length(obj)  # force evaluation of the merge before assigning
      assign(name, obj, envir = final)
    }
  }
} # rda.files

save(file = args$out, list = expected, envir = final)
sessionInfo()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.