Dataset schema (one row per source file; each row's `content` field, the full file text, follows its metadata below):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–327 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–91 |
| license_type | string | 2 classes |
| repo_name | string | length 5–134 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 46 classes |
| visit_date | timestamp[us] | 2016-08-02 22:44:29 – 2023-09-06 08:39:28 |
| revision_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| committer_date | timestamp[us] | 1977-08-08 00:00:00 – 2023-09-05 12:13:49 |
| github_id | int64 (nullable) | 19.4k – 671M |
| star_events_count | int64 | 0 – 40k |
| fork_events_count | int64 | 0 – 32.4k |
| gha_license_id | string (nullable) | 14 classes |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-21 16:39:19 – 2023-09-14 21:52:42 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-25 01:21:32 – 2023-06-28 13:19:12 |
| gha_language | string (nullable) | 60 classes |
| src_encoding | string | 24 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 7 – 9.18M |
| extension | string | 20 classes |
| filename | string | length 1–141 |
| content | string | length 7 – 9.18M |

blob_id: d57ef78c8f49eac853cf21509184f2a335e4ae73 | directory_id: babac763748a83cc9e14e67236a9cc8a3e4ada34 | path: /man/set_panels.Rd | content_id: 96135e8c962cdcb96def6d50efd2154cc6e4b3bc
detected_licenses: [] | license_type: no_license | repo_name: cran/JOPS | snapshot_id: 84e0a06484f480a2ab6b01d40a7c386fdd635173 | revision_id: 932ab4e5c2117e91a682fbf0cb1c675a6234a82c | branch_name: refs/heads/master
visit_date: 2023-05-12T01:00:20.338969 | revision_date: 2021-06-03T10:00:17 | committer_date: 2021-06-03T10:00:17 | github_id: 341,417,657 | star_events_count: 1 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: true | length_bytes: 599 | extension: rd | filename: set_panels.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_panels.R
\name{set_panels}
\alias{set_panels}
\title{Prepare graphics layout for multiple panels}
\usage{
set_panels(rows = 1, cols = 1)
}
\arguments{
\item{rows}{number of rows.}
\item{cols}{number of columns.}
}
\value{
No useful return value; called for its side effect of preparing the
graphics layout for multiple panels.
}
\description{
Adapt margins and axes layout for multiple panels.
}
\references{
Eilers, P.H.C. and Marx, B.D. (2021). \emph{Practical Smoothing, The Joys of
P-splines.} Cambridge University Press.
}
\author{
Paul Eilers
}
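A minimal usage sketch based only on the \usage and \description fields above; the grid behavior is assumed (not confirmed here) to work like par(mfrow = c(rows, cols)):

library(JOPS)
set_panels(2, 3)                                    # 2 x 3 grid of panels
for (k in 1:6) plot(rnorm(50), main = paste("Panel", k))
set_panels()                                        # back to the default single panel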

blob_id: d683ddf104377023a9e7b0c9fa90109ee00d3633 | directory_id: 246189c0e240e174b9ca74e2a42bfecee79cc9e1 | path: /R/processPlantSurveys.R | content_id: 726af1214d29e82763668b093e988854a48e173d
detected_licenses: [] | license_type: no_license | repo_name: ksauby/GTMNERRproc | snapshot_id: f3bcd140578d710c9b013da83d9ac8d08e781eee | revision_id: fd5a073d5fd2690b6fde64a0313d1a3fdfe07645 | branch_name: refs/heads/master
visit_date: 2021-04-06T13:03:29.008590 | revision_date: 2017-11-15T20:35:53 | committer_date: 2017-11-15T20:35:53 | github_id: 83,352,082 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 11,743 | extension: r | filename: processPlantSurveys.R

#' Process GTMNERR Plant Survey Data
#'
#' @description Steps:
#' \itemize{
#' \item Fix column names
#' \item format dates
#' \item format PlantIDs
#' \item convert "999" values to NA
#' \item change "yes"/"no" values of the insect survey, missing, and dead columns to 1/0
#' \item convert measurement columns to numeric
#' \item add total segment column
#' \item add fruit count column
#' \item add fruit/flower count column
#' \item check for observations of 0 pads, 0 height, or 0 width; if there are observations, stop
#' \item add column, "DemographicSurvey"
#' \itemize{
#' \item survey 1 - spring/summer 2013
#' \item survey 2 - fall/winter 2013/2014
#' \item survey 3 - spring/summer 2014
#' \item survey 4 - winter 2015
#' \item survey 5 - spring/summer 2015
#' }
#' \item addFecundityYear
#' \itemize{
#' \item 2012 - Date >= "2012-12-02" & Date < "2013-05-01"
#' \item 2013 - Date >= "2013-05-01" & Date < "2014-05-01"
#' \item 2014 - Date >= "2014-05-01" & Date < "2015-05-01"
#' \item 2015 - Date >= "2015-05-01"
#' }
#' }
#' Column Names:
#' \itemize{
#' \item PlantMeasureID Record number in the Microsoft Access database
#' \item First_Observer_Initials Initials of primary observer (should always be KS, for Kristen Sauby)
#' \item Second_Observer_Initials Initials of secondary observer; CJP - Cory Penca; YP - Yani Paulay; KS - Kristen Sauby; JW: Juliana Welch; CW: Cedric Worman; AP: Adam Payton
#' \item Date Date
#' \item PlantID Unique number for the individual plant
#' \item Plant_collected Were plant samples collected?
#' \item Time
#' \item PlantPictures identifying numbers of photos taken
#' \item PlantPictures_Direction direction that the photo(s) was(were) taken
#' \item CA_t Presence/absence (1/0) of Cactoblastis cactorum
#' \item CACA_collected Were samples of Cactoblastis cactorum collected?
#' \item CACA_quantity Number of Cactoblastis cactorum samples collected
#' \item ME_t Presence/absence (1/0) of Melitara prodenialis
#' \item MEPR_collected Were samples of Melitara prodenialis collected?
#' \item MEPR_quantity Number of Melitara prodenialis samples collected
#' \item CH_t Presence/absence (1/0) of Chelinidea vittiger
#' \item CHVI_collected Were samples of Chelinidea vittiger collected?
#' \item CHVI_quantity Number of Chelinidea vittiger samples collected
#' \item DA_t Presence/absence (1/0) of Dactylopius species
#' \item DACT_collected Were samples of Dactylopius species collected?
#' \item DACT_quantity Number of Dactylopius species samples collected
#' \item Unknown_Moth_t Presence/absence (1/0) of unknown moth
#' \item UnknownMoth_collected Were samples of unknown moth collected?
#' \item UnknownMoth_quantity Number of unknown moth samples collected
#' \item Old_Moth_Evidence_t Evidence of past moth infestation
#' \item Old_Moth_Evidence_recent Whether evidence of past moth infestation appears recent or old
#' \item Fungus Presence/absence (1/0) of plant fungal infection
#' \item Gerstaeckeria Presence/absence (1/0) of Gerstaeckeria
#' \item Plant_Segments_total Number of segments
#' \item Plant_Segments_w_leaves New, green segments with leaves
#' \item Plant_Segments_wo_leaves Green segments without leaves
#' \item Plant_Segments_woody number of woody segments/trunks; these segments are entirely brown on the outside
#' \item Height_t maximum height in cm
#' \item Width_t maximum width in cm
#' \item Perpen_Width width, perpendicular to max width, in cm
#' \item Num_FlowerBuds Number of flower buds
#' \item Num_Fruit_red Number of red fruit
#' \item Num_Fruit_green Number of green fruit
#' \item Num_Flowers Number of flowers
#' \item Num_Fruit use this when number is recorded but distinction by color is not made
#' \item Pollinators
#' \item Spiders
#' \item Ants
#' \item Other_collected_quantity
#' \item Plant_Notes
#' \item Insect_Notes
#' \item Other_Notes
#' \item Dead Whether the plant is observed to be dead; 0 or 1
#' \item Missing
#' \item OutsideOfPlot "Yes" if plant is no longer in plot
#' \item PlotPlantID Unique number for the individual plant; if a plant is counted in multiple plots, a letter is appended to the plant ID here (e.g., 9606a) and then removed for analysis
#' \item Size_t The sum of Plant_Segments_total, Plant_Segments_w_leaves, Plant_Segments_wo_leaves, and Plant_Segments_woody
#' \item Fruit_t The sum of Num_Fruit_red, Num_Fruit_green, and Num_Fruit
#' \item Fruit_Flowers_t The sum of Num_FlowerBuds, Num_Flowers, Num_Fruit_red, Num_Fruit_green, and Num_Fruit
#' }
#'
#' @export
processPlantSurveys <- function(Plant.Surveys, Plant.Info) {
# ----------------------------------------------------------------- WARNINGS
# check first duplicate data entries
dups <- Plant.Surveys %>%
group_by(PlantID, DateSurveyed) %>%
summarise(n.obs = length(Plant_collected)) %>%
filter(n.obs > 1)
if (dim(dups)[1] > 0) {stop("Duplicate observations for a PlantID, Date combination are present in the dataset.")}
# check for PlantID = NA
dups <- Plant.Surveys %>% filter(is.na(PlantID))
if (dim(dups)[1] > 0) {stop("NA values for PlantID.")}
# are all Plant IDs from the Plant Surveys data in Plant Info?
dups <- filter(Plant.Surveys, !(PlantID %in% Plant.Info$PlantID))[, 4:5]
if (dim(dups)[1] > 0) {
warning(paste(
"These Plant IDs from Plant Surveys are not in Plant Info:",
paste(unique(dups$PlantID), collapse=", ")
))
}
# Duplicates in Plant Surveys
# duplicates for 1795 on 2013-02-10 are okay - I accidentally surveyed the plot twice; keep it for detectability
dups = Plant.Surveys %>%
group_by(PlantID, DateSurveyed) %>%
dplyr::summarise(Nrecords = length(First_Observer_Initials)) %>%
as.data.frame %>%
arrange(PlantID) %>%
filter(Nrecords > 1)
if (dim(dups)[1] > 0) {
warning(paste(
"Duplicate surveys on the same date for the following plants:",
paste(unique(dups$PlantID), collapse=", ")
))
}
# check for size = 0
dups <- Plant.Surveys %>% filter(Max_Height==0)
if (dim(dups)[1] > 0) {stop("Max. height values = 0.")}
dups <- Plant.Surveys %>% filter(Max_Width==0)
if (dim(dups)[1] > 0) {stop("Max. width values = 0.")}
dups <- Plant.Surveys %>% filter(Perpen_Width==0)
if (dim(dups)[1] > 0) {stop("Perpendicular width values = 0.")}
# ------------------------------------------------------------- CHANGE NAMES
# rename size and height columns
Plant.Surveys %<>% as.data.table %>%
setnames("Max_Height", "Height_t") %>%
setnames("Max_Width", "Width_t") %>%
setnames("CACA_Larvae", "CA_t") %>%
setnames("MEPR_Larvae", "ME_t") %>%
setnames("CHVI_Evidence", "CH_t") %>%
setnames("DACT_Evidence", "DA_t") %>%
setnames("UnknownMoth_Evidence", "Unknown_Moth_t") %>%
setnames("Old_Moth_Evidence", "Old_Moth_Evidence_t") %>%
setnames("DateSurveyed", "Date") %>%
setnames("Gerstaeckeria", "Gerstaeckeria_t") %>%
as.data.frame
# formatting/preparation necessary for prepping Demographic Plant Info
Plant.Surveys$Date %<>% Format_Date_Function
Plant.Surveys %<>% arrange(Date)
Plant.Surveys %<>% Format_PlantIDs_Function
# ------------------------------------------------ CONVERT ALL "999s" to NAs
Plant.Surveys[,c(
"Plant_Segments_total",
"Plant_Segments_w_leaves",
"Plant_Segments_wo_leaves",
"Plant_Segments_woody",
"Perpen_Width",
"Width_t",
"Height_t",
"Num_FlowerBuds",
"Num_Fruit_red",
"Num_Fruit_green",
"Num_Flowers",
"Num_Fruit")] %<>%
apply(2, NA_Function
)
# ------------------- INSECT SURVEYS, MISSING, DEAD - CHANGE YES, NO to 0, 1
Plant.Surveys[,c(
"CA_t",
"ME_t",
"CH_t",
"DA_t",
"Unknown_Moth_t",
"Gerstaeckeria_t",
"Old_Moth_Evidence_t",
"Dead",
"Missing")] %<>%
apply(2, Yes_Function
)
Plant.Surveys[,c(
"CA_t",
"ME_t",
"CH_t",
"DA_t",
"Unknown_Moth_t",
"Gerstaeckeria_t",
"Old_Moth_Evidence_t",
"Dead",
"Missing")] %<>%
apply(2, No_Function
)
Plant.Surveys[,c(
"CA_t",
"ME_t",
"CH_t",
"DA_t",
"Unknown_Moth_t",
"Gerstaeckeria_t",
"Old_Moth_Evidence_t",
"Dead",
"Missing")] %<>%
apply(2, NA_Function
)
# ------------------------------------------------------------- MAKE NUMERIC
Plant.Surveys[,c(
"CA_t",
"ME_t",
"CH_t",
"DA_t",
"Unknown_Moth_t",
"Gerstaeckeria_t",
"Old_Moth_Evidence_t",
"Plant_Segments_total",
"Plant_Segments_w_leaves",
"Plant_Segments_wo_leaves",
"Plant_Segments_woody",
"Height_t",
"Width_t",
"Perpen_Width",
"Num_FlowerBuds",
"Num_Fruit_red",
"Num_Fruit_green",
"Num_Flowers",
"Num_Fruit",
"Dead",
"Missing")] %<>%
apply(2, destring
)
# ------------------------------------------------------------ GERSTAECKERIA
# Gerstaeckeria was not reliably surveyed so can only take values of NA or 1
Plant.Surveys$Gerstaeckeria_t %<>% Zero_is_NA_Function
# ---------------------------------------------- Change Missing = NA to zero
Plant.Surveys$Missing %<>% NA_is_Zero_Function
# ------------------------------------------------- ADD TOTAL SEGMENT COLUMN
# do this so that plants that have no segments recorded (all NAs) have a total segment count = NA
# for those plants that have fewer than four NAs (at least one segment column has a number), sum the segments
Plant.Surveys$Size_t <- Plant.Surveys %>%
dplyr::select(
Plant_Segments_total,
Plant_Segments_w_leaves,
Plant_Segments_wo_leaves,
Plant_Segments_woody
) %>%
apply(1, mysum)
Plant.Surveys$Size_t %<>% Zero_is_NA_Function
# --------------------------------------------------------- ADD FRUIT COLUMN
Plant.Surveys$Fruit_t <- Plant.Surveys %>%
dplyr::select(
Num_Fruit_red,
Num_Fruit_green,
Num_Fruit
) %>%
apply(1, mysum)
Plant.Surveys$Fruit_Flowers_t <- Plant.Surveys %>%
dplyr::select(
Num_FlowerBuds,
Num_Flowers,
Num_Fruit_red,
Num_Fruit_green,
Num_Fruit
) %>%
apply(1, mysum)
# -------- variable indicating whether size/fruit measured during the survey
Plant.Surveys$SegmentsMeasured <- Plant.Surveys %>%
dplyr::select(
Plant_Segments_w_leaves,
Plant_Segments_wo_leaves,
Plant_Segments_woody,
Plant_Segments_total
) %>%
apply(1, mysum3)
Plant.Surveys$FruitMeasured <- Plant.Surveys %>%
dplyr::select(
Num_FlowerBuds,
Num_Flowers,
Num_Fruit_red,
Num_Fruit_green,
Num_Fruit
) %>%
apply(1, mysum3)
Plant.Surveys$SizeMeasured <- Plant.Surveys %>%
dplyr::select(
Height_t,
Width_t,
Perpen_Width
) %>%
apply(1, mysum3)
# --------------------------------------------------------------------------
Plant.Surveys$Date %<>%
strptime("%Y-%m-%d") %>%
as.POSIXct(format="%Y-%m-%d", tz="")
Plant.Surveys %<>%
addSamplingPeriods %>%
assignSeason %>%
createFecundityYear %>%
as.data.frame
Plant.Surveys$Date %<>% as.Date
# ----------------------------------------------------------------- WARNINGS
dups <- Plant.Surveys %>% filter(Size_t==0)
if (dim(dups)[1] > 0) {stop("Size values = 0.")}
# throw a warning if a plant has a recorded size but is also marked either dead or missing
temp <- Plant.Surveys %>%
filter(!(is.na(Size_t)) & Dead == 1)
if (dim(temp)[1] > 0) {
warning(paste(
"PlantMeasureID Records ",
paste(temp$PlantMeasureID, collapse=", "),
"have size measurements but are also marked dead"
))
}
temp <- Plant.Surveys %>%
filter(!(is.na(Size_t)) & Missing == 1)
if (dim(temp)[1] > 0) {
warning(paste(
"PlantMeasureID Records ",
paste(temp$PlantMeasureID, collapse=", "),
"have size measurements but are also marked missing"
))
}
# ------------------------------------------------------------------------ #
return(Plant.Surveys)
}
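The helpers applied above (NA_Function, Yes_Function, No_Function, Zero_is_NA_Function, NA_is_Zero_Function, mysum, mysum3, destring, and the Format_* functions) live elsewhere in GTMNERRproc. Hedged sketches of plausible definitions for the small generic ones, inferred purely from how they are used here:

# hypothetical reconstructions -- the package's real definitions may differ
NA_Function         <- function(x) ifelse(x %in% c(999, "999"), NA, x)  # "999" sentinel -> NA
Yes_Function        <- function(x) ifelse(tolower(x) == "yes", 1, x)
No_Function         <- function(x) ifelse(tolower(x) == "no", 0, x)
Zero_is_NA_Function <- function(x) ifelse(x == 0, NA, x)
NA_is_Zero_Function <- function(x) ifelse(is.na(x), 0, x)
destring            <- function(x) as.numeric(gsub("[^0-9.-]", "", x))  # strip non-numeric characters
mysum  <- function(x) if (all(is.na(x))) NA else sum(x, na.rm = TRUE)   # NA only when every input is NA
mysum3 <- function(x) as.numeric(any(!is.na(x)))                        # 1 if anything was measured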

blob_id: 01fafeece02ef349625a5ffa9c0061611bf97bbe | directory_id: 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | path: /fuzzedpackages/xrnet/man/y_linear.Rd | content_id: e77fe5a17b7c2bd9b9c2dae4fa841108ddc90652
detected_licenses: [] | license_type: no_license | repo_name: akhikolla/testpackages | snapshot_id: 62ccaeed866e2194652b65e7360987b3b20df7e7 | revision_id: 01259c3543febc89955ea5b79f3a08d3afe57e95 | branch_name: refs/heads/master
visit_date: 2023-02-18T03:50:28.288006 | revision_date: 2021-01-18T13:23:32 | committer_date: 2021-01-18T13:23:32 | github_id: 329,981,898 | star_events_count: 7 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: true | length_bytes: 278 | extension: rd | filename: y_linear.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{y_linear}
\alias{y_linear}
\title{Simulated outcome data}
\format{A vector with 100 elements}
\usage{
y_linear
}
\description{
Simulated outcome data
}
\keyword{datasets}
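A short usage sketch for the dataset documented above (assumes the xrnet package is installed):

library(xrnet)
data(y_linear)
length(y_linear)   # 100, per the \format field
summary(y_linear)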

blob_id: 551d7a1d64a82894e83818f85b93b5333512ff1a | directory_id: 3cfae31923ff4af0c3f7cad2d79dcf30472f3408 | path: /R/aaa.R | content_id: ec1c3e758f32a3a67ba7ae8570232db6dec8e08c
detected_licenses: [] | license_type: no_license | repo_name: dipterix/restbatch | snapshot_id: e8e213cf20faf1c97683aea8b4629d8be1607557 | revision_id: 8bfe11619d4af1fc3217d1fd8f64d164eefc63ac | branch_name: refs/heads/main
visit_date: 2023-08-13T17:20:24.079458 | revision_date: 2021-10-10T05:26:25 | committer_date: 2021-10-10T05:28:25 | github_id: 335,794,148 | star_events_count: 5 | fork_events_count: 2 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 2,727 | extension: r | filename: aaa.R

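# Local copy of tools::R_user_dir() (base R >= 4.0.0) so the package also works on older R versions.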
R_user_dir <- function (package, which = c("data", "config", "cache")) {
stopifnot(is.character(package), length(package) == 1L)
which <- match.arg(which)
home <- normalizePath("~")
path <- switch(which, data = {
if (nzchar(p <- Sys.getenv("R_USER_DATA_DIR"))) p
else if (nzchar(p <- Sys.getenv("XDG_DATA_HOME"))) p
else if (.Platform$OS.type == "windows") file.path(Sys.getenv("APPDATA"), "R", "data")
else if (Sys.info()["sysname"] == "Darwin") file.path(home, "Library", "Application Support", "org.R-project.R")
else file.path(home, ".local", "share")
}, config = {
if (nzchar(p <- Sys.getenv("R_USER_CONFIG_DIR"))) p
else if (nzchar(p <- Sys.getenv("XDG_CONFIG_HOME"))) p
else if (.Platform$OS.type == "windows") file.path(Sys.getenv("APPDATA"), "R", "config")
else if (Sys.info()["sysname"] == "Darwin") file.path(home, "Library", "Preferences", "org.R-project.R")
else file.path(home, ".config")
}, cache = {
if (nzchar(p <- Sys.getenv("R_USER_CACHE_DIR"))) p
else if (nzchar(p <- Sys.getenv("XDG_CACHE_HOME"))) p
else if (.Platform$OS.type == "windows") file.path(Sys.getenv("LOCALAPPDATA"), "R", "cache")
else if (Sys.info()["sysname"] == "Darwin") file.path(home, "Library", "Caches", "org.R-project.R")
else file.path(home, ".cache")
})
file.path(path, "R", package)
}
rand_string <- function(length = 50){
# Must be truly random (openssl entropy), so set.seed() cannot make it reproducible
ret <- as.character(openssl::aes_keygen(length = ceiling(length / 2)))
ret <- paste(ret, collapse = "")
ret <- stringr::str_sub(ret, end = length)
ret
}
stopifnot2 <- function(..., msg = 'Condition not satisfied'){
if(!all(c(...))){
stop(msg)
}
}
dir_create2 <- function(x, showWarnings = FALSE, recursive = TRUE, check = TRUE, ...) {
if (!dir.exists(x)) {
dir.create(x, showWarnings = showWarnings, recursive = recursive, ...)
}
if (check && !dir.exists(x)) {
stop('Cannot create directory at ', shQuote(x))
}
invisible(normalizePath(x))
}
attached_packages <- function(include_base = FALSE){
info <- utils::sessionInfo()
bk <- rev(info$basePkgs)
pk <- vapply(info$otherPkgs, '[[', 'Package', 'Package', USE.NAMES = FALSE)
pk <- rev(pk)
if(include_base){
pk <- c(bk, pk)
}
pk
}
get_os <- function(){
if("windows" %in% .Platform$OS.type){
return("windows")
}
os <- stringr::str_to_lower(R.version$os)
if(stringr::str_detect(os, '^darwin')){
return('darwin')
}
if(stringr::str_detect(os, '^linux')){
return('linux')
}
if(stringr::str_detect(os, '^solaris')){
return('solaris')
}
if(stringr::str_detect(os, '^win')){
return('windows')
}
return('unknown')
}
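A brief usage sketch of the helpers above (the token length shown is illustrative):

cache_dir <- dir_create2(R_user_dir("restbatch", which = "cache"))  # resolve and create the cache dir
token <- rand_string(32)        # 32-char random token, independent of set.seed()
stopifnot2(nchar(token) == 32, msg = "token has wrong length")
attached_packages()             # attached non-base packages, e.g. for session logging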

blob_id: fd54cbf84a6e9679786d4189f78357b05ac3b79c | directory_id: 4bdd257c859dc4406d0fe77a22c130e1c8112413 | path: /Log_Regression.R | content_id: c5402aa89c210b24c5c6f6a5aea8746945dc900a
detected_licenses: [] | license_type: no_license | repo_name: anjelica-weber/Data_Mining | snapshot_id: 5c84657ec7eb012c55b83be4014ea770b76cc2e1 | revision_id: d8952586c93b98559a89ac8c6829233c4e1e0ca5 | branch_name: refs/heads/master
visit_date: 2023-07-08T21:17:13.056830 | revision_date: 2021-08-25T16:43:37 | committer_date: 2021-08-25T16:43:37 | github_id: 394,665,480 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 2,840 | extension: r | filename: Log_Regression.R

# Load Libraries ----------------------------------------------------------
library(caTools)
library(tidyverse)
library(xlsx)
# Load Data ---------------------------------------------------------------
dir_ex <- paste0("J:/deans/Presidents/SixSigma/Individual Folders",
"/Current Employees/Engineers/Anjelica Weber/Projects",
"/Data Mining Training Materials")
df_heart <- read.xlsx2(file = paste0(dir_ex, "/Datasets.xlsx"),
sheetName = "HeartFailure")
# Pre-Processing ----------------------------------------------------------
#formatting data types
df_heart <- df_heart %>%
mutate(age = as.numeric(age),
anaemia = as.factor(anaemia),
creatinine_phosphokinase = as.numeric(creatinine_phosphokinase),
diabetes = as.factor(diabetes),
ejection_fraction = as.numeric(ejection_fraction),
high_blood_pressure = as.factor(high_blood_pressure),
platelets = as.numeric(platelets),
serum_creatinine = as.numeric(serum_creatinine),
serum_sodium = as.numeric(serum_sodium),
sex = as.factor(sex),
smoking = as.factor(smoking),
time = as.numeric(time),
DEATH_EVENT = as.factor(DEATH_EVENT))
# Train / Test Data Split -------------------------------------------------
#data points for training and testing set selected using a random number generator (RNG)
#setting seed number for the RNG, this can be any arbitrary number
#set.seed(101)
#Split ratio is 70% for training 30% testing
#function creates boolean vector T for 70% of data F for 30%
#pass in any column from data set
#sample <- sample.split(df_heart$age, SplitRatio = 0.7)
#train <- subset(df_heart, sample == T)
#test <- subset(df_heart, sample == F)
# Training Model ----------------------------------------------------------
#equation y ~ x1 + x2 + etc, or y ~. for all variables
log_model <- glm(DEATH_EVENT ~.,
data = df_heart,
family = binomial(link = "logit"))
#model summary
summary(log_model)
# Predictions -------------------------------------------------------------
#head(predict(log_model, df_heart))
#the probability a data point will be in class 0 or class 1
#results range from 0 (0%) to 1 (100%)
result_prob <- predict(log_model,
newdata = df_heart[,1:12],
type = "response")
#results coerced into classes
#rule of thumb: probability > 50% -> class 1, otherwise class 0
#Use ROC analysis results for optimal threshold for model / data set
result_fitted <- ifelse(result_prob > 0.5, 1, 0)
# Evaluating Accuracy -----------------------------------------------------
misClassError <- mean(result_fitted != df_heart$DEATH_EVENT)
print(paste("Prediction accuracy is", round((1 - misClassError)*100, 2), "%"))
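Because the split is commented out above, the printed accuracy is measured on the same data the model was fit to. A sketch of the held-out evaluation the comments describe, reusing the commented-out caTools split (seed value illustrative):

set.seed(101)
idx   <- sample.split(df_heart$age, SplitRatio = 0.7)   # TRUE for 70% of rows
train <- subset(df_heart, idx == TRUE)
test  <- subset(df_heart, idx == FALSE)
fit   <- glm(DEATH_EVENT ~ ., data = train, family = binomial(link = "logit"))
prob  <- predict(fit, newdata = test, type = "response")
pred  <- ifelse(prob > 0.5, 1, 0)
mean(pred == test$DEATH_EVENT)   # held-out accuracy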

blob_id: 3c45f44c9f71128b1f3d33475e2d074f515ded3a | directory_id: 1a4b455f385cf9ba8ed6bc916497fe190deb5ed6 | path: /R-prediction-code/workflowsNew.R | content_id: b514098259887152525a66868f89d657a8dd2796
detected_licenses: [] | license_type: no_license | repo_name: Blodgic/R-prediction-code | snapshot_id: 72595e9315ce7ee158dede0f8df5395d7196e9ce | revision_id: 037509d7303b82c40e593957c1a41d3eaf91eab5 | branch_name: refs/heads/master
visit_date: 2021-01-11T00:46:32.390135 | revision_date: 2016-10-10T14:22:41 | committer_date: 2016-10-10T14:22:41 | github_id: 70,493,646 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 5,172 | extension: r | filename: workflowsNew.R

## ===============================================
## The Box Plot Rule workflow
## ===============================================
BPrule.wf <- function(form,train,test,...) {
ms <- as.matrix(filter(train,Insp != 'fraud') %>%
group_by(Prod) %>%
summarise(median=median(Uprice),iqr=IQR(Uprice)) %>%
select(median,iqr))
rownames(ms) <- levels(train$Prod)
ms[which(ms[,'iqr']==0),'iqr'] <- ms[which(ms[,'iqr']==0),'median']
ORscore <- abs(test$Uprice-ms[test$Prod,'median']) /
ms[test$Prod,'iqr']
rankOrder <- order(ORscore,decreasing=T)
res <- WFoutput(evalOutlierRanking(test,rankOrder,...))
workflowInformation(res) <-
list(probs=matrix(c(ORscore,ifelse(test$Insp=='fraud',1,0)),ncol=2))
res
}
## ===============================================
## The LOF workflow
## ===============================================
LOF.wf <- function(form, train, test, k, ...) {
ntr <- nrow(train)
all <- rbind(train,test)
N <- nrow(all)
ups <- split(all$Uprice,all$Prod)
r <- vector("list", length(ups))  # one list slot per product
for(u in seq(along=ups))
r[[u]] <- if (NROW(ups[[u]]) > 3)
lofactor(ups[[u]],min(k,NROW(ups[[u]]) %/% 2))
else if (NROW(ups[[u]])) rep(0,NROW(ups[[u]]))
else NULL
all$lof <- vector(length=N)
split(all$lof,all$Prod) <- r
all$lof[which(!(is.infinite(all$lof) | is.nan(all$lof)))] <-
SoftMax(all$lof[which(!(is.infinite(all$lof) | is.nan(all$lof)))])
res <- WFoutput(evalOutlierRanking(test,
order(all[(ntr+1):N,'lof'],decreasing=T),...))
workflowInformation(res) <-
list(probs=matrix(c(all[(ntr+1):N,'lof'],
ifelse(test$Insp=='fraud',1,0)),ncol=2))
res
}
## ===============================================
## The NB with Smote workflow
## ===============================================
NBsm.wf <- function(form,train,test,...) {
require(e1071,quietly=TRUE)
require(DMwR,quietly=TRUE)
sup <- which(train$Insp != 'unkn')
data <- train[sup,c('ID','Prod','Uprice','Insp')]
data$Insp <- factor(data$Insp,levels=c('ok','fraud'))
newData <- SMOTE(Insp ~ .,data,perc.over=700)
model <- naiveBayes(Insp ~ .,newData)
preds <- predict(model,test[,c('ID','Prod','Uprice','Insp')],type='raw')
rankOrder <- order(preds[,'fraud'],decreasing=T)
rankScore <- preds[,'fraud']
res <- WFoutput(evalOutlierRanking(test,rankOrder,...))
workflowInformation(res) <-
list(probs=matrix(c(rankScore,ifelse(test$Insp=='fraud',1,0)),ncol=2))
res
}
## ===============================================
## The AdaBoosting workflow
## ===============================================
ab.wf.x <- function(form,train,test,ntrees=100,...) {
require(adabag,quietly=TRUE)
require(rpart,quietly=TRUE)
sup <- which(train$Insp != 'unkn')
data <- train[sup,c('ID','Prod','Uprice','Insp')]
data$Insp <- factor(data$Insp,levels=c('ok','fraud'))
model <- boosting(Insp ~ .,data,mfinal=ntrees)
preds <- predict(model,test[,c('ID','Prod','Uprice','Insp')])
rankOrder <- order(preds$prob[,2],decreasing=TRUE)
rankScore <- preds$prob[,2]
res <- WFoutput(evalOutlierRanking(test,rankOrder,...))
workflowInformation(res) <-
list(probs=matrix(c(rankScore,ifelse(test$Insp=='fraud',1,0)),ncol=2))
res
}
# Weka version
ab.wf <- function(form,train,test,ntrees=100,...) {
require(RWeka,quietly=TRUE)
sup <- which(train$Insp != 'unkn')
data <- train[sup,c('ID','Prod','Uprice','Insp')]
data$Insp <- factor(data$Insp,levels=c('ok','fraud'))
model <- AdaBoostM1(Insp ~ .,data,
control=Weka_control(I=ntrees))
preds <- predict(model,test[,c('ID','Prod','Uprice','Insp')],
type='probability')
rankOrder <- order(preds[,"fraud"],decreasing=TRUE)
rankScore <- preds[,"fraud"]
res <- WFoutput(evalOutlierRanking(test,rankOrder,...))
workflowInformation(res) <-
list(probs=matrix(c(rankScore,ifelse(test$Insp=='fraud',1,0)),ncol=2))
res
}
ab.st.wf <- function(train,test,ntrees=100,...) {
require(RWeka,quietly=TRUE)
require(DMwR,quietly=TRUE)
train <- train[,c('ID','Prod','Uprice','Insp')]
train[which(train$Insp == 'unkn'),'Insp'] <- NA
train$Insp <- factor(train$Insp,levels=c('ok','fraud'))
pred.ada <- function(m,d) {
p <- predict(m,d,type='probability')
data.frame(cl=colnames(p)[apply(p,1,which.max)],
p=apply(p,1,max)
)
}
model <- SelfTrain(Insp ~ .,train,
learner('AdaBoostM1',
list(control=Weka_control(I=ntrees))),
'pred.ada')
preds <- predict(model,test[,c('ID','Prod','Uprice','Insp')],
type='probability')
rankOrder <- order(preds[,'fraud'],decreasing=T)
rankScore <- preds[,"fraud"]
res <- WFoutput(evalOutlierRanking(test,rankOrder,...))
workflowInformation(res) <-
list(probs=matrix(c(rankScore,ifelse(test$Insp=='fraud',1,0)),ncol=2))
res
}
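These workflows appear to follow the fraud-detection case study interface from Torgo's Data Mining with R: each takes a formula plus train/test splits of a sales table (columns ID, Prod, Uprice, Insp, ...) and forwards the extra arguments to evalOutlierRanking(). A hedged direct-call sketch; 'sales', 'globalStats', and Threshold = 0.1 are assumptions from that setting, not defined in this file:

# res <- BPrule.wf(Insp ~ ., train = sales[trainRows, ], test = sales[testRows, ],
#                  Threshold = 0.1, statsProds = globalStats)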

blob_id: e8ae5b4e68d1ddb59e3ccd30b948a99bd9c0e70b | directory_id: 01d2cc085dd9e6ecf5c23592a9f8b79d149f2a44 | path: /MoreIsBetter Bits/MoreSetup.r | content_id: 5aef7b892916bf9625cb7b14c53db0a5fa329fbb
detected_licenses: [] | license_type: no_license | repo_name: fruitsamples/OTStreamLogViewer | snapshot_id: 02596342d830b17082fac9b89770a71009761c9e | revision_id: b556c584f72ceecfe7fa9474b830cff347637142 | branch_name: refs/heads/master
visit_date: 2021-01-10T11:07:14.037047 | revision_date: 2015-11-25T21:40:44 | committer_date: 2015-11-25T21:40:44 | github_id: 46,888,752 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: WINDOWS-1252 | language: R | is_vendor: false | is_generated: false | length_bytes: 2,553 | extension: r | filename: MoreSetup.r

/*
File: MoreSetup.r
Contains: Sets up conditions etc for MoreIsBetter.
Written by: Quinn
Copyright: Copyright © 1998 by Apple Computer, Inc., All Rights Reserved.
You may incorporate this Apple sample source code into your program(s) without
restriction. This Apple sample source code has been provided "AS IS" and the
responsibility for its operation is yours. You are not permitted to redistribute
this Apple sample source code as "Apple sample source code" after having made
changes. If you're going to re-distribute the source, we require that you make
it clear in the source that the code was descended from Apple sample source
code, but that you've made changes.
Change History (most recent first):
<2> 11/4/00 Quinn Tweaks to TARGET_API_MAC_CARBON handling for non-Carbon builds.
<1> 20/3/00 Quinn First checked in.
*/
//
// We never want to use old names or locations.
// Since these settings must be consistent all the way through
// a compilation unit, and since we don't want to silently
// change them out from under a developer who uses a prefix
// file (C/C++ panel of Target Settings), we simply complain
// if they are already set in a way we don't like.
//
#ifndef OLDROUTINELOCATIONS
# define OLDROUTINELOCATIONS 0
#elif OLDROUTINELOCATIONS
# error OLDROUTINELOCATIONS must be FALSE when compiling MoreIsBetter.
#endif
#ifndef OLDROUTINENAMES
# define OLDROUTINENAMES 0
#elif OLDROUTINENAMES
# error OLDROUTINENAMES must be FALSE when compiling MoreIsBetter.
#endif
// "ConditionalMacros.r" seems to have troubles if you define
// TARGET_API_MAC_CARBON to true without defining the alternative
// (TARGET_API_MAC_OS8) to false. Here we fix that up.
#ifdef TARGET_API_MAC_CARBON
#if TARGET_API_MAC_CARBON
#define TARGET_API_MAC_OS8 0
#endif
#endif
//
// Bring in "ConditionalMacros.r" in order to set up
// UNIVERSAL_INTERFACES_VERSION.
//
#include <ConditionalMacros.r>
// Now that we've included a Mac OS interface file,
// we know that the Universal Interfaces environment
// is set up. MoreIsBetter requires Universal Interfaces
// 3.2 or higher. Check for it.
#if !defined(UNIVERSAL_INTERFACES_VERSION) || UNIVERSAL_INTERFACES_VERSION < 0x0320
#error MoreIsBetter requires Universal Interfaces 3.2 or higher.
#endif
//
// We usually want assertions and other debugging code
// turned on, but you can turn it all off if you like
// by setting MORE_DEBUG to 0.
//
#ifndef MORE_DEBUG
# define MORE_DEBUG 1
#endif

blob_id: 631bfdb93be957b6fa941898f9b0879bede287fc | directory_id: f60fad0df80e47cb1d64e864653d8c8a30943384 | path: /Shinyapp/WordPredictor/predictor.R | content_id: 9da7af82faff8ac05b242ef1571c6fb49791296c
detected_licenses: [] | license_type: no_license | repo_name: kobe04/DataScienceCapstone | snapshot_id: 5473c87ce71f627985d59b261f424c0462991d03 | revision_id: e7a972cf11c13eb223a412e47d2256d08a476791 | branch_name: refs/heads/master
visit_date: 2020-05-03T20:44:42.449971 | revision_date: 2019-05-01T10:06:16 | committer_date: 2019-05-01T10:06:16 | github_id: 178,809,692 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 5,527 | extension: r | filename: predictor.R

## Function to predict the next word
# library(data.table); library(stringr)
predictWord <- function(dataTable, inputWords, top10){
inputWords <- gsub("[[:digit:]]+", "", inputWords)
inputWords <- gsub("[[:punct:][:blank:]]+", " ", inputWords)
inputWords <- str_split(str_trim(str_to_lower(inputWords)), " ") [[1]]
wordCount <- length(inputWords)
if (wordCount >= 4){
entered <- paste(inputWords[wordCount-3], inputWords[wordCount-2], inputWords[wordCount-1],
inputWords[wordCount])
prediction <- dataTable[source == entered,]
if (nrow(prediction) == 3){
return(c(prediction[1,predict], prediction[2,predict], prediction[3,predict]))
}
else if (nrow(prediction) == 2){
return(c(prediction[1,predict], prediction[2,predict]))
}
else if (nrow(prediction) >= 1){
return(prediction[1,predict])
}
entered <- paste(inputWords[wordCount-2], inputWords[wordCount-1], inputWords[wordCount])
prediction <- dataTable[source == entered,]
if (nrow(prediction) == 3){
return(c(prediction[1,predict], prediction[2,predict], prediction[3,predict]))
}
else if (nrow(prediction) == 2){
return(c(prediction[1,predict], prediction[2,predict]))
}
else if (nrow(prediction) >= 1){
return(prediction[1,predict])
}
entered <- paste(inputWords[wordCount-1], inputWords[wordCount])
prediction <- dataTable[source == entered,]
if (nrow(prediction) == 3){
return(c(prediction[1,predict], prediction[2,predict], prediction[3,predict]))
}
else if (nrow(prediction) == 2){
return(c(prediction[1,predict], prediction[2,predict]))
}
else if (nrow(prediction) >= 1){
return(prediction[1,predict])
}
entered <- paste(inputWords[wordCount])
prediction <- dataTable[source == entered,]
if (nrow(prediction) == 3){
return(c(prediction[1,predict], prediction[2,predict], prediction[3,predict]))
}
else if (nrow(prediction) == 2){
return(c(prediction[1,predict], prediction[2,predict]))
}
else if (nrow(prediction) >= 1){
return(prediction[1,predict])
}
return(top10[sample(10, 1)])  # uniform pick; round(runif(1, 1, 10)) would under-sample 1 and 10
}
else if (wordCount == 3){
entered <- paste(inputWords[wordCount-2], inputWords[wordCount-1], inputWords[wordCount])
prediction <- dataTable[source == entered,]
if (nrow(prediction) == 3){
return(c(prediction[1,predict], prediction[2,predict], prediction[3,predict]))
}
else if (nrow(prediction) == 2){
return(c(prediction[1,predict], prediction[2,predict]))
}
else if (nrow(prediction) >= 1){
return(prediction[1,predict])
}
entered <- paste(inputWords[wordCount-1], inputWords[wordCount])
prediction <- dataTable[source == entered,]
if (nrow(prediction) == 3){
return(c(prediction[1,predict], prediction[2,predict], prediction[3,predict]))
}
else if (nrow(prediction) == 2){
return(c(prediction[1,predict], prediction[2,predict]))
}
else if (nrow(prediction) >= 1){
return(prediction[1,predict])
}
entered <- paste(inputWords[wordCount])
prediction <- dataTable[source == entered,]
if (nrow(prediction) == 3){
return(c(prediction[1,predict], prediction[2,predict], prediction[3,predict]))
}
else if (nrow(prediction) == 2){
return(c(prediction[1,predict], prediction[2,predict]))
}
else if (nrow(prediction) >= 1){
return(prediction[1,predict])
}
return(top10[sample(10, 1)])
}
else if (wordCount == 2){
entered <- paste(inputWords[wordCount-1], inputWords[wordCount])
prediction <- dataTable[source == entered,]
if (nrow(prediction) == 3){
return(c(prediction[1,predict], prediction[2,predict], prediction[3,predict]))
}
else if (nrow(prediction) == 2){
return(c(prediction[1,predict], prediction[2,predict]))
}
else if (nrow(prediction) >= 1){
return(prediction[1,predict])
}
entered <- paste(inputWords[wordCount])
prediction <- dataTable[source == entered,]
if (nrow(prediction) == 3){
return(c(prediction[1,predict], prediction[2,predict], prediction[3,predict]))
}
else if (nrow(prediction) == 2){
return(c(prediction[1,predict], prediction[2,predict]))
}
else if (nrow(prediction) >= 1){
return(prediction[1,predict])
}
return(top10[sample(10, 1)])
}
else{
entered <- paste(inputWords[wordCount])
prediction <- dataTable[source == entered,]
if (nrow(prediction) == 3){
return(c(prediction[1,predict], prediction[2,predict], prediction[3,predict]))
}
else if (nrow(prediction) == 2){
return(c(prediction[1,predict], prediction[2,predict]))
}
else if (nrow(prediction) >= 1){
return(prediction[1,predict])
}
return(top10[sample(10, 1)])
}
return(top10[sample(10, 1)])
}
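A self-contained usage sketch; the tiny lookup table (columns source/predict) and the top10 fallback vector are hypothetical stand-ins for the app's real n-gram tables:

library(data.table); library(stringr)
ngrams <- data.table(source  = c("thank you", "thank"),
                     predict = c("very",      "you"))
top10  <- c("the", "to", "and", "a", "of", "in", "i", "is", "that", "it")
predictWord(ngrams, "Thank you!", top10)   # "very" -- matched on the bigram "thank you"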

blob_id: 93eb6bf5103a07c1f904719078f57f9bfe44fe02 | directory_id: be7f37f0a8680fac63aab8bd02f668a40da79dc8 | path: /GenomicDataScienceSpecialization/06_Bioconductor_for_Genomic_Data_Science/week4.R | content_id: 826b6334f8f8d6703b781bfdfe4aefff71454f18
detected_licenses: [] | license_type: no_license | repo_name: dongyuanwu/SelfImprovement | snapshot_id: 269f1af04394f8f3463f34bd071dd7860e2bd466 | revision_id: 1349012e0775ac8ebe95356a181196772440c82b | branch_name: refs/heads/master
visit_date: 2022-12-22T09:17:55.771734 | revision_date: 2020-10-04T21:26:45 | committer_date: 2020-10-04T21:26:45 | github_id: 272,483,670 | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 2,574 | extension: r | filename: week4.R

# Q1
library(yeastRNASeq)
library(ShortRead)
fastqFilePath <- system.file("reads", "wt_1_f.fastq.gz", package = "yeastRNASeq")
reads <- readFastq(fastqFilePath)
reads_set <- sread(reads)
sum(DNAStringSet(reads_set, 5, 5) == "A") / length(reads_set)
# Q2
qm <- as(quality(reads), "matrix")
mean(qm[, 5])
# Q3
library(leeBamViews)
bamFilePath <- system.file("bam", "isowt5_13e.bam", package="leeBamViews")
bamFile <- BamFile(bamFilePath)
gr <- GRanges(seqnames = "Scchr13", ranges = IRanges(start = 800000, end = 801000))
params <- ScanBamParam(which = gr, what = scanBamWhat())
aln <- scanBam(bamFile, param = params)
aln <- aln[[1]]
duplicatedValues = unique(aln$pos[duplicated(aln$pos)])
sum(aln$pos %in% duplicatedValues)
# Q4
library(GenomicRanges)
bpaths <- list.files(system.file("bam", package = "leeBamViews"), pattern = "bam$",
full=TRUE)
gr <- GRanges(seqnames = "Scchr13", ranges = IRanges(start = 807762, end = 808068))
bamView <- BamViews(bpaths)
bamRanges(bamView) <- gr
aln <- scanBam(bamView)
lens <- sapply(aln, function(x) length(x[[1]]$seq))
mean(unlist(lens))
# Q5
library(oligo)
library(GEOquery)
geoMat <- getGEO("GSE38792")
pD.all <- pData(geoMat[[1]])
getGEOSuppFiles("GSE38792")
untar("GSE38792/GSE38792_RAW.tar", exdir = "GSE38792/CEL")
celfiles <- list.files("GSE38792/CEL", full = TRUE)
rawData <- read.celfiles(celfiles)
filename <- sampleNames(rawData)
pData(rawData)$filename <- filename
sampleNames <- gsub(".*_", "", filename)
sampleNames <- gsub(".CEL.gz$", "", sampleNames)
sampleNames(rawData) <- sampleNames
pData(rawData)$group <- ifelse(grepl("^OSA", sampleNames(rawData)), "OSA", "Control")
normData <- rma(rawData)
expr <- exprs(normData)
mean(expr["8149273", 1:8])
# Q6
library(limma)
design <- model.matrix(~ normData$group)
fit <- lmFit(normData, design)
fit <- eBayes(fit)
abs(topTable(fit, n = 1)$logFC)
# Q7
topTable(fit, p.value = 0.05)
# Q8 (wrong answer)
library(minfiData)
data(RGsetEx)
p <- preprocessFunnorm(RGsetEx)
b <- getBeta(p)
b_os <- b[getIslandStatus(p) == "OpenSea", ]
mean(b_os[, c(1, 2, 5)]) - mean(b_os[, c(3, 4, 6)])
# Q9
library(AnnotationHub)
ahub <- AnnotationHub()
qhs <- subset(ahub, species=="Homo sapiens")
genes <- query(qhs, c("Caco2", "Awg", "DNase"))
genes <- genes[["AH22442"]]
sum(countOverlaps(genes, p))
#67365(x)/90561(x)/76722/29265/40151
source("https://bioconductor.org/biocLite.R")
biocLite("IlluminaHumanMethylation450kanno.ilmn12.hg19")
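# Note: biocLite() is retired; on Bioconductor >= 3.8 the equivalent install is:
#   install.packages("BiocManager")
#   BiocManager::install("IlluminaHumanMethylation450kanno.ilmn12.hg19")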
library(IlluminaHumanMethylation450kanno.ilmn12.hg19)
CpG<-IlluminaHumanMethylation450kanno.ilmn12.hg19
class(CpG)

blob_id: b4ac3c378cd52a1ca09392ef5ba40a17de8a6aed | directory_id: 818dd3954e873a4dcb8251d8f5f896591942ead7 | path: /Mouse/Muga/ATB_paper/permute_gn_QTL2.R | content_id: 532d32c5a2ad703afdfb4dfab1883a702d303d80
detected_licenses: [] | license_type: no_license | repo_name: DannyArends/HU-Berlin | snapshot_id: 92cefa16dcaa1fe16e58620b92e41805ebef11b5 | revision_id: 16394f34583e3ef13a460d339c9543cd0e7223b1 | branch_name: refs/heads/master
visit_date: 2023-04-28T07:19:38.039132 | revision_date: 2023-04-27T15:29:29 | committer_date: 2023-04-27T15:29:29 | github_id: 20,514,898 | star_events_count: 3 | fork_events_count: 1 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 4,850 | extension: r | filename: permute_gn_QTL2.R

# Analysis of the PAT and MAT TRD regions in BFMI mice using BxD QTL data
#
# copyright (c) 2014-2020 - Brockmann group - HU Berlin, Danny Arends
# last modified Jan, 2017
# first written ??
setwd("D:/Edrive/Mouse/DNA/annotation/")
chrinfo <- read.table("chrLengths.txt",sep="\t", header=F, colClasses=c("character", "numeric"))
chrinfo <- chrinfo[-21,] # Ignore the Y chromosome
setwd("D:/Edrive/Mouse/DNA/MegaMuga/")
patRegions <- read.table("Analysis/ATB_PAT.txt",sep="\t",header=TRUE, check.names=FALSE, colClasses="character")
matRegions <- read.table("Analysis/ATB_MAT.txt",sep="\t",header=TRUE, check.names=FALSE, colClasses="character")
allRegions <- rbind(patRegions, matRegions)
setwd("D:/Edrive/Mouse/DNA/MegaMuga/GeneNetwork")
searchres <- read.csv("BXDpheno.txt", sep = "\t", header=FALSE, colClasses="character")
chr <- unlist(lapply(strsplit(searchres[,7], ": "),"[", 1))
loc <- as.numeric(as.character(unlist(lapply(strsplit(searchres[,7], ": "),"[", 2)))) * 1000000
chr <- gsub("Chr", "", chr)
searchres <- cbind(searchres, chr=chr, loc=loc)
searchres <- searchres[-which(searchres[,"chr"] == "N/A"),]
searchres[1:5,]
significant.lrs <- which(as.numeric(searchres[, 6]) > 16) # LRS scores not LOD :(
searchres <- searchres[significant.lrs,]
testQTLs <- function(allRegions, keywords = c("Obesity", "Weight")){
nQTL <- 0
nKeyw <- 0
for(x in 1:nrow(allRegions)){
idx <- which(searchres[, "chr"] == allRegions[x,"Chr"] & as.numeric(searchres[, "loc"]) > (as.numeric(allRegions[x,"Start"])- 500000) & searchres[, "loc"] < (as.numeric(allRegions[x,"Stop"])+ 500000))
hasKW <- FALSE
hasQTL <- FALSE
if(length(idx) > 0){
hasQTL <- TRUE
for(word in keywords){
grepidx <- which(grepl(word, searchres[idx,2], ignore.case=TRUE))
if(length(grepidx) > 0) {
cat(x, " - ", word, " - ", idx[grepidx],"\n")
for(ii in idx[grepidx]){
cat(unlist(allRegions[x,]), unlist(searchres[ii,]),"\n",file="regionsQTLMat.txt",append=TRUE,sep="\t")
}
hasKW <- TRUE
}
}
}
if(hasQTL) nQTL <- nQTL + 1
if(hasKW) nKeyw <- nKeyw + 1
}
cat("Regions with a QTL:", nQTL / nrow(allRegions) * 100, "\n")
cat("Keywords per QTL:", nKeyw / nQTL * 100, "\n")
cat("Keywords per region:", nKeyw / nrow(allRegions) * 100, "\n")
return(list(nQTL, nKeyw))
}
randomRegions <- function(allRegions, chrinfo){
newRegions <- allRegions
for(x in 1:nrow(allRegions)){
newChr <- sample(chrinfo[,1])[1]
chrLength <- as.numeric(chrinfo[chrinfo[,1] == newChr,2])
rL <- as.numeric(as.character(allRegions[x,"Stop"])) - as.numeric(as.character(allRegions[x,"Start"]))
newStart <- round(runif(1, 0, chrLength - rL))
newStop <- newStart + rL
newRegions[x, "Chr"] <- newChr
newRegions[x, "Start"] <- newStart
newRegions[x, "Stop"] <- newStop
}
return(newRegions)
}
realpat <- testQTLs(patRegions)
realmat <- testQTLs(matRegions)
realCombi <- testQTLs(allRegions)
nperms <- 10000
pPat <- vector("list", nperms)
pMat <- vector("list", nperms)
pCombi <- vector("list", nperms)
x <- 1
for(x in 1:nperms){
newRegions <- randomRegions(patRegions,chrinfo)
pPat[[x]] <- testQTLs(newRegions)
newRegions <- randomRegions(matRegions,chrinfo)
pMat[[x]] <- testQTLs(newRegions)
newRegions <- randomRegions(allRegions,chrinfo)
pCombi[[x]] <- testQTLs(newRegions)
}
# Mean and sd for n QTLs in 85 regions
cat(mean(unlist(lapply(pPat,"[",1))/nrow(patRegions)), sd(unlist(lapply(pPat,"[",1))/nrow(patRegions)), "\n")
cat(mean(unlist(lapply(pMat,"[",1))/nrow(matRegions)), sd(unlist(lapply(pMat,"[",1))/nrow(matRegions)), "\n")
cat(mean(unlist(lapply(pCombi,"[",1))/nrow(allRegions)), sd(unlist(lapply(pCombi,"[",1))/nrow(allRegions)), "\n")
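# One-sided empirical p-values with the +1 correction:
# p = (number of permutations whose statistic exceeds the observed value + 1) / nperms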
pvalQTLsPAT <- (length(which(realpat[[1]][1] < sort(unlist(lapply(pPat,"[",1))))) + 1) / nperms
pvalQTLsMAT <- (length(which(realmat[[1]][1] < sort(unlist(lapply(pMat,"[",1))))) + 1) / nperms
pvalQTLsCombi <- (length(which(realCombi[[1]][1] < sort(unlist(lapply(pCombi,"[",1))))) + 1) / nperms
cat(pvalQTLsPAT, pvalQTLsMAT, pvalQTLsCombi, "\n")
# Mean and sd for n Keywords in 85 regions
cat(mean(unlist(lapply(pPat,"[",2))/nrow(patRegions)), sd(unlist(lapply(pPat,"[",2))/nrow(patRegions)), "\n")
cat(mean(unlist(lapply(pMat,"[",2))/nrow(matRegions)), sd(unlist(lapply(pMat,"[",2))/nrow(matRegions)), "\n")
cat(mean(unlist(lapply(pCombi,"[",2))/nrow(allRegions)), sd(unlist(lapply(pCombi,"[",2))/nrow(allRegions)), "\n")
pvalKWsPAT <- (length(which(realpat[[2]][1] < sort(unlist(lapply(pPat,"[",2))))) + 1) / nperms
pvalKWsMAT <- (length(which(realmat[[2]][1] < sort(unlist(lapply(pMat,"[",2))))) + 1) / nperms
pvalKWsCombi <- (length(which(realCombi[[2]][1] < sort(unlist(lapply(pCombi,"[",2))))) + 1) / nperms
cat(pvalKWsPAT, pvalKWsMAT, pvalKWsCombi, "\n")

blob_id: 03fa59b0a3f97ef69999f994fc03a0bff40f82c3 | directory_id: 2b9876217c38d0ae95258ca6d6ce77b6410efbbb | path: /Rscripts/collect_data/0020_gumtree_scraping_pokoje.R | content_id: bf707c79749f83fb6258efafa681def3ba722523
detected_licenses: [] | license_type: no_license | repo_name: mi2-warsaw/CzasDojazdu | snapshot_id: ab7978e7942eaff0d3543687d06febf40165a6f4 | revision_id: 3e341c6c03835665d3f872500707767debccb276 | branch_name: refs/heads/master
visit_date: 2020-04-11T10:41:59.579575 | revision_date: 2016-12-28T12:41:07 | committer_date: 2016-12-28T12:41:07 | github_id: 51,079,859 | star_events_count: 4 | fork_events_count: 5 | gha_license_id: null | gha_event_created_at: 2016-12-04T23:46:52 | gha_created_at: 2016-02-04T14:08:43 | gha_language: JavaScript
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 9,385 | extension: r | filename: 0020_gumtree_scraping_pokoje.R

source('Rscripts/inne/ulice/adres_z_opisu.R')
slownik <-
fread(
'dicts/warszawskie_ulice.txt', encoding = "UTF-8", data.table = FALSE
) %>%
unlist() %>%
unname()
aktualne_oferty <-
function(link) {
linki <-
read_html(link) %>%
html_nodes('.href-link') %>%
html_attr('href') %>%
paste0('http://www.gumtree.pl', .)
# drop the first three listings; they are sponsored and change
# with every page refresh
linki <- linki[-(1:3)]
return(linki)
}
scrapuj <-
function (x, slownik, miasto = "Warszawa") {
tryCatch({
read_html(x, encoding = "UTF-8") -> web
# price
web %>%
html_nodes('.clearfix .amount') %>%
html_text() %>%
str_replace_all("[^0-9]","") -> cena
if(length(cena) == 0) cena <- ""
# # phone (disabled)
# web %>%
# html_nodes('.telephone') %>%
# html_text() %>%
# stri_extract_all_words() %>%
# unlist() -> telefon
# if(length(telefon)==0) telefon<-""
# description
web %>%
html_nodes('.vip-details .description') %>%
html_text() %>%
stri_extract_all_words() %>%
unlist() %>%
paste(collapse = " ") -> opis
if(length(opis) == 0) opis <- ""
# strip apostrophes (they would break the SQL INSERT built below)
opis <- gsub("'",'',opis)
tryCatch(
{repair_encoding(opis) -> opis},
error = function(e) {return(opis)}
)
# address
web %>%
html_nodes('.address') %>%
html_text() -> adres
if(length(adres) == 0) {
ulice(opis) -> poprawny_adres
num_adres <- grepl("1|2|3|4|5|6|7|8|9|0", poprawny_adres)
if (sum(num_adres) > 0) {
poprawny_adres[num_adres] -> poprawny_adres
}
# turn the inflected street form into the canonical street name
if (length(poprawny_adres) > 0) {
if (length(strsplit(poprawny_adres, " ")[[1]]) > 1) {
numer_bloku <- tail(strsplit(poprawny_adres, " ")[[1]],1)
grep(
"1|2|3|4|5|6|7|8|9|0", numer_bloku, value = TRUE
) -> numer_bloku
} else {
numer_bloku <- ""
}
poprawny_adres %>%
stringdist(slownik) -> odleglosci
which.min(odleglosci) -> index_adresu
paste0(slownik[index_adresu], " ", numer_bloku) -> adres
} else {
adres <- ""
}
}
# photo links
web %>%
html_nodes('.main img') %>%
html_attr('src') -> link_do_zdj
if(length(link_do_zdj) == 0) link_do_zdj <- ""
# coordinates (geocode the address)
geocode(paste(adres, miasto)) -> wspolrzedne
# listing attributes
c(".name", ".attribute .value") %>%
lapply(
function(css) {
css %>%
html_nodes(web, .) %>%
html_text()
}
) %>%
setNames(c("keys", "values")) %>%
as_data_frame() %>%
spread(keys, values) %>%
setNames(
names(.) %>%
tolower() %>%
gsub("[[:punct:]]", "", .) %>%
gsub("[[:space:]]", "_", .)
) %>%
lapply(
function(col) {
col %>%
trimws() %>%
gsub(",[[:space:]]*", ", ", .)
}
) %>%
as_data_frame() %>%
mutate(
data_dodania = as.Date(data_dodania, format = "%d/%m/%Y") %>%
as.character()
) -> atrybuty
if ("dostฤpny" %in% names(atrybuty)) {
atrybuty <-
atrybuty %>%
mutate(
dostฤpny = as.Date(dostฤpny, format = "%d/%m/%Y") %>%
as.character()
)
}
content <-
paste(
sep = "<br/>",
paste0('<b><a href="', x, '">', adres, "</a></b>"),
paste0("Cena: ", cena),
paste0("Wielkoลฤ: ", atrybuty$wielkoลฤ_m2)
)
lista <-
list(
link = x,
cena = cena,
# telefon = telefon,
opis = opis,
adres = adres,
link_do_zdj = link_do_zdj,
lon = wspolrzedne$lon,
lat = wspolrzedne$lat,
data_dodania = atrybuty$data_dodania,
dostepny = atrybuty$dostępny,
do_wynajecia_przez = atrybuty$do_wynajęcia_przez,
liczba_pokoi = atrybuty$liczba_pokoi,
dzielnica = atrybuty$lokalizacja,
palacy = atrybuty$palący,
preferowana_plec = atrybuty$preferowana_płeć,
przyjazne_zwierzakom = atrybuty$przyjazne_zwierzakom,
rodzaj_nieruchomosci = atrybuty$rodzaj_nieruchomości,
wielkosc = atrybuty$wielkość_m2,
wspoldzielenie = atrybuty$współdzielenie,
content = content
) %>%
lapply(
function(x) {
ifelse(is.null(x), "", x)
}
)
return(lista)
}, error = function(e) {
return(
list(
link = x,
cena = NA,
# telefon = NA,
opis = NA,
adres = NA,
link_do_zdj = NA,
lon = NA,
lat = NA,
data_dodania = NA,
dostepny = NA,
do_wynajecia_przez = NA,
liczba_pokoi = NA,
dzielnica = NA,
palacy = NA,
preferowana_plec = NA,
przyjazne_zwierzakom = NA,
rodzaj_nieruchomosci = NA,
wielkosc = NA,
wspoldzielenie = NA,
content = NA
)
)
})
}
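# Mini-demo of the dictionary matching used in scrapuj() above: pick the street
# whose name is closest in edit distance to the form found in the listing.
# The street names below are hypothetical examples.
# library(stringdist)
# slownik_demo <- c("Marszałkowska", "Puławska", "Nowowiejska")
# slownik_demo[which.min(stringdist("Marszalkowskiej", slownik_demo))]   # -> "Marszałkowska"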
tworz_gumtree_pokoje <-
function(polaczenie) {
gumtree_warszawa_pokoje <-
data.frame(
link = "",
cena = "",
# telefon = "",
opis = "",
adres = "",
link_do_zdj = "",
lon = "",
lat = "",
data_dodania = "",
dostepny = "",
do_wynajecia_przez = "",
liczba_pokoi = "",
dzielnica = "",
palacy = "",
preferowana_plec = "",
przyjazne_zwierzakom = "",
rodzaj_nieruchomosci = "",
wielkosc = "",
wspoldzielenie = "",
content = "",
stringsAsFactors = FALSE
)
# polaczenie <- dbConnect(dbDriver("SQLite"), "dane/czas_dojazdu.db")
dbWriteTable(
polaczenie, "gumtree_warszawa_pokoje_02", gumtree_warszawa_pokoje,
overwrite = TRUE, row.names = FALSE
)
}
# if (!file.exists("dane/czas_dojazdu.db")){
# tworz_gumtree_pokoje()
# }
polaczenie <- dbConnect(dbDriver("SQLite"), "dane/czas_dojazdu.db")
if (!("gumtree_warszawa_pokoje_02" %in% dbListTables(polaczenie))) {
tworz_gumtree_pokoje(polaczenie)
}
# Start-up variables ------------------------------------------------------
liczba_stron <- 5
# Scraping ----------------------------------------------------------------
linki <- paste0(
'http://www.gumtree.pl/s-pokoje-do-wynajecia/warszawa/v1c9000l3200008p',
1:liczba_stron
)
adresy <- linki %>% pbsapply(aktualne_oferty) %>% c()
adresydb <-
dbGetQuery(polaczenie, "SELECT link FROM gumtree_warszawa_pokoje_02") %>%
.$link
adresy <- adresy[!(adresy %in% adresydb)]
if (length(adresy) > 0) {
dane <- adresy %>% pblapply(scrapuj, slownik = slownik)
search_keys <-
c("szukam", "poszukuj[eฤ]") %>%
paste0(collapse = "|")
person_keys <-
c(
"osoby", "dziewczyny", "kobiety", "wsp[oรณ][lล]lokator[ka]", "faceta",
"ch[lล]opaka", "m[eฤ][zลผ]czyzny"
) %>%
paste0(collapse = "|")
# flag listings posted by room-seekers ("szukam pokoju" etc.) rather than offers
fake <- c()
for (i in seq_along(dane)) {
opis <- dane[[i]]$opis
pattern <- paste0("(", search_keys, ")((?!", person_keys, ").)*pok[oó]j")
if (length(grep(pattern, opis, ignore.case = TRUE, perl = TRUE)) > 0) {
fake[length(fake) + 1] <- i
}
}
dane[fake] <- NULL
# for( i in 1:length(adresy)){
# scrapuj(x = adresy[i], slownik = slownik)
# }
# Upload data to the DB ----------------------------------------------------
zap <- c()
for (i in seq_along(dane)) {
zap[i] <-
paste(
"('",
dane[[i]]$link, "','",
dane[[i]]$cena, "','",
# dane[[i]]$telefon, "','",
dane[[i]]$opis, "','",
dane[[i]]$adres, "','",
dane[[i]]$link_do_zdj, "','",
dane[[i]]$lon, "','",
dane[[i]]$lat, "','",
dane[[i]]$data_dodania, "','",
dane[[i]]$dostepny, "','",
dane[[i]]$do_wynajecia_przez, "','",
dane[[i]]$liczba_pokoi, "','",
dane[[i]]$dzielnica, "','",
dane[[i]]$palacy, "','",
dane[[i]]$preferowana_plec, "','",
dane[[i]]$przyjazne_zwierzakom, "','",
dane[[i]]$rodzaj_nieruchomosci, "','",
dane[[i]]$wielkosc, "','",
dane[[i]]$wspoldzielenie, "','",
dane[[i]]$content,
"')",
collapse = "",
sep = ""
)
}
insert <-
paste0(
"INSERT INTO gumtree_warszawa_pokoje_02 (",
dane[[1]] %>% names() %>% paste0(collapse = ","),
") VALUES ",
paste(zap, collapse = ",")
)
dbGetQuery(polaczenie, insert)
# quick look at what the candidate addresses are
# dbGetQuery(polaczenie, "SELECT * FROM gumtree_warszawa_pokoje_02") -> adresy_w_bazce
# repair_encoding(adresy_w_bazce$adres, from = "UTF-8")
}
dbDisconnect(polaczenie)

blob_id: c5d940e053402de8e93e86219f7d369eeae26dc3 | directory_id: 29585dff702209dd446c0ab52ceea046c58e384e | path: /CITAN/R/scopus.readcsv.R | content_id: 804e4d24dc47ed06ef4bd579977994a1f8ab4912
detected_licenses: [] | license_type: no_license | repo_name: ingted/R-Examples | snapshot_id: 825440ce468ce608c4d73e2af4c0a0213b81c0fe | revision_id: d0917dbaf698cb8bc0789db0c3ab07453016eab9 | branch_name: refs/heads/master
visit_date: 2020-04-14T12:29:22.336088 | revision_date: 2016-07-21T14:01:14 | committer_date: 2016-07-21T14:01:14 | github_id: null | star_events_count: 0 | fork_events_count: 0 | gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: R | is_vendor: false | is_generated: false | length_bytes: 8,893 | extension: r | filename: scopus.readcsv.R

## This file is part of the CITAN package for R
##
## Copyright 2011-2015 Marek Gagolewski
##
##
## CITAN is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## CITAN is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with CITAN. If not, see <http://www.gnu.org/licenses/>.
#' Reads bibliography entries from a UTF-8 encoded CSV file.
#'
#'
#'
#' The \code{\link{read.csv}} function is used to read the bibliography.
#' You may therefore freely modify its behavior
#' by passing further arguments (\code{...}), see the manual page
#' of \code{\link{read.table}} for details.
#'
#' The CSV file should contain at least the following columns.
#' \enumerate{
#' \item \code{Authors}: Author name(s) (surname first; multiple names are comma-separated,
#' e.g. \dQuote{Smith John, Nowak G. W.}),
#' \item \code{Title}: Document title,
#' \item \code{Year}: Year of publication,
#' \item \code{Source.title}: Source title, e.g. journal name,
#' \item \code{Volume}: Volume number,
#' \item \code{Issue}: Issue number,
#' \item \code{Page.start}: Start page number,
#' \item \code{Page.end}: End page number,
#' \item \code{Cited.by}: Number of citations received,
#' \item \code{Link}: String containing unique document identifier, by default of the form ...id=\emph{\strong{UNIQUE_ID}}&... (see \code{alternativeIdPattern} parameter),
#' \item \code{Document.Type}: Document type, one of: \dQuote{Article}, \dQuote{Article in Press},
#' \dQuote{Book}, \dQuote{Conference Paper}, \dQuote{Editorial},
#' \dQuote{Erratum}, \dQuote{Letter}, \dQuote{Note}, \dQuote{Report},
#' \dQuote{Review}, \dQuote{Short Survey}, or \code{NA}
#' (other categories are treated as \code{NA}s),
#' \item \code{Source}: Data source identifier, must be the same as the
#' \code{dbIdentifier} parameter value. It is used for parse errors detection.
#' }
#'
#' The CSV file to be read may, for example, be created by \emph{SciVerse Scopus}
#' (Export format=\emph{comma separated file, .csv (e.g. Excel)},
#' Output=\emph{Complete format} or \emph{Citations only}).
#' Note that the exported CSV file sometimes needs to be corrected by hand
#' (wrong page numbers, single double quotes in character strings instead of two-double quotes etc.).
#' We suggest to make the corrections in a \dQuote{Notepad}-like application
#' (in plain text). The function tries to indicate line numbers causing
#' potential problems.
#'
#' @title Import bibliography entries from a CSV file.
#' @param filename the name of the file which the data are to be read from, see \code{\link{read.csv}}.
#' @param stopOnErrors logical; \code{TRUE} to stop on all potential parse errors or just warn otherwise.
#' @param dbIdentifier character or \code{NA}; database identifier, helps detect parse errors, see above.
#' @param alternativeIdPattern character; regular expression used to extract AlternativeId, \code{NA} to get the id as is,
#' @param ... further arguments to be passed to \code{read.csv}.
#' @return A \code{data.frame} containing the following 11 columns:
#' \tabular{ll}{
#' \code{Authors} \tab Author name(s), comma-separated, surnames first.\cr
#' \code{Title} \tab Document title.\cr
#' \code{Year} \tab Year of publication.\cr
#' \code{AlternativeId} \tab Unique document identifier.\cr
#' \code{SourceTitle} \tab Title of the source containing the document.\cr
#' \code{Volume} \tab Volume.\cr
#' \code{Issue} \tab Issue.\cr
#' \code{PageStart} \tab Start page; numeric.\cr
#' \code{PageEnd} \tab End page; numeric.\cr
#' \code{Citations} \tab Number of citations; numeric.\cr
#' \code{DocumentType} \tab Type of the document; see above.\cr
#' }
#' The object returned may be imported into a local bibliometric storage via \code{\link{lbsImportDocuments}}.
#' @export
#' @examples
#' \dontrun{
#' conn <- lbsConnect("Bibliometrics.db");
#' ## ...
#' data <- Scopus_ReadCSV("db_Polish_MATH/Poland_MATH_1987-1993.csv");
#' lbsImportDocuments(conn, data, "Poland_MATH");
#' ## ...
#' lbsDisconnect(conn);}
#' @seealso \code{\link{Scopus_ASJC}}, \code{\link{Scopus_SourceList}},
#' \code{\link{lbsConnect}},
#' \code{\link{Scopus_ImportSources}},\cr
#' \code{\link{read.table}}, \code{\link{lbsImportDocuments}}
Scopus_ReadCSV <- function(filename, stopOnErrors=TRUE, dbIdentifier='Scopus', alternativeIdPattern="^.*\\id=|\\&.*$", ...)
{
datafile <- read.csv(filename, header = T, encoding="UTF-8", fileEncoding="UTF-8", stringsAsFactors=FALSE, ...);
if (!is.na(dbIdentifier) && is.null(datafile$Source)) stop("Column not found: `Source'.");
if (is.null(datafile$Authors)) stop("Column not found: `Authors'.");
if (is.null(datafile$Title)) stop("Column not found: `Title'.");
if (is.null(datafile$Year)) stop("Column not found: `Year'.");
if (is.null(datafile$Source.title)) stop("Column not found: `Source.title'.");
if (is.null(datafile$Volume)) stop("Column not found: `Volume'.");
if (is.null(datafile$Issue)) stop("Column not found: `Issue'.");
if (is.null(datafile$Page.start)) stop("Column not found: `Page.start'.");
if (is.null(datafile$Page.end)) stop("Column not found: `Page.end'.");
if (is.null(datafile$Cited.by)) stop("Column not found: `Cited.by'.");
if (is.null(datafile$Link)) stop("Column not found: `Link'.");
if (is.null(datafile$Document.Type)) stop("Column not found: `Document.Type'.");
if (!is.na(dbIdentifier) && any(datafile$Source != dbIdentifier))
{
msg <- (sprintf("source database does not match 'dbIdentifier'. This may possibly indicate a parse error. Check records: %s.",
paste(which(datafile$Source != dbIdentifier), collapse=", ")));
if (stopOnErrors) stop(msg) else warning(msg);
}
if (!is.na(alternativeIdPattern))
{
datafile$AlternativeId <- gsub(alternativeIdPattern, "", datafile$Link); # REG EXP
} else {
datafile$AlternativeId <- datafile$Link; # AS IS
}
naAlternativeId <- which(is.na(datafile$AlternativeId));
if (length(naAlternativeId) > 0)
{
msg <- (sprintf("some documents do not have unique identifiers. Check line %s (or its neighborhood). \
Perhaps somethings is wrong with the end page (check for ', ' nearby).",
naAlternativeId[1]+1));
if (stopOnErrors) stop(msg) else warning(msg);
}
checkAlternativeId <- unique(datafile$AlternativeId, incomparables=NA);
if (length(checkAlternativeId) != nrow(datafile))
{
msg <- (sprintf("non-unique document identifiers at rows: %s.",
paste((1:nrow(datafile))[-checkAlternativeId], collapse=", ")));
if (stopOnErrors) stop(msg) else warning(msg);
}
datafile$Cited.by[!is.na(gsub("^([[:digit:]]+)$", NA, datafile$Cited.by))] <- NA;
datafile$Cited.by <- as.numeric(datafile$Cited.by);
	checkCitations <- which(datafile$Cited.by < 0 | datafile$Cited.by>100000);
	if (length(checkCitations) > 0)
	{
		msg <- (sprintf("something is wrong with citation counts at rows: %s.",
			paste(checkCitations, collapse=", ")));
		if (stopOnErrors) stop(msg) else warning(msg);
	}
datafile$Page.start[!is.na(gsub("^([[:digit:]]+)$", NA, datafile$Page.start))] <- NA;
datafile$Page.end [!is.na(gsub("^([[:digit:]]+)$", NA, datafile$Page.end ))] <- NA;
datafile$Page.start <- as.numeric(datafile$Page.start);
datafile$Page.end <- as.numeric(datafile$Page.end);
checkPages <- which((datafile$Page.start<0) | (datafile$Page.end<datafile$Page.start) | (datafile$Page.end-datafile$Page.start>10000));
if (length(checkPages) > 0)
{
msg <- (sprintf("some documents seem to have incorrect page numbers. Check line %s (or its neighborhood).",
checkPages[1]+1));
if (stopOnErrors) stop(msg) else warning(msg);
}
datafile <- data.frame(Authors=as.character(datafile$Authors),
Title=as.character(datafile$Title),
Year=datafile$Year,
AlternativeId=datafile$AlternativeId,
SourceTitle=as.character(datafile$Source.title),
Volume=datafile$Volume,
Issue=datafile$Issue,
PageStart=datafile$Page.start,
PageEnd=datafile$Page.end,
Citations=datafile$Cited.by,
DocumentType=datafile$Document.Type);
attr(datafile, "filename") <- filename;
return(datafile);
}
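## A minimal sketch of the default 'alternativeIdPattern' at work; the URL
## below is a made-up illustration, not a real Scopus link:
# link <- "http://www.scopus.com/record.url?id=2-s2.0-12345&origin=resultslist"
# gsub("^.*\\id=|\\&.*$", "", link)  # -> "2-s2.0-12345"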
# ---- file: /report/downturn_adarsh.r (repo: koustubh-dwivedy/ISB_Coursera, no license) ----
library(data.table)
library(quantmod)
library(PerformanceAnalytics)
library(lubridate)
library(zoo)
start.time <- proc.time()
daily=fread(file.choose(), sep=",", nrows=-1L, header=T, na.strings="NA",stringsAsFactors=FALSE,data.table = T) #Drawdowns analysis folder
end.time <- proc.time()
cat("Elapsed; ", end.time[3]-start.time[3], "seconds.\n")
colnames(daily)=c("PERMNO","DATE","PRC")
uq=unique(daily$PERMNO)
start.time <- proc.time()
t <- 1 # row offset of the current PERMNO block within 'daily'
# The full run loops over every PERMNO; a single case (z = 1) is run here.
# Assumes 'daily' is sorted by PERMNO and then by DATE.
#for(z in 1:10000){
z=1
g=which(daily$PERMNO==uq[z]) # exact match; grep() would also hit partial matches (e.g. 10123 for 1012)
if(length(g)==1){
t=t+1
# next  (skips PERMNOs with a single quote when the loop above is active)
}
SP=xts(daily$PRC[t:(t+length(g)-1)],order.by=as.Date(daily$DATE[t:(t+length(g)-1)],format="%d/%m/%Y"))
DJ.roc <- ROC(SP,n=1,type="discrete")
SP500.RET=monthlyReturn(SP)
dailyDD <- findDrawdowns(SP500.RET) # finds every drawdown in the return series: depth plus start/trough/recovery points and lengths
Drawdowns <- table.Drawdowns(DJ.roc[,1],top=500) # tabulates the deepest drawdowns (up to 500) with From/Trough/To dates, depth and lengths
for(i in 1:nrow(Drawdowns)){
if(is.na(Drawdowns[i,1])){ # no recorded start: use the block's first return date
Drawdowns[i,1]=as.Date(daily$DATE[t+1],format="%d/%m/%Y")
}
if(is.na(Drawdowns[i,3])){ # drawdown not yet recovered: close it at the block's last date
Drawdowns[i,3]=as.Date(daily$DATE[t+length(g)-1],format="%d/%m/%Y")
}
}
df=daily$DATE[t:(t+length(g)-1)]
DD=Drawdowns[order(as.Date(Drawdowns[,1], format="%d/%m/%Y")),]
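## A minimal sketch (assuming 'daily' stays sorted by PERMNO then DATE) of the
## full per-PERMNO loop that the commented-out 'for' above was building towards;
## 'uq[1:10]' is just an illustrative subset:
compute_dd <- function(g) {
  SP <- xts(daily$PRC[g], order.by = as.Date(daily$DATE[g], format = "%d/%m/%Y"))
  table.Drawdowns(ROC(SP, n = 1, type = "discrete")[, 1], top = 500)
}
all_dd <- lapply(uq[1:10], function(p) {
  g <- which(daily$PERMNO == p)
  if (length(g) <= 1) return(NULL)  # skip PERMNOs with a single observation
  compute_dd(g)
})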
# ---- file: /Subset&Transform_ITRDBData.R (repo: RStetler/TreeRingAnalysisR, no license) ----
rm(list = ls())
setwd("N://R_Files/All_ITRDB")
library(httr) ### seems like a lot of packages, but all are used somewhere...
library(dplR)
library(TRADER)
library(tidyr)
library(dplyr)
library(tibble)
library(reshape2)
dat <- read.csv("N://R_Files/All_ITRDB/ITRDB_Data_Canada.csv", header = TRUE)
dat <- transform(dat, siteID = as.character(siteID),
Sitename = as.character(Sitename),
Sitelocation = as.character(Sitelocation),
Speciesinformation = as.character(Speciesinformation),
state = as.character(state))
dat <- transform(dat, Speciesinformation = gsub("([A-Z]{4}).*", "\\1", Speciesinformation))
dat <- transform(dat, Speciesinformation = gsub("[[:blank:]]", "", Speciesinformation))
dat <- transform(dat, siteID = gsub("[[:blank:]]", "", siteID))
dat <- transform(dat, state = gsub("[[:blank:]]", "", state))
write.csv(dat, file = "N://R_Files/All_ITRDB/ITRDB_Data_Canada_Clean.csv", row.names = FALSE)
trader_files <- paste0("N://R_Files/All_ITRDB/ITRDB_Data_W_TRADER_",
                       c(50, 100, 150, 200, 280, 350, 413), ".csv")
com2 <- do.call(rbind, lapply(trader_files, read.csv, header = TRUE))
write.csv(com2, file = "N://R_Files/All_ITRDB/ITRDB_Data_W_TRADER_ALL.csv", row.names = FALSE)
# ---- file: /nesi/labour_situation/4_descriptive_statistics/1_process_descriptive_statistics.R (repo: jnaudon/datachile-etl, no license) ----
labour_description <- rbind(labour_description_2010,labour_description_2011,labour_description_2012,
labour_description_2013,labour_description_2014,labour_description_2015)
rm(labour_description_2010,labour_description_2011,labour_description_2012,
labour_description_2013,labour_description_2014,labour_description_2015)
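## Equivalent sketch using mget() so each year (2010-2015, as above) is listed
## only once:
# yrs <- 2010:2015
# labour_description <- do.call(rbind, mget(paste0("labour_description_", yrs)))
# rm(list = paste0("labour_description_", yrs))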
source("labour_situation/4_descriptive_statistics/2_statistics_at_comuna_level.R")
source("labour_situation/4_descriptive_statistics/3_statistics_at_provincia_level.R")
source("labour_situation/4_descriptive_statistics/4_statistics_at_region_level.R")
source("labour_situation/4_descriptive_statistics/5_statistics_at_country_level.R")
# ---- file: /inst/shotGroups_AngularSize_bs4Dash_05/app_ui_sidebar.R (repo: cran/shotGroups, no license) ----
bs4DashSidebar(
skin="light",
status="primary",
brandColor="primary",
# elevation=1,
bs4SidebarMenu(
id="sidebar",
bs4SidebarMenuItem(
"Background math",
tabName="tab_math",
icon="square-root-alt"
),
bs4SidebarMenuItem(
tagList("Absolute", icon("arrow-right", lib="font-awesome"), "angular"),
tabName="tab_angular",
icon="circle-thin"
),
bs4SidebarMenuItem(
tagList("Angular", icon("arrow-right", lib="font-awesome"), "absolute"),
tabName="tab_absolute",
icon="circle-thin"
),
bs4SidebarMenuItem(
tagList("Abs+ang", icon("arrow-right", lib="font-awesome"), "distance"),
tabName="tab_distance",
icon="circle-thin"
),
bs4SidebarMenuItem(
"About",
tabName="tab_about",
icon="lightbulb"
)
)
)
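## Sketch of how these tabName values pair with tab items in the app body,
## which lives elsewhere in the package (hypothetical minimal body):
# bs4DashBody(
#   bs4TabItems(
#     bs4TabItem(tabName = "tab_math", "background math content"),
#     bs4TabItem(tabName = "tab_about", "about content")
#   )
# )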
# ---- file: /R/undo.label.switching.old.R (repo: bhaskarrao511/BayesLCAdevel, no license) ----
#=========================================================
# Implementation of the label switching algorithm
# of Nobile and Fearnside (2007) Statistics & Computing.
#
# Author of this implementation:
# Jason Wyse,
# Discipline of Statistics,
# School of Computer Science and Statistics,
# Trinity College Dublin,
# Dublin 2, Ireland.
#
# Last update:
# Sat 16 May 2015 03:16:50 PM IST
#=========================================================
undo.label.switching.old <- function( Z, ngroups = NULL )
#undo label switching using Nobile and Fearnside approach
{
# Z is a matrix of labels with row i indexing classifications
# to groups from 1 : ngroups[i] or 1:ngroups if ngroups is an integer
if( is.null(ngroups) ) stop("\t argument ngroups must be specified as either a fixed number of groups or a vector of the number of groups corresponding to each row of Z")
if( length(ngroups) == 1 ) ngroups <- rep(ngroups, nrow(Z))
#different no.'s groups
Zrelab <- matrix( nrow = nrow(Z), ncol = ncol(Z) )
Perm <- matrix( nrow = nrow(Z), ncol = max(ngroups) )
nobs <- ncol(Z)
G <- unique(ngroups)
ret <- list()
ret$groups <- ngroups
ret$ncomponents <- numeric(length(G))
ret$item.tags <- list()
j <- 1
for(k in G){
idx.k <- which(ngroups == k)
labels.k <- Z[idx.k,]
nsamp.k <- length(idx.k)
ngrp.k <- k
permutation <- numeric(nsamp.k*k)
if( (k!=1) && (length(idx.k) > 1))
{
nonempty.k <- apply( labels.k, 1, function(x) length(unique(x)) )
t.k <- sort( nonempty.k, index.return=TRUE )
labels.arranged.k <- labels.k[ t.k$ix, ]
item.tags.k <- idx.k[ t.k$ix ] #this is the actual index of each row in the original matrix Z
labels.out.k <- numeric(nsamp.k*nobs)
w <- .C( "BLCA_RELABEL", as.integer(nobs),
as.integer(nsamp.k), as.integer(ngrp.k),
as.integer(labels.arranged.k), x=as.integer(labels.out.k),
xx = as.integer(permutation), PACKAGE = "BayesLCA" )
ret$ncomponents[j] = k
ret$memberships[[j]] = matrix(w$x,nrow = nsamp.k,ncol=nobs,byrow=FALSE)
ret$permutation[[j]] = matrix(w$xx,nrow=nsamp.k,ncol=k,byrow=FALSE)
#compute membership probabilities for each data pt
probs = matrix( nrow = nobs, ncol=k)
for(id in 1:nobs){
for(c in 1:k){
probs[id,c] = length(which(ret$memberships[[j]][,id] == c))
}
}
probs = probs/nsamp.k
ret$membership.probabilities[[j]] = probs
#for variable indicator
ret$item.tags[[j]] = item.tags.k
#store in the new Z matrix Zrelab
Zrelab[ item.tags.k, ] <- ret$memberships[[j]]
Perm[ item.tags.k, 1:k ] <- ret$permutation[[j]]
}else{
ret$ncomponents[j] = k
ret$memberships[[j]] = labels.k
idx.k = which(ngroups == k)
ret$item.tags[[j]] = idx.k #only in this case
}
j = j+1
}
x <- list()
x$call <- match.call()
x$relab <- Zrelab
x$components <- ret$ncomponents
x$label.prob <- ret$membership.probabilities
x$permutation <- Perm
return(x)
}
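## Usage sketch (assumes the BayesLCA package, which ships the compiled
## BLCA_RELABEL routine, is installed; the label draws here are simulated):
# library(BayesLCA)
# Z <- matrix(sample(1:3, 20 * 50, replace = TRUE), nrow = 20)
# res <- undo.label.switching.old(Z, ngroups = 3)
# str(res$relab)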
# ---- file: /Follow-up Analysis/script_to_analyze_2004_split_data.R (repo: andytimm/election_stan_analysis, no license) ----
remove(list=objects())
library(arm)
library(car)
library(maps)
library(rstan)
library(dplyr) # mutate(), group_by(), summarize() and rename() are used below
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
source("stan_lmer_voting_analysis.R")
source("generate_stan_fig_2.R")
########################################################################################
### labels
reg.lkup <- c(3,4,4,3,4,4,1,1,5,3,3,4,4,2,2,2,2,3,3,1,1,1,2,2,3,2,4,2,4,1,1,4,1,3,2,2,3,4,1,1,3,2,3,3,4,1,3,4,1,2,4)
reg.label <- c("Northeast", "Midwest", "South", "West", "DC")
stt.label <- c(state.abb[1:8], "DC", state.abb[9:50])
eth.label <- c("White", "Black", "Hispanic", "Other")
inc.label <- c("$0-20k", "$20-40k", "$40-75k", "$75-150k", "$150k+")
age.label <- c("18-29", "30-44", "45-64", "65+")
sex.label <- c("Male", "Female")
edu.label <- c("< HS", "HS", "Some Coll", "Coll", "Post-Grad")
mar.label <- c("Married", "Single")
rel.label <- c("Protestant (not born-again)","Born-again Protestant","Catholic","Mormon","Jewish","Other religion","No religion")
chu.label <- c("Nonattenders","Rare attenders","Occasional\nattenders","Frequent attenders","Very frequent\nchurch attenders")
kid.label <- c("Kids", "No Kids")
cit.label <- c("Citizen", "Not Citizen")
n.reg <- length(reg.label)
n.stt <- length(stt.label)
n.eth <- length(eth.label)
n.inc <- length(inc.label)
n.age <- length(age.label)
n.sex <- length(sex.label)
n.edu <- length(edu.label)
n.mar <- length(mar.label)
n.rel <- length(rel.label)
n.chu <- length(chu.label)
n.kid <- length(kid.label)
########################################################################################
### get data
### state-level data
dat.stt <- read.table("data/state-stats.dat", header=T, sep="\t")
dat.stt$z.inc2000 <- rescale(dat.stt$inc2000)
dat.stt$z.inc2004 <- rescale(dat.stt$inc2004)
dat.stt$z.inc2007 <- rescale(dat.stt$inc2007)
dat.stt$z.rep1996 <- rescale(dat.stt$rep1996)
dat.stt$z.rep2000 <- rescale(dat.stt$rep2000)
dat.stt$z.rep2004 <- rescale(dat.stt$rep2004)
dat.stt$z.rep2008 <- rescale(dat.stt$rep2008)
dat.stt$z.trn1996 <- rescale(dat.stt$vote1996/dat.stt$pop1996)
dat.stt$z.trn2000 <- rescale(dat.stt$vote2000/dat.stt$pop2000)
dat.stt$z.trn2004 <- rescale(dat.stt$vote2004/dat.stt$pop2004)
dat.stt$z.trn2008 <- rescale(dat.stt$vote2008/dat.stt$pop2007)
dat.stt$stt <- 1:nrow(dat.stt)
### annenberg / pew data for modeling vote choice
dat.vot <- read.table("data/votechoice2000-04-08.dat", header=T, sep="\t")
dat.vot$weight[is.na(dat.vot$weight)] <- 1
ok <- apply(is.na(dat.vot[1:8]), 1, sum)==0 &
(dat.vot$cit==1 | is.na(dat.vot$cit)) &
(dat.vot$regist=="registered" | is.na(dat.vot$regist))
dat.vot <- dat.vot[ok,]
dat.vot$reg <- reg.lkup[dat.vot$stt]
dat.vot$year <- recode(dat.vot$file, "'2000-ann'=2000; '2004-ann'=2004; '2008-pew'=2008", as.factor.result=F)
dat.vot <- dat.vot[dat.vot$year != 2000,]
dat.vot <- dat.vot[dat.vot$year != 2008,]
remove_list <- generate_unique_index_list(5630, nrow(dat.vot))
dat.vot <- dat.vot[-remove_list,]
split_list <- generate_unique_index_list(19170, nrow(dat.vot))
dat.vot.split_list <- list(dat.vot[split_list,], dat.vot[-split_list,])
### census data from PUMS for population cell sizes
dat.pop <- read.table("data/census-pums-pop-2000-04-08.dat", header=T, sep="\t")
### prepare data for looping through years
years <- c(2004, 2008)
L.z.incstt <- list(dat.stt$z.inc2004, dat.stt$z.inc2007)
L.z.repprv <- list(dat.stt$z.rep2000, dat.stt$z.rep2004)
L.z.trnprv <- list(dat.stt$z.trn2000, dat.stt$z.trn2004)
########################################################################################
### Run MRP for stt, eth, inc, age
data_matrix <- as.data.frame(expand.grid(1:n.stt, 1:n.eth, 1:n.inc, 1:n.age))
colnames(data_matrix) <- c("stt", "eth", "inc", "age")
data_matrix$age <- factor(data_matrix$age)
data_matrix$eth <- factor(data_matrix$eth)
data_matrix$inc <- factor(data_matrix$inc)
data_matrix$stt <- factor(data_matrix$stt)
data_matrix$grp <- apply(data_matrix, 1, paste, collapse="_")
data_matrix$ix <- 1:nrow(data_matrix)
### covariates
data_matrix <- mutate(data_matrix,
reg = factor(reg.lkup[stt]),
z_inc = rescale(inc),
z_incstt = L.z.incstt[[1]][stt],
z_trnprv = L.z.trnprv[[1]][stt],
z_repprv = L.z.repprv[[1]][stt])
### poststratification setup
interested_rows <- sapply(1:nrow(data_matrix), function(i) paste("p[", i, "]", sep = ""))
interested_cols <- c("mean")
result_matrix <- as.data.frame(expand.grid(1:n.stt, 1:n.eth, 1:n.inc, 1:n.age))
colnames(result_matrix) <- c("stt", "eth", "inc", "age")
result_matrix$grp <- apply(result_matrix, 1, paste, collapse="_")
result_matrix$ix <- 1:nrow(result_matrix)
dat.pop$grp <- apply(dat.pop[, c("stt", "eth", "inc", "age")], 1, paste, collapse="_")
data_pop_info <- summarize(group_by(dat.pop, grp), pop2004 = sum(wtd2004), pop2008 = sum(wtd2008))
result_matrix <- merge(x=result_matrix, y=data_pop_info, by="grp", all.x=T)
result_matrix <- result_matrix[order(result_matrix$ix),]
### multilevel models
M.cps <- M.vot <- list()
for (i in 1:2) {
cat(paste("***** Multilevel Models for Split", i, "\n"))
### vote choice model
cat("***** Annenberg/Pew Vote Choice Model\n")
tmp <- dat.vot.split_list[[i]]
tmp$grp <- apply(tmp[, c("stt", "eth", "inc", "age")], 1, paste, collapse="_")
tmp$ones <- 1
tmp <- rename(tmp,vote = rvote)
stan_lmer_obj <- stan_vote_regression(data_matrix, tmp, 3, paste("Pew_Vote_Model",years[i],sep="_"))
stanfit_obj <- stan(data = stan_lmer_obj$data, file = "../Stan/CPS_Turnout_Model_2004_Level_3.stan", iter = 500)
###Poststratification
#Rebuild design matrix
result_matrix$vote2004.M <- NA
result_matrix$vote2004 <- NA
result_matrix$vote2004.M <- invlogit(summary(stanfit_obj)$summary[interested_rows, interested_cols])
for (j in 1:n.stt) {
cat(paste(j, "of", n.stt, "\n"))
ok <- result_matrix$stt==j
result_matrix$vote2004[ok] <- weighted_correction(result_matrix$vote2004.M[ok], result_matrix$pop2004[ok]/sum(result_matrix$pop2004[ok]), dat.stt[as.character(j), "rep2004"])$corrected
}
M.vot[[i]] <- stanfit_obj
save.image("Split_analysis2.Rdata")
generate_fig_2_for_2004(result_matrix, dat.stt, dat.vot.split_list[[i]], paste("stan_split", i, "fig_2.png", sep = "_"))
}
# ---- file: /R/filter_merged_reads.R (repo: mottensmann/SealMHC, no license) ----
#' Filters merged reads prior to (i) clustering Zotus and (ii) assigning raw reads to them
#'
#' @description
#' This script does the following tasks:
#' 1.) Ensure that reads contain expected primer pair
#' 2.) Discard reads with unexpected length
#' 3.) Adds barcodes to headers of the fastq file
#' 4.) Removes reads that contain unexpected barcodes
#' 5.) Export subsetted to new FASTQ files.
#'
#' @param reads
#' path to a file containing merged reads in fastq format with file ending '.fastq'
#'
#' @param barcodes
#' path to a file containing barcodes for \code{reads} with file ending '.fastq'
#'
#' @param mapping_file
#' mapping file that contains barcode sequences for all samples
#'
#' @param forward_primer
#' character giving the forward primer sequence in 5'-3' orientation
#'
#' @param reverse_primer
#' character giving the reverse primer sequence in 3'-5' orientation
#'
#' @param max.mismatch
#' Interger giving the maximum number of allowed mismatches between sequences and primer. By default 1
#'
#' @param with.indels
#' Boolean. By default no indels in primer sequences are allowed.
#'
#' @param suffix
#' string added to file names for generating the output. Note, existing files may be overwritten.
#'
#' @param illumina_adapter
#' illumina adapter sequence written at the end of the identifier line. Same for all samples. This string will be replaced by 'barcodelabel=' to allow demultiplexing by usearch
#'
#' @param pcr_size
#' expected fragment length. If specified all reads of differing size will be filtered out.
#'
#' @param min_size
#' minimum fragment length. If specifed all reads that are shorter will be discarded
#'
#' @export
#'
filter_merged_reads <- function(reads = NULL, barcodes = NULL, mapping_file = NULL, forward_primer = NULL, reverse_primer = NULL, max.mismatch = 1, with.indels = F, suffix = "_filtered", illumina_adapter = "GCCAAT", pcr_size = NULL, min_size = NULL) {
suppressPackageStartupMessages(library(ShortRead))
source("R/fasta_fastq_functions.R")
# checks
if (!file.exists(reads)) stop("Specify a valid path to the reads file")
if (!file.exists(barcodes)) stop("Specify a valid path to the barcodes file")
if (!file.exists(mapping_file)) stop("Specify a valid path to the mapping file")
if (!is.character(forward_primer)) stop("Give a forward primer sequence")
if (!is.character(reverse_primer)) stop("Give a reverse primer sequence")
## keep path information for later use
reads_path <- reads
barcodes_path <- barcodes
## get reads and associated barcodes
cat("Read files ... ")
reads <- readFastq(reads)
initial_size <- length(reads)
barcodes <- readFastq(barcodes)
cat("done\n")
# extract sequences from fastq files
cat("Extract sequences ... ")
reads_seq = sread(reads)
barcodes_seq <- sread(barcodes)
cat("done\n")
# search for forward primer
cat("Extract forward primer region ... ")
primer_f = DNAStringSet(substr(reads_seq, 1, nchar(forward_primer)))
cat("done\nDetect matches with the primer sequence ... ")
hits_f = vcountPattern(forward_primer,
primer_f,
max.mismatch = max.mismatch,
with.indels = with.indels)
cat("done\n")
perc_forward <- round(sum(hits_f/length(hits_f))*100,2)
size_forward <- sum(hits_f)
cat(perc_forward, "% of all reads contain the expected forward primer\n")
# search for reverse primer in the reverse complement
cat("Extract reverse primer region ... ")
primer_r = DNAStringSet(substr(reverseComplement(reads_seq), 1, nchar(reverse_primer)))
cat("done\n")
cat("Detect matches with the primer sequence ... ")
hits_r = vcountPattern(reverse_primer,
primer_r,
max.mismatch = max.mismatch,
with.indels = with.indels)
cat("done\n")
perc_reverse <- round(sum(hits_r/length(hits_r))*100,2)
size_reverse <- sum(hits_r)
cat(perc_reverse, "% of all raw reads contain the expected reverse primer\n")
# merge hits and turn into a logical
hits <- hits_f + hits_r
hits <- ifelse(hits == 2, 1, 0)
hits <- as.logical(hits)
# keep reads with bother primers present
reads <- reads[hits]
primers_truncated <- length(reads)
size_primers <- length(reads)
perc_primers <- round(size_primers/initial_size*100,2)
cat(perc_primers, "% of all raw reads contain both expected primers\n")
# size selection. Retain only reads of the expected length
if (!is.null(pcr_size)) {
cat("Size selection with expected n =", pcr_size," ... ")
width <- reads@sread@ranges@width
width.matched <- which(width %in% seq(from = floor(pcr_size*0.99), to = floor(pcr_size*1.01)))
# apply selection
reads <- reads[width.matched]
cat("done\n")
cat("Retained", round(100*(length(width.matched)/initial_size),2),"% of all reads")
}
if (!is.null(min_size)) {
cat("Size selection with expected length >= ", min_size," ... ")
width <- reads@sread@ranges@width
width.matched <- which(width >= min_size)
# apply selection
reads <- reads[width.matched]
#barcodes <- barcodes[width.matched]
cat("done\n")
cat("Retained", round(100*(length(width.matched)/initial_size),2),"% of all reads")
}
# get sequence header
reads_id <- id(reads)
barcodes_id <- id(barcodes)
# subset barcodes based on id of reads
barcodes <- barcodes[barcodes_id %in% reads_id]
## define output names
make_name <- function(x, suffix = suffix) {
path <- strsplit(x, "/")[[1]]
name <- strsplit(path[length(path)], ".fastq")[[1]]
prefix <- paste0(path[-length(path)], collapse = "/")
return(paste0(prefix, "/", name, suffix, ".fastq"))
}
out_names <- unlist(lapply(X = c(reads_path, barcodes_path), make_name, suffix))
cat("\nAdd barcodes to sequence headers ... ")
# prepare labels
x <- as.character(reads@id)
# remove whitespaces
x <- sub(pattern = " ", replacement = "", x = x)
# subsitute illumina label by 'barcodelabel='
x <- sub(pattern = illumina_adapter, replacement = "barcodelabel=", x = x)
x <- paste0(x, as.character(barcodes@sread))
## polish header
reads <- ShortReadQ(sread(reads), quality(reads), BStringSet(x))
barcodes <- ShortReadQ(sread(barcodes), quality(barcodes), BStringSet(x))
cat("done\n")
# extract sequences
cat("Filter for expected barcodes ... ")
barcodes_seq <- sread(barcodes)
# read mapping file
mapping <- read.table(file = mapping_file, header = T)[,2]
## match barcodes to samples
indices <- which(barcodes_seq %in% mapping)
reads <- reads[indices]
barcodes_selected <- length(reads)
## take a look at unmatched barcodes
unexp_barcode <- barcodes[-indices]
barcodes <- barcodes[indices]
cat("done\n")
cat("Write to disk ... ")
fastq2disk(data = reads, file = out_names[1])
fastq2disk(data = barcodes, file = out_names[2])
cat("done\n")
cat(round(100 * barcodes_selected/initial_size,2), " % of all reads are retained\n")
## write log file
log_out <- strsplit(reads_path, "/")[[1]]
log_out <- paste0(log_out[-length(log_out)], collapse = "/")
log_out <- paste0(log_out, "/log", suffix, ".txt")
# overwrite any log file left over from a previous run
if (file.exists(log_out)) {
  sink(log_out, append = F)
  sink()
}
sink(log_out)
cat("Input file:\t", reads_path, "\n\t", initial_size,"raw reads\n")
cat("Filter for primers:","\n\t",
size_forward, paste0("(", perc_forward,"%) matches with forward primer sequence\n\t"),
size_reverse, paste0("(", perc_reverse,"%) matches with reverse primer sequence\n\t"),
size_primers, paste0("(", perc_primers,"%) matches with both primer sequences\n"))
if (!is.null(pcr_size)) {
cat("Filter for expected size of", pcr_size,"bp:\n\t",
length(width.matched), paste0("(", round(length(width.matched)/initial_size*100,2),"%) sequences are retained\n"))
}
if (!is.null(min_size)) {
cat("Filter for expected size >", min_size,"bp:\n\t",
length(width.matched), paste0("(", round(length(width.matched)/initial_size*100,2),"%) sequences are retained\n"))
}
cat("Filter for expected barcodes:\n\t",
barcodes_selected, paste0("(", round(100 * barcodes_selected/initial_size,2),"%) of all raw reads are retained"))
sink()
}
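## Usage sketch; every file name and primer sequence below is a made-up
## placeholder, not a value from the SealMHC pipeline:
# filter_merged_reads(reads = "run1_merged.fastq",
#                     barcodes = "run1_barcodes.fastq",
#                     mapping_file = "mapping.txt",
#                     forward_primer = "ACGTACGTACGT",
#                     reverse_primer = "TGCATGCATGCA",
#                     pcr_size = 250)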
# ---- file: /common_libraries.r (repo: Shee-Ra/classify_entity_anzsics, no license) ----
# common libraries
library(pacman)
p_load(cutpointr # metric / evaluation
, fst
, ggplot2
, here
, janitor
, magrittr
, quanteda # bag o' words
, rsample # sampling
, ranger # model
, tictoc # timing
, tidyverse
, XML
, neuralnet # neural net
)
# ---- file: /chapter02/outlier.R (repo: LoveAndCode/BigDataProcessing, no license) ----
#########################
# Created by John Marc  #
#########################
# Create vector variables
age <- c(23,23,27,27,39,41,47,49,50,52,54,54,56,57,58,58,60,61)
fat <- c(9.5, 26.5, 7.8, 17.8, 31.4, 25.9, 27.4, 27.2, 31.2, 34.6, 42.5, 28.8, 33.4, 30.2, 34.1, 32.9, 41.2, 35.7)
# Check five statistics factor
summary(fat)
summary(age)
# Check outlier factor using boxplot
boxplot(fat)
boxplot(age)
# The formula for detect to outlier factor
# Interquartile(IQR) = Q3 - Q1
# UpperFence = Q3+1.5*IQR
# LowerFence = Q1-1.5*IQR
# Assign Q1 and Q3 values from the summary results to each Q1,Q3 variable
# (summary(age) reports a 1st quartile of 39.50)
age_Q1 <- 39.50
age_Q3 <- 56.75
fat_Q1 <- 26.68
fat_Q3 <- 33.92
# Calculate IQR value
age_IQR <- age_Q3 - age_Q1
fat_IQR <- fat_Q3 - fat_Q1
# Check IQR value
age_IQR
fat_IQR
# Calculate LowerFence value and UpperFence value
fat_LowerFence = fat_Q1 - 1.5*fat_IQR
fat_UpperFence = fat_Q3 + 1.5*fat_IQR
age_LowerFence = age_Q1 - 1.5*age_IQR
age_UpperFence = age_Q3+ 1.5*age_IQR
# Detect to outlier value
fat[fat<fat_LowerFence]
fat[fat > fat_UpperFence]
age[age < age_LowerFence]
age[age > age_UpperFence]
# Remove outlier factor
fat <- fat[fat > fat_LowerFence]
fat
age
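# A small reusable sketch of the IQR-fence rule applied step by step above;
# on the original (unfiltered) fat vector it returns 9.5 and 7.8
iqr_outliers <- function(x) {
  q <- unname(quantile(x, c(0.25, 0.75)))
  fence <- 1.5 * (q[2] - q[1])
  x[x < q[1] - fence | x > q[2] + fence]
}
iqr_outliers(age) # numeric(0): no age outliers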
# ---- file: /R/e046-logpatent1.R (repo: kenjisato/receptivity_and_innovation, no license) ----
source('R/e046-patent.R')
# Log Patent ----
xlabel = "Proportion of 'LEVEL'"
ylabel = "Patent applications by million residents (log)"
fname_lp = "output/e046_logpatent1_LEVEL"
save_plot(e046_patent, xvars = labels, yvar = "log(patent_count)",
xlabel, ylabel, fname_lp, placeholder = "LEVEL")
# ---- file: /nonfatal_code/cancer/03_models/05_nonfatal/refresh_incidence.r (repo: Nermin-Ghith/ihme-modeling, no license) ----
|
#!/usr/local/bin/R
#########################################
## Description: Loads functions to calculate incidence data
## Input(s)/Output(s): see individual functions
## How To Use: intended for submission as a cluster job in the nonfatal pipeline (see "Run Functions" below),
## can also be sourced to retrieve results for a single cause-location_id
## Notes: See additional notes in the format_mi_draws script
#########################################
## load Libraries
source(file.path(h, 'cancer_estimation/utilities.R')) # enables generation of common filepaths ('h' is assumed to be defined upstream, e.g. the user's home drive on the cluster)
source(get_path('handlers', process="nonfatal_model"))
## Run function if not using R interactively
if(!interactive()) {
handle_script(script_name="load_mi_draws",remove_old_outputs=TRUE, what_to_iterate = load_location_list())
handle_script(script_name="load_mortality_draws",remove_old_outputs=TRUE, what_to_iterate = load_location_list())
handle_script(script_name="generate_incidence",remove_old_outputs=TRUE, what_to_iterate = load_location_list())
}
# ---- file: /function/text/admin_fig_function_topline.R (repo: Flowminder/FDFA_01, no license) ----
admin_fig_function_topline=function(mig_db,simple_countries_df,country_clicked,admin_clicked){
# null parameters
if(is.null(country_clicked)){country_clicked="AFG"}
if(is.null(admin_clicked)){admin_clicked="13"}
# if(is.null(focus)){focus="global"}
# if(is.null(type)){type="int"}
# if(is.null(direction)){direction="X"}
# if(is.null(sex)){sex="F"}
# if(is.null(aggregation)){aggregation="admin"}
# if(is.null(n_slice)){n_slice="3"}
# if(is.null(top_n_per_country)){top_n_per_country=FALSE}
COUNTRY_TEXT=tbl(mig_db,"admin_names")%>%
collect(n=Inf)%>%
filter(JOIN_ID==paste0(c(country_clicked,admin_clicked),collapse = "_"))%>%
distinct(COUNTRY_NAME)%>%
top_n(1)
ADMIN_TEXT=tbl(mig_db,"admin_names")%>%
collect(n=Inf)%>%
filter(JOIN_ID==paste0(c(country_clicked,admin_clicked),collapse = "_"))%>%
distinct(ADMIN_NAME)%>%
top_n(1)
data_col_1_collected=col_slicer_1(mig_db,simple_countries_df,country_clicked,admin_clicked,"admin","int","X","all","admin")
int_X_all=data_col_1_collected%>%
filter(sex=="all")
int_X_F=data_col_1_collected%>%
filter(sex=="F")
int_X_M=data_col_1_collected%>%
filter(sex=="M")
data_col_1_collected=col_slicer_1(mig_db,simple_countries_df,country_clicked,admin_clicked,"admin","nat","X","all","admin")
nat_X_all=data_col_1_collected%>%
filter(sex=="all")
nat_X_F=data_col_1_collected%>%
filter(sex=="F")
nat_X_M=data_col_1_collected%>%
filter(sex=="M")
data_col_1_collected=col_slicer_1(mig_db,simple_countries_df,country_clicked,admin_clicked,"admin","int","M","all","admin")
int_M_all=data_col_1_collected%>%
filter(sex=="all")
int_M_F=data_col_1_collected%>%
filter(sex=="F")
int_M_M=data_col_1_collected%>%
filter(sex=="M")
data_col_1_collected=col_slicer_1(mig_db,simple_countries_df,country_clicked,admin_clicked,"admin","nat","M","all","admin")
nat_M_all=data_col_1_collected%>%
filter(sex=="all")
nat_M_F=data_col_1_collected%>%
filter(sex=="F")
nat_M_M=data_col_1_collected%>%
filter(sex=="M")
  # format a count in thousands, e.g. 12345 -> "12 k"
  format_k=function(x){
    paste(format(round(x / 1e3, 0), trim = TRUE), "k")
  }
  # format a proportion as a whole percentage, e.g. 0.437 -> "44%"
  format_perc=function(x){
    paste0(format(round(x*100,0), trim = TRUE), "%")
  }
int_mig_X=paste0(format_k(int_X_all$move)," (", format_perc(int_X_F$move/int_X_all$move)," female)")
int_mig_M=paste0(format_k(int_M_all$move)," (", format_perc(int_M_F$move/int_M_all$move)," female)")
nat_mig_X=paste0(format_k(nat_X_all$move)," (", format_perc(nat_X_F$move/nat_X_all$move)," female)")
nat_mig_M=paste0(format_k(nat_M_all$move)," (", format_perc(nat_M_F$move/nat_M_all$move)," female)")
head_text=paste0(ADMIN_TEXT$ADMIN_NAME," (",COUNTRY_TEXT$COUNTRY_NAME,")")
HTML("<p><h3><b>",head_text," Summary</b></h3></p>",
"<h4>",
"<p><b> International emigrants:</b>",int_mig_X,"</p>",
"<p><b> International immigrants:</b>",int_mig_M,"</p>",
"<br>",
"<p><b> Internal emigrants:</b>",nat_mig_X,"</p>",
"<p><b> Internal immigrants:</b>",nat_mig_M,"</p>",
"</h4>")
}
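## Usage sketch inside a Shiny renderUI (the input names are hypothetical):
# output$topline <- renderUI(
#   admin_fig_function_topline(mig_db, simple_countries_df,
#                              input$country_clicked, input$admin_clicked))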
# ---- file: /man/as.data.frame.listofModels.Rd (repo: jacobbien/simulator, no license) ----
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model-class.R
\name{as.data.frame.listofModels}
\alias{as.data.frame.listofModels}
\title{Convert a List of Models to a data.frame}
\usage{
\method{as.data.frame}{listofModels}(x, row.names = NULL, optional = FALSE, ...)
}
\arguments{
\item{x}{list}
\item{row.names}{not used}
\item{optional}{not used}
\item{...}{not used}
}
\description{
When \code{\link{load}} generates a list of Models, it assigns this
to be of (S3) class listofModels, inherited from list, so that this function
will be invoked instead of as.data.frame.list, which is defined in base.
}
# ---- file: /man/load_dirapp.Rd (repo: jverzani/gWidgetsWWW2, no license) ----
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/load.R
\name{load_dirapp}
\alias{load_dirapp}
\title{Load an app in a directory}
\usage{
load_dirapp(dir_name, ...)
}
\arguments{
\item{dir_name}{The directory name (with full path). We make several assumptions:
1) \code{dir/*.R} matches a single file, or we
take \code{index.R}, or we take the first alphabetically; 2)
\code{app_name} is \code{basename(dir_name)}; 3)
\code{brew/*rhtml} is empty (fullscreen), matches a single file, or has
\code{index.rhtml}, or we take the first alphabetically; 4)
authenticator (XXX to be determined, for now calls
\code{make_session_manager})}
\item{...}{passed to \code{load_dir} reference class method of
\code{R_http} instance.}
}
\value{
creates the app
}
\description{
Load an app in a directory
}
# ---- file: /data/genthat_extracted_code/TrialSize/examples/TwoSampleProportion.NIS.Rd.R (repo: surayaaramli/typeRrh, no license) ----
library(TrialSize)
### Name: TwoSampleProportion.NIS
### Title: Two sample proportion test for Non-Inferiority/Superiority
### Aliases: TwoSampleProportion.NIS
### Keywords: ~kwd1 ~kwd2
### ** Examples
Example.4.2.4<-TwoSampleProportion.NIS(0.05,0.2,0.65,0.85,1,0.2,0.05)
Example.4.2.4
# ---- file: /vines/tests/testthat.R (repo: ingted/R-Examples, no license) ----
|
library("testthat")
library("vines")
test_check("vines")
# ---- file: /man/runSinApp.Rd (repo: xindizhang/SigAct, MIT license) ----
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/runSinApp.R
\name{runSinApp}
\alias{runSinApp}
\title{\code{runSinApp} launches the shiny app distributed with this package framework}
\usage{
runSinApp()
}
\description{
\code{runSinApp} launches the shiny app for which the code has been placed in \code{./inst/shiny-scripts/rptApp/}.
}
# ---- file: /R/names_munging.R (repo: jmarca/calvad_impute_trucks, no license) ----
##' process variable names in preparation for Amelia call
##'
##' I split this out so that I could test different things
##'
##' @param names.df the dataframe's names
##' @param count.pattern how to recognize the count variables, default is
##' "(heavyheavy|^nl|^nr\\d|_weight|_axle|_length|_speed|_all_veh_speed)"
##' @param mean.pattern how to recognize what I used to call mean
##' variables (mean variables are ignored by default, so in practice this
##' grabs occupancy variables). Default is "(^ol|^or\\d)"
##' @param exclude.pattern what variables to exclude from the Amelia
##' run as id variables. Default is "^(mean|mt_|tr_)"
##' @param df the data frame. Optional but if you pass it in, then
##' will be used to set up boundaries too
##' @return a list of things,
##' list(exclude.as.id.vars=exclude.as.id.vars,
##' pos.count =pos.count,
##' mean.vars =mean.vars,
##' count.vars=count.vars,
##' pos.bds =pos.bds)
##'
##' which is what you need in the above amelia function
names.munging <- function(names.df
,count.pattern = "(heavyheavy|^nl|^nr\\d|_weight|_axle|_length|_speed|_all_veh_speed)"
,mean.pattern="(^ol|^or\\d)"
,exclude.pattern="^(mean|mt_|tr_)"
,df=data.frame()
){
ic.names <- names.df
occ.pattern <- "(^ol1$|^or\\d$)"
ic.names <- grep( pattern=exclude.pattern,x=ic.names ,perl=TRUE,value=TRUE,invert=TRUE)
sd.vars <- grep( pattern="sd(\\.|_)[r|l]\\d+$",x=ic.names,perl=TRUE,value=TRUE)
ic.names <- grep( pattern="sd(\\.|_)[r|l]\\d+$",x=ic.names,perl=TRUE,value=TRUE,invert=TRUE)
count.vars <- grep( pattern=count.pattern,x=ic.names,perl=TRUE,value=TRUE)
ic.names<- grep( pattern=count.pattern,x=ic.names,perl=TRUE,value=TRUE,invert=TRUE)
mean.vars <- grep( pattern=mean.pattern,x=ic.names,perl=TRUE,value=TRUE)
occ.vars <- grep( pattern=occ.pattern,x=mean.vars ,perl=TRUE,value=TRUE)
M <- 10000000000 #arbitrary bignum
ic.names <- names.df
pos.count <- (1:length(ic.names))[is.element(ic.names, c(count.vars))]
pos.bds <- c()
if(length(df)>0){
max.vals <- apply( df[,count.vars], 2, max ,na.rm=TRUE)
pos.bds <- cbind(pos.count,0,1.10*max.vals)
## limit the mean vars less, but exclude occ vars
if(length(setdiff(mean.vars,occ.vars))>0){
pos.count <- (1:length(ic.names))[is.element(ic.names, setdiff(mean.vars,occ.vars))]
pos.bds <- rbind(pos.bds,cbind(pos.count,0,M))
}
## now limit occupancy to (0,1)
pos.count <- (1:length(ic.names))[is.element(ic.names, occ.vars)]
pos.bds <- rbind(pos.bds,cbind(pos.count,0,1))
}
pos.count <- (1:length(ic.names))[is.element(ic.names, c(count.vars))]
exclude.as.id.vars <- setdiff(ic.names,c(mean.vars,count.vars,'tod','day'))
## print(paste("count vars:",paste(count.vars,collapse=' ')))
## print(paste("mean vars:", paste(mean.vars,collapse=' ')))
list(exclude.as.id.vars=exclude.as.id.vars,
pos.count=pos.count,
mean.vars=mean.vars,
count.vars=count.vars,
pos.bds=pos.bds
)
}
##' names munging steps for volume occupancy impute at WIM sites
##'
##' Only real difference I think is the default exclude pattern. But
##' I'm not sure yet so not going to use the above until I can test
##' that they are the same!
##'
##' @title names_munging_for_vo
##' @param count.pattern a pattern to find count variables
##' @param mean.pattern a pattern to find mean variables (no counts)
##' @param exclude.pattern a pattern to exclude variables from the analysis
##' @param df the data frame
##' @return a list of things,
##' list(exclude.as.id.vars=exclude.as.id.vars,
##' pos.count =pos.count,
##' mean.vars =mean.vars,
##' count.vars=count.vars,
##' pos.bds =pos.bds)
##'
##' which is what you need to call the above amelia function
##' @author James E. Marca
##'
names_munging_for_vo <- function(count.pattern= "(heavyheavy|^nl|^nr\\d|_weight|_axle|_length|_speed)"
,mean.pattern="(^ol|^or\\d)"
,exclude.pattern="(^mean|^mt_|^tr_|_all_veh_speed)"
,df){
occ.pattern <- "(^ol1$|^or\\d$)"
## sort out upper limits
ic.names <- names(df)
ic.names <- grep( pattern=exclude.pattern,
x=ic.names ,
perl=TRUE,value=TRUE,invert=TRUE)
sd.vars <- grep( pattern="sd(\\.|_)[r|l]\\d+$",x=ic.names,
perl=TRUE,value=TRUE)
ic.names <- grep( pattern="sd(\\.|_)[r|l]\\d+$",x=ic.names,
perl=TRUE,value=TRUE,invert=TRUE)
count.vars <- grep( pattern=count.pattern,x=ic.names,
perl=TRUE,value=TRUE)
not_counts <- grep( pattern=count.pattern,x=ic.names,
perl=TRUE,value=TRUE,invert=TRUE)
mean.vars <- grep( pattern=mean.pattern,x=not_counts,
perl=TRUE,value=TRUE)
occ.vars <- grep( pattern=occ.pattern,x=not_counts ,
perl=TRUE,value=TRUE)
M <- 10000000000 #arbitrary bignum
ic.names <- names(df)
pos.count <- (1:length(ic.names))[is.element(ic.names, c(count.vars))]
max.vals <- apply( df[,count.vars], 2, max ,na.rm=TRUE)
pos.bds <- cbind(pos.count,0,1.10*max.vals)
## limit the mean vars less, but exclude occ vars
means_not_occ <- setdiff(mean.vars,occ.vars)
if(length(means_not_occ)>0){
pos.count <- (1:length(ic.names))[is.element(ic.names,means_not_occ)]
pos.bds <- rbind(pos.bds,cbind(pos.count,0,M))
}
## now limit occupancy to (0,1)
pos.count <- (1:length(ic.names))[is.element(ic.names, occ.vars)]
pos.bds <- rbind(pos.bds,cbind(pos.count,0,1))
print(ic.names)
print("bounds:")
print(pos.bds)
exclude.as.id.vars <- setdiff(ic.names,c(mean.vars,count.vars,'tod','day'))
exclude.as.id.vars <- union(exclude.as.id.vars,'vds_id')
list(exclude.as.id.vars=exclude.as.id.vars,
pos.count=pos.count,
mean.vars=mean.vars,
count.vars=count.vars,
pos.bds=pos.bds
)
}
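## Usage sketch on a hypothetical loop-detector data frame (two count columns,
## one occupancy column, one id column):
# df <- data.frame(tod = 1:4, day = 1,
#                  heavyheavy = c(5, 7, NA, 6), nl1 = c(40, 52, NA, 47),
#                  ol1 = c(0.10, 0.20, NA, 0.15), detector = "vds_id_1")
# setup <- names.munging(names(df), df = df)
# setup$pos.bds  # bounds matrix handed to amelia()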
# ---- file: /Codes/App_playground.R (repo: Clarkbar36/Pro-Baseball-Experience, no license) ----
suppressMessages(library(shiny))
suppressMessages(library(tidyverse))
suppressMessages(library(DT))
suppressMessages(library(dplyr))
suppressMessages(library(ggplot2))
suppressMessages(library(stringr))
suppressMessages(library(RColorBrewer))
suppressMessages(library(shinydashboard))
suppressMessages(library(readxl))
suppressMessages(library(directlabels))
suppressMessages(library(Hmisc))
suppressMessages(library(gridExtra))
source("/Codes/All-Time_Stats.R",local = TRUE)
source("/Codes/Season_Stats.R",local = TRUE)
#All time Hitter Graph Function
# hitter - Set variables to subset and graph by
hit.statistic <- "Stolen Bases"
hit.obs <- 10
hit.lg <- "PBE"
#hitter function (note: like the other leaderboard functions below, the x/y/z
#arguments are placeholders; the body reads the hit.* globals set above)
hitter.leaderboard <- function(x,y,z){
# subset hitter dataframe by league
hit.plt.df <- subset(c.all.hit,c.all.hit$league_abbr == hit.lg)
# if PBE subset dataframe by plate appearances greater than or equal to 760, if MiLPBE subset by PA greater than or equal to 470
if(hit.lg == "PBE"){
hit.plt.df <- subset(hit.plt.df,hit.plt.df$`Plate Apperances`>=760)
} else {
hit.plt.df <- subset(hit.plt.df,hit.plt.df$`Plate Apperances`>=470)
}
# find which column number the statistic variable is in the dataframe
num <- which( colnames(hit.plt.df)==hit.statistic)
# if statistic is K percent or K-BB percent, take the bottom obs, players with lower k-percents are better
if(hit.statistic %in% c("K Percent", "K-BB Percent")){
hit.plt.df <- top_n(hit.plt.df, n=hit.obs, -hit.plt.df[num])
} else {
hit.plt.df <- top_n(hit.plt.df, n=hit.obs, hit.plt.df[num])
}
# condense dataframe down to 2 columns: name & position, the statistic variable column
hit.plt.df <- hit.plt.df[c(36,as.numeric(num))]
#rename columns to x,y for easier plotting
colnames(hit.plt.df) <- c("x","y")
  # plotting function; if statistic is K percent or K-BB percent, plot in reverse order
if(hit.statistic %in% c("K Percent", "K-BB Percent")){
p <- ggplot(hit.plt.df, aes(x=reorder(x,-y), y=y,fill=y))+
geom_bar(stat='identity')+
ggtitle(paste("Top",length(hit.plt.df$x),"All-Time"), subtitle =paste(hit.statistic,"-", hit.lg)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
geom_text(data=hit.plt.df,aes(x=x,y=y,label=y),size = 3, hjust=1, colour = "white") +
scale_y_continuous(toupper(hit.statistic)) +
theme(axis.title.y = element_blank()) +
theme(legend.position = "none") +
scale_fill_gradient(low = "red", high = "dark blue") +
coord_flip()
p
} else {
p <- ggplot(hit.plt.df, aes(x=reorder(x,y), y=y,fill=y))+
geom_bar(stat='identity')+
ggtitle(paste("Top",length(hit.plt.df$x),"All-Time"), subtitle =paste(hit.statistic,"-", hit.lg)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
geom_text(data=hit.plt.df,aes(x=x,y=y,label=y),size = 3, hjust=1, colour = "white") +
scale_y_continuous(toupper(hit.statistic)) +
theme(axis.title.y = element_blank()) +
theme(legend.position = "none") +
scale_fill_gradient(low = "red", high = "dark blue") +
coord_flip()
p
}
}
# hitter - run function to create graph
hitter.leaderboard(x = hit.statistic, y = hit.obs, z = hit.lg)
# ---------------------------------------------------------------------------------------
#All time Pitcher Graph Function
# pitcher - Set variables to subset and graph by
pitch.statistic <- "Win Percent"
pitch.obs <- 10
pitch.lg <- "PBE"
#pitcher function
pitcher.leaderboard <- function(x,y,z){
# subset pitcher dataframe by league
pitch.plt.df <- subset(c.all.pitch,c.all.pitch$league_abbr == pitch.lg)
  # for ratio statistics, restrict to pitchers with at least the league-average innings pitched
if(pitch.statistic %in% c('ERA', 'WHIP','BABIP','FIP','HR per 9','R per 9','Hits per 9','BB per 9','BB percent', 'Win Percent')){
mean_ip <- round(mean(pitch.plt.df$`Innings Pitched`),0)
pitch.plt.df <- subset(pitch.plt.df,pitch.plt.df$`Innings Pitched`>=mean_ip)
} else {
pitch.plt.df <- pitch.plt.df
}
# find which column number the statistic variable is in the dataframe
num <- which( colnames(pitch.plt.df)==pitch.statistic)
# if statistic is a ratio, reverse order
if(pitch.statistic %in% c('ERA', 'WHIP','BABIP','FIP','HR per 9','R per 9','Hits per 9','BB per 9','BB percent')){
pitch.plt.df <- top_n(pitch.plt.df, n=pitch.obs, -pitch.plt.df[num])
} else {
pitch.plt.df <- top_n(pitch.plt.df, n=pitch.obs, pitch.plt.df[num])
}
# condense dataframe down to 2 columns: name & position, the statistic variable column
pitch.plt.df <- pitch.plt.df[c(39,as.numeric(num))]
#rename columns to x,y for easier plotting
colnames(pitch.plt.df) <- c("x","y")
  # plotting function; if the statistic is a ratio, plot in reverse order
if(pitch.statistic %in% c('ERA', 'WHIP','BABIP','FIP','HR per 9','R per 9','Hits per 9','BB per 9','BB percent')){
p <- ggplot(pitch.plt.df, aes(x=reorder(x,-y), y=y,fill=y))+
geom_bar(stat='identity')+
ggtitle(paste("Top",length(pitch.plt.df$x),"All-Time"), subtitle =paste(pitch.statistic,"-", pitch.lg)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
geom_text(data=pitch.plt.df,aes(x=x,y=y,label=y),size = 3, hjust=1, colour = "white") +
scale_y_continuous(toupper(pitch.statistic)) +
theme(axis.title.y = element_blank()) +
theme(legend.position = "none") +
scale_fill_gradient(low = "red", high = "dark blue") +
coord_flip()
p
} else {
p <- ggplot(pitch.plt.df, aes(x=reorder(x,y), y=y,fill=y))+
geom_bar(stat='identity')+
ggtitle(paste("Top",length(pitch.plt.df$x),"All-Time"), subtitle =paste(pitch.statistic,"-", pitch.lg)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
geom_text(data=pitch.plt.df,aes(x=x,y=y,label=y),size = 3, hjust=1, colour = "white") +
scale_y_continuous(toupper(pitch.statistic)) +
theme(axis.title.y = element_blank()) +
theme(legend.position = "none") +
scale_fill_gradient(low = "red", high = "dark blue") +
coord_flip()
p
}
}
# pitcher - run function to create graph
pitcher.leaderboard(x = pitch.statistic, y = pitch.obs, z = pitch.lg)
# ---------------------------------------------------------------------------------------
#Season Hitter Graph Function
# hitter - Set variables to subset and graph by
s.hit.statistic <- "K Percent"
s.hit.obs <- 15
s.hit.lg <- "PBE"
s.hit.year <- 2026
#hitter function
s.hitter.leaderboard <- function(w,x,y,z){
# subset hitter dataframe by league
s.hit.plt.df <- subset(s.all.hit,s.all.hit$league_abbr == s.hit.lg & s.all.hit$year == s.hit.year)
  # restrict to hitters with at least the league-average plate appearances
  mean_pa <- round(mean(s.hit.plt.df$`Plate Apperances`),0)
  s.hit.plt.df <- subset(s.hit.plt.df,s.hit.plt.df$`Plate Apperances`>=mean_pa)
# find which column number the statistic variable is in the dataframe
num <- which( colnames(s.hit.plt.df)==s.hit.statistic)
# if statistic is K percent or K-BB percent, take the bottom obs, players with lower k-percents are better
if(s.hit.statistic %in% c("K Percent", "K-BB Percent")){
s.hit.plt.df <- top_n(s.hit.plt.df, n=s.hit.obs, -s.hit.plt.df[num])
} else {
s.hit.plt.df <- top_n(s.hit.plt.df, n=s.hit.obs, s.hit.plt.df[num])
}
# condense dataframe down to 2 columns: team & name & position, the statistic variable column
s.hit.plt.df <- s.hit.plt.df[c(42,as.numeric(num))]
#rename columns to x,y for easier plotting
colnames(s.hit.plt.df) <- c("x","y")
  # plotting function; if statistic is K percent or K-BB percent, plot in reverse order
if(s.hit.statistic %in% c("K Percent", "K-BB Percent")){
p <- ggplot(s.hit.plt.df, aes(x=reorder(x,-y), y=y,fill=y))+
geom_bar(stat='identity')+
ggtitle(paste(s.hit.year,"Season -","Top",length(s.hit.plt.df$x)), subtitle =paste(s.hit.statistic,"-", s.hit.lg)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
geom_text(data=s.hit.plt.df,aes(x=x,y=y,label=y),size = 3, hjust=1, colour = "white") +
scale_y_continuous(toupper(s.hit.statistic)) +
theme(axis.title.y = element_blank()) +
theme(legend.position = "none") +
scale_fill_gradient(low = "red", high = "dark blue") +
coord_flip()
p
} else {
p <- ggplot(s.hit.plt.df, aes(x=reorder(x,y), y=y,fill=y))+
geom_bar(stat='identity')+
ggtitle(paste(s.hit.year,"Season -","Top",length(s.hit.plt.df$x)), subtitle =paste(s.hit.statistic,"-", s.hit.lg)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
geom_text(data=s.hit.plt.df,aes(x=x,y=y,label=y),size = 3, hjust=1, colour = "white") +
scale_y_continuous(toupper(s.hit.statistic)) +
theme(axis.title.y = element_blank()) +
theme(legend.position = "none") +
scale_fill_gradient(low = "red", high = "dark blue") +
coord_flip()
p
}
}
# hitter - run function to create graph
s.hitter.leaderboard(w = s.hit.year, x = s.hit.statistic, y = s.hit.obs, z = s.hit.lg)
# ---------------------------------------------------------------------------------------
#Season Pitcher Graph Function
# pitcher - Set variables to subset and graph by
s.pitch.statistic <- "Win Percent"
s.pitch.obs <- 15
s.pitch.lg <- "PBE"
s.pitch.year <- 2026
#pitcher function
s.pitcher.leaderboard <- function(w,x,y,z){
# subset pitcher dataframe by league
s.pitch.plt.df <- subset(s.all.pitch,s.all.pitch$league_abbr == s.pitch.lg & s.all.pitch$year == s.pitch.year)
  # for ratio statistics, restrict to pitchers with at least the league-average innings pitched
if(s.pitch.statistic %in% c('ERA', 'WHIP','BABIP','FIP','HR per 9','R per 9','Hits per 9','BB per 9','BB percent', 'Win Percent')){
mean_ip <- round(mean(s.pitch.plt.df$`Innings Pitched`),0)
s.pitch.plt.df <- subset(s.pitch.plt.df,s.pitch.plt.df$`Innings Pitched`>=mean_ip)
} else {
s.pitch.plt.df <- s.pitch.plt.df
}
# find which column number the statistic variable is in the dataframe
num <- which( colnames(s.pitch.plt.df)==s.pitch.statistic)
  # if the statistic is a ratio where lower is better, take the bottom observations
if(s.pitch.statistic %in% c('ERA', 'WHIP','BABIP','FIP','HR per 9','R per 9','Hits per 9','BB per 9','BB percent')){
s.pitch.plt.df <- top_n(s.pitch.plt.df, n=s.pitch.obs, -s.pitch.plt.df[num])
} else {
s.pitch.plt.df <- top_n(s.pitch.plt.df, n=s.pitch.obs, s.pitch.plt.df[num])
}
# condense dataframe down to 2 columns: team & name & position, the statistic variable column
s.pitch.plt.df <- s.pitch.plt.df[c(43,as.numeric(num))]
#rename columns to x,y for easier plotting
colnames(s.pitch.plt.df) <- c("x","y")
  # plotting function; if the statistic is a ratio, plot in reverse order
if(s.pitch.statistic %in% c('ERA', 'WHIP','BABIP','FIP','HR per 9','R per 9','Hits per 9','BB per 9','BB percent')){
p <- ggplot(s.pitch.plt.df, aes(x=reorder(x,-y), y=y,fill=y))+
geom_bar(stat='identity')+
ggtitle(paste(s.pitch.year,"Season -","Top",length(s.pitch.plt.df$x)), subtitle =paste(s.pitch.statistic,"-", s.pitch.lg)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
geom_text(data=s.pitch.plt.df,aes(x=x,y=y,label=y),size = 3, hjust=1, colour = "white") +
scale_y_continuous(toupper(s.pitch.statistic)) +
theme(axis.title.y = element_blank()) +
theme(legend.position = "none") +
scale_fill_gradient(low = "red", high = "dark blue") +
coord_flip()
p
} else {
p <- ggplot(s.pitch.plt.df, aes(x=reorder(x,y), y=y,fill=y))+
geom_bar(stat='identity')+
ggtitle(paste(s.pitch.year,"Season -","Top",length(s.pitch.plt.df$x)), subtitle =paste(s.pitch.statistic,"-", s.pitch.lg)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
geom_text(data=s.pitch.plt.df,aes(x=x,y=y,label=y),size = 3, hjust=1, colour = "white") +
scale_y_continuous(toupper(s.pitch.statistic)) +
theme(axis.title.y = element_blank()) +
theme(legend.position = "none") +
scale_fill_gradient(low = "red", high = "dark blue") +
coord_flip()
p
}
}
# pitcher - run function to create graph
s.pitcher.leaderboard(w = s.pitch.year, x = s.pitch.statistic, y = s.pitch.obs, z = s.pitch.lg)
# Daily Standings
l <- 'PBE'
daily <- subset(ds.all_games,ds.all_games$league_abbr == l)
p.daily <- daily[c(4,9,14,18,31)]
colnames(p.daily) <- c("x","t","d","y","c")
p.daily$x <- as.Date(p.daily$x)
season <- substring(max(p.daily$x),1,4)
pbe.colors <- c('#97162B',
'#D0D02B',
'#0E1540',
'#FF6700',
'#005CAD',
'#87795E',
'#2C0060',
'#183013')
milpbe.colors <- c('#007EF3',
'#86572C',
'#6C0000',
'#115376')
if(l == 'PBE'){
p <- ggplot(p.daily, aes(x=x, y=y, color = t))+
geom_line() +
ggtitle("Games Above/Below .500", subtitle = paste(l)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
ylab("Games Above/Below .500") + xlab("Date") +
geom_dl(aes(label=t, color=t), method = list("last.points",cex = .75,hjust = .5, vjust = -.5)) +
theme(legend.position = "none") +
scale_colour_manual(values=pbe.colors)
p
} else {
p <- ggplot(p.daily, aes(x=x, y=y, color = t))+
geom_line() +
ggtitle("Games Above/Below .500", subtitle = paste(l)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
ylab("Games Above/Below .500") + xlab("Date") +
geom_dl(aes(label=t, color=t), method = list("last.points",cex = .75,hjust = .5, vjust = -.5)) +
theme(legend.position = "none") +
scale_colour_manual(values=milpbe.colors)
p
}
tbl.ds <- subset(ds.all_games,ds.all_games$league_abbr == l)
tbl.ds$date <- as.Date(tbl.ds$date)
tbl.ds <- tbl.ds %>% filter(date == max(date))
tbl.ds <- tbl.ds[c(10,14,18,19,21:23,30,32,33)]
colnames(tbl.ds)[colnames(tbl.ds) == 'team_name'] <-'Team Name'
colnames(tbl.ds)[colnames(tbl.ds) == 'division'] <-'Division'
colnames(tbl.ds)[colnames(tbl.ds) == 'below.500'] <-'Games Above/Below .500'
colnames(tbl.ds)[colnames(tbl.ds) == 'winloss'] <-'Win-Loss'
colnames(tbl.ds)[colnames(tbl.ds) == 'ttl_ra'] <-'Total Runs Against'
colnames(tbl.ds)[colnames(tbl.ds) == 'ttl_runs'] <-'Total Runs'
colnames(tbl.ds)[colnames(tbl.ds) == 'ttl_hits'] <-'Total Hits'
colnames(tbl.ds)[colnames(tbl.ds) == 'pythag_record'] <-'Pythag Record'
tbl.ds <- tbl.ds [c(1,2,9,10,6,5,7,4,8,3)]
## Records
tm = "Death Valley Scorpions"
pl.records <- subset(r.records,r.records$team_name == tm)
pl.color <- as.character(unique(pl.records$`Primary Color`))
pl.tcolor <- as.character(unique(pl.records$`Tertiary Color`))
mytheme <- gridExtra::ttheme_default(
core = list(fg_params=list(cex = .75)),
colhead = list(fg_params=list(cex = .75)),
rowhead = list(fg_params=list(cex = .75)))
records.tbl <- pl.records %>% group_by(team_name) %>% summarise(`Seasons Played` = n(), `Playoff Appearances` = sum(made_playoffs), Championships = sum(won_playoffs), `Team Average Wins` = round(mean(w),0))
colnames(records.tbl)[colnames(records.tbl) == 'team_name'] <- 'Team'
p <- ggplot(pl.records,aes(x=year,y=w,group=abbr)) +
geom_line(colour = pl.color) +
geom_point(shape = 23, size = 3, fill = pl.tcolor, colour = "black") +
ggtitle("Wins by Season", subtitle = paste(unique(pl.records$team_name),"-",unique(pl.records$league_abbr))) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
ylab("Wins") + xlab("Year") +
scale_x_continuous(breaks=seq(min(pl.records$year),max(pl.records$year),1)) +
  geom_text(x=mean(pl.records$year), y=mean(pl.records$Lg_Average_Wins), label=paste("League Average Wins -",unique(pl.records$Lg_Average_Wins)), vjust = -.5, size = 4) +
geom_line(aes(x=year,y=Lg_Average_Wins),linetype = "longdash") +
annotation_custom(tableGrob(records.tbl, theme = mytheme, rows = NULL), xmin=min(pl.records$year), xmax=mean(pl.records$year), ymin=mean(pl.records$w)+10, ymax=max(pl.records$w))
p
## Volatility table
colnames(r.volatility)[colnames(r.volatility) == 'league_abbr'] <-'League'
colnames(r.volatility)[colnames(r.volatility) == 'team_name'] <-'Team'
r.volatility
## Season Table
season.info <- subset(r.records,r.records$team_name == tm)
season.info <- season.info[c(6,18,21:23,28,11)]
colnames(season.info)[colnames(season.info) == 'year'] <-'Year'
colnames(season.info)[colnames(season.info) == 'team_name'] <-'Team'
colnames(season.info)[colnames(season.info) == 'best_hitter_name'] <-'Best Hitter'
colnames(season.info)[colnames(season.info) == 'best_pitcher_name'] <-'Best Pitcher'
colnames(season.info)[colnames(season.info) == 'best_rookie_name'] <-'Best Rookie'
colnames(season.info)[colnames(season.info) == 'winloss'] <- 'Win-Loss'
colnames(season.info)[colnames(season.info) == 'pos'] <-'Final Division Standing'
season.info <- season.info[order(season.info$Year),]
season.info
lg.lup <- subset(r.volatility,r.volatility$Team == tm) # team_name was renamed to Team above
lg.lup <- lg.lup[1]
# Hitter Scatter - Career
l <- 'PBE'
p <- '2B'
x <- 'Homeruns'
y <- 'WAR'
s <- 2027
num.x <- which( colnames(c.all.hit)==x)
num.y <- which( colnames(c.all.hit)==y)
if (p=='All'){
c.pl.scatter <- subset(c.all.hit,c.all.hit$league_abbr == l)
}else if (p == 'OF'){
c.pl.scatter <- subset(c.all.hit,c.all.hit$league_abbr == l & c.all.hit$Position %in% c('LF','CF','RF'))
} else{
c.pl.scatter <- subset(c.all.hit,c.all.hit$league_abbr == l & c.all.hit$Position == p)
}
if(x %in% c('Average','OBP','SLG','OPS','ISO','BABIP','K Percent','BB Percent','K-BB Percent','Strikeouts') | y %in% c('Average','OBP','SLG','OPS','ISO','BABIP','K Percent','BB Percent','K-BB Percent','Strikeouts') ){
mean_pa <- round(mean(c.pl.scatter$`Plate Apperances`),0)
c.pl.scatter <- subset(c.pl.scatter,c.pl.scatter$`Plate Apperances`>=mean_pa)
} else {
c.pl.scatter <- c.pl.scatter
}
c.pl.scatter <- c.pl.scatter[c(36,as.numeric(num.x),as.numeric(num.y))]
colnames(c.pl.scatter) <- c("pl","x","y")
if(p=='All'){
l.pl <- subset(c.pl.scatter,c.pl.scatter$x >= quantile(c.pl.scatter$x,.97) | c.pl.scatter$y >= quantile(c.pl.scatter$y,.97))
}else{
l.pl <- subset(c.pl.scatter,c.pl.scatter$x >= quantile(c.pl.scatter$x,.93) | c.pl.scatter$y >= quantile(c.pl.scatter$y,.93))
}
pl <- ggplot(c.pl.scatter, aes(x=x, y=y,label=pl))+
geom_point(aes(colour = x)) +
scale_colour_gradient(low = "Orange", high = "#3945D7") +
ggtitle(paste(y,"by",x), subtitle =paste(l,"-",p)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
xlab(x) + ylab(y) +
geom_text_repel(
arrow = arrow(length = unit(0.01, 'npc')),
point.padding = unit(.80, "lines"),
box.padding = unit(.60, "lines"),
force = 2,
data=l.pl) +
theme(legend.position = "none")
pl
# Pitcher Scatter - Career
l <- 'PBE'
p <- 'SP'
x <- 'Strikeouts'
y <- 'WAR'
s <- 2027
num.x <- which( colnames(c.all.pitch)==x)
num.y <- which( colnames(c.all.pitch)==y)
if (p=='All'){
c.p.pl.scatter <- subset(c.all.pitch,c.all.pitch$league_abbr == l)
} else{
c.p.pl.scatter <- subset(c.all.pitch,c.all.pitch$league_abbr == l & c.all.pitch$Position == p)
}
if(x %in% c('ERA', 'WHIP','BABIP','FIP','HR per 9','R per 9','Hits per 9','BB per 9','BB percent', 'Win Percent','K percent', 'K-BB percent') | y %in% c('ERA', 'WHIP','BABIP','FIP','HR per 9','R per 9','Hits per 9','BB per 9','BB percent', 'Win Percent','K percent', 'K-BB percent')){
mean_ip <- round(mean(c.p.pl.scatter$`Innings Pitched`),0)
c.p.pl.scatter <- subset(c.p.pl.scatter,c.p.pl.scatter$`Innings Pitched`>=mean_ip)
} else {
c.p.pl.scatter<- c.p.pl.scatter
}
c.p.pl.scatter <- c.p.pl.scatter[c(39,as.numeric(num.x),as.numeric(num.y))]
colnames(c.p.pl.scatter) <- c("pl","x","y")
if(p=='All'){
l.pl <- subset(c.p.pl.scatter,c.p.pl.scatter$x >= quantile(c.p.pl.scatter$x,.97) | c.p.pl.scatter$y >= quantile(c.p.pl.scatter$y,.97))
}else{
l.pl <- subset(c.p.pl.scatter,c.p.pl.scatter$x >= quantile(c.p.pl.scatter$x,.93) | c.p.pl.scatter$y >= quantile(c.p.pl.scatter$y,.93))
}
pl <- ggplot(c.p.pl.scatter, aes(x=x, y=y,label=pl))+
geom_point(aes(colour = x)) +
scale_colour_gradient(low = "Orange", high = "#3945D7") +
ggtitle(paste("All-Time",y,"by",x), subtitle =paste(l,"-",p)) +
theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = .5)) +
xlab(x) + ylab(y) +
geom_text_repel(
arrow = arrow(length = unit(0.01, 'npc')),
point.padding = unit(.80, "lines"),
box.padding = unit(.60, "lines"),
force = 2,
data=l.pl) +
theme(legend.position = "none")
pl
# career pitcher leaderboard table; set x (statistic name), y (number of observations to keep) and z (league abbreviation) before running this block
p <- 'All'
if(p=='All'){
pitch.plt.df <- subset(c.all.pitch,c.all.pitch$league_abbr == z)
}else{
pitch.plt.df <- subset(c.all.pitch,c.all.pitch$league_abbr == z & c.all.pitch$Position == p)
}
# for rate statistics, restrict to pitchers with at least the league-mean innings pitched
if(x %in% c('ERA', 'WHIP','BABIP','FIP','HR per 9','R per 9','Hits per 9','BB per 9','BB percent', 'Win Percent','K percent', 'K-BB percent')){
mean_ip <- round(mean(pitch.plt.df$`Innings Pitched`),0)
pitch.plt.df <- subset(pitch.plt.df,pitch.plt.df$`Innings Pitched`>=mean_ip)
} else {
pitch.plt.df <- pitch.plt.df
}
# find which column number the statistic variable is in the dataframe
num <- which( colnames(pitch.plt.df)==x)
# if statistic is a ratio, reverse order
if(x %in% c('ERA', 'WHIP','BABIP','FIP','HR per 9','R per 9','Hits per 9','BB per 9','BB percent')){
pitch.plt.df <- top_n(pitch.plt.df, n=y, -pitch.plt.df[num])
} else {
pitch.plt.df <- top_n(pitch.plt.df, n=y, pitch.plt.df[num])
}
# condense dataframe down to 2 columns: name & position, the statistic variable column
pitch.plt.df <- pitch.plt.df[c(39,as.numeric(num))]
# rename columns to pl.x, pl.y for easier plotting
colnames(pitch.plt.df) <- c("pl.x","pl.y")
pitch.plt.df <- filter(pitch.plt.df, pl.y != 0)
# ---------------------------------------------------------------------------------------
# File: make_data.R (repo: langoergen/stv4020aR, path: /h18/h17/Gruppe 2/prove/make_data.R)
data("steak_survey")
write.csv(steak_survey, file = "./prove/steak_survey.csv", row.names = FALSE)
rm(list = ls())
steak_survey <- read.csv("./prove/steak_survey.csv", stringsAsFactors = FALSE)
head(steak_survey)
table(steak_survey$hhold_income)
steak_survey$hhold_income <- ifelse(steak_survey$hhold_income == "$0 - $24,999", sample(0:24999, length(which(steak_survey$hhold_income == "$0 - $24,999"))),
ifelse(steak_survey$hhold_income == "$100,000 - $149,999", sample(100:149999, length(which(steak_survey$hhold_income == "$100,000 - $149,999"))),
ifelse(steak_survey$hhold_income == "$150,000+", sample(150000:200000, length(which(steak_survey$hhold_income == "$150,000+"))),
ifelse(steak_survey$hhold_income == "$25,000 - $49,999", sample(25000:49999, length(which(steak_survey$hhold_income == "$25,000 - $49,999"))),
ifelse(steak_survey$hhold_income == "$50,000 - $99,999", sample(50000:99999, length(which(steak_survey$hhold_income == "$50,000 - $99,999"))), NA)))))
steak_survey$age <- ifelse(steak_survey$age == "18-29", sample(18:29, length(which(steak_survey$age == "18-29")), replace = TRUE),
ifelse(steak_survey$age == "30-44", sample(30:44, length(which(steak_survey$age == "30-44")), replace = TRUE),
ifelse(steak_survey$age == "45-60", sample(45:60, length(which(steak_survey$age == "45-60")), replace = TRUE),
ifelse(steak_survey$age == "> 60", sample(61:90, length(which(steak_survey$age == "> 60")), replace = TRUE), NA))))
steak_survey$smoke <- ifelse(steak_survey$smoke == TRUE, 1, 0)
steak_survey$alcohol <- ifelse(steak_survey$alcohol == TRUE, 1, 0)
steak_survey <- steak_survey[, c("respondent_id", "steak_prep", "hhold_income", "age", "smoke", "alcohol")]
write.csv(steak_survey, file = "./prove/steak_survey.csv", row.names = FALSE)
% ---------------------------------------------------------------------------------------
% File: eeg_average.Rd (repo: erzk/eegUtils, path: /man/eeg_average.Rd, license: MIT)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_modifiers.R
\name{eeg_average}
\alias{eeg_average}
\alias{eeg_average.eeg_epochs}
\title{Calculate averages (e.g. ERPs)}
\usage{
eeg_average(data, ...)
\method{eeg_average}{eeg_epochs}(data, ...)
}
\arguments{
\item{data}{An \code{eeg_epochs} object.}
\item{...}{Other arguments passed to the averaging functions}
}
\description{
Calculate averages (e.g. ERPs)
Create an eeg_evoked object from eeg_epochs.
}
\section{Methods (by class)}{
\itemize{
\item \code{eeg_epochs}: Create evoked data from \code{eeg_epochs}
}}
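% A minimal usage sketch added for illustration; `epochs` is an assumed
% placeholder for an existing eeg_epochs object.
\examples{
\dontrun{
evoked <- eeg_average(epochs)
}
}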
\author{
Matt Craddock \email{matt@mattcraddock.com}
}
# ---------------------------------------------------------------------------------------
# File: test-layer_pointcloud.R (repo: SymbolixAU/mapdeck, path: /tests/testthat/test-layer_pointcloud.R)
context("pointcloud")
test_that("add_pointcloud accepts multiple objects", {
# testthat::skip_on_cran()
# library(sfheaders)
#
# geo <- '{"elevation":12345.0,"fill_colour":[68.0,1.0,84.0,255.0],"lon":69.11,"lat":34.28,"geometry":[69.11,34.28,12345.0]}'
# poly <- '[{\"elevation\":123.0,\"fill_colour\":\"#440154FF\",\"polyline\":\"_ifpEo`ydL\"}]'
#
# check <- function( geo, res ) {
# geo <- jsonify::from_json( geo )
# res <- jsonify::from_json( res$x$calls[[1]]$args[[2]] )
# expect_equal(geo[["lon"]], res[["lon"]])
# expect_equal(geo[["lat"]], res[["lat"]])
# expect_equal(geo[["fill_colour"]], res[["fill_colour"]])
# expect_equal(geo[["stroke_colour"]], res[["stroke_colour"]])
# expect_equal(geo[["stroke_width"]], res[["stroke_width"]])
# }
#
# ## sf
# set_token("abc")
# m <- mapdeck()
#
# df <- capitals[1, ]
# df$elev <- 12345
# sf <- sfheaders::sf_point( df[1, ], x = "lon", y = "lat", z = "elev" )
# p <- add_pointcloud(map = m, data = sf)
# check( geo, p )
#
# ## sfencoded
# enc <- googlePolylines::encode( sf )
# enc$z <- 123
# p <- add_pointcloud( map = m, data = enc, elevation = "z" )
# check( poly, p )
#
# ## sfencodedLite
# enc <- googlePolylines::encode( sf, strip = T )
# enc$z <- 123
# p <- add_pointcloud( map = m, data = enc, elevation = "z" )
# check( poly, p )
#
# ## data.frame with polyline
# df <- as.data.frame( enc )
# df$geometry <- unlist( df$geometry )
# p <- add_pointcloud( map = m, data = df, elevation = "z", polyline = "geometry")
# check( poly, p )
#
# ## data.frame
# df <- capitals[1, ]
# df$z <- 12345
# geo <- '{"elevation":12345.0,"fill_colour":[68.0,1.0,84.0,255.0],"lon":69.11,"lat":34.28,"geometry":[69.11,34.28,12345.0]}'
# p <- add_pointcloud( map = m, data = df, lon = "lon", lat = "lat", elevation = "z" )
# check( geo, p )
})
test_that("pointcloud reads elevation from sf Z attribute", {
# geo <- '[{"type":"Feature","properties":{},"geometry":{"type":"Point","coordinates":[0,0,1]}},{"type":"Feature","properties":{},"geometry":{"type":"Point","coordinates":[0,0,2]}}]'
# sf <- geojsonsf::geojson_sf( geo )
# # mapdeck:::resolve_data( sf, list(), "POINT" )
#
# check <- function( geo, res ) {
# geo <- jsonify::from_json( geo )
# res <- jsonify::from_json( res )
# expect_equal(geo[["lon"]], res[["lon"]])
# expect_equal(geo[["lat"]], res[["lat"]])
# expect_equal(geo[["fill_colour"]], res[["fill_colour"]])
# expect_equal(geo[["stroke_colour"]], res[["stroke_colour"]])
# expect_equal(geo[["stroke_width"]], res[["stroke_width"]])
# }
#
# l <- list()
# l[["palette"]] <- "viridis"
# l[["legend"]] <- FALSE
# l[["geometry"]] <- "geometry"
# geometry_column <- list( geometry = c("lon","lat","elevation") )
# shape <- mapdeck:::rcpp_point_sf_columnar( sf, l, geometry_column, digits = 6, "pointcloud" )
# js <- '{"elevation":[1.0,2.0],"fill_colour":[68.0,1.0,84.0,255.0,68.0,1.0,84.0,255.0],"lat":[0.0,0.0],"lon":[0.0,0.0],"geometry":[0.0,0.0,1.0,0.0,0.0,2.0]}'
# check( js, shape$data )
})
# ---------------------------------------------------------------------------------------
# File: sirt_sup.R (repo: alexanderrobitzsch/sirt, path: /R/sirt_sup.R)
## File Name: sirt_sup.R
## File Version: 0.01
## sort(unique(paste(x)))
sirt_sup <- function(x)
{
sort(unique(paste(x)))
}
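## Usage sketch: sirt_sup(c(2, 1, 2, NA)) returns c("1", "2", "NA"),
## i.e. the sorted unique values of x coerced to character.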
# ---------------------------------------------------------------------------------------
# File: vottrans.R (repo: ingted/R-Examples, path: /vottrans/R/vottrans.R)
vottrans <-
function(Ro,Rn,v=1,nw=FALSE){
X2<-Ro; Y2<-Rn
# Ro contains the results of the reference election in absolute numbers; Rn those of the current election; the first column must contain the number of eligible voters
N1<-nrow(X2); I<-ncol(X2)-1; J<- ncol(Y2)-1
# N = number of municipalities/precincts, I = number of parties in the reference election; J = number of parties in the current election
if(nw==TRUE){Y2[,(I+1)]<-Y2[,(I+1)]+Y2[,1]-X2[,1]}
# option that adds the change in eligible voters to the non-voter count of the current election
loeschen<-function(A){k<-1; b<-ncol(A); while(k<(N1+1)){if(A[k,b]<0){A<-A[-k,]; N1<-N1-1} else{k<-k+1}};return(A)}
# removes rows with negative values for non-voters
X<-loeschen(X2)
Y<-loeschen(Y2)
N<-nrow(X)
Xabs<-X[,-1];Yabs<-Y[,-1];
# creates matrices containing the results without the first column
Diag<-function(X){E<-X[,1]; DD<- matrix(0,N,N) ; for(i in 1:N){DD[i,i]<-E[i]};return(DD)}
# creates a diagonal matrix whose entries are the numbers of eligible voters
DD<-Diag(Y)
Xpro<-solve(Diag(X))%*% Xabs
Ypro<-solve(Diag(Y))%*% Yabs
# computes the result matrices in percentages
teilcode<-function(A,i){r<-nrow(A); teilcode<-matrix(7,r,1); k<-1; while(k<i){teilcode<-cbind(teilcode, matrix(0,r,I)); k<-k+1}; teilcode<-cbind(teilcode,A); k<-0 ; while(k<(J-i)){teilcode<-cbind(teilcode,matrix(0,r,I));k<-k+1}; teilcode<-teilcode[,-1];return(teilcode)}
# builds row-wise parts of the blocked matrix DD
geblockt<-function(A){geblockt<-matrix(7,1,ncol(A)*J); k<-1; while(k<(J+1)){geblockt<-rbind(geblockt,teilcode(A,k)); k<-k+1};geblockt<-geblockt[-1,]; return(geblockt)}
# builds one large blocked matrix from the elements of 'teilcode'
Ygeb<-function(C){Ygeb<-c(1); k<-1; while(k<(J+1)){Ygeb<-matrix(rbind(Ygeb,matrix(c(C[,k]))),ncol=1); k<-k+1}; Ygeb<-Ygeb[-1,]; return(Ygeb)}
Y1<-Ygeb(Ypro)
# stacks the current results of the individual parties into a single column vector
summe<-function(F){summe<-c(1); ges<-sum(F[,1]); k<-2; while(k< (ncol(F)+1)){summe<-cbind(summe,sum(F[,k])/ges); k<-k+1}; summe<-summe[,-1]; return(summe)}
summealt<-summe(X)
summeneu<-summe(Y)
# computes each party's overall share in the previous election
teila<-function(){teila<-matrix(7,I,1); k<-0; while(k<J){teila<-cbind(teila,diag(I)); k<-k+1}; teila<-teila[,-1]; return(teila)}
teilcode1<-function(i){teilcode1<-c(7); k<-1 ; while(k<i){teilcode1<-cbind(teilcode1, matrix(0,1,I)); k<-k+1}; teilcode1<-cbind(teilcode1, matrix(summealt,1)); k<-0; while(k<J-i){teilcode1<-cbind(teilcode1,matrix(0,1,I)); k<-k+1}; teilcode1<-teilcode1[,-1]; return(teilcode1)}
# builds the first part of the constraint matrix Amat
teilb<-function(){teilb<-matrix(7,1,I*J); k<-1; while(k<(J+1)){teilb<-rbind(teilb,teilcode1(k)); k<-k+1}; teilb<-teilb[-1,]; return(teilb)}
# builds the second part of the constraint matrix Amat
Amat<-rbind(teila(),teilb(),diag(I*J))
bvec<-rbind(matrix(1,I,1),matrix(summeneu,ncol=1),matrix(0,I*J,1))
if(v ==1){vottrans<-solve.QP(geblockt(t(Xpro)%*%Xabs)*1e-9,t(Y1%*%geblockt(Xabs))*1e-9,t(Amat),bvec,meq=2*I)}
if(v ==2){vottrans<-solve.QP(geblockt(t(Xpro)%*%Xpro)*1e-9,t(Y1%*%geblockt(Xpro))*1e-9,t(Amat),bvec,meq=2*I)}
if(v ==3){vottrans<-solve.QP(geblockt(t(Xabs)%*%Xabs)*1e-9,t(Y1%*%geblockt(Xabs))*1e-9,t(Amat),bvec,meq=2*I)}
solution1<-vottrans[1]
teil<-function(A,n){teil<-c(1); for(k in (I*(n-1)+1):(I*n)){teil<-rbind(teil,A[[1]][k]); k<-k+1}; teil<-teil[-1,]}
gesamtf<-function(A){gesamtf<-matrix(7,I,1); for(k in 1:J){gesamtf<-cbind(gesamtf, teil(A,k)); k<-k+1}; gesamtf<-gesamtf[,-1]; return(gesamtf)}
gesamt<-gesamtf(solution1)
# extracts the desired solution from the solve.QP output and reshapes it into a matrix of transition probabilities
return(gesamt)
}
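# Hypothetical usage sketch (matrix names invented for illustration): with
# `old` and `new` as matrices of absolute vote counts whose first column holds
# the number of eligible voters per municipality, the call below estimates the
# matrix of voter-transition probabilities. Note that solve.QP() comes from
# the quadprog package.
# library(quadprog)
# trans <- vottrans(Ro = old, Rn = new, v = 1)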
# ---------------------------------------------------------------------------------------
# File: plot2.R (repo: JorisEekhout/ExData_Plotting1, path: /plot2.R)
setwd("D:/personal//courses//data science//4. exploratory data analysis//assignments//week 1")
# 1. Reading Local Files
data <- read.table("./data/household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
# 2. Create subset from 2007-02-01 and 2007-02-02
dataSubset <- data[66637:69516, ]
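# A more robust alternative to the hard-coded row range (a sketch; assumes the
# "d/m/Y" Date format used by this UCI data set):
# dataSubset <- data[as.Date(data$Date, format = "%d/%m/%Y") %in%
#                      as.Date(c("2007-02-01", "2007-02-02")), ]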
# 3. Create plot 2
png(filename = "plot2.png", width = 480, height = 480)
plot(dataSubset$Global_active_power, ylab = "Global Active Power (kilowatts)", xlab = "", type = "n", axes = FALSE)
lines(dataSubset$Global_active_power)
ticks = c(1, 1440, 2880)
axis(side = 1, at = ticks, labels = c("Thu", "Fri", "Sat"))
axis(side = 2)
box(which = "plot", lty = "solid")
# Close the png device to write the file
dev.off()
% ---------------------------------------------------------------------------------------
% File: umxRAM.Rd (repo: tbates/umx, path: /man/umxRAM.Rd)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_run_modify.R
\name{umxRAM}
\alias{umxRAM}
\title{Build and run path-based SEM models}
\usage{
umxRAM(
model = NA,
...,
data = NULL,
name = NA,
group = NULL,
group.equal = NULL,
suffix = "",
comparison = TRUE,
type = c("Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS"),
weight = NULL,
allContinuousMethod = c("cumulants", "marginals"),
autoRun = getOption("umx_auto_run"),
tryHard = c("no", "yes", "ordinal", "search"),
std = FALSE,
refModels = NULL,
remove_unused_manifests = TRUE,
independent = NA,
setValues = TRUE,
optimizer = NULL,
verbose = FALSE,
std.lv = FALSE,
lavaanMode = c("sem", "lavaan"),
printTab = FALSE
)
}
\arguments{
\item{model}{A model to update (or set to string to use as name for new model)}
\item{...}{umxPaths, mxThreshold objects, etc.}
\item{data}{data for the model. Can be an \code{\link[=mxData]{mxData()}} or a data.frame}
\item{name}{A friendly name for the model}
\item{group}{(optional) Column name to use for a multi-group model (default = NULL)}
\item{group.equal}{In multi-group models, what to equate across groups (default = NULL: all free)}
\item{suffix}{String to append to each label (useful if model will be used in a multi-group model)}
\item{comparison}{Compare the new model to the old (if updating an existing model: default = TRUE)}
\item{type}{One of "Auto", "FIML", "cov", "cor", "WLS", "DWLS", "ULS"}
\item{weight}{Passes weight values to mxData}
\item{allContinuousMethod}{"cumulants" or "marginals". Used in all-continuous WLS data to determine if a means model needed.}
\item{autoRun}{Whether to run the model (default), or just to create it and return without running.}
\item{tryHard}{Default ('no') uses normal mxRun. "yes" uses mxTryHard. Other options: "ordinal", "search"}
\item{std}{Whether to show standardized estimates (TRUE), raw estimates (FALSE), or, if NULL, print fit only}
\item{refModels}{pass in reference models if available. Use FALSE to suppress computing these if not provided.}
\item{remove_unused_manifests}{Whether to remove variables in the data to which no path makes reference (defaults to TRUE)}
\item{independent}{Whether the model is independent (default = NA)}
\item{setValues}{Whether to generate likely good start values (Defaults to TRUE)}
\item{optimizer}{optionally set the optimizer (default NULL does nothing)}
\item{verbose}{Whether to tell the user what latents and manifests were created etc. (Default = FALSE)}
\item{std.lv}{Whether to auto standardize latent variables when using string syntax (default = FALSE)}
\item{lavaanMode}{Defaults when building out string syntax default = "sem" (alternative is "lavaan", with very few defaults)}
\item{printTab}{For string input, whether to output a table of paths (default FALSE)}
}
\value{
\itemize{
\item \code{\link[=mxModel]{mxModel()}}
}
}
\description{
\code{umxRAM} expedites creation of structural equation models, still without doing invisible things to the model. It
supports \code{\link[=umxPath]{umxPath()}}. To support cross-language sharing and science learning, \code{umxRAM} also supports lavaan model strings.
Here's a path example that models miles per gallon (mpg) as a function of weight (wt) and engine displacement (disp)
using the widely used \code{mtcars} data set.
\if{html}{\out{<div class="sourceCode">}}\preformatted{m1 = umxRAM("tim", data = mtcars,
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(v.m. = c("wt", "disp", "mpg"))
)
}\if{html}{\out{</div>}}
As you can see, most of the work is done by \code{\link[=umxPath]{umxPath()}}. \code{umxRAM} wraps these paths up, takes the \verb{data =} input, and
then internally sets up all the labels and start values for the model, runs it, and calls \code{\link[=umxSummary]{umxSummary()}}, and \code{\link[=plot.MxModel]{plot.MxModel()}}.
Try it, or one of the several models in the examples at the bottom of this page.
A common error is to include data in the main list, a bit like
saying \code{lm(y ~ x + df)} instead of \code{lm(y ~ x, data = df)}.
\strong{nb}: Because it uses the presence of a variable in the data to detect if a variable is latent or not, \code{umxRAM} needs data at build time.
\strong{String Syntax}
Here is an example using lavaan syntax (for more, see \code{\link[=umxLav2RAM]{umxLav2RAM()}})
\if{html}{\out{<div class="sourceCode R">}}\preformatted{m1 = umxRAM("mpg ~ wt + disp", data = mtcars)
}\if{html}{\out{</div>}}
\strong{Sketch mode}
If you are at the "sketching" stage of theory consideration, \code{umxRAM} supports
setting data to a simple vector of manifest names.
As usual in \code{umxRAM}, any variables you refer to that are not in data are treated as latents.
\if{html}{\out{<div class="sourceCode R">}}\preformatted{m1 = umxRAM("sketch", data = c("A", "B"),
umxPath("C", to = c("A", "B"), values=.3),
umxPath("A", with = "B", values=.45),
umxPath(v.m. = c("A", "B")),
umxPath(v1m0 = "C")
)
plot(m1, means = FALSE)
}\if{html}{\out{</div>}}
Will create this figure:
\if{html}{\figure{sketch.png}{options: alt="Figure: sketch.png"}}
\if{latex}{\figure{sketch.pdf}{options: width=7cm}}
}
\details{
\strong{Comparison for OpenMx users}
\code{umxRAM} differs from \code{\link[OpenMx:mxModel]{OpenMx::mxModel()}} in the following ways:
\enumerate{
\item You don't need to set type = "RAM".
\item You don't need to list manifestVars (they are detected from path usage).
\item You don't need to list latentVars (detected as anything in paths but not in \code{mxData}).
\item You don't need to create mxData when you already have a data.frame.
\item You add data with \verb{data = } (as elsewhere in R, e.g. \code{\link[=lm]{lm()}}).
\item You don't need to add labels: paths are automatically labelled "a_to_b" etc.
\item You don't need to set start values, they will be done for you.
\item You don't need to \code{mxRun} the model: it will run automatically, and print a summary.
\item You don't need to run \code{summary}: with \code{autoRun=TRUE}, it will print a summary.
\item You get a plot of the model with estimates on the paths, including multiple groups.
\item Less typing: \code{\link[=umxPath]{umxPath()}} offers powerful verbs to describe paths.
\item Supports a subset of lavaan string input.
}
\strong{Start values}. Currently, manifest variable means are set to the observed means,
residual variances are set to 80\% of the observed variance of each variable,
and single-headed paths are set to a positive starting value (currently .9).
\emph{note}: The start-value strategy is subject to improvement, and will be documented in the help for \code{\link[=umxRAM]{umxRAM()}}.
\strong{Comparison with other software}
Some SEM software does a lot of behind-the-scenes defaulting and path addition.
If you want this, I'd say use \code{umxRAM} with lavaan string input.
}
\examples{
\dontrun{
# ============================================
# = 1. Here's a simple example with raw data =
# ============================================
mtcars$litres = mtcars$disp/61.02
m1 = umxRAM("tim", data = mtcars,
umxPath(c("wt", "litres"), to = "mpg"),
umxPath("wt", with = "litres"),
umxPath(v.m. = c("wt", "litres", "mpg"))
)
# 2. Use parameters to see the parameter estimates and labels
parameters(m1)
# And umxSummary to get standardized parameters, CIs etc from the run model.
umxSummary(m1, std=TRUE)
# |name | Std.Estimate| Std.SE|CI |
# |:--------------|------------:|------:|:--------------------|
# |wt_to_mpg | -0.54| 0.17|-0.54 [-0.89, -0.2] |
# |disp_to_mpg | -0.36| 0.18|-0.36 [-0.71, -0.02] |
# |mpg_with_mpg | 0.22| 0.07|0.22 [0.08, 0.35] |
# |wt_with_wt | 1.00| 0.00|1 [1, 1] |
# |b1 | 0.89| 0.04|0.89 [0.81, 0.96] |
# |disp_with_disp | 1.00| 0.00|1 [1, 1] |
# 3. Of course you can plot the model
plot(m1)
plot(m1, std=TRUE, means=FALSE)
plot(m1, std = TRUE, means=FALSE, strip= TRUE, resid = "line")
# ===============================================
# = lavaan string example (more at ?umxLav2RAM) =
# ===============================================
m1 = umxRAM(data = mtcars, "#modelName
mpg ~ wt + disp")
# =======================
# = A multi-group model =
# =======================
mtcars$litres = mtcars$disp/61.02
m1 = umxRAM("tim", data = mtcars, group = "am",
umxPath(c("wt", "litres"), to = "mpg"),
umxPath("wt", with = "litres"),
umxPath(v.m. = c("wt", "litres", "mpg"))
)
# In this model, all parameters are free across the two groups.
# ====================================
# = A cov model, with steps laid out =
# ====================================
# *note*: The variance of displacement is in cubic inches and is very large.
# to help the optimizer, one might, say, multiply disp *.016 to work in litres
tmp = mtcars; tmp$disp= tmp$disp *.016
# We can just give the raw data and ask for it to be made into type cov:
m1 = umxRAM("tim", data = tmp, type="cov",
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = c("mpg", "wt", "disp"))
)
# (see ?umxPath for more nifty options making paths...)
# =========================================
# = umxRAM can also accept mxData as data =
# =========================================
# For convenience, list up the manifests you will be using
selVars = c("mpg", "wt", "disp")
tmp = mtcars; tmp$disp= tmp$disp *.016
myCov = mxData(cov(tmp[, selVars]), type = "cov", numObs = nrow(mtcars) )
m1 = umxRAM("tim", data = myCov,
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = selVars)
)
# =======================
# = umxRAM supports WLS =
# =======================
# 1. Run an all-continuous WLS model
mw = umxRAM("raw", data = mtcars[, c("mpg", "wt", "disp")],
type = "WLS", allContinuousMethod = "cumulants",
umxPath(var = c("wt", "disp", "mpg")),
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = c("wt", "disp", "mpg"))
)
# 2. Switch to marginals to support means
mw = umxRAM("raw", data = mtcars[, c("mpg", "wt", "disp")],
type = "WLS", allContinuousMethod= "marginals",
umxPath(var = c("wt", "disp", "mpg")),
umxPath(c("wt", "disp"), to = "mpg"),
umxPath("wt", with = "disp"),
umxPath(var = c("wt", "disp", "mpg"))
)
# ===============================
# = Using umxRAM in Sketch mode =
# ===============================
# No data needed: just list variable names!
# Resulting model will be plotted automatically
m1 = umxRAM("what does unique pairs do, I wonder", data = c("A", "B", "C"),
umxPath(unique.pairs = c("A", "B", "C"))
)
m1 = umxRAM("ring around the rosey", data = c("B", "C"),
umxPath(fromEach = c("A", "B", "C"))
)
m1 = umxRAM("fromEach with to", data = c("B", "C"),
umxPath(fromEach = c("B", "C"), to= "D")
)
m1 = umxRAM("CFA_sketch", data = paste0("x", 1:4),
umxPath("g", to = paste0("x", 1:4)),
umxPath(var = paste0("x", 1:4)),
umxPath(v1m0 = "g")
)
# =================================================
# = This is an example of using your own labels: =
# umxRAM will not over-ride them =
# =================================================
m1 = umxRAM("tim", data = mtcars, type="cov",
umxPath(c("wt", "disp"), to = "mpg"),
umxPath(cov = c("wt", "disp"), labels = "b1"),
umxPath(var = c("wt", "disp", "mpg"))
)
omxCheckEquals(m1$S$labels["disp", "wt"], "b1") # label preserved
m1$S$labels
# mpg wt disp
# mpg "mpg_with_mpg" "mpg_with_wt" "disp_with_mpg"
# wt "mpg_with_wt" "wt_with_wt" "b1"
# disp "disp_with_mpg" "b1" "disp_with_disp"
parameters(m1)
# ===========
# = Weights =
# ===========
# !!! Not tested !!!
mtcars$litres = mtcars$disp/61.02
m1 = umxRAM("tim", data = mtcars, weight= "cyl",
umxPath(c("wt", "litres"), to = "mpg"),
umxPath("wt", with = "litres"),
umxPath(v.m. = c("wt", "litres", "mpg"))
)
}
}
\references{
\itemize{
\item \url{https://tbates.github.io}, \url{https://github.com/tbates/umx}
}
}
\seealso{
\code{\link[=umxPath]{umxPath()}}, \code{\link[=umxSummary]{umxSummary()}}, \code{\link[=plot]{plot()}}, \code{\link[=parameters]{parameters()}}, \code{\link[=umxSuperModel]{umxSuperModel()}}, \code{\link[=umxLav2RAM]{umxLav2RAM()}}
Other Core Model Building Functions:
\code{\link{umxMatrix}()},
\code{\link{umxModify}()},
\code{\link{umxPath}()},
\code{\link{umxSuperModel}()},
\code{\link{umx}}
}
\concept{Core Model Building Functions}
# ---------------------------------------------------------------------------------------
# File: minutes.R (repo: andrewwoods1/kaggle-football-events, path: /minutes.R)
library(ggplot2)
library(MASS)
library(dplyr)
df = read.csv(file.choose())
names(df)
#Home shots and time between them
soth = subset(df,df$event_type == 1 & df$side == 1)
soth = soth %>%
arrange(id_odsp)
wait_time = aggregate(soth$time,by = list(soth$id_odsp,soth$side),FUN = diff)
wait_time = wait_time %>%
arrange(Group.1)
wait_diff = c()
for (x in wait_time$Group.1){
wait_diff = c(wait_diff,'first',wait_time$x[wait_time$Group.1 == x])
}
wait_diff = unlist(wait_diff)
soth$wait_diff = wait_diff
soth$wait_diff[soth$wait_diff < 0] = 0
soth$wait_diff[soth$wait_diff == "first"] = soth$time[soth$wait_diff == "first"]
soth$wait_diff = as.numeric(soth$wait_diff)
summary(soth$wait_diff)
# Away shots
sota = subset(df,df$event_type == 1 & df$side == 2)
sota = sota %>%
arrange(id_odsp)
wait_time = aggregate(sota$time,by = list(sota$id_odsp,sota$side),FUN = diff)
wait_time = wait_time %>%
arrange(Group.1)
wait_diff = c()
for (x in wait_time$Group.1){
wait_diff = c(wait_diff,'first',wait_time$x[wait_time$Group.1 == x])
}
wait_diff = unlist(wait_diff)
sota$wait_diff = wait_diff
sota$wait_diff[sota$wait_diff < 0] = 0
sota$wait_diff[sota$wait_diff == "first"] = sota$time[sota$wait_diff == "first"]
sota$wait_diff = as.numeric(sota$wait_diff)
summary(sota$wait_diff)
# Combine the two
sot = rbind(soth,sota)
summary(sot$wait_diff)
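# The two near-identical blocks above could be collapsed into one dplyr
# pipeline (a sketch, not part of the original analysis):
# sot <- df %>%
#   filter(event_type == 1) %>%
#   arrange(id_odsp, side, time) %>%
#   group_by(id_odsp, side) %>%
#   mutate(wait_diff = pmax(time - lag(time, default = 0), 0)) %>%
#   ungroup()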
# Analyse
#Look at correlation and distribution of times in each category
mean(sot$wait_diff[sot$is_goal == 1])
mean(sot$wait_diff[sot$is_goal == 0])
sot$bin = cut(sot$wait_diff,breaks = c(0,10,20,30,40,50,100),right = FALSE)
conv = sot %>%
group_by(bin) %>%
summarise(Conversion_Rate = mean(is_goal), count = n())
ggplot(data = conv, aes(x = bin, y = Conversion_Rate)) +
geom_bar(stat = "identity",width = 0.7, fill = "#0B75B6") + coord_flip() +
ylab("Shot Conversion Rate") + xlab("Time since previous shot") +
geom_text(aes(label=count), vjust=0.3,hjust =1.2)
t.test(sot$is_goal[sot$wait_diff > 20],sot$is_goal[sot$wait_diff <= 20],
alternative = "g")
% ---------------------------------------------------------------------------------------
% File: insertImage.Rd (repo: david-f1976/openxlsx, path: /man/insertImage.Rd)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wrappers.R
\name{insertImage}
\alias{insertImage}
\title{Insert an image into a worksheet}
\usage{
insertImage(wb, sheet, file, width = 6, height = 3, startRow = 1,
startCol = 1, units = "in", dpi = 300)
}
\arguments{
\item{wb}{A workbook object}
\item{sheet}{A name or index of a worksheet}
\item{file}{An image file. Valid file types are: jpeg, png, bmp}
\item{width}{Width of figure.}
\item{height}{Height of figure.}
\item{startRow}{Row coordinate of upper left corner of the image}
\item{startCol}{Column coordinate of upper left corner of the image}
\item{units}{Units of width and height. Can be "in", "cm" or "px"}
\item{dpi}{Image resolution used for conversion between units.}
}
\description{
Insert an image into a worksheet
}
\examples{
## Create a new workbook
wb <- createWorkbook("Ayanami")
## Add some worksheets
addWorksheet(wb, "Sheet 1")
addWorksheet(wb, "Sheet 2")
addWorksheet(wb, "Sheet 3")
## Insert images
img <- system.file("extdata","einstein.jpg", package = "openxlsx")
insertImage(wb, "Sheet 1", img, startRow = 5, startCol = 3, width = 6, height = 5)
insertImage(wb, 2, img, startRow = 2, startCol = 2)
insertImage(wb, 3 , img, width = 15, height = 12, startRow = 3, startCol = "G", units = "cm")
## Save workbook
\dontrun{saveWorkbook(wb, "insertImageExample.xlsx", overwrite = TRUE)}
}
\seealso{
\code{\link{insertPlot}}
}
\author{
Alexander Walker
}
# ---------------------------------------------------------------------------------------
# File: api_counter_v2_out.R (repo: namsor/namsor-r-sdk2, path: /R/api_counter_v2_out.R)
# NamSor API v2
#
# NamSor API v2 : endpoints to process personal names (gender, cultural origin or ethnicity) in all alphabets or languages. Use GET methods for small tests, but prefer POST methods for higher throughput (batch processing of up to 100 names at a time). Need something you can't find here? We have many more features coming soon. Let us know, we'll do our best to add it!
#
# The version of the OpenAPI document: 2.0.10
# Contact: contact@namsor.com
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title APICounterV2Out
#' @description APICounterV2Out Class
#' @format An \code{R6Class} generator object
#' @field apiKey \link{APIKeyOut} [optional]
#'
#' @field apiService character [optional]
#'
#' @field createdDateTime integer [optional]
#'
#' @field totalUsage integer [optional]
#'
#' @field lastFlushedDateTime integer [optional]
#'
#' @field lastUsedDateTime integer [optional]
#'
#' @field serviceFeaturesUsage named list( integer ) [optional]
#'
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
APICounterV2Out <- R6::R6Class(
'APICounterV2Out',
public = list(
`apiKey` = NULL,
`apiService` = NULL,
`createdDateTime` = NULL,
`totalUsage` = NULL,
`lastFlushedDateTime` = NULL,
`lastUsedDateTime` = NULL,
`serviceFeaturesUsage` = NULL,
initialize = function(`apiKey`=NULL, `apiService`=NULL, `createdDateTime`=NULL, `totalUsage`=NULL, `lastFlushedDateTime`=NULL, `lastUsedDateTime`=NULL, `serviceFeaturesUsage`=NULL, ...){
local.optional.var <- list(...)
if (!is.null(`apiKey`)) {
stopifnot(R6::is.R6(`apiKey`))
self$`apiKey` <- `apiKey`
}
if (!is.null(`apiService`)) {
stopifnot(is.character(`apiService`), length(`apiService`) == 1)
self$`apiService` <- `apiService`
}
if (!is.null(`createdDateTime`)) {
stopifnot(is.numeric(`createdDateTime`), length(`createdDateTime`) == 1)
self$`createdDateTime` <- `createdDateTime`
}
if (!is.null(`totalUsage`)) {
stopifnot(is.numeric(`totalUsage`), length(`totalUsage`) == 1)
self$`totalUsage` <- `totalUsage`
}
if (!is.null(`lastFlushedDateTime`)) {
stopifnot(is.numeric(`lastFlushedDateTime`), length(`lastFlushedDateTime`) == 1)
self$`lastFlushedDateTime` <- `lastFlushedDateTime`
}
if (!is.null(`lastUsedDateTime`)) {
stopifnot(is.numeric(`lastUsedDateTime`), length(`lastUsedDateTime`) == 1)
self$`lastUsedDateTime` <- `lastUsedDateTime`
}
if (!is.null(`serviceFeaturesUsage`)) {
stopifnot(is.vector(`serviceFeaturesUsage`), length(`serviceFeaturesUsage`) != 0)
sapply(`serviceFeaturesUsage`, function(x) stopifnot(is.character(x)))
self$`serviceFeaturesUsage` <- `serviceFeaturesUsage`
}
},
toJSON = function() {
APICounterV2OutObject <- list()
if (!is.null(self$`apiKey`)) {
APICounterV2OutObject[['apiKey']] <-
self$`apiKey`$toJSON()
}
if (!is.null(self$`apiService`)) {
APICounterV2OutObject[['apiService']] <-
self$`apiService`
}
if (!is.null(self$`createdDateTime`)) {
APICounterV2OutObject[['createdDateTime']] <-
self$`createdDateTime`
}
if (!is.null(self$`totalUsage`)) {
APICounterV2OutObject[['totalUsage']] <-
self$`totalUsage`
}
if (!is.null(self$`lastFlushedDateTime`)) {
APICounterV2OutObject[['lastFlushedDateTime']] <-
self$`lastFlushedDateTime`
}
if (!is.null(self$`lastUsedDateTime`)) {
APICounterV2OutObject[['lastUsedDateTime']] <-
self$`lastUsedDateTime`
}
if (!is.null(self$`serviceFeaturesUsage`)) {
APICounterV2OutObject[['serviceFeaturesUsage']] <-
self$`serviceFeaturesUsage`
}
APICounterV2OutObject
},
fromJSON = function(APICounterV2OutJson) {
APICounterV2OutObject <- jsonlite::fromJSON(APICounterV2OutJson)
if (!is.null(APICounterV2OutObject$`apiKey`)) {
apiKeyObject <- APIKeyOut$new()
apiKeyObject$fromJSON(jsonlite::toJSON(APICounterV2OutObject$apiKey, auto_unbox = TRUE, digits = NA))
self$`apiKey` <- apiKeyObject
}
if (!is.null(APICounterV2OutObject$`apiService`)) {
self$`apiService` <- APICounterV2OutObject$`apiService`
}
if (!is.null(APICounterV2OutObject$`createdDateTime`)) {
self$`createdDateTime` <- APICounterV2OutObject$`createdDateTime`
}
if (!is.null(APICounterV2OutObject$`totalUsage`)) {
self$`totalUsage` <- APICounterV2OutObject$`totalUsage`
}
if (!is.null(APICounterV2OutObject$`lastFlushedDateTime`)) {
self$`lastFlushedDateTime` <- APICounterV2OutObject$`lastFlushedDateTime`
}
if (!is.null(APICounterV2OutObject$`lastUsedDateTime`)) {
self$`lastUsedDateTime` <- APICounterV2OutObject$`lastUsedDateTime`
}
if (!is.null(APICounterV2OutObject$`serviceFeaturesUsage`)) {
self$`serviceFeaturesUsage` <- ApiClient$new()$deserializeObj(APICounterV2OutObject$`serviceFeaturesUsage`, "map(integer)", loadNamespace("namsor"))
}
},
toJSONString = function() {
jsoncontent <- c(
if (!is.null(self$`apiKey`)) {
sprintf(
'"apiKey":
%s
',
jsonlite::toJSON(self$`apiKey`$toJSON(), auto_unbox=TRUE, digits = NA)
)},
if (!is.null(self$`apiService`)) {
sprintf(
'"apiService":
"%s"
',
self$`apiService`
)},
if (!is.null(self$`createdDateTime`)) {
sprintf(
'"createdDateTime":
%d
',
self$`createdDateTime`
)},
if (!is.null(self$`totalUsage`)) {
sprintf(
'"totalUsage":
%d
',
self$`totalUsage`
)},
if (!is.null(self$`lastFlushedDateTime`)) {
sprintf(
'"lastFlushedDateTime":
%d
',
self$`lastFlushedDateTime`
)},
if (!is.null(self$`lastUsedDateTime`)) {
sprintf(
'"lastUsedDateTime":
%d
',
self$`lastUsedDateTime`
)},
if (!is.null(self$`serviceFeaturesUsage`)) {
sprintf(
'"serviceFeaturesUsage":
"%s"
',
jsonlite::toJSON(lapply(self$`serviceFeaturesUsage`, function(x){ x }), auto_unbox = TRUE, digits=NA)
)}
)
jsoncontent <- paste(jsoncontent, collapse = ",")
paste('{', jsoncontent, '}', sep = "")
},
fromJSONString = function(APICounterV2OutJson) {
APICounterV2OutObject <- jsonlite::fromJSON(APICounterV2OutJson)
self$`apiKey` <- APIKeyOut$new()$fromJSON(jsonlite::toJSON(APICounterV2OutObject$apiKey, auto_unbox = TRUE, digits = NA))
self$`apiService` <- APICounterV2OutObject$`apiService`
self$`createdDateTime` <- APICounterV2OutObject$`createdDateTime`
self$`totalUsage` <- APICounterV2OutObject$`totalUsage`
self$`lastFlushedDateTime` <- APICounterV2OutObject$`lastFlushedDateTime`
self$`lastUsedDateTime` <- APICounterV2OutObject$`lastUsedDateTime`
self$`serviceFeaturesUsage` <- ApiClient$new()$deserializeObj(APICounterV2OutObject$`serviceFeaturesUsage`, "map(integer)", loadNamespace("namsor"))
self
}
)
)
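# Hypothetical usage sketch (field values invented for illustration):
# counter <- APICounterV2Out$new(apiService = "api2/gender", totalUsage = 42)
# cat(counter$toJSONString())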
% ---------------------------------------------------------------------------------------
% File: fdlm_factor.Rd (repo: drkowal/FDLM, path: /man/fdlm_factor.Rd)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/component_samplers.R
\name{fdlm_factor}
\alias{fdlm_factor}
\title{Sample the dynamic factors}
\usage{
fdlm_factor(Y, sigma_et, Wt, Fmat = NULL, YF = NULL, Gt = NULL,
W0 = NULL, kfas_model = NULL, useFastImpute = FALSE)
}
\arguments{
\item{Y}{the \code{T x m} data observation matrix, where \code{T} is the number of time points and \code{m} is the number of observation points (\code{NA}s allowed)}
\item{sigma_et}{\code{T}- or \code{1}-dimensional vector of observation error standard deviation(s)}
\item{Wt}{\code{K x K} matrix or \code{K x K x T} array of evolution error covariances}
\item{Fmat}{\code{m x K} matrix of FLCs; only needed for \code{useFastImpute = FALSE}}
\item{YF}{\code{T x K} matrix of data \code{Y} projected onto FLCs, \code{Y\%*\%Fmat}, which takes the place
of \code{Y}; only needed for \code{useFastImpute = TRUE}}
\item{Gt}{\code{K x K} evolution matrix; if NULL, set as identity (for random walk)}
\item{W0}{\code{K x K} matrix of initial evolution error covariances; if NULL, set to diag(10^-4, K)}
\item{kfas_model}{\code{SSModel} object from KFAS package; if NULL, construct model w/in the sampler (might be slower!)}
\item{useFastImpute}{logical; when TRUE, use imputation/projection scheme for the dynamic factors; otherwise use full state space model for factors (slower)}
}
\value{
The \code{T x K} matrix of dynamic factors, \code{Beta}.
}
\description{
Sample the dynamic factors (latent state space variables) using the simulation smoothing
algorithm of Koopman and Durbin (2001), implemented in the KFAS package.
}
\note{
The sampler has two options: \code{useFastImpute = TRUE}, in which the response is
\code{YF = Y\%*\%Fmat} (\code{T x K}) and the observation matrix is the identity (\code{K x K});
and \code{useFastImpute = FALSE}, in which the response is \code{Y} (\code{T x m})
and the observation matrix is \code{Fmat} (\code{m x K}). Recall that typically \code{K < < m},
so \code{useFastImpute = TRUE} is often much faster.
}
\examples{
# Read in the yield curve data:
data("US_Yields")
# Restrict to dates since 2006:
Y = Y[which(dates > as.Date("2006-01-01")),];
# This is a simple (yet meaningless) example:
K = 3
Beta = fdlm_factor(Y, sigma_et = 1, Wt = diag(K), Fmat = diag(ncol(Y))[,1:K])
}
# ---------------------------------------------------------------------------------------
# File: cmp_YL_TopDom_results.R (repo: marzuf/v2_TopDom_Cancer_HiC_data_TAD_DA, path: /cmp_YL_TopDom_results.R)
require(ggpubr)
require(ggsci)
outFolder <- "CMP_YL_TOPDOM_RESULTS"
dir.create(outFolder, recursive = TRUE)
myHeight <- 5
myWidth <- 7
plotType <- "svg"
plotTypeP <- "png"
myHeightP <- myWidthP <- 400
plotCex <- 1.2
# Rscript cmp_YL_TopDom_results.R
source("../Cancer_HiC_data_TAD_DA/utils_fct.R")
td_folder <- "v2_TopDom_Cancer_HiC_data_TAD_DA"
yl_folder <- "v2_Yuanlong_Cancer_HiC_data_TAD_DA"
hicds <- "ENCSR489OCU_NCI-H460_40kb"
exprds <- "TCGAluad_norm_luad"
setDir <- "/media/electron"
setDir <- ""
############################################################################### COMPARE TAD P-VALUES
td_pvals <- get(load(file.path(setDir,
"/mnt/etemp/marie", td_folder,
"PIPELINE/OUTPUT_FOLDER", hicds, exprds, "11sameNbr_runEmpPvalCombined/emp_pval_combined.Rdata")))
td_pvals <- p.adjust(td_pvals, method="BH")
yl_pvals <- get(load(file.path(setDir,
"/mnt/etemp/marie", yl_folder,
"PIPELINE/OUTPUT_FOLDER", hicds, exprds, "11sameNbr_runEmpPvalCombined/emp_pval_combined.Rdata")))
yl_pvals <- p.adjust(yl_pvals, method="BH")
pvals_dt <- data.frame(
pval_log10 = c(-log10(td_pvals), -log10(yl_pvals)),
tads_data = c(rep("TopDom", length(td_pvals)), rep("YL", length(yl_pvals))),
stringsAsFactors =FALSE
)
legTitle <- "TAD caller"
plotTit <- "Comparison TAD adj. p-vals."
my_cols <- c(pal_jama()(5)[c(3, 2)])
signifThresh <- 0.01
tmp1 <- by(pvals_dt, pvals_dt$tads_data, function(x) sum(x$pval_log10 >= -log10(signifThresh)))
tmp2 <- by(pvals_dt, pvals_dt$tads_data, function(x) length(x$pval_log10))
mySub <- paste0("# TADs p-val <= ", signifThresh, ": ", paste0(names(tmp1), "= ", as.numeric(tmp1), "/", as.numeric(tmp2), collapse="; "))
p_pvals <- ggdensity(pvals_dt,
title="TAD pvals [-log10]",
x = "pval_log10",
y = "..density..",
# combine = TRUE, # Combine the 3 plots
xlab = "TAD p-val [-log10]",
# add = "median", # Add median line.
rug = FALSE, # Add marginal rug
color = "tads_data",
fill = "tads_data",
palette = "jco"
) +
scale_y_continuous(breaks = scales::pretty_breaks(n = 10))+
scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
scale_color_manual(values=my_cols)+
ggtitle(plotTit, subtitle = mySub)+
scale_fill_manual(values=my_cols) +
labs(color=paste0(legTitle),fill=paste0(legTitle), y="Density") +
theme( plot.title = element_text(hjust=0.5, size = 16, face="bold"),
plot.subtitle = element_text(hjust=0.5, size = 14, face="italic"))
outFile <- file.path(outFolder, paste0("cmp_YL_TopDom_TADpvals_density.", plotType))
ggsave(p_pvals, file=outFile, height=myHeight, width=myWidth)
cat(paste0("... written: ", outFile, "\n"))
check_dt <- get(load(file.path(setDir,
"/mnt/etemp/marie", yl_folder,
"CREATE_FINAL_TABLE//all_result_dt.Rdata")))
stopifnot(sum(check_dt$adjPvalComb[check_dt$hicds == hicds & check_dt$exprds == exprds] <= signifThresh) == tmp1["YL"])
# GENE_RANK_TAD_RANK/all_gene_tad_signif_dt.Rdata
gt_td_dt <- get(load(file.path("GENE_RANK_TAD_RANK/all_gene_tad_signif_dt.Rdata")))
gt_yl_dt <- get(load(file.path("..", yl_folder, "GENE_RANK_TAD_RANK/all_gene_tad_signif_dt.Rdata")))
gt_dt <- merge(gt_td_dt, gt_yl_dt, all.x=F, all.y=F,by=c("hicds", "exprds", "entrezID"), suffixes=c("_td", "_yl"))
nDS <- length(unique(file.path(gt_dt$hicds, gt_dt$exprds)))
all_plot_vars <- c("gene_rank", "tad_rank", "tad_adjCombPval", "adj.P.Val")
for(plot_var in all_plot_vars) {
my_x <- gt_dt[,paste0(plot_var, "_yl")]
my_y <- gt_dt[,paste0(plot_var, "_td")]
outFile <- file.path(outFolder, paste0(plot_var, "_TopDom_vs_YL_densplot.", plotTypeP))
do.call(plotTypeP, list(outFile, height=myHeightP, width=myWidthP))
densplot(x=my_x, y=my_y,
main=paste0(plot_var),
xlab=paste0("YL TADs"), ylab=paste0("TopDom TADs"),
cex.main=plotCex,cex.lab=plotCex,cex.axis=plotCex,
cex=0.7, pch=16)
mtext(side=3, text=paste0("# DS = ", nDS))
addCorr(x=my_x,y=my_y,bty="n")
foo <- dev.off()
cat(paste0("...written:", outFile,"\n"))
}
# ---------------------------------------------------------------------------------------
# File: summary.quantileplot.R (repo: Allisterh/quantileplot, path: /R/summary.quantileplot.R)
#' Summary function for \code{quantileplot} objects
#' @description Summarizes the S3 class object returned by the quantileplot function
#' @param object An object of class \code{quantileplot}, which results from a call to the function \code{quantileplot}
#' @param ... Other arguments to \code{summary} commands
#' @return Prints a summary of the estimates.
#' @references Lundberg, Ian, Robin C. Lee, and Brandon M. Stewart. 2021. "The quantile plot: A visualization for bivariate population relationships." Working paper.
#' @references Lundberg, Ian, and Brandon M. Stewart. 2020. "Comment: Summarizing income mobility with multiple smooth quantiles instead of parameterized means." Sociological Methodology 50(1):96-111.
#' @references Fasiolo, Matteo, Simon N. Wood, Margaux Zaffran, Raphaรซl Nedellec, and Yannig Goude. 2020. "Fast calibrated additive quantile regression." Journal of the American Statistical Association.
#' @export
summary.quantileplot <- function(object, ...) {
# Return the call to the user
cat("Call:\n")
print(object$call)
# Say a bit about the plot
cat("\nA quantileplot with the following elements:\n")
cat("\n1. A horizontal density for the predictor\n")
cat(paste0("\n2. Vertical densities for the conditional outcome distribution at predictor values:\n ",
paste0(format(object$slice_x_values, digits = 3), collapse = ", "),"\n"))
cat(paste0("\n3. Curves for conditional quantiles of the outcome given the predictor, at quantiles:\n ",
paste0(names(object$mqgam.out$fit), collapse = ", "),"\n"))
cat(paste0("\nDensities are estimated by a Gaussian product kernel with horizontal bandwidth ",
format(object$densities$x_bw, digits = 3),
" and vertical bandwidth ",
format(object$densities$y_bw, digits = 3),
".\n"))
# Summarize convergence of the mqgam
converged <- sapply(object$mqgam.out$fit, function(each_fit) each_fit$converged)
if (all(converged)) {
cat("\nConvergence achieved for every quantile curve.\n")
}
if (any(!converged)) {
cat("\nWarning: convergence not achieved for the following quantile curves.\n")
print(names(converged[!converged]))
cat("\nIt is possible that you have requested a very extreme quantile for which there is very little data.\n")
cat("See the mqgam function in the qgam package to address this problem.")
}
# Print the plot
if (object$arguments$uncertainty_draws) {
print(object$with_uncertainty_draws)
} else {
print(object$plot)
}
}
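# Hypothetical usage sketch (object name invented): given `qp` returned by
# quantileplot(), summary(qp) prints the call, the plot elements, the kernel
# bandwidths, the convergence status of each quantile curve, and the plot.
# summary(qp)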
|
ccb570cce1d04194f3278512415fa6977590d9e3
|
8e9b6cef86337c5fc82491f4e6e212e733cbdb51
|
/R/summaryNodeFunction.R
|
700d41c5096ca0e5f86df21a7df4d04c7ea9e842
|
[] |
no_license
|
ClinicoPath/vtree
|
1f1e3b439c46a6ce502c306690594c15fa82cd83
|
4a6f111e38b80410344d29b5aec20bd6008d1c06
|
refs/heads/master
| 2022-06-16T07:06:00.611631
| 2020-02-02T00:18:07
| 2020-02-02T00:18:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,408
|
r
|
summaryNodeFunction.R
|
#' @importFrom stats median quantile sd
summaryNodeFunction <- function (u, varname, value, args) {
justpct <- function(w,digits=2,vp=TRUE,empty="") {
if (vp) {
num <- sum(w==1,na.rm=TRUE)
den <- length(w) - sum(is.na(w))
} else {
num <- sum(w==1,na.rm=TRUE)
den <- length(w)
}
pctString <- paste0(around(100*num/den,digits),"%")
if (den==0) {
pctString <- empty
}
if (any(is.na(w)))
pctString <- paste0(pctString," mv=",sum(is.na(w)))
pctString
}
nAndpct <- function(w,digits=2,vp=TRUE,empty="") {
if (vp) {
num <- sum(w==1,na.rm=TRUE)
den <- length(w) - sum(is.na(w))
} else {
num <- sum(w==1,na.rm=TRUE)
den <- length(w)
}
npctString <- paste0(num," (",
around(100*num/den,digits),"%)")
if (den==0) {
npctString <- empty
}
if (any(is.na(w)))
npctString <- paste0(npctString," mv=",sum(is.na(w)))
npctString
}
qntl <- function(x,...) {
if (any(is.na(x))) {
NA
} else {
stats::quantile(x,...)
}
}
sepN <- args$sepN
if (is.null(args$digits))
args$digits <- 1
if (is.null(args$cdigits))
args$cdigits <- 2
if (is.null(args$na.rm))
args$na.rm <- TRUE
if (is.null(args$root)) {
args$root <- FALSE
}
if (is.null(args$leaf)) {
args$leaf <- FALSE
}
nargs <- length(args$var)
RESULT <- rep("",nargs)
for (i in 1:nargs) {
var <- args$var[i]
y <- u[[var]]
show <- TRUE
if (!is.null(args$sf)) {
show <- args$sf[[i]](u)
}
if (show) {
format <- args$format[i]
digits <- args$digits
cdigits <- args$cdigits
na.rm <- args$na.rm
missingNum <- sum(is.na(y))
nonmissingNum <- sum(!is.na(y))
if (na.rm) {
x <- y[!is.na(y)]
if (is.null(x)) x <- NA
} else {
x <- y
}
result <- format
ShowNodeText <- TRUE
# check the %var=V% and %node=N% codes
if (length(grep("%var=([^%]+)%",result))>0) {
varspec <- sub("(.*)%var=([^%]+)%(.*)","\\2",result)
if (varspec==varname) {
if (length(grep("%node=([^%]+)%",result))>0) {
nodespec <- sub("(.*)%node=([^%]+)%(.*)","\\2",result)
if (!is.na(value) & (nodespec==value)) {
ShowNodeText <- TRUE
} else {
ShowNodeText <- FALSE
}
} else {
ShowNodeText <- TRUE
}
} else {
ShowNodeText <- FALSE
}
} else {
if (length(grep("%node=([^%]+)%",result))>0) {
nodespec <- sub("(.*)%node=([^%]+)%(.*)","\\2",result)
if (!is.na(value) & (nodespec==value)) {
ShowNodeText <- TRUE
} else {
ShowNodeText <- FALSE
}
}
}
y_event <- NULL
if (length(grep("%pct=([^%]+)%",result))>0) {
pct_arg <- sub(
"(.*)%pct=([^%]+)%(.*)","\\2",result)
y_event <- y==pct_arg
}
if (length(grep("%npct=([^%]+)%",result))>0) {
npct_arg <- sub(
"(.*)%npct=([^%]+)%(.*)","\\2",result)
y_event <- y==npct_arg
}
if (!args$leaf) {
if (length(grep("%leafonly%",result))>0) {
ShowNodeText <- FALSE
}
}
if (args$root) {
if (length(grep("%noroot%",result))>0) {
ShowNodeText <- FALSE
}
}
TruncNodeText <- FALSE
if (length(grep("%trunc=([^%]+)%",result))>0) {
truncval <- as.numeric(sub("(.*)%trunc=([^%]+)%(.*)","\\2",result))
TruncNodeText <- TRUE
}
# Format %list% output
tabval <- tableWithoutSort(around(sort(y,na.last=TRUE),digits=cdigits),exclude=NULL)
countval <- paste0(" (n=",tabval,")")
countval[tabval==1] <- ""
listOutput <- paste0(paste0(names(tabval),countval),collapse=", ")
listLinesOutput <- paste0(paste0(names(tabval),countval),collapse=sepN)
if (ShowNodeText) {
if (length(x)==0 || !is.numeric(x)) {
minx <- maxx <- NA
} else {
minx <- min(x)
maxx <- max(x)
}
result <- gsub("%var=[^%]+%","",result)
result <- gsub("%node=[^%]+%","",result)
result <- gsub("%trunc=(.+)%","",result)
result <- gsub("%noroot%","",result)
result <- gsub("%leafonly%","",result)
result <- gsub("%v%",args$var[i],result)
result <- gsub("%list%",listOutput,result)
result <- gsub("%listlines%",listLinesOutput,result)
result <- gsub("%mv%",paste0(missingNum),result)
result <- gsub("%nonmv%",paste0(nonmissingNum),result)
if (is.numeric(x) | is.logical(x)) {
# Note that y is used in the call to nAndpct
# so that missing values can be handled as desired
result <- gsub("%npct%",nAndpct(y,digits=digits),result)
result <- gsub("%pct%",justpct(y,digits=digits),result)
result <- gsub("%mean%", around(mean(x), digits = cdigits),result)
result <- gsub("%sum%", around(sum(x), digits = cdigits),result)
result <- gsub("%median%", around(stats::median(x), digits = cdigits),
result)
result <- gsub("%SD%", around(stats::sd(x), digits = cdigits), result)
result <- gsub("%min%", around(minx, digits = cdigits), result)
result <- gsub("%max%", around(maxx, digits = cdigits), result)
result <- gsub("%IQR%",
paste0(
around(qntl(x,0.25), digits = cdigits),", ",
around(qntl(x,0.75), digits = cdigits)),
result)
repeat {
if (length(grep("%(p)([0-9]+)%", result)) == 0)
break
quant <- sub("(.*)%(p)([0-9]+)%(.*)", "\\3", result)
if (quant != "") {
qq <- around(qntl(x, as.numeric(quant)/100),
digits = digits)
result <- sub(paste0("%p", quant,"%"), qq, result)
}
}
}
} else {
result <- ""
}
if (TruncNodeText) {
if (nchar(result)>truncval) {
RESULT[i] <- paste0(substr(result,1,truncval),"...")
} else {
RESULT[i] <- result
}
} else {
RESULT[i] <- result
}
}
}
RESULT
}
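# Minimal standalone sketch of the %code% templating mechanic used above
# (this helper is hypothetical and is not called anywhere in vtree):
demoSummaryCodes <- function(x = c(1, 2, NA, 4)) {
  template <- "mv=%mv% mean=%mean%"
  out <- gsub("%mv%", sum(is.na(x)), template)
  out <- gsub("%mean%", round(mean(x, na.rm = TRUE), 2), out)
  out  # "mv=1 mean=2.33"
}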
|
ba15869d6b3a4161722884d8ef6b1182f2486480
|
c50a2f7f2e73a2a3c94e58a428f369caa7d28b4e
|
/global.R
|
458446f9a814832a045699923783b4ae77b5e25e
|
[] |
no_license
|
ardeldel/GeoVis
|
ba54dae57901f09a8b584e61aad6ca96c5effe79
|
838d3de1853c889a5ad0c9e8a5c0ab4b90d6bb59
|
refs/heads/master
| 2020-07-15T07:18:19.357757
| 2019-08-31T07:23:44
| 2019-08-31T07:23:44
| 205,510,886
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,019
|
r
|
global.R
|
library(shiny)
library(shinyjs)
library(shinyWidgets)
library(shinydashboard)
library(shinydashboardPlus)
library(shinyalert)
library(RSQLite)
library(V8)
library(mapview)
library(rgdal)
library(data.table)
library(DT)
library(tools)
library(shinycssloaders)
library(writexl)
library(rgeos)
library(spgwr)
library(readxl)
library(DBI)
library(sodium)
library(ggplot2)
# cat("loading .rda\n")
# load(file = "indo.rda")
load(file = "gispedia.rda")
shinyInput <- function(FUN, len, id) {
inputs <- character(len)
for (i in seq_len(len)) {
inputs[i] <- as.character(FUN(paste0(id, i)))
}
inputs
}
b<- NULL
countBeta<- function(mgwr){
idx<- (length(names(mgwr$SDF))-6) /3
for(i in 2:(idx+1)){
names<- names(mgwr$SDF)[i]
b<- cbind(b, mgwr$SDF@data[,names])
}
return(b)
}
se<- NULL
countSE<- function(mgwr){
idx<- (length(names(mgwr$SDF))-6) /3
for(i in (idx+2): (2*idx+1)){
names<- names(mgwr$SDF)[i]
se<- cbind(se, mgwr$SDF@data[,names])
}
return(se)
}
|
5c8814800fd5aef953f8d0f9ad02aa7422b460bc
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/3864_0/rinput.R
|
85b4844c26665f5a47cefe241ee9e6c2ab22dd41
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
library(ape)
testtree <- read.tree("3864_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3864_0_unrooted.txt")
|
30b62f8c2b56e358b0aad5ae778de60b4378959e
|
e58b74c8073977666efb35804f703297d48253b1
|
/R/sed_insert.R
|
56e05a5ba286058f56c86594da46a691185c7e29
|
[] |
no_license
|
cran/rsed
|
d1ca717dc1973900dce32bd57665dd1d2502dbb4
|
062356808d906b1ab56300d4cc2b7365da0b8ac5
|
refs/heads/master
| 2021-08-26T05:01:23.500154
| 2017-11-21T15:13:02
| 2017-11-21T15:13:02
| 111,405,173
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,823
|
r
|
sed_insert.R
|
##' Insert one or more lines
##'
##' @details \code{sed_insert} only accomodates a single insertion point. Multiple lines may be inserted, but only one
##' insertion point is allowed, which is why \code{length(insertion)} must be 1. To make insertions at multiple locations,
##' \code{sed_insert} can be called repeatedly on the same string as needed.
##'
##' @export
##' @param stream A character vector, each element typically (but not necessarily) containing the text
##' from a single line in a file, which can be generated via \code{\link{readLines}}.
##'
##' @param insertion A character vector that will be inserted into the stream after element \code{after}.
##' Each element in the vector would correspond to a separate line in the file.
##'
##' @param after An integer or character string that designates where \code{insertion} is added to \code{stream}.
##' If \code{after} is numeric, it designates the line (or element) number in \code{stream} after which the
##' \code{insertion} will be placed. The numeric value of \code{after} must be in \code{[0:length(stream)]}.
##' To make an insertion at the very beginning of \code{stream}, use \code{after = 0}. If \code{after} is a
##' character string, the insertion is placed after the first element in \code{stream} that contains the string,
##' where matching is obtained using \code{\link{grep}}.
##'
##' @param warn If \code{TRUE}, warning messages are produced if insertion fails due to mispecifification
##' of \code{after}.
##'
##' @param \dots Additional named arguments to \code{\link{grep}}, which are applicable if \code{after} is a character string.
##' In other words, \code{\link{grep}} is used to search for the first instance of \code{after}.
##'
##' @return The new \code{stream} with the insertions added. If the insertion fails because \code{after} is
##' specified incorrectly, \code{stream} is returned unchanged.
##'
##' @author Landon Sego
##'
##' @seealso \code{\link{sed_replace}}, \code{\link{sed_substitute}}, \code{\link{sed_comment}}, \code{\link{streamEdit}}
##'
##' @keywords misc
##'
##' @examples
##'################################################################################
##'# Let's create an example stream we can edit
##'################################################################################
##'stream <- c("Here's a line",
##' "And another line",
##' "Line after which we'll insert a string",
##' "A line after which we'll insert another string",
##' "A final line")
##'as.stream(stream)
##'
##'# Insert a string using line numbers
##'stream <- sed_insert(stream, after = 3, "Here's the first insertion")
##'stream
##'
##'# Insert a stream by searching for a string
##'stream <- sed_insert(stream,
##' c("Here's the second insertion",
##' "",
##' "Another line of the second insertion after the blank line"),
##' after = "insert another")
##'stream
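##'
##'# To make insertions at multiple locations, call sed_insert repeatedly
##'stream <- sed_insert(stream, after = 1, "An insertion after the very first line")
##'stream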
sed_insert <- function(stream, after, insertion, warn = FALSE, ...) {
# Basic checks
Smisc::stopifnotMsg(is.character(stream),
"'stream' must be a character vector",
(length(after) == 1) & (is.character(after) | is.numeric(after)),
"'after' must be a whole number in [0, length(stream)] or a single character string",
is.character(insertion),
"'insertion' must be a character vector",
is.logical(warn) & (length(warn) == 1),
"'warn' must be TRUE or FALSE")
# Get the length of the stream
ls <- length(stream)
# If it's character, figure out the first line after which the insertion should take place
if (is.character(after)) {
pattern <- after
after <- grep(after, stream, ...)[1]
# If match was not found, just return the string
if (!length(after)) {
if (warn) {
warning("The pattern provided, `after = '", pattern, "'` was not found in 'stream'. No changes were made.")
}
return(as.stream(stream))
}
}
# Otherwise, verify it's in the acceptable range
else if (!(after %in% 0:ls)) {
if (warn) {
warning("'after' must be an integer in [0, length(stream)] or a single character string. No changes were made.")
}
return(as.stream(stream))
}
# Append before
if (after == 0) {
outStream <- c(insertion, stream)
}
# Append after
else if (after == ls) {
outStream <- c(stream, insertion)
}
# Insert in the middle
else {
# Cut the string in two and insert
s1 <- stream[1:after]
s2 <- stream[(after + 1):ls]
outStream <- c(s1, insertion, s2)
}
# Return the inserted stream
return(as.stream(outStream))
} # sed_insert
|
572b718af0313f86dd9d5a47089a383fed828044
|
cef3b5e2588a7377281a8f627a552350059ca68b
|
/cran/paws.cost.management/man/costexplorer_get_anomaly_monitors.Rd
|
cf6f6077da0bba6ffc83d1e96f36ec6fa7fb7385
|
[
"Apache-2.0"
] |
permissive
|
sanchezvivi/paws
|
b1dc786a9229e0105f0f128d5516c46673cb1cb5
|
2f5d3f15bf991dcaa6a4870ed314eb7c4b096d05
|
refs/heads/main
| 2023-02-16T11:18:31.772786
| 2021-01-17T23:50:41
| 2021-01-17T23:50:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,017
|
rd
|
costexplorer_get_anomaly_monitors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/costexplorer_operations.R
\name{costexplorer_get_anomaly_monitors}
\alias{costexplorer_get_anomaly_monitors}
\title{Retrieves the cost anomaly monitor definitions for your account}
\usage{
costexplorer_get_anomaly_monitors(MonitorArnList, NextPageToken,
MaxResults)
}
\arguments{
\item{MonitorArnList}{A list of cost anomaly monitor ARNs.}
\item{NextPageToken}{The token to retrieve the next set of results. AWS provides the token
when the response from a previous call has more results than the maximum
page size.}
\item{MaxResults}{The number of entries a paginated response contains.}
}
\description{
Retrieves the cost anomaly monitor definitions for your account. You can
filter using a list of cost anomaly monitor Amazon Resource Names
(ARNs).
}
\section{Request syntax}{
\preformatted{svc$get_anomaly_monitors(
MonitorArnList = list(
"string"
),
NextPageToken = "string",
MaxResults = 123
)
}
}
\keyword{internal}
|
6b8fd7d1fa970e4c37d763c7c35b94e2804f7be2
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051564-test.R
|
191722aa69c1f1f795053eb0018ebfa71204e23d
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 583
|
r
|
1610051564-test.R
|
testlist <- list(rates = numeric(0), thresholds = numeric(0), x = c(-2.30331099970591e-156, -2.25931868540718e-156, -2.30331110816476e-156, 1.09015244729545e+217, -3.0784026009623e-288, NaN, 2.12199579047121e-314, 0, 0, 0, 0, 0, 0, 0, NaN, 1.3854086120054e-309, NaN, 2.80614289249855e-312, 1.62587122733437e-319, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(grattan::IncomeTax,testlist)
str(result)
|
748f99c33f7c87136fdf85515eac75fbfce34dcd
|
ea605f4a3ec4cbc7ec1fa8ea9b8ff74b71651d25
|
/jmlr/code/plot.R
|
c434af302a072d951d5c2444ef39ad653c63dad6
|
[] |
no_license
|
ajgreen93/local_spectral_density_clustering
|
07f4de71306fb62500bafe479c052bbcf24348d4
|
6703cc9700298e1a587eb9427d0fa16e17711926
|
refs/heads/master
| 2021-12-31T05:33:17.875088
| 2021-12-23T02:02:16
| 2021-12-23T02:02:16
| 164,931,242
| 0
| 0
| null | 2021-09-10T13:28:13
| 2019-01-09T20:15:59
|
TeX
|
UTF-8
|
R
| false
| false
| 5,457
|
r
|
plot.R
|
#----------------------------------------------------#
# Plot results from validate theory pipeline
#
# Plotting options are
#
# -- vectors <- c("samples","normalized_ppr","sweep_cut")
# -- quantities <- c("normalized_cut","conductance","local_spread","normalized_volume_ssd")
# -- parameter <- c()
#----------------------------------------------------#
# What is to be plotted?
vectors <- "samples"
quantities <- c("normalized_cut", "conductance","local_spread")
parameter <- "sigma"
# Would you like to save plots to file?
save_plots <- TRUE
# Which distributions (by index) would you like to plot
plot_indx <- 1:10
# Structure for storing data to plot
n_quantities <- length(quantities)
list_of_plot_data <- vector(mode = "list", length = n_quantities)
names(list_of_plot_data) <- quantities
# Load results
save_file <- paste0("2d_uniform_rectangles_",parameter,".rda")
load(file.path("data",save_file))
empirical_data <- list(normalized_cut = empirical_ncuts,
conductance = empirical_conductances,
local_spread = empirical_local_spreads,
normalized_volume_ssd = empirical_normalized_volume_ssd)
population_bounds <- list(normalized_cut = population_ncuts,
conductance = population_conductances,
local_spread = population_local_spreads,
normalized_volume_ssd = population_condition_numbers)
# Take only the asked-for quantities.
empirical_data <- empirical_data[quantities]
population_bounds <- population_bounds[quantities]
# Data for plotting
parameter_values <- get(parameter)
if(parameter == "epsilon"){
parameter <- "density_ratio"
parameter_values <- 2*rho*sigma/(4 - 2*rho*sigma) * epsilon/(1 - epsilon)
}
for(ii in 1:n_quantities)
{
avg_empirical <- rowMeans(empirical_data[[ii]])[plot_indx]
list_of_plot_data[[ii]] <- list(empirical = avg_empirical,
population = population_bounds[[ii]][plot_indx])
}
# Generic plotting functions.
generic_2ddata_plot <- function(X,colors,subsample = FALSE,cex = 2){
if(subsample){
n <- nrow(X)
indx <- sample(1:n,size = 8000)
X <- X[indx,]
colors <- colors[indx]
}
plot(X[,1],X[,2],col = colors, pch = 16, xlab = "x1", ylab = "x2",
cex.lab = cex, cex.axis = cex)
}
generic_quantity_plot <- function(x, list_of_ys, log = "xy", xlab = "diameter", ylab = "y",
                                  colors, pchs, lwd = 2, cex = 2){
ylims <- c(.5 * min(unlist(list_of_ys)), 2 * max(unlist(list_of_ys)))
xlims <- c(min(x),max(x))
xlab <- gsub("_", " ", xlab)
ylab <- gsub("_", " ", ylab)
plot(x = x,y = list_of_ys[[1]], ylim = ylims, type = "n",log = log, xlab = xlab,ylab = ylab,
cex.lab = cex, cex.axis = cex)
grid(lwd = 3*lwd)
for(ii in 1:length(list_of_ys))
{
y <- list_of_ys[[ii]]
lines(x = x,
y = y,
col = colors[ii],
lwd = lwd)
points(x = x,
y = y,
col = colors[ii],
pch = pchs[ii],
cex = cex)
}
}
color.gradient <- function(x, colors=c("blue","red"), colsteps=100, transform = function(x){x}) {
return( colorRampPalette(colors) (colsteps) [ findInterval(transform(x), seq(quantile(transform(x),.01),quantile(transform(x),.995), length.out=colsteps)) ] )
}
# Generate some plots
if(save_plots){
plot_dir <- file.path("plots",parameter)
if(!dir.exists(plot_dir)) dir.create(plot_dir,recursive = TRUE)
}
# Plot the samples, the normalized (by degree) PPR vector, and resulting sweep cut
subsample <- TRUE
plot_vectors <- if(is.null(vectors)) FALSE else TRUE
if(plot_vectors){
ppr_color_transform <- sqrt
for(ii in plot_indx)
{
X <- Xs[[ii]]
p <- ps[ii,]
estimated_cluster <- estimated_clusters[ii,]
# TODO: put this is main pipeline
library(magrittr)
library(Matrix)
library(RANN)
library(reshape2)
source("graph.R")
G <- neighborhood_graph(X,r)
p_norm <- ifelse(is.na(p/degree(G)),0,p/degree(G))
# Colors
n <- nrow(X)
colors_for_each_plot <- vector(mode = "list", length = 3)
names(colors_for_each_plot) <- vectors
colors_for_each_plot[["samples"]] <- rep("black",n)
colors_for_each_plot[["normalized_ppr"]] <- color.gradient(p_norm, transform = ppr_color_transform)
colors_for_each_plot[["sweep_cut"]] <- ifelse(estimated_cluster,"red","blue")
for(plot_name in vectors){
if(save_plots)
{
file_name <- paste0(plot_name,ii,".pdf")
plot_path <- file.path(plot_dir,file_name)
pdf(plot_path, 8, 8)
}
generic_2ddata_plot(X,colors_for_each_plot[[plot_name]],subsample)
if(save_plots){dev.off()}
}
}
}
# Plot the quantities we use to bound volume of SSD
colors <- c("red","blue")
pchs <- c(15,16)
log <- "xy"
if(save_plots){
lwd <- 2
cex <- 2
} else{
lwd <- 1
cex <- 1
}
plot_quantities <- if(is.null(quantities)) FALSE else TRUE
if(plot_quantities){
for(ii in 1:n_quantities)
{
if(save_plots)
{
file_name <- paste0(quantities[ii],".pdf")
plot_path <- file.path(plot_dir,file_name)
pdf(plot_path, 8, 8)
}
generic_quantity_plot(parameter_values, list_of_plot_data[[ii]], xlab = parameter, ylab = names(list_of_plot_data)[ii],
colors = colors, pchs = pchs, lwd = lwd, cex = cex, log = log)
if(save_plots)
{
dev.off()
}
}
}
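# Quick standalone illustration of color.gradient(), left commented out so the
# script's plotting flow is unchanged:
# plot(1:5, pch = 16, cex = 3, col = color.gradient(1:5))  # blue-to-red ramp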
|
b9729179cedb89536f41100fe41c6fe3bec5e944
|
d92ad57706b24066ff15a21bc73c801684ac0c19
|
/server.R
|
1616e1fd59f7a854f56c54359a6a6b0699926d98
|
[] |
no_license
|
maisam8149/developing-data-products
|
e7b2fabf33adffcc55bb71e86efa26d238488b0f
|
81199a36aa3409be82c79196882a9caaf9606882
|
refs/heads/master
| 2022-08-29T12:19:17.226098
| 2020-05-30T14:18:33
| 2020-05-30T14:18:33
| 267,650,604
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,337
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
shinyServer(
function(input, output){
output$rentalYield <- renderText({ calculateRentalYield(input$weeklyRent, input$price) })
output$cashflowPerYear <- renderText({calculateYearlyCashflow(input$weeklyRent, input$waterPerQuarter, input$managementFees, input$weeklyRepayments)})
output$cashflowPerWeek <- renderText({calculateWeeklyCashflow(input$weeklyRent, input$waterPerQuarter, input$managementFees, input$weeklyRepayments)})
}
)
calculateRentalYield <- function (weeklyRent, propertyPrice)
{
result <- weeklyRent * 52 / propertyPrice * 100
return(round(result, digits = 2))
}
calculateYearlyCashflow <- function(weeklyRent, water, managementFees, weeklyRepayments)
{
result <- weeklyRent * 52 - (water) * 4 - managementFees * 52 - weeklyRepayments * 52
return(round(result, digits = 2))
}
calculateWeeklyCashflow <- function(weeklyRent, water, managementFees, weeklyRepayments)
{
result <- (weeklyRent * 52 - (water) * 4 - managementFees * 52 - weeklyRepayments * 52) / 52
return(round(result, digits = 2))
}
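# Sanity check of the formulas with made-up numbers (not app data): a $500,000
# property renting at $400/week gives a gross yield of
# 400 * 52 / 500000 * 100 = 4.16, so calculateRentalYield(400, 500000)
# returns 4.16.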
|
d3441ee295736bc7f5386671b20b703d4eaa3bbd
|
b626f3d5090a85990ea689bc152775a6359ef429
|
/process_student.R
|
0a7ca2cac85585cb9c00eb022e12eabe45c19749
|
[] |
no_license
|
Planktos/SpData2019_midterm
|
ade4cca7d5bae7653051f9287209d9eabded7e75
|
d8d80de4abee6ba5f686c5c476547018e2043184
|
refs/heads/master
| 2020-05-02T21:22:30.174677
| 2019-03-28T14:47:26
| 2019-03-28T14:47:26
| 178,218,166
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,125
|
r
|
process_student.R
|
library(stringr)
library(plyr)
library(dplyr)
library(lubridate)
# Macrozooplankton data ----
#read in data
z <- read.csv("195101-201404_Zoop.csv", sep = ",",stringsAsFactors = F, header = T)
z <- z[,which(unlist(lapply(z, function(x)!all(is.na(x)))))] #using the "lapply" function from the "dplyr" package, remove fields which contain all "NA" values
#create new fields with decimal degree latitude and longitude values
z$Lat_DecDeg <- z$Lat_Deg + (z$Lat_Min/60)
z$Lon_DecDeg <- (z$Lon_Deg + (z$Lon_Min/60))*-1
# create a date-time field
z$dateTime <- str_c(z$Tow_Date," ",z$Tow_Time,":00")
z$dateTime <- as.POSIXct(strptime(z$dateTime, format = "%m/%d/%Y %H:%M:%S", tz = "America/Los_Angeles")) #Hint: look up input time formats for the 'strptime' function
z$Tow_Date <- NULL; z$Tow_Time <- NULL # drop the raw character fields now that ndatetime exists
#export data as tab delimited file
write.table(z, "Zoop.txt", sep="\t", row.names = F)
#Egg data Set-----
#read in data set
e <-
#turn these character fields into date-time field
e$stop_time_UTC <-
e$time_UTC <- gsub(x = e$time_UTC, pattern = "T", replacement = " ")
e$time_UTC <-
e <- e[,c(1:4,29,5:26)]
#export data
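# Worked example of the strptime pattern needed above, using a made-up
# ISO-style timestamp (illustration only; the blanks above are left to fill in):
# s <- gsub(x = "2007-02-01T19:30:00", pattern = "T", replacement = " ")
# as.POSIXct(strptime(s, format = "%Y-%m-%d %H:%M:%S", tz = "UTC"))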
|
1f6af3b54efdf10aeb1de41ef8f2f5e5fc49aaaa
|
168db5797a3b38a990ffe7724dbaff5700551a3e
|
/Hopfield/hopfield.r
|
2222ba567187139b73194b5587687adc9614f4de
|
[] |
no_license
|
Lucaslsps/Inteligencia-Computacional
|
9729f8b5a47bc250a0d4eb261565412a664291fe
|
7133cfb447b0f041cee5593685c58a9e6854aab7
|
refs/heads/master
| 2020-09-14T20:07:04.895711
| 2019-12-12T01:28:31
| 2019-12-12T01:28:31
| 223,239,611
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 5,055
|
r
|
hopfield.r
|
rm(list=ls())
UM <- matrix(c(-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1), nrow=10,ncol = 12)
DOIS <- matrix(c(1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1), nrow = 10, ncol = 12)
TRES <- matrix(c(1,1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1),nrow = 10,ncol = 12)
QUATRO <- matrix(c(1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1),nrow = 10,ncol = 12)
CINCO <- matrix(c(1,1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1),nrow = 10,ncol = 12)
SEIS <- matrix(c(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1,1,1,-1,-1,1,1,1,1,1,1),nrow = 10,ncol = 12)
SETE <- matrix(c(1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1),nrow = 10,ncol = 12)
OITO <- matrix(c(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,-1,-1,1,1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1),nrow = 10,ncol = 12)
NOVE <- matrix(c(1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,-1,-1,1,1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1),nrow = 10,ncol = 12)
ZERO <- matrix(c(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,1,1,1,1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1),nrow = 10,ncol = 12)
## Funรงรฃo que printa os nรบmeros
show.letter <- function(letter.vector){
letter.vector[letter.vector == 1] <- "*"
letter.vector[letter.vector == -1] <- " "
colnames(letter.vector) <- rep("",12)
row.names(letter.vector) <- rep("",10)
print(letter.vector, quote = FALSE)
}
## Printando todas os nรบmeros originais
for (i in mget(ls(pattern = "^[A-Z]"))){show.letter(i)}
mutate <- function(letter.vector, number.pixel.flips){
letter.vector[sample(length(letter.vector),number.pixel.flips)] <- letter.vector[sample(length(letter.vector),number.pixel.flips)]
return(letter.vector)
}
mutated.UM <- mutate(UM, 4)
mutated.DOIS <- mutate(DOIS, 4)
mutated.TRES <- mutate(TRES, 4)
mutated.QUATRO <- mutate(QUATRO, 4)
mutated.CINCO <- mutate(CINCO, 4)
mutated.SEIS <- mutate(SEIS, 4)
mutated.SETE <- mutate(SETE, 4)
mutated.OITO <- mutate(OITO, 4)
mutated.NOVE <- mutate(NOVE, 4)
mutated.ZERO <- mutate(ZERO, 4)
## Printando todos os nรบmeros com mutaรงรฃo
for (i in mget(ls(pattern = "mutated"))){show.letter(i)}
## Juntando todos os nรบmeros em uma รบnica matrix, dividida por 10
x <- matrix(c( ZERO, UM, DOIS, TRES, QUATRO, CINCO, SEIS, SETE, OITO, NOVE), nrow = 10, byrow = T)
## Funรงรฃo que utiliza Hopfield com a regra de aprendizado Hebbian
hopfield <- function(current.letter, iteration, memory = w){
w <- t(x) %*% x
diag(w) <- 0
for(i in 1:iteration){
a <- w %*% as.vector(current.letter)
current.letter <- ifelse(a>0, 1, -1)
}
return(show.letter(matrix(current.letter, ncol = 12, nrow = 10)))
}
## Printa todas as iteraรงรตes de reconstruรงรฃo, para cada nรบmero, 5 vezes
for (i in mget(ls(pattern = "mutated"))){
print("###########################")
for (iter in 1:5){
hopfield(current.letter = i, iteration = iter)
}
}
|
9b7351d7a47576262df6f26f6d16b8268483e5bb
|
44692f59da65ef2a635b7c721a2f2e9a8ada38bd
|
/R/MS_SSM_obs_cons_rate.R
|
5482a0e569b59db6b598660c7062eb48d41fcd25
|
[] |
no_license
|
vtrijoulet/MS_SSM
|
d35a807a7305c8b2ea504ad9149fc2104e1050ae
|
672535011415f0e2fe1825b76b809501028f9642
|
refs/heads/master
| 2020-04-22T21:11:46.074173
| 2019-09-30T11:56:13
| 2019-09-30T11:56:13
| 170,665,801
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,742
|
r
|
MS_SSM_obs_cons_rate.R
|
##### Prepare data for observed consumption rates #####
cons_data_fn <- function (data.file.diet2){
data<-read.csv(data.file.diet2)
# cf.("Consumption rates/Make meansw and bottemp as data_per prey species.R")
# Separate other food from consumption for 10 modelled sp
data2<-data[which(data$prey!="OTHER"),]
data.other<-data[which(data$prey=="OTHER"),]
# ignore data when <5 tows
data3 <- data2
data.other3 <- data.other
for (n in 1:nrow(data3)){
if (data3$num_tows[n]<5){
data3$meansw[n]<- NA
}
}
for (n in 1:nrow(data.other3)){
if (data.other3$num_tows[n]<5 || is.na(data.other3$num_tows[n])==TRUE){
data.other3$meansw[n]<- NA
}
}
# ignore data when <20 stomachs
data4 <- data3
data.other4 <- data.other3
for (n in 1:nrow(data4)){
if (data4$nstom[n]<20){
data4$meansw[n]<- NA
}
}
for (n in 1:nrow(data.other4)){
if (data.other4$nstom[n]<20){
data.other4$meansw[n]<- NA
}
}
#### Make data so rows=years and columns = meansw (summed over species) and bottemp for each survey
years<-sort(unique(data4$year))
pred<-unique(data4$svspp)
Y<-length(years)
# sp_common_names<- c("cod", "haddock", "goosefish", "spinydogfish", "silverhake",
# "winterskate", "yellowtailflounder", "mackerel", "herring","winterflounder")
# sp<-c("GADUS MORHUA","MELANOGRAMMUS AEGLEFINUS","LOPHIUS AMERICANUS","SQUALUS ACANTHIAS","MERLUCCIUS BILINEARIS",
# "LEUCORAJA OCELLATA","LIMANDA FERRUGINEA","SCOMBER SCOMBRUS","CLUPEA HARENGUS","PSEUDOPLEURONECTES AMERICANUS")
svspp_order<-c(73,74,197,15,72,
23,105,121,32,106)
prey<-c("GADMOR","MELAEG","LOPAME","SQUACA","MERBIL","LIMFER","SCOSCO","CLUHAR","PLEAME")
n_pred<-length(svspp_order)
n_prey<-length(prey)
spring<-array(0,dim=c(Y,2,length(prey),n_pred))
fall<-array(0,dim=c(Y,2,length(prey),n_pred))
spring.other<-array(0,dim=c(Y,2,n_pred))
fall.other<-array(0,dim=c(Y,2,n_pred))
for (j in 1:n_pred){
species<-data4[which(data4$svspp==svspp_order[j]),]
species.other<-data.other4[which(data.other4$svspp==svspp_order[j]),]
for (t in 1:Y){
annual<-species[which(species$year==years[t]),]
if (length(annual$meansw[which(annual$seacat=="SPRING")])==0){
spring[t,,,j]<-NA
} else {
for (i in 1:n_prey){
annual_prey<-annual[which(annual$prey==prey[i]),]
spring[t,1,i,j]<-annual_prey$meansw[which(annual_prey$seacat=="SPRING")]
spring[t,2,i,j]<-annual_prey$bottemp[which(annual_prey$seacat=="SPRING")]
}
}
if (length(annual$meansw[which(annual$seacat=="FALL")])==0){
fall[t,,,j]<-NA
} else {
for (i in 1:n_prey){
annual_prey<-annual[which(annual$prey==prey[i]),]
fall[t,1,i,j]<-annual_prey$meansw[which(annual_prey$seacat=="FALL")]
fall[t,2,i,j]<-annual_prey$bottemp[which(annual_prey$seacat=="FALL")]
}
}
annual.other<-species.other[which(species.other$year==years[t]),]
if (length(annual.other$meansw[which(annual.other$seacat=="SPRING")])==0){
spring.other[t,,j]<-NA
} else {
spring.other[t,1,j]<-annual.other$meansw[which(annual.other$seacat=="SPRING")]
spring.other[t,2,j]<-annual.other$bottemp[which(annual.other$seacat=="SPRING")]
}
if (length(annual.other$meansw[which(annual.other$seacat=="FALL")])==0){
fall.other[t,,j]<-NA
} else{
fall.other[t,1,j]<-annual.other$meansw[which(annual.other$seacat=="FALL")]
fall.other[t,2,j]<-annual.other$bottemp[which(annual.other$seacat=="FALL")]
}
}
}
  return(list(spring = spring, fall = fall, spring.other = spring.other,
              fall.other = fall.other, Y = Y))
}
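# Hypothetical usage (the file name below is a placeholder, not from this repo):
# cons <- cons_data_fn("stomach_contents.csv")
# dim(cons$spring)  # Y x 2 x n_prey x n_pred array of meansw and bottemp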
|
b6eec354af47f162a194b4a1f685768ad0b4499a
|
15f54cf88824f8ef2581f0a00f8d7fd208f7d0f4
|
/R/wiot2012_data.R
|
19dd91496a6933f0c37d20e73c2b8c7c96bfe023
|
[] |
no_license
|
MatthewSmith430/GVTr
|
83ee4761156096ca65a4406bd34204a36233c640
|
c6cb9dd3f09a62c6df09238bdca96ad2785eabb7
|
refs/heads/master
| 2022-11-07T19:44:14.133448
| 2022-11-04T09:04:09
| 2022-11-04T09:04:09
| 115,260,573
| 4
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 151
|
r
|
wiot2012_data.R
|
#' @title WIOD 2012 Data
#'
#' @description WIOD 2012 dataset
#' @name wiot2012
#' @docType data
#' @usage wiot2012
#' @keywords datasets
NULL
|
f07af077f54554ac911a93b33b4ea4bd36f1b23d
|
3925662c0fe04f924a952ecd0ddeb5b48237e125
|
/plot4.R
|
adee1df26e5ada252e5e6423d3a56d06f93dc0fd
|
[] |
no_license
|
globalstatements/ExData_Plotting1
|
62b00bc074b2fa9d16dd5cd35eab66aa8b7ae258
|
ce2122ece2341c050dd1cbc9ad59b2c3f3053f78
|
refs/heads/master
| 2020-12-26T03:23:09.588817
| 2015-01-10T21:20:43
| 2015-01-10T21:20:43
| 29,069,930
| 0
| 0
| null | 2015-01-10T20:25:08
| 2015-01-10T20:25:08
| null |
UTF-8
|
R
| false
| false
| 1,639
|
r
|
plot4.R
|
# Read input data with 1,442 days
allpowerdata <- read.csv("household_power_consumption.txt",
sep=";", na.strings="?")
# Subset with 2 days, February 1-2, 2007
the_days_str <- c("1/2/2007", "2/2/2007")
the_days_rows <- as.character(allpowerdata$Date) %in% the_days_str
powerdata <- allpowerdata[the_days_rows, ]
# Convert date and time to POSIXlt
powerdata$ndatetime <- strptime(
paste(powerdata$Date, powerdata$Time, sep=" "),
format="%d/%m/%Y %H:%M:%S")
# Open PNG file for output
png("plot4.png", width=480, height=480)
# Panels for 4 plots
# all are point-to-point line graphs with time in x axis
par(mfrow = c(2, 2))
# Row 1, left
# Global active power
plot(x=powerdata$ndatetime, y=powerdata$Global_active_power,
type="l", main="", xlab="",
ylab="Global Active Power")
# Row 1, right
# Voltage
plot(x=powerdata$ndatetime, y=powerdata$Voltage,
type="l", main="", xlab="datetime",
ylab="Voltage")
# Row 2, left
# Multiple submeters
plot(x=powerdata$ndatetime, y=powerdata$Sub_metering_1,
type="l", main="", xlab="",
ylab="Energy sub metering")
lines(x=powerdata$ndatetime, y=powerdata$Sub_metering_2,
col="red")
lines(x=powerdata$ndatetime, y=powerdata$Sub_metering_3,
col="blue")
legend("topright", bty="n", lwd=1,
col=c("black", "red", "blue"),
legend=c("Sub_metering_1", "Sub_metering_2",
"Sub_metering_3"))
# Row 2, right
# Global_reactive_power
plot(x=powerdata$ndatetime, y=powerdata$Global_reactive_power,
type="l", main="", xlab="datetime",
ylab="Global_reactive_power")
# Close output
dev.off()
|
7057618c12424f03b528651c6f6d59faae2de249
|
9fbd34dd260879468ee3710dc80f1a96478d39f9
|
/R/deprecated/Extract_LBDA.R
|
7f7cfc12f8bbce635b53053c60f170ef50d2feb3
|
[] |
no_license
|
Kah5/bimodality
|
2fa26842ba50cdceff22a2f9eb335fc73bcec496
|
2b53dd59777292f84666ac2fcbd7031eda8ddb71
|
refs/heads/master
| 2023-02-01T05:48:49.361038
| 2023-01-21T01:25:00
| 2023-01-21T01:25:00
| 49,456,870
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,974
|
r
|
Extract_LBDA.R
|
# investigating the LBDA
library(ncdf4)
library(raster)
library(sp)
library(ggplot2)
LBDA <- brick('C:/Users/Jmac/Box Sync/LBDA/nada_hd2_cl.nc')
LBDA <- nc_open('C:/Users/Jmac/Box Sync/LBDA/nada_hd2_cl.nc')
summary(LBDA$var)
LBDA.out <- list()
LBDA.out$lat <- ncvar_get(LBDA, "lat")
LBDA.out$lon <- ncvar_get(LBDA, "lon")
LBDA.out$Year <- ncvar_get(LBDA, "time")
LBDA.out$pdsi <- ncvar_get(LBDA, "pdsi")
#for(v in names(LBDA$var)){
# LBDA.out[[v]] <- ncvar_get(LBDA, v)
#}
PDSI <- LBDA.out$pdsi[1700:2006,138:1,1:237]
new.data <- aperm(PDSI, c(2,3,1)) # need to reorder array so it has x,y,pdsi
# lets make a raster brick of this
PDSI.brk<- brick(new.data)
names(PDSI.brk) <- 1700:2006
plot(PDSI.brk[[1]])
plot(PDSI.brk$X1936)
for(y in 1:dim(LBDA.out$pdsi)[3]){
print(paste0(" ---- Lat: ", y, " ---- "))
dat.temp <- stack(data.frame(PDSI[,y,]))
names(dat.temp) <- c("pdsi", "Year")
dat.temp$Year <- as.numeric(substr(dat.temp$Year,2,nchar(paste(dat.temp$Year))))
dat.temp$lat <- LBDA.out$lat[y]
dat.temp$lon <- LBDA.out$lon
if(y==1) lbda.pdsi <- dat.temp else lbda.pdsi <- rbind(lbda.pdsi, dat.temp)
}
# haven't gotten the above loop to run for lbda.pdsi years beyond 1 and 2
ggplot(data=lbda.pdsi[lbda.pdsi$Year %in% 119,]) +
facet_grid(Year~.) +
geom_raster(aes(x=lon, y=lat, fill=pdsi)) +
scale_y_continuous(name="Latitude", expand=c(0,0)) +
scale_x_continuous(name="Longitude", expand=c(0,0)) +
ggtitle("LPJ-wsl") +
coord_equal(ratio=1)
spec.table <- read.csv('C:/Users/JMac/Documents/Kelly/biomodality/data/midwest_pls_full_density_pr_alb1.6-5.csv')
coordinates(spec.table) <- ~x +y
proj4string(spec.table) <- '+init=epsg:3175'
spec.table.ll<- spTransform(spec.table, crs('+proj=longlat +datum=NAD83 +no_defs +ellps=GRS80 +towgs84=0,0,0 '))
# crop the PDSI brick to the PLS domain (PDSI.brk is assumed to be the
# intended source raster here), then project to Great Lakes Albers
t <- crop(PDSI.brk, extent(spec.table.ll))
s <- projectRaster(t, crs='+init=epsg:3175') # project in Great Lakes Albers
y <- data.frame(rasterToPoints(s)) # convert to data frame
|
4ffd5f0c44d9ae5064abe9fad00e82bc3ac65054
|
89190c99d8309e9e5d9923886b1b530bd4fb54a4
|
/r_for_data_science_tutorial_project/9_relational_data.R
|
b87b130febb395d67229a883cdce4201a93b9ef7
|
[] |
no_license
|
brezaie/RTutorial
|
be7415c7caf7c40743ea1ef5fbf4d0402b8f1cd7
|
917fc035945f18a5041746db4d9dbde3611fbf70
|
refs/heads/master
| 2020-06-18T05:41:52.267705
| 2019-07-30T10:59:27
| 2019-07-30T10:59:27
| 196,183,592
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,393
|
r
|
9_relational_data.R
|
library(nycflights13)
library(dplyr)
airlines
airports
planes
weather
planes %>%
count(tailnum) %>%
filter(n > 1)
weather %>%
count(year, month, day, hour, origin)
## Add primary key (surrogate key) to 'flights'
flights <- flights %>%
mutate(id = row_number())
## left_join
joined_flights_airlines <- flights %>%
left_join(airlines, by = "carrier")
View(joined_flights_airlines)
## End of page 186
library(ggplot2)
# install.packages('maps')  # run once if the 'maps' package is missing
library(maps)
airports %>%
semi_join(flights, by = c("faa" = "dest")) %>%
ggplot(aes(lon, lat)) +
borders("state") +
geom_point() +
coord_quickmap()
## Get the flights to the top 10 destinations
top_dest <- flights %>%
count(dest, sort = TRUE) %>%
head(10)
flights %>%
filter(dest %in% top_dest$dest)
## To get only the rows in x (which is joined with y), use 'semi_join'
## The above query can run in this way:
flights %>%
semi_join(top_dest)
## Other useful table functions:
## anti_join(x, y): keeps the rows that have no match with y
## intersect(x, y): returns only observations both in x and y
## union(x, y): Return unique observations in x and y
## setdiff(x, y): Return observations in x, but not in y
## Examples:
library(tibble)
df1 <- tribble(
~x, ~y,
1, 1,
2, 1
)
df2 <- tribble(
~x, ~y,
1, 1,
1, 2
)
intersect(df1, df2)
union(df1, df2)
setdiff(df1, df2)
setdiff(df2, df1)
|
d38c3cc6bf4fba9dc1137f8a3d13ae97811f0f17
|
364dcb95aac6dff3f8548768dc99bba945ec81b6
|
/tests/testthat/test-geom-point.R
|
76b285e2e61f57b4248aa153d8742a85a331eb1b
|
[
"MIT"
] |
permissive
|
tidyverse/ggplot2
|
3ef62b72861c246b13ffc2d95678079984fe65c0
|
c76b9aeda648e9b6022b7169021e854c3d3890cb
|
refs/heads/main
| 2023-08-31T07:08:20.846510
| 2023-08-17T16:19:44
| 2023-08-17T16:19:44
| 19,438
| 4,632
| 1,971
|
NOASSERTION
| 2023-09-14T13:25:40
| 2008-05-25T01:21:32
|
R
|
UTF-8
|
R
| false
| false
| 746
|
r
|
test-geom-point.R
|
test_that("single strings translate to their corresponding integers", {
expect_equal(translate_shape_string("square open"), 0)
})
test_that("vectors of strings translate to corresponding integers", {
shape_strings <- c(
"square open",
"circle open",
"square open",
"triangle open"
)
expect_equal(translate_shape_string(shape_strings), c(0, 1, 0, 2))
})
test_that("single characters are not translated to integers", {
expect_equal(translate_shape_string(letters), letters)
expect_equal(translate_shape_string(as.character(0:9)), as.character(0:9))
})
test_that("invalid shape names raise an error", {
expect_snapshot_error(translate_shape_string("void"))
expect_snapshot_error(translate_shape_string("tri"))
})
|
a48bc63c30c0520940ed4d6d0072b9473be6082e
|
0e78c3b67b9e49c94ebfbd0b6d5f9e6d2c1b4a3a
|
/code/analysis/est/est_func.R
|
30427a4173ba30e55e002f9467a54e07ed73a982
|
[] |
no_license
|
yusuket0123/relief_covid-19
|
b82aef5972f81e055e1a076703717bee62126eb0
|
a6b00f46d779379278ce1296361423e4d57b5046
|
refs/heads/main
| 2023-05-01T17:32:29.745775
| 2021-05-21T00:14:30
| 2021-05-21T00:14:30
| 304,525,190
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,984
|
r
|
est_func.R
|
#"""
# estimation functions
#"""
library(R6)
est = R6Class(
"est", # estใจใใใฏใฉในใไฝๆ
public = list(
data = NA,
initialize = function(data){
      # the constructor is "initialize"
      # instance variables are accessed via self$
self$data = data
},
    ### define methods
## did estimation
est_did = function(year = "2020", outcome, covariates) {
# dataframe for estimation
year = y
df_use = self$data %>%
dplyr::mutate(post = dplyr::case_when(year == y ~ 1, TRUE ~ 0)
)
# estimation
formula = paste(outcome, paste(covariates, collapse = " + "), sep = " ~ ")
reg = lfe::felm(as.formula(formula), df_use)
summary = list(formula = formula,
tidy = broom::tidy(reg),
augment = broom::augment(reg),
glance = broom::glance(reg)
)
return(summary)
},
## linear prob. model at household level
estimate_error_hhlevel = function(comid = "yes", outcome, covariates){
if(comid == "yes"){
formula = paste(outcome, paste(paste(covariates, collapse = " + "), "comid", sep = " + "), sep = " ~ ")
} else {
formula = paste(outcome, paste(covariates, collapse = " + "), sep = " ~ ")
}
est = lfe::felm(as.formula(formula) , data = self$data)
summary = list(formula = formula, estimate = broom::tidy(est), glance = broom::glance(est), augment = broom::augment(est))
return(summary)
},
## linear prob. model at community level
estimate_error_comlevel = function(outcome, covariates){
formula = paste(outcome, paste(covariates, collapse = " + "), sep = " ~ ")
est = lfe::felm(as.formula(formula) , data = self$data)
summary = list(formula = formula, estimate = broom::tidy(est), glance = broom::glance(est), augment = broom::augment(est))
return(summary)
},
## change instance var.
set_data = function(data){
self$data = data
}
)
)
output.excelsheets = R6Class(
"output.excelsheets",
public = list(
file_name = NA,
initialize = function(file_name){
self$file_name = file_name
},
## write xlsx files
output_xlsxfile = function(list_est){
wb <- openxlsx::createWorkbook()
for (i in names(list_est)[!stringi::stri_detect_regex(names(list_est), ".*imp.*")]) {
sheet = i
openxlsx::addWorksheet(wb, paste0(sheet,"e"))
openxlsx::addWorksheet(wb, paste0(sheet,"g"))
openxlsx::writeData(wb, sheet = paste0(sheet,"e"), x = list_est[[i]]$tidy)
openxlsx::writeData(wb, sheet = paste0(sheet,"g"), x = list_est[[i]]$glance)
}
## Save workbook to working directory
openxlsx::saveWorkbook(wb, file = paste0(self$file_name, ".xlsx"), overwrite = TRUE)
print("saving succeed")
},
set_file_name = function(file_name){
self$file_name = file_name
}
)
)
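## Hypothetical usage sketch (the data frame and variable names below are
## assumptions for illustration, not from this repository):
## estimator <- est$new(df)
## did <- estimator$est_did(year = "2020", outcome = "relief",
##                          covariates = c("treated", "post", "treated:post"))
## output.excelsheets$new("results")$output_xlsxfile(list(did2020 = did))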
|
18044cf81f8957f0a9625603adde28f58486b379
|
b32f22ae43046016ad205f09f6d2fcde75394d8e
|
/R/io.R
|
c005d70622407e49271d185793d4e7b2490d4222
|
[] |
no_license
|
jrdnmdhl/StoreR
|
4c86818e55332974a65d445553f62944151ff843
|
04f6c7f500a9da35fa4b538adb737b965a42e744
|
refs/heads/master
| 2021-01-13T00:04:06.655876
| 2017-09-04T22:14:08
| 2017-09-04T22:14:08
| 49,844,870
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 664
|
r
|
io.R
|
read_name_from_lib <- function(libpath, name) {
readRDS(paste0(libpath, name, ".rstore"))
}
read_names_from_lib <- function(libpath, names) {
names %>%
setNames(., .) %>%
purrr::map(~read_name_from_lib(libpath, .))
}
write_name_to_lib <- function(libpath, name, value) {
saveRDS(value, paste0(libpath, name, ".rstore"))
}
write_names_to_lib <- function(libpath, list) {
purrr::walk2(names(list), list, ~write_name_to_lib(libpath, .x, .y))
}
delete_name_from_lib <- function(libpath, name) {
  file.remove(paste0(libpath, name, ".rstore"))
}
delete_names_from_lib <- function(libpath, names) {
purrr::map(names, ~delete_name_from_lib(libpath, .))
}
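# Round-trip sketch (illustration only; note that libpath is pasted directly
# onto the name, so it should end in a path separator):
# lib <- paste0(tempdir(), "/")
# write_names_to_lib(lib, list(a = 1:3, b = letters))
# read_names_from_lib(lib, c("a", "b"))
# delete_names_from_lib(lib, c("a", "b"))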
|
553d61be89bc3ebc97da1170a39209401f34b11d
|
4806763a1f25c39e5cd250f4d58e758981b125a7
|
/R/stream.r
|
0994948702c0f149cddc15fe87a8693a60969965
|
[
"MIT"
] |
permissive
|
frawalther/WatershedTools
|
a02d9abc680e1a9cb5cf0bcfdc908a3f48770f61
|
874f2c82938a3846df3461e6782fd51295e9dbdd
|
refs/heads/master
| 2023-03-25T06:03:23.162801
| 2020-08-03T17:05:57
| 2020-08-03T17:06:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,693
|
r
|
stream.r
|
#' Produce a DEM with basins filled in, as well as a flow direction raster
#'
#' @param dem Raster or character; a digital elevation model. If specified as a character, a layer with that name from the existing [GrassSession()] given by gs will be used.
#' @param gs An optional [GrassSession()]; if missing a new one will be created
#' @param filledDEM Optional character; if missing, a raster stack is returned, otherwise the filled DEM is written to the grass session with this name and the modified GrassSession is returned.
#' @param probs Optional character; as with filled DEM, for problem areas
#' @param file The file name of the raster to be returned (if no `outputName` is provided), see `details`.
#' @param ... Additional parameters to pass to [GrassSession()]
#'
#' @details This is a wrapper for [r.fill.dir](https://grass.osgeo.org/grass74/manuals/r.fill.dir.html)
#'
#' It is recommended to specify the `file` parameter (including the extension to specify
#' file format; e.g., .tif, .grd). If not specified, a temp file will be created and will be
#' lost at the end of the R session
#'
#' @return A [raster::stack], optionally written to `file` with 2 layers: the filled DEM and a list of problem areas (if outputName is missing), or a GrassSession otherwise
#' @export
fillDEM <- function(dem, gs, filledDEM, probs, file = NULL, ...)
{
if(missing(gs)) {
gs <- GrassSession(dem, layerName = "dem", ...)
dem <- "dem"
} else if(!is.character(dem)) {
gs <- GSAddRaster(dem, layerName = "dem", gs)
dem <- "dem"
}
filledDEMName <- if(missing(filledDEM)) "filledDEM" else filledDEM
probsName <- if(missing(probs)) "problem_areas" else probs
flowDirection <- "flow_direction"
rgrass7::execGRASS("r.fill.dir", flags=c("overwrite", "quiet"), input=dem,
output = filledDEMName, direction = flowDirection, areas = probsName)
gs <- GSAppendRasterName(c(probsName, filledDEMName), gs)
# if both missing, return raster stack; if only one return the other, if neither return session
if(missing(filledDEM) & missing(probs)) {
res <- GSGetRaster(c(filledDEMName, probsName), gs)
} else if(missing(filledDEM)) {
res <- GSGetRaster(filledDEMName, gs)
} else if(missing(probs)) {
res <- GSGetRaster(probsName, gs)
} else {
res <- gs
}
return(res)
}
#' Watershed analysis tools
#'
#' @param dem Raster or character; a digital elevation model, preferably one filled using [fillDEM()]. If specified as a character, a layer with that name from the existing [GrassSession()] given by gs will be used.
#' @param threshold Minimum size of an exterior watershed, in *cells*
#' @param gs An optional [GrassSession()]; if missing a new one will be created
#' @param accumulation,drainage Optional character; if missing, a raster stack is returned, otherwise the corresponding raster is written to the grass session under this name and the modified GrassSession is returned.
#' @param file The file name of the raster to be returned (if no `outputName` is provided), see `details`.
#' @param ... Additional parameters to pass to [GrassSession()]
#' @details This is a wrapper for [r.watershed](https://grass.osgeo.org/grass74/manuals/r.watershed.html)
#'
#' It is recommended to specify the `file` parameter (including the extension to specify
#' file format; e.g., .tif, .grd). If not specified, a temp file will be created and will be
#' lost at the end of the R session
#'
#' @return A [raster::stack()] with two layers, flow accumulation ('accumulation') and drainage
#' direction ('drainage') (if outputName is missing), or a GrassSession otherwise.
#' Drainage direction values can be negative (indicating water flowing out of the map region),
#' zero (indicating a portion of a natural depression), or positive. Positive values are
#' measured counterclockwise from northeast; 1 is northeast, 2 is north, and so on through 8, which flows due east.
#' @export
drainageAccumulation <- function(dem, threshold = 250, gs, accumulation, drainage,
file = NULL, ...)
{
if(missing(gs)) {
gs <- GrassSession(dem, layerName = "dem", ...)
dem <- "dem"
} else if(!is.character(dem)) {
gs <- GSAddRaster(dem, layerName = "dem", gs)
dem <- "dem"
}
accumulationName <- if(missing(accumulation)) "accumulation" else accumulation
drainageName <- if(missing(drainage)) "drainage" else drainage
rgrass7::execGRASS("r.watershed", flags=c("overwrite", "quiet"), elevation = dem,
accumulation = accumulationName, drainage = drainageName)
gs <- GSAppendRasterName(c(accumulationName, drainageName), gs)
# if both missing, return raster stack; if only one return the other, if neither return session
if(missing(accumulation) & missing(drainage)) {
res <- GSGetRaster(c(accumulationName, drainageName), gs)
} else if(missing(accumulation)) {
res <- GSGetRaster(accumulationName, gs)
} else if(missing(drainage)) {
res <- GSGetRaster(drainageName, gs)
} else {
res <- gs
}
return(res)
}
#' Produce a map of a stream network
#'
#' @param dem Raster or character; a digital elevation model, preferably one filled using [fillDEM()]. If specified as a character, a layer with that name from the existing [GrassSession()] given by gs will be used.
#' @param accumulation Raster or character; flow accumulation later; see details.
#' @param threshold Accumulation threshold; optional, see details
#' @param qthresh Quantile threshold to use if `threshold` is missing
#' @param weights A raster layer specifying the weights to apply to flow accumulation before comparing to `threshold`.
#' @param gs An optional [GrassSession()]; if missing a new one will be created
#' @param outputName Optional character; if missing, a raster layer is returned, otherwise the raster is written to the grass session with this name and the modified GrassSession is returned.
#' @param type Character; return the stream network as a 'raster', a 'vector' (SpatialLinesDataFrame), or 'both'.
#' @param file The file name of the raster to be returned (if no `outputName` is provided), see `details`.
#' @param ... Additional parameters to pass to [GrassSession()] or r.stream.extract
#' @details A flow accumulation raster can be provided (as a rasterLayer or as the name of a raster in an existing Grass Session). If not provided, it will be computed.
#'
#' If no threshold is used, then one will be set automatically using the quantiles of the accumulation raster
#'
#' This is a wrapper for [r.stream.extract](https://grass.osgeo.org/grass74/manuals/r.stream.extract.html)
#'
#' It is recommended to specify the `file` parameter (including the extension to specify
#' file format; e.g., .tif, .grd). If not specified, a temp file will be created and will be
#' lost at the end of the R session
#' @return If `outputName` is missing and `type == 'raster'`, a [raster::raster]; if
#' `outputName` is missing and `type == 'vector'`, a [sp::SpatialLinesDataFrame]; if
#' `type == 'both'`, a list with both; otherwise a `GrassSession`
#' @export
extractStream <- function(dem, accumulation, threshold, qthresh = 0.95, weights, gs,
outputName, type = c('raster', 'vector', 'both'), file = NULL, ...)
{
type <- match.arg(type)
if((is.character(dem) | is.character(accumulation)) & missing(gs))
stop("If gs is missing, dem and accumulation must be a Raster or SpatialPixelsDataFrame")
if(missing(gs)) {
gs <- GrassSession(dem, layerName = "dem", ...)
dem <- "dem"
} else if(!is.character(dem)) {
gs <- GSAddRaster(dem, layerName = "dem", gs)
dem <- "dem"
}
if(!is.character(accumulation)) {
if(!missing(weights))
accumulation <- accumulation * weights
gs <- GSAddRaster(accumulation, layerName = "accumulation", gs)
if(missing(threshold))
accTh <- raster::values(accumulation)
accumulation <- "accumulation"
} else if(!missing(weights)) {
accu <- GSGetRaster(accumulation, gs)
accu <- accu * weights
accumulation <- paste0(accumulation, "_weighted")
gs <- GSAddRaster(accu, accumulation, gs)
}
if(missing(threshold)) {
if(!exists("accTh"))
accTh <- raster::values(GSGetRaster(accumulation, gs))
accTh[accTh < 0] <- NA
threshold <- quantile(accTh, qthresh, na.rm=TRUE)
}
streamName <- if(missing(outputName)) "streams" else outputName
rgrass7::execGRASS("r.stream.extract", flags=c("overwrite", "quiet"), elevation=dem,
accumulation = accumulation, threshold = threshold, stream_raster = streamName, stream_vector = streamName)
gs <- GSAppendRasterName(streamName, gs = gs)
if(!missing(outputName)) {
res <- gs
} else if(type == 'raster') {
res <- GSGetRaster(streamName, gs, file = file)
} else if(type == 'vector') {
res <- rgrass7::readVECT(streamName, ignore.stderr=TRUE, type="line")
} else {
res <- list(raster = GSGetRaster(streamName, gs, file = file),
vector = rgrass7::readVECT(streamName, ignore.stderr=TRUE, type="line"))
}
gs <- GSClean(streamName, gs, "vector")
return(res)
}
#' Snap points to a stream network raster
#'
#' @param x SpatialPoints or similar object
#' @param stream Raster or character; a stream network raster (e.g., from [extractStream()]).
#' If specified as a character, a layer with that name from the existing [GrassSession()]
#' given by gs will be used.
#' @param buff The distance (in meters if x is in geographic coordinates, map units otherwise) to restrict the search from points in x
#' @details If buff is too small, a nearby stream may not be found, in which case the original coordinates are returned
#' @return A SpatialPointsDataFrame, with the attributes from `x` and new coordinates
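#' @examples
#' \dontrun{
#' # hypothetical objects: a stream raster from extractStream() and survey sites
#' streams <- extractStream(dem = filledDEM, qthresh = 0.95)
#' snapped <- snapToStream(sites, streams, buff = 500)
#' }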
#' @export
snapToStream <- function(x, stream, buff)
{
if(grepl("longlat", sp::proj4string(x))) {
warning("x has geographic coordinates; reprojecting to epsg:32632")
projOrig <- sp::proj4string(x)
x <- sp::spTransform(x, sp::CRS("+init=epsg:32632"))
stream <- raster::projectRaster(stream, crs = sp::proj4string(x))
}
newCoords <- t(sapply(1:length(x), function(i) findClosest(x[i,], stream, buff = buff)))
result <- raster::as.data.frame(x)
result <- cbind(newCoords, result)
sp::coordinates(result) <- c(1,2)
sp::proj4string(result) <- sp::proj4string(x)
if(exists("projOrig"))
result <- sp::spTransform(result, projOrig)
return(result)
}
#' Find the closest non NA point in a raster
#' @param x a spatial object
#' @param y a raster
#' @param buff The distance (in map units) to restrict the search from points in x
#' @details Finds the points in y that are closest to x
#' @return The index in y of the closest point
#' @keywords internal
findClosest <- function(x, y, buff)
{
if(length(x) > 1)
stop("findClosest is not vectorised at this time")
smRas <- CropPtBuff(x, y, buff)
xc <- sp::coordinates(x)
inds <- which(!is.na(raster::values(smRas)))
if(length(inds) == 0)
return(xc)
yc <- sp::coordinates(smRas)[inds,,drop=FALSE]
xy <- rbind(xc,yc)
dists <- dist(xy)[1:nrow(yc)]
yc[which.min(dists),,drop=FALSE]
}
#' Crop raster around a point
#' @param pt a spatial point
#' @param ras a raster
#' @param buff distance to crop around point, in map units
#' @return Cropped raster
#' @keywords internal
CropPtBuff <- function(pt, ras, buff)
{
xc <- sp::coordinates(pt)
newextent <- raster::extent(xc[1] - buff, xc[1] + buff, xc[2] - buff, xc[2] + buff)
raster::crop(ras, newextent)
}
#' Compute strahler order for pixel x
#' @param ws A watershed
#' @param x A pixel id
#' @param streamOrder Vector of streamOrders already computed (should be NA if not done yet)
#' @return A matrix, giving pixel IDs in the first column and computed stream order in the second
#' @keywords internal
doStrahler <- function(ws, x, streamOrder) {
upIDs <- which(ws$adjacency[x,] == 1)
if(length(upIDs) < 1) {
val <- 1
} else {
upOrders <- streamOrder[upIDs]
if(any(is.na(upOrders))) {
val <- NA
} else if(sum(upOrders == max(upOrders)) == 1) {
val <- max(upOrders)
} else
val <- max(upOrders) + 1
}
pixes <- ws$data$id[ws$data$reachID == ws$data$reachID[x]]
cbind(pixes, rep(val, length(pixes)))
}
#' Compute strahler stream order
#'
#' @param ws A watershed
#' @param parallel If TRUE, will compute in parallel to speed things up. Defaults to TRUE
#' if parallel package is installed
#' @details Computes Strahler stream order
#' @return a vector, with one element per pixel in the watershed.
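#' @examples
#' \dontrun{
#' ## Hedged sketch; assumes `ws` is a watershed object built with this package
#' ord <- strahler(ws, parallel = FALSE)
#' table(ord)
#' }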
strahler <- function(ws, parallel = TRUE) {
if(parallel && requireNamespace("parallel")) {
FUN <- parallel::mclapply
} else
FUN <- lapply
streamOrder <- rep(NA, length=nrow(ws$data))
pix <- c(headwaters(ws)$id, confluences(ws)$id)
while(any(is.na(streamOrder))) {
ord <- FUN(pix, function(x) doStrahler(ws, x, streamOrder))
ord <- do.call(rbind, ord)
streamOrder[ord[,1]] <- ord[,2]
prev <- pix
pix <- pix[is.na(streamOrder[pix])]
if(length(pix) == length(prev))
stop("No progress being made, check topology")
}
streamOrder
}
|
fafe731be9f066efa6c6b507c634ccb33a0b107f
|
e801994a21c2dbb5fd290caae49b1edd45f029c1
|
/Genotyping/Rscripts/combined_concordance.R
|
217bb0410a686e4e4d86ce06b8a4b4654cec3c0e
|
[] |
no_license
|
GreallyLab/Rastogi_et_al_2019
|
a0fca08eafa1a1d6b82599d07c3a3ce6be1c8055
|
00560da44e170ec5112afa17f7de67c94395ccff
|
refs/heads/master
| 2020-05-18T14:18:06.011088
| 2020-03-17T18:51:57
| 2020-03-17T18:51:57
| 184,466,080
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 517
|
r
|
combined_concordance.R
|
# Looking at the concordance of ZCall and Genome Studio using the calibrate thresholds
# load options
options(scipen = 999, stringsAsFactors = FALSE)
concord_list <- data.frame()
# Collect the concordance value (first stats line) for each z-score
# threshold from 3 to 15
for (i in 3:15) {
  temp_list <- read.table(paste("Combined_zcall_noMM_noNA_NCfil.concordance.stats.",
    i, ".txt", sep = ""), sep = ":", skip = 3)
  concord_list <- rbind(concord_list, temp_list[1, ])
}
# Attach the threshold values and pick the one with the highest concordance
concord_list <- cbind(seq(3, 15), concord_list)
concord_list
concord_list[which.max(concord_list$V2), 1] # 6
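# Optional visual check (hedged sketch): plot concordance against the
# z-score threshold to see how flat the optimum is.
plot(concord_list[, 1], concord_list$V2, type = "b",
     xlab = "z-score threshold", ylab = "concordance")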
|
2d11bc51807fe1c1a4cf44c6639c79569c270fbd
|
28ead05c25ebff34a03495c8c315983e068d49d9
|
/R/RMSSD.R
|
567044727b4a83845f2c764dcfd719a414769046
|
[
"MIT"
] |
permissive
|
seanchrismurphy/relativeVariability
|
7e624c6505601d5094b53e96a1fb27591caaf60e
|
3a5e84a2b46ebb622d2a3a15008f396a88e7a0e0
|
refs/heads/master
| 2020-04-07T06:31:17.041941
| 2018-11-19T00:05:30
| 2018-11-19T00:05:30
| 158,138,996
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 66
|
r
|
RMSSD.R
|
RMSSD <-
function(X) {
  # root mean squared successive difference; return the value directly
  # instead of assigning it to a variable named after the function
  sqrt(MSSD(X))
}
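# Hedged usage sketch (kept commented so sourcing the package file stays
# side-effect free); assumes MSSD() from this same package is available:
# x <- c(1, 3, 2, 4, 3)
# RMSSD(x)  # equals sqrt(MSSD(x)), the RMS of successive differences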
|
3797512301c2baad8a677bcfeb9e7bb1a6787d10
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/effsize/examples/cohen.d.Rd.R
|
f132a4e62085324c4c185d49f10803db55ab16d1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 524
|
r
|
cohen.d.Rd.R
|
library(effsize)
### Name: cohen.d
### Title: Cohen's d and Hedges g effect size
### Aliases: cohen.d cohen.d.default cohen.d.formula
### Keywords: effect size Hedges Cohen
### ** Examples
treatment = rnorm(100,mean=10)
control = rnorm(100,mean=12)
d = c(treatment,control)
f = rep(c("Treatment","Control"),each=100)
## compute Cohen's d
## treatment and control
cohen.d(treatment,control)
## data and factor
cohen.d(d,f)
## formula interface
cohen.d(d ~ f)
## compute Hedges' g
cohen.d(d,f,hedges.correction=TRUE)
|
8105bc2257bf44409868152ac0c9a1925f9ff4b4
|
8dc96b914bc558c52e320b6d9debf0a6458d4504
|
/man/animate_vadt.Rd
|
71d87b8c9e72e640aef4a479ac4a555439f38c4c
|
[] |
no_license
|
cddesja/vat
|
c77e49a1d7fe8c5213f1720516051525dfba6814
|
24fa6f970b6f7984df29db7197debce364a717e8
|
refs/heads/master
| 2021-01-21T05:02:30.095034
| 2019-01-30T16:55:29
| 2019-01-30T16:55:29
| 31,027,220
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,327
|
rd
|
animate_vadt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/animate_vadt.R
\name{animate_vadt}
\alias{animate_vadt}
\title{Animated Atlantis plots for vadt}
\usage{
animate_vadt(bgm, bmass, interval = 0.3, codes, savedir)
}
\arguments{
\item{bgm}{An Atlantis bgm file}
\item{bmass}{BoxBiomass.txt file from Atlantis out}
\item{interval}{Speed of animation (unit in seconds)}
\item{codes}{Vector containing all the Atlantis functional group codes needed to create GIFs}
\item{savedir}{Where should the gifs be saved?}
}
\description{
Create animated Atlantis plots of all the tracers
}
\details{
This function will create GIFs of all Atlantis tracers by default. WARNING: This can take a while and is not necessary to use \code{vadt}; it is much better to specify particular groups. This function depends on having the Unix command \code{convert} installed locally. \code{convert} is part of ImageMagick and can be downloaded here: http://www.imagemagick.org/.
}
\examples{
\dontrun{
bgm <- "/path/to/atlantis.bgm"
bmass <- "/path/to/outputBoxBiomass.txt"
codes <- read.csv("functionalGroup.csv", header = T, stringsAsFactors = FALSE)
savedir <- "/home/chris/"
animate_vadt(bgm = bgm, bmass = bmass, codes = codes$Code, savedir = savedir)
}
}
\seealso{
\code{\link{create_vadt}}, \code{\link{vadt}}
}
|
dedc098305aebfccf5d9fc022ffb1c94d8aab2d5
|
8c11b33e3164a61f647611845c433f3d7779ec65
|
/R_code.R
|
53c55f653722bac09abc7d86f6c848afc03510da
|
[] |
no_license
|
iamyumang/Santander-customer-transaction
|
c2a52c5849c6fe8201b203070d3f758c17d124e4
|
47e84fd6088f4a1decac534ffb3a7f34e8fedc50
|
refs/heads/master
| 2020-09-11T14:44:13.728767
| 2019-11-16T13:12:11
| 2019-11-16T13:12:11
| 222,100,664
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,831
|
r
|
R_code.R
|
x = c("ggplot2", "corrgram", "DMwR", "caret", "randomForest", "unbalanced", "C50", "dummies", "e1071", "Information",
"MASS", "rpart", "gbm", "ROSE", 'sampling', 'DataCombine', 'inTrees', "plyr","dplyr", "rpart", "usdm", "DataCombine", "sp", "raster", "usdm")
install.packages(x)
lapply(x, require, character.only = TRUE)
install.packages("party")
library(party)
library(data.table)
library(dplyr)
library(lightgbm)
rm(list = ls())
getwd()
train = read.csv("train_san.csv", header = TRUE, sep = ",")
df_test = read.csv("test_san.csv", header = TRUE, sep = ",")
class(train$target)
sum(is.na(train))
train$target = as.factor(train$target)
class(train$target)
summary(train)
head(train)
dim(train)
str(train)
names(train)
###########################Check for missing values###########################
sum(is.na(train))
table(train$target)
#after extracting the unique values in target column, we can clearly say that the data is imbalanced
length(which(train$target == 1))/length(train$target) *100
# 10.049% of the target values are equal to 1
par(mar=c(1,1,1,1))
par(mfrow=c(2,2))
for (col in 3:ncol(train)) {
hist(train[,col])
}
for (col in 3:ncol(train)) {
boxplot(train[,col])
}
df_train = copy(train)
df_train$ID_code = NULL
class(df_train$target)
for(i in 2:ncol(df_train)){
if(class(df_train[,i]) == 'factor'){
df_train[,i] = factor(df_train[,i], labels=(2:length(levels(factor(df_train[,i])))))
}
}
############################################Outlier Analysis#############################################
# ## BoxPlots - Distribution and Outlier Check
numeric_index = sapply(df_train,is.numeric) #selecting only numeric
numeric_data = df_train[,numeric_index]
cnames = colnames(numeric_data)
# #Cap (winsorize) extreme values at the 5th and 95th percentiles instead
# #of removing observations
#df = train
#train = df
fun = function(x){
quantiles = quantile( x, c(.05, .95 ) )
x[ x < quantiles[1] ] = quantiles[1]
x[ x > quantiles[2] ] = quantiles[2]
x
}
df_train[,cnames] = lapply(df_train[,cnames], fun)
for (col in 2:ncol(df_train)) {
boxplot(df_train[,col])
}
sum(is.na(df_train))
df_train_final = copy(df_train)
## Correlation Plot
#corrgram(df_train[,numeric_index], order = F,
# upper.panel=panel.pie, text.panel=panel.txt, main = "Correlation Plot")
#correlation = cor(df_train[,c(2:201)])
#class(correlation)
# ANOVA of all predictors against the target at once, built as a single
# multivariate formula (a per-column aov() call on the factor target would be invalid)
formula = as.formula(paste0("cbind(", paste(names(df_train)[-1], collapse = ","), ") ~ target"))
Anova_test_result = aov(formula, data=df_train)
summary(Anova_test_result)
class(Anova_test_result)
#since the dataset is imbalance, we have to do oversampling or undersampling to overcome this problem
set.seed(1234)
d_train.index = createDataPartition(df_train$target, p = .75, list = FALSE)
d_train = df_train[ d_train.index,]
test = df_train[-d_train.index,]
table(d_train$target)
over = ovun.sample(target~., data= d_train, method = "over", N = 269854)$data
table(over$target)
under = ovun.sample(target~., data= d_train, method = "under", N = 30148)$data
table(under$target)
#*******************************predict using Logistic Regression(for upsampling and downsamplng both)********************
LR_under = glm(target ~.,data = under, family = binomial)
# predicted probabilities, then 0.5-threshold class labels
probabilities = LR_under %>% predict(test, type = "response")
predict_LR_under = ifelse(probabilities > 0.5, "1", "0")
predict_LR_under = as.factor(predict_LR_under)
confusionMatrix(predict_LR_under, test$target,positive = "1")
#---------------------------------------------------------------------------------
LR_over = glm(target ~.,data = over, family = binomial)
# predicted probabilities, then 0.5-threshold class labels
probabilities = LR_over %>% predict(test, type = "response")
predict_LR_over = ifelse(probabilities > 0.5, "1", "0")
predict_LR_over = as.factor(predict_LR_over)
confusionMatrix(predict_LR_over, test$target,positive = "1")
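#The target is imbalanced (~10% positives), so accuracy alone is misleading.
#Hedged sketch: compute the AUC of the oversampled logistic model, assuming
#the pROC package is available (it is not loaded above).
if (requireNamespace("pROC", quietly = TRUE)) {
  roc_over = pROC::roc(response = test$target, predictor = probabilities)
  print(pROC::auc(roc_over))
}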
#*******************************predict using decision tree(for upsampling and downsamplng both)********************
DT_under = ctree(target~., data = under)
confusionMatrix(predict(DT_under,test), test$target,positive = "1")
#--------------------------------------------------------------------------------
#DT_over = ctree(target ~ . , data = over)
#confusionMatrix(predict(DT_over,test), test$target,positive = "1")
#*******************************predict using random forest(for upsampling and downsamplng both)********************
rf_under = randomForest(target~. , data = under , importance = TRUE , ntree = 320)
confusionMatrix(predict(rf_under,test), test$target,positive = "1")
#---------------------------------------------------------------------------------
#rf_over = randomForest(target~. , data = over , importance = TRUE , ntree = 300)
#confusionMatrix(predict(rf_over,test), test$target,positive = "1")
#**************************************************************************************
#*************************************************************************************************
#***********************manipulating test data for prediction****************************************************
getwd()
df_test1 = copy(df_test)
df_test1$ID_code = NULL
head(df_test1)
###############################predict for test data#######################################
# predicted probabilities on the test set, then 0.5-threshold labels
probabilities_test = LR_over %>% predict(df_test1, type = "response")
predict_LR_test = ifelse(probabilities_test > 0.5, "1", "0")
df_test1$predicted_target = predict_LR_test
test_final = cbind(df_test, df_test1$predicted_target)
head(test_final)
table(test_final$`df_test1$predicted_target`)
#write.csv(test_final,file = file.choose(new = T))
|
0dfad0691d968b96426d95bc10c5c21f6702f615
|
90e11c8ac3dc6dc090140843504c12ba6273ebdd
|
/Calculate_Collaboration.R
|
6c2299f112e120566b5e9c0150a7ffb295e90f9b
|
[] |
no_license
|
ckummer/mw-collaboration
|
334323c6333c364cf449d4319655eb2c84f66730
|
e82a3887e5d4ffd337bde7fa6e722b664da0c430
|
refs/heads/master
| 2021-01-18T10:37:44.179782
| 2018-01-17T16:20:54
| 2018-01-17T16:20:54
| 21,331,412
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 23,482
|
r
|
Calculate_Collaboration.R
|
#This script measures collaboration in wikis.
#It is part of the diploma thesis "Learning Analytics zur Messung von Kollaboration in Wikis" by Stefan Jaeschke.
#Technische Universität Dresden - Wirtschaftsinformatik - Lehrstuhl für Informationsmanagement
#2013
#If the packages required for the analysis are not installed yet, remove the comment signs below and run the lines once.
#After the installation, comment the lines out again.
#install.packages("RMySQL")
#install.packages("QCA")
#install.packages("igraph")
#Load the RMySQL package - it is needed to establish a database connection to the MySQL database.
library(RMySQL)
library(QCA)
library(igraph)
#By default this script is designed to run hourly as a batch job.
#If you plan to run the R script at intervals of less than one hour, please adjust the following variable.
#In that case the string must be extended by "-%M".
timeformat <- "%Y-%m-%d-%H"
#Please set here the path under which the images of the social networks are to be stored.
image_path <- "/path_to_wiki/images/collaboration_plots/"
#The individual calibration parameters can be modified as desired.
#Please adjust the following variables to your needs.
#The calibration parameters are defined here.
#Transformational assignment for the variables CENT, DEG and DIS
#The values for full membership (1), the crossover point (0.5) and non-membership (0) must be specified.
#Calibration parameters for the centralization (CENT)
cent_full_membership <- 0
cent_crossover_point <- 0.1
cent_non_membership <- 1
#Calibration parameters for the median weighted degree (DEG)
deg_full_membership <- 10
deg_crossover_point <- 7
deg_non_membership <- 5
#Calibration parameters for the discussion posts (DIS)
dis_full_membership <- 30
dis_crossover_point <- 18
dis_non_membership <- 6
#For the calibration parameters of DEN the direct assignment form is used.
#This requires specifying several thresholds.
#The computation happens further down in the source code.
den_threshold1 <- 0.6
den_threshold2 <- 1
#Each DEN threshold has a value that is assigned to DEN when DEN falls into the corresponding level.
den1_calibrate_to <- 0.1
den2_calibrate_to <- 0.7
den3_calibrate_to <- 1
#Establish the database connection.
#The connection must be adapted to your database configuration.
#User: database user
#Password: password of the database user
#Dbname: database containing all MediaWiki tables.
con <- dbConnect(MySQL(), user="", password="", dbname="", host="localhost" )
#SQL statement to query the defined group numbers.
sql_groups <- paste ("SELECT DISTINCT user_wiki_group FROM `user` WHERE user_wiki_group IS NOT NULL ORDER BY user_wiki_group ASC")
#The SQL statement defined above is sent to the database over this connection.
#The query result is written to the data frame group_numbers.
group_numbers <- dbGetQuery(con, sql_groups)
group_edges <- list()
#Check whether at least one group exists in the database.
if(nrow(group_numbers)!=0){
#The following for loop computes all measures and determines whether collaboration is present in a group.
#The computation is carried out for all assigned group numbers.
#Additionally, a list is created in which the results for each group are stored.
for ( pa in 1:nrow(group_numbers) ) {
#SQL statement that, for the group whose number equals the current loop variable, reads the data from the database.
sql_data_group <- paste ("SELECT distinct rev_page, rev_user FROM revision,page,user WHERE rev_page = page_id and rev_user = user_id and page_namespace in (1, 3,5,7,9,11,13,15) and rev_user > 0 and user_inactivity IS NULL and user_wiki_group=",group_numbers[pa,],sep="")
#The SQL statement for the group data is sent to the database over this connection.
#The query result is written to the data frame group_data.
group_data <- dbGetQuery(con, sql_data_group)
#The vertices are extracted from the group data.
#The vertex matrix is extended by a column containing a new user ID.
#The extended vertex matrix later serves as a mapping table.
if(nrow(group_data)!=0){
sql_vertices <- paste ("SELECT distinct rev_user, CONVERT(rev_user_text USING utf8) as rev_user_text FROM revision,user WHERE rev_user = user_id and rev_user > 0 and user_inactivity IS NULL and user_wiki_group=",group_numbers[pa,],sep="")
vertices_data <- dbGetQuery(con, sql_vertices)
vertices_data <- cbind(vertices_data, 1:nrow(vertices_data))
}else{
#If the revision table contains no entries, information is pulled from the user table instead.
sql_vertices <- paste ("SELECT distinct user_id, CONVERT(user_name USING utf8) as user_name, CONVERT(user_real_name USING utf8) as user_real_name from user where user_inactivity IS NULL and user_wiki_group=",group_numbers[pa,],sep="")
vertices_data <- dbGetQuery(con, sql_vertices)
vertices_data <- cbind(vertices_data, 1:nrow(vertices_data))
}
#Check whether revision data could be retrieved.
if(nrow(group_data)!=0){
#Computation of the edges
#Before an entry is written to the target matrix, the user IDs are mapped. This is necessary because otherwise the graphs would not be created correctly later on.
#Otherwise the highest user ID would be looked up and as many vertices created as the value of that highest user ID.
#After inspecting a row in pages[,], all rows containing the same page number are determined and written to a counter.
#The individual users who accessed the page are then linked to each other.
#So that the connections are not computed multiple times, the status 'processed' is assigned after the first computation.
pages <- group_data[,1:2]
edges <- matrix(ncol=2)
for ( i in 1:length(pages[,1])){
counter <- which(pages[i,1] == pages[,1])
if(pages[i,1]!= "processed"){
if (length(counter) > 1 ) {
for (j in 1:(length(counter)-1)) {
for (k in (j+1):length(counter)){
#Users cannot be connected with themselves.
if(pages[counter[j],2] != pages[counter[k],2]){
#Map the identified users.
mapping1 <- which (pages[counter[j],2] == vertices_data[,1])
mapping2 <- which (pages[counter[k],2] == vertices_data[,1])
#Sort the identified users so the connections can be aggregated later. This is possible because the graph is undirected.
if(mapping1 > mapping2){
edges <- rbind(edges, c(vertices_data[mapping1,3],vertices_data[mapping2,3]))
} else{
edges <- rbind(edges, c(vertices_data[mapping2,3],vertices_data[mapping1,3]))
}
}
}
}
}
}
#Assign the status 'processed' to all rows containing the inspected page.
for(once in 1:length(counter)){
pages[counter[once],1] <- "processed"
}
}
#SQL query to retrieve the group data with timestamp and parent ID.
#The data is ordered ascending by page ID and parent ID.
sql_data_dis1 <- paste(group_numbers[pa,]," ORDER BY rev_page, rev_parent_id ASC",sep="")
sql_data_dis <- paste ("SELECT distinct rev_page, rev_user, rev_timestamp, rev_parent_id FROM revision,page,user WHERE rev_page = page_id and rev_user = user_id and page_namespace in (1, 3,5,7,9,11,13,15) and rev_user > 0 and user_inactivity IS NULL and user_wiki_group=",sql_data_dis1,sep="")
#The query for the extended group data is sent to the database and the result written to a data frame.
data_discussion <- dbGetQuery(con, sql_data_dis)
#Select the first two columns and create a new empty matrix.
pages_dis <- data_discussion[,1:2]
edges_dis <- matrix(ncol=2)
#The discussion posts are determined here for each individual page, by checking whether consecutive discussion posts were written by different users.
#After inspecting a row in pages[,], all rows containing the same page number are determined and written to a counter.
#So that the discussion posts are not computed multiple times, the status 'processed' is assigned after the first computation.
for ( x in 1:length(pages_dis[,1])){
counter_dis <- which(pages_dis[x,1] == pages_dis[,1])
if(pages_dis[x,1]!= "processed"){
if (length(counter_dis) > 1 ) {
for (y in 1:(length(counter_dis)-1)) {
#Check whether the users differ in consecutive posts.
if(pages_dis[counter_dis[y],2] != pages_dis[counter_dis[y+1],2]){
mapping1 <- which (pages_dis[counter_dis[y],2] == vertices_data[,1])
mapping2 <- which (pages_dis[counter_dis[y+1],2] == vertices_data[,1])
if(mapping1 > mapping2){
edges_dis <- rbind(edges_dis, c(vertices_data[mapping1,3],vertices_data[mapping2,3]))
} else{
edges_dis <- rbind(edges_dis, c(vertices_data[mapping2,3],vertices_data[mapping1,3]))
}
}
}
}
}
#Assign the status 'processed' to all rows containing the inspected page.
for(once_dis in 1:length(counter_dis)){
pages_dis[counter_dis[once_dis],1] <- "processed"
}
}
#The first row is not carried over because it contains NA values.
edges_dis <- edges_dis[-1,]
#Convert the data frame into a matrix.
edges_dis <- matrix(edges_dis, ncol=2)
if(nrow(edges_dis)!=0){
#Convert the data frame into a matrix (a redundant repeat of the conversion above, kept from the original).
edges_dis <- matrix(edges_dis, ncol=2)
#Merge the computed connections and discussion posts into a common matrix.
for(z in 1:nrow(edges_dis)){
edges <- rbind(edges, c(edges_dis[z,1],edges_dis[z,2]))
}
#Write the edges for each group into the list.
group_edges[[pa]] <- edges[-1,]
#Initialize the network and add the computed edges.
#The network is undirected.
network <- graph.edgelist(matrix(group_edges[[pa]], ncol=2), directed=FALSE)
#Query the edge list again from the network just created.
edgelist <- get.edgelist(network,names=TRUE)
#Check whether the retrieved edge list is empty, meaning there are no connections between the vertices.
if (nrow(edgelist)!=0) {
#The edge list is converted into a data frame.
edgesdf <- as.data.frame(edgelist)
#Add a Weight column to the data frame.
#All rows are assigned the weight 1.
edgesdf$Weight <- 1
#The weights are aggregated via the aggregate function and summed for every existing pair of values.
edgesdf <- aggregate( edgesdf["Weight"], list(V1=edgesdf$V1,V2=edgesdf$V2), FUN=sum )
#The data frame edgesdf with the aggregated data is converted back into a matrix.
edgesmatrix <- as.matrix(edgesdf)
#The graph is created again based on the new edges.
network <- graph.edgelist(matrix(edgesmatrix[,-3], ncol=2), directed=FALSE)
#If there are users whose ID does not appear in the edge list and whose user ID is higher than the highest one used there, vertices are added for them here.
network <- add.vertices(network, (nrow(vertices_data) - vcount(network)))
#Add the weights to the graph.
E(network)$weight=as.numeric(edgesmatrix[,3])
#Add the real names of the group members to the graph.
V(network)$name <- vertices_data[,2]
} else{
#This part is called when no connections between the individual group members were found.
#Create an empty graph. The number of vertices in the graph equals the number of group members.
#The graph is undirected.
network <- graph.empty(n=nrow(vertices_data),directed=FALSE)
#If there are users whose ID does not appear in the edge list and whose user ID is higher than the highest one used there, vertices are added for them here.
network <- add.vertices(network, (nrow(vertices_data) - vcount(network)))
#Add the real names of the group members to the graph.
V(network)$name <- vertices_data[,2]
}
#Graphical representation of the group's network.
#For this, the path to the MediaWiki image folder must be specified.
#For each group an image of the current network is saved every hour.
#The image is saved with timestamp and group number under the given path.
time <- format(Sys.time(), timeformat)
full_path <- paste (image_path,time,"-Gruppe-",group_numbers[pa,],".png",sep="")
#Define the layout for the network.
#The layout is determined automatically, depending on the number of vertices, which guarantees an optimal representation.
layout_test <- layout.auto(network)
png(filename=full_path, width=800, height=600)
plot(network, layout=layout_test, edge.width=E(network)$weight/4, vertex.label.dist=0.6, vertex.label.font=1, vertex.label.cex=1.5,vertex.size=7, vertex.color="red", edge.label=as.numeric(edgesmatrix[,3]-1), edge.label.cex=1.5)
dev.off()
#Next the individual measures needed to determine whether collaboration is present are computed.
#Computation of the density (DEN) of the network
den <- graph.density(network)
#Computation of the centralization (CENT)
cent <- centralization.degree(network)$centralization
#Computation of the discussion posts (DIS)
dis <- nrow(matrix(edges_dis, ncol=2))
#Computation of the median weighted degree (DEG)
degree <- list()
for(m in 1:nrow(vertices_data)) {
degree[m] <- length(which(matrix(edges_dis, ncol=2)==vertices_data[m,3]))
}
deg <- sum(unlist(degree))/length(degree)
#The computed measures are calibrated at this point.
#Calibration of the centralization (CENT)
calibrated_cent <- calibrate(cent, type = "fuzzy", thresholds = c(cent_non_membership, cent_crossover_point, cent_full_membership))
#Calibration of the median weighted degree (DEG)
calibrated_deg <- calibrate(deg, type = "fuzzy", thresholds = c(deg_non_membership, deg_crossover_point, deg_full_membership))
#Calibration of the discussion posts (DIS)
calibrated_dis <- calibrate(dis, type = "fuzzy", thresholds = c(dis_non_membership, dis_crossover_point, dis_full_membership))
#Calibration of the density (DEN)
calibrated_den <- ifelse(den < den_threshold1, den1_calibrate_to, ifelse(den < den_threshold2, den2_calibrate_to, den3_calibrate_to))
#Evaluation of the formula resulting from the Boolean minimization.
#For this, the two other factors which according to Dillenbourg (1999) constitute collaboration would first have to be computed.
#Then a truth table is built and the Boolean minimization is carried out on that basis.
#These steps are omitted in this script; the formula for determining collaboration computed by Kummer (2013) is used instead.
#If a different formula is desired, this part of the script must be adapted accordingly.
#The formula is: DEN*DIS + CENT*DEG*DIS -> COLL
#
#In Boolean algebra * stands for AND and + for OR.
collaboration_1 <- ifelse(calibrated_den>calibrated_dis,calibrated_dis,calibrated_den)
collaboration_2 <- ifelse(calibrated_cent>calibrated_deg, ifelse(calibrated_deg>calibrated_dis,calibrated_dis,calibrated_deg),ifelse(calibrated_cent>calibrated_dis,calibrated_dis,calibrated_cent))
collaboration_final <- ifelse(collaboration_1>collaboration_2,collaboration_1,collaboration_2)
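#Note (hedged): the nested ifelse() calls above implement the fuzzy AND as a
#minimum and the fuzzy OR as a maximum. An equivalent, easier-to-read sketch:
#collaboration_final <- max(min(calibrated_den, calibrated_dis),
#                           min(calibrated_cent, calibrated_deg, calibrated_dis))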
#Write the results of the individual groups to the database.
#The connection specified above is used.
df <- data.frame(time,pa,den,cent,deg,dis,calibrated_den,calibrated_cent,calibrated_deg,calibrated_dis,collaboration_final)
dbWriteTable(con, name="collaboration", value=df, append=TRUE)
}else{
#This part is executed when no discussion edges could be determined for the group.
network <- graph.empty(n=nrow(vertices_data),directed=FALSE)
network <- add.vertices(network, (nrow(vertices_data) - vcount(network)))
V(network)$name <- vertices_data[,2]
#Graphical representation of the group's network.
#For this, the path to the MediaWiki image folder must be specified.
#For each group an image of the current network is saved every hour.
#The image is saved with timestamp and group number under the given path.
time <- format(Sys.time(), timeformat)
full_path <- paste (image_path,time,"-Gruppe-",group_numbers[pa,],".png",sep="")
#Define the layout for the network.
#The layout is determined automatically, depending on the number of vertices, which guarantees an optimal representation.
layout_test <- layout.auto(network)
png(filename=full_path, width=800, height=600)
#The empty network has no edges, so no edge styling arguments are needed here.
plot(network, layout=layout_test, vertex.label.dist=0.6, vertex.label.font=1, vertex.label.cex=1.5,vertex.size=7, vertex.color="red")
dev.off()
#Next the individual measures needed to determine whether collaboration is present are computed.
#Computation of the density (DEN) of the network
den <- 0
#Computation of the centralization (CENT)
cent <- 0
#Computation of the discussion posts (DIS)
dis <- 0
#Computation of the median weighted degree (DEG)
deg <- 0
#Calibration of the centralization (CENT)
calibrated_cent <- 0
#Calibration of the median weighted degree (DEG)
calibrated_deg <- 0
#Calibration of the discussion posts (DIS)
calibrated_dis <- 0
#Calibration of the density (DEN)
calibrated_den <- 0
#Evaluation of the formula resulting from the Boolean minimization.
#For this, the two other factors which according to Dillenbourg (1999) constitute collaboration would first have to be computed.
#Then a truth table is built and the Boolean minimization is carried out on that basis.
#These steps are omitted in this script; the formula for determining collaboration computed by Kummer (2013) is used instead.
#If a different formula is desired, this part of the script must be adapted accordingly.
#The formula is: DEN*DIS + CENT*DEG*DIS -> COLL
#
#In Boolean algebra * stands for AND and + for OR.
collaboration_1 <- 0
collaboration_2 <- 0
collaboration_final <- ifelse(collaboration_1>collaboration_2,collaboration_1,collaboration_2)
#Write the results of the individual groups to the database.
#The connection specified above is used.
df <- data.frame(time,pa,den,cent,deg,dis,calibrated_den,calibrated_cent,calibrated_deg,calibrated_dis,collaboration_final)
dbWriteTable(con, name="collaboration", value=df, append=TRUE)
}
}else{
#This part is executed when no entries could be found in the revision table for the group.
network <- graph.empty(n=nrow(vertices_data),directed=FALSE)
network <- add.vertices(network, (nrow(vertices_data) - vcount(network)))
V(network)$name <- vertices_data[,2]
#Graphical representation of the group's network.
#For this, the path to the MediaWiki image folder must be specified.
#For each group an image of the current network is saved every hour.
#The image is saved with timestamp and group number under the given path.
time <- format(Sys.time(), timeformat)
full_path <- paste (image_path,time,"-Gruppe-",group_numbers[pa,],".png",sep="")
#Define the layout for the network.
#The layout is determined automatically, depending on the number of vertices, which guarantees an optimal representation.
layout_test <- layout.auto(network)
png(filename=full_path, width=800, height=600)
#The empty network has no edges, so no edge styling arguments are needed here.
plot(network, layout=layout_test, vertex.label.dist=0.6, vertex.label.font=1, vertex.label.cex=1.5,vertex.size=7, vertex.color="red")
dev.off()
#Next the individual measures needed to determine whether collaboration is present are computed.
#Computation of the density (DEN) of the network
den <- 0
#Computation of the centralization (CENT)
cent <- 0
#Computation of the discussion posts (DIS)
dis <- 0
#Computation of the median weighted degree (DEG)
deg <- 0
#Calibration of the centralization (CENT)
calibrated_cent <- 0
#Calibration of the median weighted degree (DEG)
calibrated_deg <- 0
#Calibration of the discussion posts (DIS)
calibrated_dis <- 0
#Calibration of the density (DEN)
calibrated_den <- 0
#Evaluation of the formula resulting from the Boolean minimization.
#For this, the two other factors which according to Dillenbourg (1999) constitute collaboration would first have to be computed.
#Then a truth table is built and the Boolean minimization is carried out on that basis.
#These steps are omitted in this script; the formula for determining collaboration computed by Kummer (2013) is used instead.
#If a different formula is desired, this part of the script must be adapted accordingly.
#The formula is: DEN*DIS + CENT*DEG*DIS -> COLL
#
#In Boolean algebra * stands for AND and + for OR.
collaboration_1 <- 0
collaboration_2 <- 0
collaboration_final <- ifelse(collaboration_1>collaboration_2,collaboration_1,collaboration_2)
#Write the results of the individual groups to the database.
#The connection specified above is used.
df <- data.frame(time,pa,den,cent,deg,dis,calibrated_den,calibrated_cent,calibrated_deg,calibrated_dis,collaboration_final)
dbWriteTable(con, name="collaboration", value=df, append=TRUE)
}
}
}
#Close the database connection.
dbDisconnect(con)
|
a1c20c6d62b01e289a5796bb6305125ec0f39972
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/mistat/inst/scripts/ch08.R
|
7c1a0140f57ecd2d9a9d4d816053f22aa1e37508
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,108
|
r
|
ch08.R
|
###################################################
### Chap08Start
###################################################
library(mistat)
library(qcc)
qcc.options(bg.margin = "white",
violating.runs = list(pch=15, col=1),
beyond.limits = list(pch=15, col=1))
###################################################
### PlotRunChart50PistonCycleTimes
###################################################
Ps <- pistonSimulation(seed=123)
plot(Ps$seconds,
type="b",
ylab="sec.")
###################################################
### PlotHist50PistonCycelTimes
###################################################
hist(Ps$seconds,
main = "",
xlab="sec.")
###################################################
### PlotqqNorm50PistonCycleTimes
###################################################
library(car)
qqPlot(Ps$seconds,
distribution="norm",
col.lines=1,
ylab = "sec.",
envelope=FALSE)
###################################################
### Turb2Example
###################################################
Ps <- pistonSimulation(seed=123)
Ps <- simulationGroup(Ps, 5)
head(Ps, 3)
aggregate(x=Ps["seconds"],
by=Ps["group"],
FUN=mean)
###################################################
### PlotXbarChart50PistonCycleTimes
###################################################
Ps <- pistonSimulation(seed=123,
each=5*20)
Ps <- simulationGroup(Ps, 5)
CycleTime <- qcc.groups(Ps$seconds,
Ps$group)
PsXbar <- invisible(qcc(CycleTime,
type="xbar"))
###################################################
### PlotSChart50PistonCycleTimes
###################################################
PsS <- invisible(qcc(CycleTime,
type="S"))
rm(CycleTime, Ps)
###################################################
### PlotXbarChart50PistonCycleTimesTrendAmbientTemperature
###################################################
PsTtm <- pistonSimulation(m = rep(60, 105),
s = rep(0.02, 105),
v0 = rep(0.01, 105),
k = rep(5000, 105),
p0 = rep(110000, 105),
t = c(rep(296,45),
296*1.1^(1:60)),
t0 = rep(360, 105),
each = 1,
seed = 123,
check = FALSE)
PsTtm <- simulationGroup(PsTtm, 5)
CycleTime <- qcc.groups(PsTtm$seconds,
PsTtm$group)
invisible(qcc(CycleTime,
type="xbar",
center=PsXbar$center,
limits=PsXbar$limits))
###################################################
### PlotSChart50PistonCycleTimesTrendAmbientTemperature
###################################################
invisible(qcc(CycleTime,
type="S",
center=PsS$center,
limits=PsS$limits))
rm(CycleTime, PsTtm)
###################################################
### PlotXbarChart50PistonCycleTimesTrendSpringCoefficient
###################################################
PsTk <- pistonSimulation(m = rep(60, 105),
s = rep(0.02, 105),
v0 = rep(0.01, 105),
k = c(rep(5000, 45),
5000*0.985^(1:60)),
p0 = rep(110000, 105),
t = rep(296,105),
t0 = rep(360, 105),
each = 1,
seed = 123,
check = FALSE)
PsTk <- simulationGroup(PsTk, 5)
CycleTime <- qcc.groups(PsTk$seconds,
PsTk$group)
invisible(qcc(CycleTime,
type="xbar",
center=PsXbar$center,
limits=PsXbar$limits))
###################################################
### PlotSChart50PistonCycleTimesSpringCoefficient
###################################################
invisible(qcc(CycleTime,
type="S",
center=PsS$center,
limits=PsS$limits))
rm(CycleTime, PsXbar, PsS, PsTk)
###################################################
### PlotXbarPatternsToDetectSpecialCauses00
###################################################
set.seed(123)
X <- rnorm(100, 10, 1)
G <- rep(1:20, each=5)
X[51:55] <- X[51:55]*1.3
Process <- qcc.groups(X, G)
layout(matrix(1:2, 2, byrow=TRUE))
invisible(qcc(Process,
type="xbar",
center=10,
std.dev=2.236071))
set.seed(123)
X <- rnorm(100, 10, 1)
X[21:55] <- X[21:55]*1.05
Process <- qcc.groups(X, G)
invisible(qcc(Process,
type="xbar",
center=10,
std.dev=2.236071))
layout(1)
###################################################
### PlotXbarPatternsToDetectSpecialCauses01
###################################################
set.seed(123)
X <- rnorm(100, 10, 1)
X[21:50] <- seq(12.5, 9.5, length.out=30)
Process <- qcc.groups(X, G)
layout(matrix(1:2, 2, byrow=TRUE))
invisible(qcc(Process,
type="xbar",
center=10,
std.dev=2.236071))
set.seed(123)
X <- rnorm(100, 10, 1)
X[c(21:25, 31:35)] <- 12.5
Process <- qcc.groups(X, G)
invisible(qcc(Process,
type="xbar",
center=10,
std.dev=2.236071))
points(c(5,7), c(12.5, 12.5), pch=15)
abline(h=10+2*2.236071/sqrt(5), lty="dotdash")
layout(1)
rm(Process, X, G)
###################################################
### Capability
###################################################
Ps <- pistonSimulation(seed=123)
Ps <- simulationGroup(Ps, 5)
CycleTime <- qcc.groups(data=Ps$seconds,
sample=Ps$group)
PsXbar <- qcc(CycleTime,
type="xbar",
nsigmas=3,
plot=FALSE)
process.capability(PsXbar,
spec.limits=c(0.1, 0.5))
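# Hedged cross-check: the capability index Cp is (USL - LSL)/(6*sigma);
# recomputing it from the chart's estimated sigma should agree with the
# process.capability() output above.
(0.5 - 0.1)/(6*PsXbar$std.dev)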
###################################################
### PlotParetoChartSoftwareErrors
###################################################
data(PBX)
invisible(
pareto.chart(PBX,
col=gray.colors(
n=length(PBX)),
main ="PBX software errors"))
###################################################
### PlotXbarChartShewartControlChart
###################################################
data(GASTURBINE)
invisible(qcc(GASTURBINE))
abline(h=0.4508481 + 0.04601819*2/sqrt(5), lty="dotdash")
abline(h=0.4508481 - 0.04601819*2/sqrt(5), lty="dotdash")
###################################################
### PlotPChartJanuaryData
###################################################
data(JANDEFECT)
invisible(qcc(JANDEFECT,
type="p",
sizes=100,
center=0.048,
std.dev=sqrt(0.048*(1-0.048))))
###################################################
### PlotXbarChartContactLengths
###################################################
data(CONTACTLEN)
invisible(qcc(CONTACTLEN, type="xbar"))
###################################################
### PlotSChartContactLengths
###################################################
invisible(qcc(CONTACTLEN, type="S"))
###################################################
### PlotRChartContactLengths
###################################################
invisible(qcc(CONTACTLEN, type="R"))
###################################################
### Chap08End
###################################################
rm(CONTACTLEN, CycleTime, GASTURBINE, Ps,
JANDEFECT, PBX, PsXbar)
detach(package:qcc)
detach(package:car)
detach(package:mistat)
|
af86945c4e4939761460e5ea7d4f465fd6ace67b
|
515f4cd97befa78ac1c18ae5e752aaaa5133bfdc
|
/plot1.R
|
82c1116fc9dda39e349e4156141593b22e104a82
|
[] |
no_license
|
ga02h35x/coursera_exploratory
|
d386143ebaa9eca2b86ae0944336ca8bdf914bf5
|
cf3a823628aac3b83143f6001b30f9ed47036cb2
|
refs/heads/master
| 2020-03-13T16:05:20.891983
| 2018-04-26T17:55:42
| 2018-04-26T17:55:42
| 131,189,031
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,002
|
r
|
plot1.R
|
# Packages used
require("data.table")
# Download the data
path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, file.path(path, "data.zip"))
unzip(zipfile = "data.zip")
# Read the data
data_power <- data.table::fread(input = "household_power_consumption.txt", na.strings="?")
# Ensure Global_active_power is numeric (it may be read as character
# because of the "?" missing-value strings)
data_power[, Global_active_power := lapply(.SD, as.numeric), .SDcols = c("Global_active_power")]
# Convert the Date column from character to Date class
data_power[, Date := lapply(.SD, as.Date, "%d/%m/%Y"), .SDcols = c("Date")]
# Select the data to process: dates for 2007-02-01 to 2007-02-02
data <- data_power[(Date >= "2007-02-01") & (Date <= "2007-02-02")]
# Create graphic
png("plot1.png", width=480, height=480)
## Plot 1
hist( data[, Global_active_power],
main="Global Active Power",
xlab="Global Active Power (kilowatts)",
ylab="Frequency", col="Red")
dev.off()
|
39aba44e74aa16328837dbb38a694385f9ffcf8f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/B2Z/examples/B2ZM_MCMC.Rd.R
|
5628f66b796577333bbdfaabc18b536d4e96c668
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,494
|
r
|
B2ZM_MCMC.Rd.R
|
library(B2Z)
### Name: B2ZM_MCMC
### Title: Bayesian Two-Zone Models: using Gibbs with Metropolis step
### Aliases: B2ZM_MCMC
### Keywords: models
### ** Examples
##################
#Dependent Model #
##################
#Data 1: 100 simulated concentrations during the times
#between 0 and 4, using the parameters Beta = 5, Q = 13.8,
#G = 351.5, VN = pi*10^-3, VF = 3.8, Tau_N = 1,
#Tau_NF = 0.5 and Tau_F = 0.64.
data(ex1)
#Without specifying the initial values and
#the covariance matrix in the proposal distribution
## Not run:
##D r <- B2ZM_MCMC(data = ex1, priorBeta = "unif(0,10)",
##D priorQ="unif(11,17)", priorG = "unif(281,482)", S = diag(10,2),
##D v = 4, VN = pi*10^-3, VF = 3.8, NUpd = 10000, burnin = 1000,
##D lag = 1, m = 5000)
##D
##D summary(r)
##D plot(r)
## End(Not run)
#Specifying the initial values and the covariance matrix in the proposal distribution
initial <- c(5.338671, 14.147149, 379.591927)
Sigma.Cand <- matrix(c(0.51306, 0.54981, 14.4306,
0.54981, 1.75525, 35.5525,
14.4306, 35.5525, 1360.5119),3,3)
r <- B2ZM_MCMC(data = ex1, priorBeta = "unif(0,10)",
priorQ = "unif(11,17)", priorG = "unif(281,482)", S = diag(10,2),
v = 4, VN = pi*10^-3, VF = 3.8, NUpd = 1000, burnin = 100,
lag = 1, m = 5000, initial = initial, Sigma.Cand = Sigma.Cand)
summary(r)
plot(r)
## Not run:
##D #Saving figures with .jpg extension
##D r <- B2ZM_MCMC(data = ex1, priorBeta = "unif(0,10)",
##D priorQ = "unif(11,17)", priorG = "unif(281,482)", S = diag(10,2),
##D v = 4, VN = pi*10^-3, VF = 3.8, NUpd = 10000, burnin = 1000,
##D lag = 1, m = 5000, figures = list(save = TRUE, type ="jpg") )
## End(Not run)
####################
#Independent Model #
####################
#Data 2: 100 simulated concentrations during the times
#between 0 and 4, using the parameters Beta = 5, Q = 13.8,
#G = 351.5, VN = pi*10^-3, VF = 3.8, Tau_N = 1,
#Tau_NF = 0 and Tau_F = 0.64.
## Not run:
##D data(ex2)
##D
##D #Without specifying the initial values and the
##D #covariance matrix in the proposal distribution
##D
##D r <- B2ZM_MCMC(data = ex2, indep.model = TRUE,
##D priorBeta = "unif(0,10)", priorQ="unif(11,17)",
##D priorG = "unif(281,482)", tauN.sh = 5 , tauN.sc = 4 ,
##D tauF.sh = 5, tauF.sc = 7 , VN = pi*10^-3, VF = 3.8,
##D NUpd = 10000, burnin = 1000, lag = 1, m = 1000)
##D
##D summary(r)
##D plot(r)
## End(Not run)
|
83827e2fc4d04721e06da1bc0cb858671313b03c
|
f400e65fec959b0bf45cce6cbbf1fa4d73d69324
|
/computational statistics/EM-alogirthm - BN with missing.R
|
76adba54bd830210fc77178cf0743b71502ee89a
|
[] |
no_license
|
miniii222/study_in_graduate
|
afd726226d01fa7dceeadaed421d0e2bb862ea3b
|
70d3a2e29cf9497298c7505c883d78eab8d29bd7
|
refs/heads/master
| 2020-04-10T08:28:59.503539
| 2019-06-25T13:41:42
| 2019-06-25T13:41:42
| 160,906,960
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
r
|
EM-alogirthm - BN with missing.R
|
library(ggplot2)
#Bivariate Normal with missing values
w1 <- c(8,11,16,18,6,4,20,25,9,13)
w2 <- c(10,14,16,15,20,4,18,22,NA,NA)
binorm_missing<-function(w1,w2,e=10^(-10),maxiter=1000){
mu1 = mean(w1); mu2 = mean(w2, na.rm=T)
v1<-var(w1); v2<-var(w2, na.rm=T); v12<-cov(w1[1:8],w2[1:8])
n<-length(w1)
err<-1; niter<-0
while(niter<=maxiter && err>e){
#E-step
w2[9] <- mu2 + (v12/v1)*(w1[9] - mu1)
w2[10] <- mu2 + (v12/v1)*(w1[10] - mu1)
#M-step
mu2 <- mean(w2);
new_v12 <- cov(w1,w2); v2 <- var(w2)
err<-abs(new_v12 - v12); v12<-new_v12
niter<-niter+1
}
result<-list(mu = c(mu1,mu2),
var = matrix(c(v1,v12,v12,v2),ncol=2))
return(result)
}
binorm_missing(w1,w2)
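# Hedged sanity check: compare the EM estimates with the naive
# available-case estimates to see how much the two imputed values
# shift the mean of w2.
em <- binorm_missing(w1, w2)
em$mu
colMeans(cbind(w1, w2), na.rm = TRUE) # available-case means for comparison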
|
52f4e12023aba6c91a9ec58f9b9afe0df2757f89
|
297bce564ffe3dd6f0f043f41d7944610193662b
|
/R/extendr-wrappers.R
|
1a145b9a99cbf30012490e69a72d5174cba623c6
|
[
"MIT"
] |
permissive
|
chaoshengt/string2path
|
62041d331edd12968cae836c3db1676ffa86815c
|
226c2f7166ce257465b5c0fc49481373eb485b26
|
refs/heads/main
| 2023-06-29T05:52:14.286907
| 2021-08-01T00:53:23
| 2021-08-01T00:53:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 656
|
r
|
extendr-wrappers.R
|
# Generated by extendr: Do not edit by hand
#
# This file was created with the following call:
# .Call("wrap__make_string2path_wrappers", use_symbols = TRUE, package_name = "string2path")
#' @docType package
#' @usage NULL
#' @useDynLib string2path, .registration = TRUE
NULL
string2path_impl <- function(text, font_file, tolerance) .Call(wrap__string2path_impl, text, font_file, tolerance)
string2stroke_impl <- function(text, font_file, tolerance, line_width) .Call(wrap__string2stroke_impl, text, font_file, tolerance, line_width)
string2fill_impl <- function(text, font_file, tolerance) .Call(wrap__string2fill_impl, text, font_file, tolerance)
|
02fcadfc18b63a049f1900f75c566ef40533576b
|
7a95abd73d1ab9826e7f2bd7762f31c98bd0274f
|
/mcga/inst/testfiles/ByteCodeMutation/libFuzzer_ByteCodeMutation/ByteCodeMutation_valgrind_files/1612802390-test.R
|
2af0d3c2f06312f7047de660c3fe55dbdd434aaf
|
[] |
no_license
|
akhikolla/updatedatatype-list3
|
536d4e126d14ffb84bb655b8551ed5bc9b16d2c5
|
d1505cabc5bea8badb599bf1ed44efad5306636c
|
refs/heads/master
| 2023-03-25T09:44:15.112369
| 2021-03-20T15:57:10
| 2021-03-20T15:57:10
| 349,770,001
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 730
|
r
|
1612802390-test.R
|
testlist <- list(bytes1 = c(-2030043136L, 37748739L, -16056541L, 562069632L, -2139062144L, -2139062144L, -2139062144L, -2139062144L, -2139062144L, -2139062144L, -2139062144L, -2139062144L, -2139062144L, -2139062144L, -2137877870L, -1837105152L, 515L, 14015745L, -309161326L, -1835887982L, -1835887982L, -1835888128L, 0L, 146L, -1835887982L, -1835887982L, -105L, -1835887982L, -1835887982L, -1835887982L, -1835925451L, 891630901L, 154238976L, 32768L, 822083712L, 805306368L, 4259840L, 119668608L, -105L, 255L, -14016201L, 721419777L, -587076224L, 125L, 587202780L, 33686271L, -2139226368L, -2147483520L, 0L, 33751040L, NA), pmutation = 8.28904605845809e-317)
result <- do.call(mcga:::ByteCodeMutation,testlist)
str(result)
|
d17e376f80e5da276696c377419f986602a25720
|
fbc250539c000ea58165cfee199afe8d9fbe57da
|
/man/stamppPhylip.Rd
|
9d031c81c1e1d227d875b1f6ee924815d3ec0835
|
[] |
no_license
|
cran/StAMPP
|
3fce0a4b68a29a4f1888611dfa019dfeff4f98b2
|
d521ba65370dca3336fc87a2a20c5ba61e446dee
|
refs/heads/master
| 2021-08-31T12:58:51.905294
| 2021-08-08T03:20:05
| 2021-08-08T03:20:05
| 17,693,767
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,378
|
rd
|
stamppPhylip.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stamppPhylip.R
\name{stamppPhylip}
\alias{stamppPhylip}
\title{Export to Phylip Format}
\usage{
stamppPhylip(distance.mat, file = "")
}
\arguments{
\item{distance.mat}{the matrix containing the genetic distances generated from stamppNeisD to be converted into Phylip format}
\item{file}{the file path and name to save the Phylip format matrix as}
}
\description{
Converts the genetic distance matrix generated with stamppNeisD into Phylip format and exports it as a text file
}
\details{
The exported Phylip formatted text file can be easily imported into software packages such as DARWin (Perrier & Jacquemound-Collet 2006) to be used to generate neighbour joining trees
}
\examples{
# import genotype data and convert to allele frequencies
data(potato.mini, package="StAMPP")
potato.freq <- stamppConvert(potato.mini, "r")
# Calculate genetic distance between populations
potato.D.pop <- stamppNeisD(potato.freq, TRUE)
# Export the genetic distance matrix in Phylip format
\dontrun{stamppPhylip(potato.D.pop, file="potato_distance.txt")}
}
\references{
Perrier X, Jacquemound-Collet JP (2006) DARWin - Dissimilarity Analysis and Representation for Windows. Agricultural Research for Development
}
\author{
Luke Pembleton <luke.pembleton at agriculture.vic.gov.au>
}
|
e41c6d4cc319586886728e17958581749be33a2a
|
144637e7e9fd67854a74bd5d76e5b6207a74cc66
|
/ui.R
|
ccd38d7b5e4b6dea586e23ea3fbe229edc1bc857
|
[] |
no_license
|
felixgerlsbeck/Next_Word_Predictor
|
3351d95f0e8b7328faa56a0247dfb66eca487e67
|
3076a8b05a6b1ef3d9626bfc797872aafbc5557d
|
refs/heads/master
| 2021-01-23T00:34:39.069985
| 2017-03-21T20:54:51
| 2017-03-21T20:54:51
| 85,741,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 864
|
r
|
ui.R
|
library(shiny)
library(shinythemes)
shinyUI(fluidPage(theme = shinytheme("superhero"),
titlePanel("Next Word Prediction App"),
##Simple UI layout with entry field in the sidebar and output in the main panel.
sidebarLayout(
sidebarPanel(
h4("This application takes any text input and predicts the most likely next word in the sequence"),
textInput("text", label = "Enter text here", value = "", width = NULL, placeholder = "Your words here")
),
##Main panel outputs concatenated sentence with input + predicted word as well as wordcloud of five most likely words.
mainPanel(
h4("Is this what you wanted to write?"),
tags$blockquote(textOutput("MostLikelySentence")),
tags$hr(),
h4("These are the five most likely next words"),
plotOutput("wordcloud", height = "300px")
)
)
))
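## Hedged sketch of the matching server side these outputs imply; the
## prediction helpers (`predict_next`, `plot_top_words`) are hypothetical
## stand-ins for whatever server.R actually implements.
# shinyServer(function(input, output) {
#   output$MostLikelySentence <- renderText({
#     paste(input$text, predict_next(input$text))
#   })
#   output$wordcloud <- renderPlot({
#     plot_top_words(input$text, n = 5)
#   })
# })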
|
211bac4f9d8da2cde01555dc5fd060ddddc38ab6
|
f49d4f14b515c6e0403e69428e495e22f6f0fb2e
|
/tests/testthat.R
|
b6952876c09b81488acaa675ae7873f62b41e724
|
[] |
no_license
|
szugat/predfat
|
2bdb92f87de04c0f7a1f7786381390718e0b3ec8
|
ea390720d56fa6ecd76adb5ffb1823d97d493e6c
|
refs/heads/master
| 2020-12-24T06:59:05.292260
| 2016-06-06T11:27:35
| 2016-06-06T11:27:35
| 34,121,567
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 50
|
r
|
testthat.R
|
library(testthat)
test_check(package = "predfat")
|
19842feab6ed1c5555e0e5bfbdddc6c30f09a810
|
3ba7e702b4d05cd8fbea4c165d64ef04552eeeba
|
/replicacao.R
|
65c9489b10cccb812af7373f16c3c8551e1b92df
|
[] |
no_license
|
meirelesff/monte_carlo_rbcp
|
d3292079c1c59bdec211646d958e794ac3639db4
|
70349e77a6cac10d9910c6dff958d1e1175de0d0
|
refs/heads/master
| 2021-01-23T10:45:23.288562
| 2017-06-01T20:11:50
| 2017-06-01T20:11:50
| 93,092,326
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,267
|
r
|
replicacao.R
|
# ---
# REPLICATION OF 'SIMULACOES DE MONTE CARLO NO ENSINO DE CIENCIA POLITICA'
# ---
# Load the required packages
library(stargazer)
library(gridExtra)
library(ggplot2)
library(MASS)
# Set a seed to make the results reproducible
set.seed(2)
# ---
# FUNCTIONS USED
# ---
# Function to simulate N regressions with an omitted variable (returns a data.frame with the estimates)
lmbias <- function(sim = 1000, N = 100, sd.x = 1, beta = 1, beta0 = 1, bias = 0.5){
res_bias <- res <- numeric(sim)
for(i in 1:sim){
x_bias <- mvrnorm(N, mu = c(0, 0), matrix(c(1, bias, bias, 1), nrow = 2), empirical = T)
x <- x_bias[, 1]
x_bias <- x_bias[, 2]
y <- beta0 + beta * x + x_bias + rnorm(N, mean = 0, sd = 1)
res_bias[i] <- coef(lm(y ~ x))[2]
res[i] <- coef(lm(y ~ x + x_bias))[2]
}
out <- data.frame('X com viés' = res_bias, 'X sem viés' = res)
out
}
# Function to simulate a vote under different majority rules
votacao <- function(legisladores, prob_sim, regra = c("unanimidade", "maioria_simples")){
regra <- match.arg(regra)
resultado <- rbinom(legisladores, 1, prob = prob_sim)
if(regra == "unanimidade") resultado <- as.numeric(sum(resultado) == legisladores)
else resultado <- as.numeric(sum(resultado) >= (legisladores %/% 2) + 1)
resultado <- ifelse(resultado == 1, "Aprovadas", "Rejeitadas")
factor(resultado, levels = c("Rejeitadas", "Aprovadas"))
}
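# Quick hedged check of the helper above: a single simulated vote by a
# five-member chamber under each rule (outcomes vary run to run).
votacao(5, 0.5, "unanimidade")
votacao(5, 0.5, "maioria_simples")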
# ---
# FIGURES USED IN THE PAPER
# ---
# FIGURE 1 - Simulating the mean height of Brazilians
# Set the parameters used (they can be changed)
altura_media <- 170
desvio <- 15
sims <- 1000
amostra <- 10
# Run the simulation
resultado <- replicate(sims, mean(rnorm(amostra, altura_media, desvio)))
# Save the figure in the current directory
jpeg("graf1.jpeg", width = 6, height = 4, res = 300, units = "in")
qplot(resultado, geom = "histogram", binwidth = 1, ylab = "Frequência", xlab = "Média de altura", colour = I("gray"), fill = I("gray90")) +
geom_vline(xintercept = mean(resultado), size = 0.7, linetype = 2) +
scale_y_continuous(expand = c(0, 0))
dev.off()
# FIGURE 2 - Simulating a legislative process
# Set the parameters used (they can be changed)
legisladores <- 5
prob_sim <- .5
sims <- 1000
# Simulate 1000 votes with 5 legislators under unanimity
unam <- replicate(sims, votacao(legisladores, prob_sim, "unanimidade"))
# Simulate 1000 votes with 5 legislators under simple majority
maioria <- replicate(sims, votacao(legisladores, prob_sim, "maioria_simples"))
# Generate the plots
g2a <- qplot(factor(unam), fill = I("gray90"), col = I("gray"), ylab = "Frequência", xlab = "Votações", main = "Unanimidade") +
scale_y_continuous(expand = c(0, 0))
g2b <- qplot(factor(maioria), fill = I("gray90"), col = I("gray"), ylab = NULL, xlab = "Votações", main = "Maioria simples") +
scale_y_continuous(expand = c(0, 0))
# Save the figure in the current directory
jpeg("graf2.jpeg", width = 6, height = 3, res = 300, units = "in")
grid.arrange(g2a, g2b, ncol = 2)
dev.off()
# FIGURE 3 - Simulating a legislative process with polarization (10,000 simulations)
sims <- 10000
legisladores <- 10
polarizacao <- seq(0.0, 0.5, by = 0.05)
prob_sim <- vector("list", length(polarizacao))
for(i in 1:length(polarizacao)) {
prob_sim[[i]] <- rep(0.5, 10)
prob_sim[[i]][1:5] <- prob_sim[[i]][1:5] - polarizacao[i]
prob_sim[[i]][6:10] <- prob_sim[[i]][6:10] + polarizacao[i]
}
res <- lapply(prob_sim, function(x){
data.frame(replicate(sims, votacao(legisladores, x, "maioria_simples")), polar = x[6] - x[5])
})
res <- do.call("rbind", res)
names(res) <- c("Resultado", "Polarizacao")
jpeg("graf3.jpeg", width = 6, height = 3.5, res = 300, units = "in")
ggplot(res, aes(x = as.factor(Polarizacao), fill = Resultado)) + geom_bar(color = "gray") +
scale_y_continuous(expand = c(0, 0)) +
scale_fill_manual(values = c("gray90", "gray20")) +
labs(fill = NULL,
x = "Grau de polarização\n(na prob. de votar sim)",
y = "Frequência")
dev.off()
# FIGURE 4 - Simulating omitted variables
# Run 1000 simulations
x <- lmbias()
# Generate the plots
vies <- qplot(x$X.com.viés, geom = "histogram", ylab = "Frequência", xlab = "Coeficientes", colour = I("gray"), fill = I("gray90"), main = "Estimativas enviesadas") +
geom_vline(xintercept = 1, size = 0.7, linetype = 2) +
scale_y_continuous(expand = c(0, 0))
sem_vies <- qplot(x$X.sem.viés, geom = "histogram", ylab = "Frequência", xlab = "Coeficientes", colour = I("gray"), fill = I("gray90"), main = "Estimativas não-enviesadas") +
geom_vline(xintercept = 1, size = 0.7, linetype = 2) +
scale_y_continuous(expand = c(0, 0))
# Save the figure in the current directory
jpeg("graf4.jpeg", width = 6, height = 3, res = 300, units = "in")
grid.arrange(sem_vies, vies, ncol = 2)
dev.off()
# FIGURE 5 - Simulating omitted variables (other degrees of correlation between x and z)
# Run 1000 simulations
x <- lmbias(bias = -.5)
# Generate the plots
vies1 <- qplot(x$X.com.viés, geom = "histogram", ylab = "Frequência", xlab = "Coeficientes", colour = I("gray"), fill = I("gray90"), main = "Corr. -0.5") +
geom_vline(xintercept = 1, size = 0.7, linetype = 2) +
scale_y_continuous(expand = c(0, 0))
# Run 1000 simulations
x2 <- lmbias(bias = .3)
# Generate the plots
vies2 <- qplot(x2$X.com.viés, geom = "histogram", ylab = "Frequência", xlab = "Coeficientes", colour = I("gray"), fill = I("gray90"), main = "Corr. 0.3") +
geom_vline(xintercept = 1, size = 0.7, linetype = 2) +
scale_y_continuous(expand = c(0, 0))
# Save the plot to the current directory
jpeg("graf5.jpeg", width = 6, height = 3, res = 300, units = "in")
grid.arrange(vies1, vies2, ncol = 2)
dev.off()
# FIGURE 6 - Simulating omitted variables (extended results)
cors <- lapply(seq(-1, 1, by = 0.1), function(x) as.data.frame(cbind(lmbias(bias = x), bias = x)))
cors <- do.call("rbind", cors)
jpeg("graf6.jpeg", width = 6.5, height = 3.5, res = 300, units = "in")
ggplot(cors, aes(x = as.factor(bias), y = `X.com.viés`)) +
stat_summary(fun.y = mean, fun.ymin = min, fun.ymax = max) +
geom_hline(yintercept = 1, linetype = 2) +
labs(x = "Grau de viés\n(correlação entre X e Z)", y = "Estimativa do efeito de X")
dev.off()
rm(list = ls())
|
f73b2e71cbd0ad9db1eadc77aad0ef7e37727ccd
|
12cfd9840d2a4eee125d6bcadab6d4450edd52cc
|
/KKT/U_KKTcheckings_using_R.R
|
14a023315699cef20709b3b8beb844ad6f473cb1
|
[] |
no_license
|
boxiang-wang/powerfamily
|
2af9b8aba0c435a26d5d202da7f988171846d9e5
|
abbc12ea2eb3ba5baf442c817a14e6ea3a22ba06
|
refs/heads/master
| 2021-05-28T00:36:11.678273
| 2014-04-12T17:48:55
| 2014-04-12T17:48:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,035
|
r
|
U_KKTcheckings_using_R.R
|
hsvm <- function(v, varlist) {
delta = varlist$delta
r <- v[1]
if (r > 1)
dl <- 0 else if (r <= (1 - delta))
dl <- (1 - r - delta/2) else dl <- (r - 1)^2/delta/2
dl
}
dhsvm <- function(v, varlist) {
delta = varlist$delta
r <- v[1]
if (r > 1)
dl <- 0 else if (r <= (1 - delta))
dl <- -1 else dl <- (r - 1) / delta
dl
}
power <- function(v, varlist) {
qv = varlist$qv
decib = qv / (qv + 1)
r <- v[1]
dl = ifelse(r > decib, r ^ (-qv) * (qv ^ qv) / ((qv + 1) ^ (qv + 1)), 1 - r)
dl
}
dpower <- function(v, varlist) {
  qv = varlist$qv
  decib = qv / (qv + 1)
  r <- v[1]
  dl = ifelse(r > decib, (-1) * r ^ (-qv - 1) * decib ^ (qv + 1), -1)
  dl
}
margin <- function(b0, beta, y, x, loss = c("hsvm", "power"), delta=2, qv=2) {
loss <- match.arg(loss)
nobs <- nrow(x)
b0MAT <- matrix(rep(b0, nobs), nrow = nobs, byrow = TRUE)
link <- x %*% beta + b0MAT
if (loss %in% c("hsvm", "power")) {
r <- y * link
} else r <- y - link
fun <- paste("d", loss, sep = "")
varlist = list(delta=delta, qv=qv)
    dMat <- apply(r, c(1, 2), match.fun(fun), varlist) # element-wise loss derivative, n by l
if (loss %in% c("hsvm", "power")) {
yxdMat <- t(x) %*% (dMat * y)/nobs
} else yxdMat <- t(x) %*% dMat/nobs
yxdMat
}
# l is the number of lambda
# p is nvars, n is nobs
# dim(dMat) = n by l
# dim(yxdMat) = p by l
#################################################################################
############ KKT, no printing, only give the violation percentage ################
#################################################################################
KKTnp = function(b0, beta, y, x, lambda, lambda2, thr,
loss = c("hsvm", "power"), delta=2, qv=2) {
loss = match.arg(loss)
dl = margin(b0, beta, y, x, loss=loss, delta=delta, qv=qv)
ctr = 0
for (l in 1:length(lambda)) {
p = nrow(beta)
for(j in 1:p)
{
if(beta[j,l]==0)
{
BB = abs(dl[j,l]) - lambda[l]
if (BB > thr)
{
ctr <- ctr + 1
}
} else{
AA = dl[j,l] + lambda[l] * sign(beta[j,l]) + lambda2 * beta[j,l]
if (abs(AA) >= thr)
{
ctr <- ctr + 1
}
}
}
}
ccounts = length(lambda) * p
return(ctr/ccounts*100)
}
#################################################################################
############ KKT, printing violations ################
#################################################################################
KKTp = function(b0, beta, y, x, lambda, lambda2, thr,
loss = c("hsvm", "power"), delta=2, qv=2) {
loss = match.arg(loss)
dl = margin(b0, beta, y, x, loss=loss, delta=delta, qv=qv)
count0 = 0
ctr0 = 0
ctrn0 = 0
for (l in 1:length(lambda)) {
p = nrow(beta)
for(j in 1:p)
{
if(beta[j,l]==0)
{
BB = abs(dl[j,l]) - lambda[l]
count0 = count0 + 1
if (BB > thr)
{
cat("violate at b = 0", BB, "\n")
ctr0 <- ctr0 + 1
}
} else{
AA = dl[j,l] + lambda[l] * sign(beta[j,l]) + lambda2 * beta[j,l]
if (abs(AA) >= thr)
{
cat("violate at b != 0", abs(AA), "\n")
ctrn0 <- ctrn0 + 1
}
}
}
}
ctr = ctrn0 + ctr0
ccounts = length(lambda) * p
countn0 = ccounts - count0
cat("# of checkings is ", ccounts, ".\n", sep="")
cat("# of violations for zero beta is ", ctr0, ".\n", sep="")
cat("% of violations for zero beta is ", ctr0/count0*100, "%", ".\n", sep="")
cat("# of violations for non-zero beta is ", ctrn0, ".\n", sep="")
cat("% of violations for non-zero beta is ", ctrn0/countn0*100, "%", ".\n", sep="")
cat("# of total violations is ", ctr, ".\n", sep="")
cat("% of total violations is ", ctr/ccounts*100, "%", ".\n", sep="")
return(ctr/ccounts*100)
}
#################################################################################
############ KKT, call KKTnp or KKTp by print.out ################
#################################################################################
KKT = function(b0, beta, y, x, lambda, lambda2, thr,
loss = c("hsvm", "power"), delta=2, qv=2, print.out = F)
{
if(print.out == F)
{
KKTnp(b0=b0, beta=beta, y=y, x=x, lambda=lambda,
lambda2=lambda2, thr=thr,
loss = loss, delta=delta, qv=qv)
} else
{
KKTp(b0=b0, beta=beta, y=y, x=x, lambda=lambda,
lambda2=lambda2, thr=thr,
loss = loss, delta=delta, qv=qv)
}
}
#################################################################################
############ Fit model and check KKT conditions ################
#################################################################################
KKTperctg = function(dat, lambda2, qv, eps, thr)
{
if(eps > 1) eps = 10 ^ (-eps)
if(thr > 1) thr = 10 ^ (-thr)
m.temp = GCDpower(x=dat$x, y=dat$y,
lambda2=lambda2, qv=qv, method="power",eps=eps, standardize=F)
KKT(m.temp$b0, m.temp$beta, dat$y, dat$x, m.temp$lambda, lambda2=lambda2, thr=thr,
qv=qv, loss = c("power"), print.out=F)
}
#################################################################################
############ Summarize KKT condition checking tables ################
#################################################################################
KKTtb = function(dat, lambda2, qv, nm, eps.list=c(6:10), thr.list=c(2:5),
file.loc = "D:\\GitHub\\powerfamily\\Outputs\\KKTrda\\")
{
perct.tb = matrix(NA, length(thr.list), length(eps.list))
colnames(perct.tb) = paste("1e-", eps.list, sep="")
rownames(perct.tb) = paste("1e-", thr.list, sep="")
for(i in 1:length(eps.list))
{
for(j in 1:length(thr.list))
{
print(c(i, j))
perct.tb[j, i] = KKTperctg(dat, lambda2=lambda2, qv=qv, eps=eps.list[i],
thr=thr.list[j])
}
}
save("perct.tb",
file=paste(file.loc, nm, ".rda", sep=""))
return(perct.tb)
}
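#################################################################################
############       Usage sketch                                  ################
#################################################################################
# A minimal example of how these checks might be run; GCDpower() comes from this
# repository and `dat` (a list with components $x and $y) is an illustrative
# placeholder, so the call is left commented:
# m <- GCDpower(x = dat$x, y = dat$y, lambda2 = 1, qv = 2, method = "power",
#               eps = 1e-8, standardize = FALSE)
# KKT(m$b0, m$beta, dat$y, dat$x, m$lambda, lambda2 = 1, thr = 1e-4,
#     qv = 2, loss = "power", print.out = TRUE)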
|
6b072ba0b58605fe273dd2f63f01905b3ccc2cae
|
f644050d5616b639c6524447fe71ba7fd9fa3eae
|
/2.R
|
f567dc8cbe2e20314d015528361edde9a3ecbe18
|
[] |
no_license
|
Dikosh/xgboost-1-
|
45e0c1863735caf76d98766d6ef25f8b2e873d05
|
9471fc510da8e1ada363e92675e28946d588e2cd
|
refs/heads/master
| 2021-07-08T19:35:43.121365
| 2017-10-04T06:29:15
| 2017-10-04T06:29:15
| 105,737,835
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,793
|
r
|
2.R
|
install.packages("drat", repos="https://cran.rstudio.com")
drat:::addRepo("dmlc")
install.packages("xgboost", repos="http://dmlc.ml/drat/", type = "source")
data(agaricus.train, package='xgboost')
data(agaricus.test, package='xgboost')
train <- agaricus.train
test <- agaricus.test
bstSparse <- xgboost(data = train$data, label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic")
bstDense <- xgboost(data = as.matrix(train$data), label = train$label, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic")
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
bstDMatrix <- xgboost(data = dtrain, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic")
# verbose = 0, no message
bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic", verbose = 0)
# verbose = 1, print evaluation metric
bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic", verbose = 1)
# verbose = 2, also print information about tree
bst <- xgboost(data = dtrain, max.depth = 2, eta = 1, nthread = 2, nround = 2, objective = "binary:logistic", verbose = 2)
pred <- predict(bst, test$data)
# size of the prediction vector
print(length(pred))
# limit display of predictions to the first 10
print(head(pred))
prediction <- as.numeric(pred > 0.5)
print(head(prediction))
err <- mean(as.numeric(pred > 0.5) != test$label)
print(paste("test-error=", err))
dtrain <- xgb.DMatrix(data = train$data, label=train$label)
dtest <- xgb.DMatrix(data = test$data, label=test$label)
watchlist <- list(train=dtrain, test=dtest)
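# the watchlist makes xgb.train report the evaluation metric(s) on both sets after each boosting round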
bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nthread = 2, nround=2, watchlist=watchlist, objective = "binary:logistic")
bst <- xgb.train(data=dtrain, max.depth=2, eta=1, nthread = 2, nround=2, watchlist=watchlist, eval.metric = "error", eval.metric = "logloss", objective = "binary:logistic")
bst <- xgb.train(data=dtrain, booster = "gblinear", max.depth=2, nthread = 2, nround=2, watchlist=watchlist, eval.metric = "error", eval.metric = "logloss", objective = "binary:logistic")
xgb.DMatrix.save(dtrain, "dtrain.buffer")
# to load it in, simply call xgb.DMatrix
dtrain2 <- xgb.DMatrix("dtrain.buffer")
bst <- xgb.train(data=dtrain2, max.depth=2, eta=1, nthread = 2, nround=2, watchlist=watchlist, objective = "binary:logistic")
label = getinfo(dtest, "label")
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label))/length(label)
print(paste("test-error=", err))
importance_matrix <- xgb.importance(model = bst)
print(importance_matrix)
xgb.plot.importance(importance_matrix = importance_matrix)
xgb.dump(bst, with.stats = T)
xgb.plot.tree(model = bst)
|
c2a47ae47fd9d71cb4b3f8fd1047054775e2f1d1
|
3e216eff033b857262d5392145d6e24df189dc8b
|
/R/myFUN_myGenotypeParametersDrought.R
|
487370ff37595c00567daeeb3cd640e4402571cb
|
[] |
no_license
|
agroflavian/GLEIS
|
5e7acc5c9b353262303f6f03e97d8bb757ac79c6
|
f73f2e18d79b203ff8a62a11bbdb8b0447990c31
|
refs/heads/master
| 2020-04-08T20:58:34.019204
| 2018-11-30T13:46:09
| 2018-11-30T13:46:09
| 159,723,638
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,459
|
r
|
myFUN_myGenotypeParametersDrought.R
|
#' A GLEIS function
#'
#' Calculates the slope, intercept, r2, and p value of the regression line (here for the drought treatment group)
#' @param Geno is a single Genotype
#' @return returns the above named values in a table
#' @keywords regression
#' @export
#' @examples
#' myFUN_myGenotypeParametersDrought()
#'
#'
myFUN_myGenotypeParametersDrought <- function(Geno){
tablePlot <- myGWC[[ent]]%>%
filter(treatment == "drought")%>%
filter(experiment %in% myMaxLcum[[ent]])%>%
filter(daytime== TRUE)%>%
filter(weight < 370)%>%
filter(gwc_dry > 0.5)%>%
filter(gwc_dry < 1.5)%>%
# filter(complete.cases(gwc_dry,ler_smooth))%>%
filter(complete.cases(gwc_dry,ler_smooth_diff_date))%>%
filter(ler_pred_date_smooth > 0)%>%
filter(experiment == Geno)
if(length(tablePlot$ler_smooth_diff_date) > 1){
# fit1 <- lm(formula = ler_smooth ~gwc_dry, data= tablePlot)
fit1 <- lm(formula = ler_smooth_diff_date ~ gwc_dry, data= tablePlot)
Pop <- Geno
Entry <- ent
Treatment <- "drought"
R2 <- signif(summary(fit1)$adj.r.squared, 5)
P <- signif(summary(fit1)$coef[2,4], 5)
Intercept <- signif(fit1$coef[[1]],5 )
Slope <- signif(fit1$coef[[2]], 5)
} else {
Pop <- Geno
Entry <- ent
Treatment <- "drought"
R2 <- "NA"
P <- "NA"
Intercept <- "NA"
Slope <- "NA"
}
return(c(Pop=Pop, Entry= Entry, Treatment=Treatment, R2=R2, P=P, Intercept=Intercept, Slope=Slope))
}
|
f6e1d9f3d273a83bf7cd2639d75c15beac18d56b
|
27cf2d56ebb117703873f45745a2da819b039467
|
/R/to_graph_S3methods.R
|
6cd2a52b47befad174c0ee1f999935ed385f8e4b
|
[] |
no_license
|
cran/cglasso
|
019589498b51044816bed94e93f2349fe588a236
|
4ccfb2d8b3eecd47905f5a50e556ba3ddebb496e
|
refs/heads/master
| 2023-01-20T23:58:16.552881
| 2023-01-17T16:00:15
| 2023-01-17T16:00:15
| 148,851,182
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 491
|
r
|
to_graph_S3methods.R
|
is.cglasso2igraph <- function(x) inherits(x, 'cglasso2igraph')
print.cglasso2igraph <- function(x, ...) print.listof(x, ...)
plot.cglasso2igraph <- function(x, type, ...){
op <- par(no.readonly = TRUE)
on.exit(par(op), add = TRUE)
if(missing(type)) type <- ifelse(is.null(x$Gxy), "Gyy", "both")
gr <- getGraph(x, type = type)
opt <- list(...)
# if(is.null(opt$layout)) opt$layout <- layout_with_kk
do.call(function(...) plot(gr, ...), opt)
invisible(NULL)
}
|
1a4f8ef0a34482d3ddb9f1dd02ca69a4525e502a
|
8c84e313fe5a6016cd92ce1ef18a8ff409f4940a
|
/functions/calc_kp_temp_pa.R
|
40817ce6f1ff5dc412b835417ddcf9f8315b520d
|
[] |
no_license
|
SmithEcophysLab/optimal_vcmax_R
|
56754d2321bb6314b8bcdca6870b0f6c71ca0112
|
72a050d8852456a271c023e380ee1c72be0bd3f7
|
refs/heads/master
| 2022-09-09T12:05:53.883354
| 2022-03-29T13:45:45
| 2022-03-29T13:45:45
| 156,727,566
| 16
| 14
| null | 2022-01-20T15:33:34
| 2018-11-08T15:27:30
|
R
|
UTF-8
|
R
| false
| false
| 337
|
r
|
calc_kp_temp_pa.R
|
calc_kp_temp_pa <- function(temp, z){ # Boyd 2015
	patm = calc_patm(z)
	rat = patm / calc_patm(0)
	R <- 8.31 # gas constant, J mol-1 K-1
	temp_k <- temp + 273
	kp_25 <- 13.9 # Pa CO2
	kp_25 <- kp_25 * rat
	Ea <- 36.3 # kJ mol-1
	Ea_j <- Ea * 1000
	kp <- kp_25 * exp(Ea_j * (temp_k - 298.15)/(298.15 * R * temp_k)) # Arrhenius response relative to 298.15 K
	return(kp)
}
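# Usage sketch (assumes calc_patm() from this repository is in scope):
# calc_kp_temp_pa(25, 0) # Kp (Pa CO2) at 25 C at sea level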
|
1587c98e8fe623141bdce182a9fc0a304647909b
|
e99ca694bfa7450b51b163106d09b3208fbd266b
|
/man/ConfigFileOpen.Rd
|
a3e9f9d617f917648586b27fffec83b9bc8129f7
|
[] |
no_license
|
andrejsim/s2dverification
|
03fd95f7e0185153a791fd4765da758a0adf8b11
|
e337c847f12642709462951b9f05842418e7081f
|
refs/heads/master
| 2020-07-10T12:39:41.441750
| 2016-02-17T11:19:18
| 2016-02-17T11:19:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,039
|
rd
|
ConfigFileOpen.Rd
|
\name{ConfigFileOpen}
\alias{ConfigFileOpen}
\alias{ConfigFileCreate}
\alias{ConfigFileSave}
\title{
Functions To Create Open And Save Configuration File
}
\description{
These functions help in creating, opening and saving configuration files.
}
\usage{
ConfigFileOpen(file_path, silent = FALSE, stop = FALSE)
ConfigFileCreate(file_path, confirm = TRUE)
ConfigFileSave(configuration, file_path, confirm = TRUE)
}
\arguments{
\item{file_path}{
Path to the configuration file to create/open/save.
}
\item{silent}{
Flag to activate or deactivate verbose mode.\cr
Defaults to FALSE (verbose mode on).
}
\item{configuration}{
Configuration object to save in a file.
}
\item{confirm}{
Flag to stipulate whether to ask for confirmation when saving a configuration file that already exists.\cr
Defaults to TRUE (confirmation asked).
}
\item{stop}{
TRUE/FALSE whether to raise an error if not all the mandatory default variables are defined in the configuration file.\cr
}
}
\details{
ConfigFileOpen() loads all the data contained in the configuration file specified as parameter 'file_path'.\cr
Returns a configuration object with the variables needed for the configuration file mechanism to work.\cr
This function is called from inside the Load() function to load the configuration file specified in 'configfile'.\cr
\cr
ConfigFileCreate() creates an empty configuration file and saves it to the specified path. It may be opened later with ConfigFileOpen() to be edited. Some default values are set when creating a file with this function, you can check these with ConfigShowDefinitions().\cr
\cr
ConfigFileSave() saves a configuration object into a file, which may then be used from Load().\cr
\cr
Two examples of configuration files can be found inside the 'inst/config/' folder in the package:
\itemize{
\item{BSC.conf: configuration file used at BSC-CNS. Contains location data on several datasets and variables.}
\item{template.conf: very simple configuration file intended to be used as pattern when starting from scratch.}
}
How the configuration file works:\cr
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
It contains one list and two tables.\cr
Each of these have a header that starts with '!!'. These are key lines and should not be removed or reordered.\cr
Lines starting with '#' and blank lines will be ignored.
The list should contain variable definitions and default value definitions.\cr
The first table contains information about experiments.\cr
The second table contains information about observations.\cr
Each table entry is a list of comma-separated elements.\cr
The first two are part of a key that is associated to a value formed by the other elements.\cr
The key elements are a dataset identifier and a variable name.\cr
The value elements are the dataset main path, dataset file path, the variable name inside the .nc file, a default suffix (explained below), and minimum and maximum values beyond which loaded data is deactivated.
Given a dataset name and a variable name, a full path is obtained concatenating the main path and the file path.\cr
Also the nc variable name, the suffixes and the limit values are obtained.
Any of the elements in the keys can contain regular expressions[1] that will cause matching for sets of dataset names or variable names.
The dataset path and file path can contain shell globbing expressions[2] that will cause matching for sets of paths when fetching the file in the full path.\cr
The full path can point to an OPeNDAP URL.
Any of the elements in the value can contain variables that will be replaced to an associated string.\cr
Variables can be defined only in the list at the top of the file. \cr
The pattern of a variable definition is\cr
VARIABLE_NAME = VARIABLE_VALUE\cr
and can be accessed from within the table values or from within the variable values as\cr
$VARIABLE_NAME$\cr
For example:\cr
FILE_NAME = tos.nc\cr
!!table of experiments\cr
ecmwf, tos, /path/to/dataset/, $FILE_NAME$\cr
There are some reserved variables that will offer information about the store frequency, the current startdate Load() is fetching, etc:\cr
$START_DATE$, $STORE_FREQ$, $MEMBER_NUMBER$\cr
for observations: $YEAR$, $MONTH$, $DAY$\cr
Additionally, from an element in an entry value you can access the other elements of the entry as:\cr
$EXP_NAME$, $VAR_NAME$, $EXP_MAIN_PATH$, $EXP_FILE_PATH$, \cr$NC_VAR_NAME$, $SUFFIX$, $VAR_MIN$, $VAR_MAX$\cr
The variable $SUFFIX$ is useful because it can be used to take part in the main or file path. For example: '/path/to$SUFFIX$/dataset/'.\cr
It will be replaced by the value in the column that corresponds to the suffix unless the user specifies a different suffix via the parameter 'suffixexp' or 'suffixobs'.\cr
This way the user is able to load two variables with the same name in the same dataset but with slight modifications, with a suffix anywhere in the path to the data that advices of this slight modification.
The entries in a table will be grouped in 4 levels of specificity:
\enumerate{
\item{
General entries:\cr
· the key dataset name and variable name are both a regular expression matching any sequence of characters (.*) that will cause matching for any pair of dataset and variable names\cr
Example: .*, .*, /dataset/main/path/, file/path, nc_var_name, suffix, var_min, var_max
}
\item{
Dataset entries:\cr
· the key variable name matches any sequence of characters\cr
Example: ecmwf, .*, /dataset/main/path/, file/path, nc_var_name, suffix, var_min, var_max
}
\item{
Variable entries:\cr
· the key dataset name matches any sequence of characters\cr
Example: .*, tos, /dataset/main/path/, file/path, nc_var_name, suffix, var_min, var_max
}
\item{
Specific entries:\cr
· both key values are specified\cr
Example: ecmwf, tos, /dataset/main/path/, file/path, nc_var_name, suffix, var_min, var_max
}
}
Given a pair of dataset name and variable name for which we want to know the full path, all the rules that match will be applied from more general to more specific.\cr
If there is more than one entry per group that match a given key pair, these will be applied in the order of appearance in the configuration file (top to bottom).
An asterisk (*) in any value element will be interpreted as 'leave it as is or take the default value if not yet defined'.\cr
The default values are defined in the following reserved variables:\cr
$DEFAULT_EXP_MAIN_PATH$, $DEFAULT_EXP_FILE_PATH$, $DEFAULT_NC_VAR_NAME$, $DEFAULT_OBS_MAIN_PATH$, $DEFAULT_OBS_FILE_PATH$, $DEFAULT_SUFFIX$, $DEFAULT_VAR_MIN$, $DEFAULT_VAR_MAX$, \cr
$DEFAULT_DIM_NAME_LATITUDES$, $DEFAULT_DIM_NAME_LONGITUDES$, \cr
$DEFAULT_DIM_NAME_MEMBERS$\cr
Trailing asterisks in an entry are not mandatory. For example\cr
ecmwf, .*, /dataset/main/path/, *, *, *, *, *\cr
will have the same effect as\cr
ecmwf, .*, /dataset/main/path/
A double quote only (") in any key or value element will be interpreted as 'fill in with the same value as the entry above'.
}
\value{
ConfigFileOpen() returns a configuration object with all the information for the configuration file mechanism to work.\cr
ConfigFileSave() returns TRUE if the file has been saved and FALSE otherwise.\cr
ConfigFileCreate() returns nothing.
}
\seealso{ConfigApplyMatchingEntries, ConfigEditDefinition, ConfigEditEntry, ConfigFileOpen, ConfigShowSimilarEntries, ConfigShowTable}
\references{
[1] https://stat.ethz.ch/R-manual/R-devel/library/base/html/regex.html\cr
[2] http://tldp.org/LDP/abs/html/globbingref.html
}
\author{
History:\cr
0.1 - 2015-05 (N. Manubens, \email{nicolau.manubens at ic3.cat}) - First version
1.0 - 2015-11 (N. Manubens, \email{nicolau.manubens at ic3.cat}) - Removed grid column and storage formats
}
\examples{
# Create an empty configuration file
config_file <- paste0(tempdir(), "/example.conf")
ConfigFileCreate(config_file, confirm = FALSE)
# Open it into a configuration object
configuration <- ConfigFileOpen(config_file)
# Add an entry at the bottom of 4th level of file-per-startdate experiments
# table which will associate the experiment "ExampleExperiment2" and variable
# "ExampleVariable" to some information about its location.
configuration <- ConfigAddEntry(configuration, "experiments",
"last", "ExampleExperiment2", "ExampleVariable",
"/path/to/ExampleExperiment2/",
"ExampleVariable/ExampleVariable_$START_DATE$.nc")
# Edit the entry to generalize it for any variable: the variable name becomes
# the regular expression '.*', so the file path must carry $VAR_NAME$ instead.
configuration <- ConfigEditEntry(configuration, "experiments", 1,
var_name = ".*",
file_path = "$VAR_NAME$/$VAR_NAME$_$START_DATE$.nc")
# Now apply matching entries for variable and experiment name and show the
# result
match_info <- ConfigApplyMatchingEntries(configuration, 'tas',
exp = c('ExampleExperiment2'), show_result = TRUE)
# Finally save the configuration file.
ConfigFileSave(configuration, config_file, confirm = FALSE)
}
\keyword{datagen}
|
16a91bd75624c6354c62288fac8fcd114cd49999
|
cb9094fb90d3b632bbb8173f55b04709624dd091
|
/man/preprocess_dt.Rd
|
4c14628fdeaa088fb214a45043205844551736d3
|
[] |
no_license
|
tw1118/myutilitypackage
|
c28d8e4a9f99e14c5a4beb9ceb098a4f8bc82bd9
|
bb499ea7bd2b9bc1a9e827bfe08a59f893a7ed79
|
refs/heads/master
| 2021-05-11T15:33:16.919875
| 2018-01-29T23:41:36
| 2018-01-29T23:41:36
| 117,732,444
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 387
|
rd
|
preprocess_dt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocess_dt.R
\name{preprocess_dt}
\alias{preprocess_dt}
\title{Preprocessing data}
\usage{
preprocess_dt(df)
}
\arguments{
\item{df}{a data table}
}
\value{
a data table
}
\description{
This function eliminates zero-variance columns and unique-identifier columns.
This is a preliminary step for fe_cluster.
}
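\examples{
# A hedged sketch; the toy table below is purely illustrative:
# dt <- data.table::data.table(id = 1:5, constant = 1, value = rnorm(5))
# preprocess_dt(dt) # drops `id` (unique identifier) and `constant` (zero variance)
}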
|
7dda9ee9171350f65b97f966c02987bf25a0d245
|
c0e766a6a57e3c5c32f8b0afe130b8df66e6dbf9
|
/rsellPoshmark/R/PM_Sales_Upload_Format_Summary.R
|
065c2eccbd5ef86cc58ece77d08a59e23783d689
|
[] |
no_license
|
t2tech-corp/Rsell-Packages
|
b450fec180754aa9cf0cf3ab6b369c74c57b7e70
|
047a2348650e5a2ee0bc52500824a34f16167056
|
refs/heads/main
| 2023-03-03T00:22:15.006720
| 2021-02-14T22:58:12
| 2021-02-14T22:58:12
| 329,474,392
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,942
|
r
|
PM_Sales_Upload_Format_Summary.R
|
#' Format Poshmark Upload Panel
#'
#' This function formats the Poshmark Upload Panel.
#'
#' Requires: shiny
#'
#' @param sales_activity Poshmark Sales Activity Table
#' @return alert_text HTML Formatted Panel
#' @export
#' @examples
#'
PM_Sales_Upload_Format_Summary <- function(sales_activity) {
summary_stats <- PM_Sales_Upload_Summary_Stats(sales_activity)
###
alert_text <- wellPanel(style = "background: #fbe7e9; border-color: #565455",
tags$div(
style = "text-align: center;",
tags$img(src = logo_source, width = "200px", align = "center"),
tags$span(tags$b(tags$br(),
tags$h3("Sales Activity Report for: ", summary_stats$user_id),
tags$h4("For Order Dates: ", summary_stats$min_date, " thru ", summary_stats$max_date),
tags$hr(),
tags$h4("Total Order Amount: ", summary_stats$tot_sale),
tags$h4("Total Net Earnings: ", summary_stats$net_earn),
tags$hr(),
tags$h4("Total Items Sold: ", summary_stats$tot_items),
tags$h5("Total Offered Items: ", summary_stats$tot_off),
tags$h5("Total Bundles Items: ", summary_stats$tot_bun),
tags$h5("Total New With Tags: ", summary_stats$tot_nwt),
tags$hr()
)),
tags$div(
style = "text-align: center;",
tags$div(tags$b("Click Submit to Load Data"), style = "display:inline-block; position:relative; right: 30px;"),
tags$div(tags$b("Click Cancel to Skip File"), style = "display:inline-block; position:relative; left: 30px;")
),
tags$br(),
tags$div(
style = "text-align: center;",
actionButton("pmsubmit", "Submit", style = "color: white; background-color: #565455; border-color: #565455;", width = '25%'),
tags$div(style = "display:inline-block; width: 90px", HTML("<br>")),
actionButton("pmcancel", "Cancel", style = "color: white; background-color: #d91e2f; border-color: #d91e2f;", width = '25%')
)
))
###
return(alert_text)
}
|
9ffae06d276125550a33f7dba23347001766a86b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/solrium/examples/update_json.Rd.R
|
fd9666bfb28bea817598c067422709d3a18bd2d9
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,097
|
r
|
update_json.Rd.R
|
library(solrium)
### Name: update_json
### Title: Update documents with JSON data
### Aliases: update_json
### ** Examples
## Not run:
##D # start Solr: bin/solr start -f -c -p 8983
##D
##D # connect
##D (conn <- SolrClient$new())
##D
##D # Add documents
##D file <- system.file("examples", "books2.json", package = "solrium")
##D cat(readLines(file), sep = "\n")
##D conn$update_json(files = file, name = "books")
##D update_json(conn, files = file, name = "books")
##D
##D # Update commands - can include many varying commands
##D ## Add file
##D file <- system.file("examples", "updatecommands_add.json",
##D package = "solrium")
##D cat(readLines(file), sep = "\n")
##D conn$update_json(file, "books")
##D
##D ## Delete file
##D file <- system.file("examples", "updatecommands_delete.json",
##D package = "solrium")
##D cat(readLines(file), sep = "\n")
##D conn$update_json(file, "books")
##D
##D # Add and delete in the same document
##D ## Add a document first, that we can later delete
##D ss <- list(list(id = 456, name = "cat"))
##D conn$add(ss, "books")
## End(Not run)
|
a35dfd612e19030f5ba992a24dc0aabf512ac573
|
7d5d8492c2d88b88bdc57e3c32db038a7e7e7924
|
/robustness/junk/spatial_consistency.R
|
b2389d550e1a363131e72d2293c012a75ab5b8ed
|
[] |
no_license
|
CIAT-DAPA/dapa-climate-change
|
80ab6318d660a010efcd4ad942664c57431c8cce
|
2480332e9d61a862fe5aeacf6f82ef0a1febe8d4
|
refs/heads/master
| 2023-08-17T04:14:49.626909
| 2023-08-15T00:39:58
| 2023-08-15T00:39:58
| 39,960,256
| 15
| 17
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,048
|
r
|
spatial_consistency.R
|
#check why there is something weird going on between yield and meteorology data
#answer: CDO shifted meteorology by 1.125/2 = 0.5625 to the left.
#This was presumably when cutting the data to African domain.
#Hence just a shift to the right will fix it.
#Make sure the displacement is accounted for in any data extraction
library(raster) # required for raster(), shift(), resample(), and writeRaster() below
wd <- "~/Leeds-work/quest-for-robustness/data"
setwd(wd)
#obs
rs1 <- raster("./meteorology/baseline_climate/Rainf_daily_WFDEI_GPCC/afr_Rainf_daily_WFDEI_GPCC_197901.nc")
rs2 <- raster("./meteorology/baseline_climate/Rainf_daily_WFD_GPCC/afr_Rainf_daily_WFD_GPCC_195001.nc")
#gcms
rs3 <- raster("./meteorology/future_climate/gfdl-esm2m/hist/afr_pr_bced_1960_1999_gfdl-esm2m_hist_1950.nc")
#sowing date
rs4 <- raster("./crop_calendar_sacks/major_maize_harvest.end.tif")
#yield data
rs5 <- raster("./yield_data_maize/descriptive_stats/cv_ModelYld500.tif")
xyFromCell(rs1,1)
xyFromCell(rs2,cellFromXY(rs2,c(-20.25,19.6875)))
xyFromCell(rs3,cellFromXY(rs3,c(-20.25,19.6875)))
xyFromCell(rs4,cellFromXY(rs4,c(-20.25,19.6875)))
xyFromCell(rs5,cellFromXY(rs5,c(-20.25,19.6875)))
rs1_1 <- shift(rs1,x=0.5625,y=0)
rs2_1 <- shift(rs2,x=0.5625,y=0)
rs3_1 <- shift(rs3,x=0.5625,y=0)
xyFromCell(rs1_1,1)
xyFromCell(rs2_1,cellFromXY(rs2_1,c(-19.6875,19.6875)))
xyFromCell(rs3_1,cellFromXY(rs3_1,c(-19.6875,19.6875)))
xyFromCell(rs4,cellFromXY(rs4,c(-19.6875,19.6875)))
xyFromCell(rs5,cellFromXY(rs5,c(-19.6875,19.6875)))
writeRaster(rs1,"wfdei.tif",format="GTiff")
writeRaster(rs2,"wfd.tif",format="GTiff")
writeRaster(rs3,"gcm.tif",format="GTiff")
writeRaster(rs4,"sow.tif",format="GTiff")
writeRaster(rs5,"yield.tif",format="GTiff")
writeRaster(rs1_1,"wfdei_1.tif",format="GTiff")
writeRaster(rs2_1,"wfd_1.tif",format="GTiff")
writeRaster(rs3_1,"gcm_1.tif",format="GTiff")
rs4_1 <- shift(rs4,x=-0.5625,y=0)
writeRaster(rs4_1,"sow_1.tif",format="GTiff")
rs4_2 <- resample(rs4,rs1,method="ngb")
writeRaster(rs4_2,"sow_2.tif",format="GTiff")
rs5_1 <- shift(rs5,x=-0.5625,y=0)
writeRaster(rs5_1,"yield_1.tif",format="GTiff")
|
8755f35efc4bdc0261976f36bd14fe87ca2ad0d5
|
3d4379cd642047d6edcf6f780820b058c49d06ac
|
/Predicting_Pulsar.R
|
2234c9b4e53e8d9f4a18cd1c1abd60100d026a0f
|
[] |
no_license
|
VishakhaT/Predicting-the-presence-of-Pulsar
|
d2e530feed3c14fc4e6abfeae8e5290a0599434f
|
24f263f5f042909252cc31eb62a3959a8a4654c2
|
refs/heads/main
| 2023-02-01T00:42:04.541112
| 2020-12-07T03:11:49
| 2020-12-07T03:11:49
| 319,187,886
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,473
|
r
|
Predicting_Pulsar.R
|
# ---
# title: 'STAT656: Final Project'
# subtitle: 'Predicting a Pulsar'
# output:
#   html_document: default
# ---
packs = c('dplyr','ggplot2','AppliedPredictiveModeling', 'caret','RANN','corrplot', 'MASS', 'pROC', 'glmnet', 'readr', 'shiny')
lapply(packs,require,character.only=TRUE)
# Define UI for application that outputs Confusion Matrix
ui <- fluidPage(
# Application title
titlePanel("Comparing Classification Methods"),
# Sidebar with a slider input for number of bins
sidebarLayout(
sidebarPanel(
radioButtons(inputId = "methodType",
label = "Classification Method:",
choiceNames = c('Logistic','Elastic Net','K-Nearest Neighbors','Support Vector Machine','Flexible Discriminant Aanalysis'),
                   choiceValues = c('logit','enet','knn','svm','fda'))
),
# Show Confusion Matrix Table
mainPanel(
verbatimTextOutput(outputId = "conMatrix")
)
)
)
# Data Pre-Processing
pulsar = read.csv("data/pulsar_stars.csv")
Y = make.names(pulsar$target_class)
X = dplyr::select(pulsar, -target_class)
set.seed(1)
trainSplit = createDataPartition(y = Y, p = 0.8, list = FALSE)
Ytrain = Y[trainSplit]
Xtrain = X[trainSplit,]
XtrainMat = as.matrix(Xtrain)
Ytest = Y[-trainSplit]
Xtest = X[-trainSplit,]
XtestMat = as.matrix(Xtest)
Ytest = relevel(as.factor(Ytest), ref = 'X1')
Ytrain = relevel(as.factor(Ytrain), ref = 'X1')
# Define server logic required to output Confusion Matrix
server = function(input, output) {
output$conMatrix = renderPrint({
if(input$methodType == 'logit'){
method = 'glm'
tuneGrid = NULL
trControl = trainControl(method = 'none')
metric = 'Kappa'
}
if(input$methodType == 'enet'){
method = 'glmnet'
trControl = trainControl(method = "cv", number = 5)
tuneGrid = expand.grid('alpha'=c(.5, 1),'lambda' = seq(0.0001, .01, length.out = 10))
metric = 'Kappa'
}
if(input$methodType == 'knn'){
method = 'knn'
tuneGrid = expand.grid(k = c(1:30))
trControl = trainControl(method = 'cv', number = 5)
metric = 'Kappa'
}
if(input$methodType == 'svm'){
method = 'svmLinear'
tuneGrid = expand.grid(C = c(.001, .01, .1, 1, 10, 50))
trControl = trainControl(method = 'cv', number = 5)
metric = 'Kappa'
}
if(input$methodType == 'fda'){
method = 'fda'
tuneGrid = expand.grid(degree = 1:4, nprune = c(10:30))
trControl = trainControl(method = 'cv', number = 5, classProbs = TRUE)
metric = 'Kappa'
}
outTrain = train(Xtrain, Ytrain, method = method, tuneGrid = tuneGrid, trControl = trControl, metric = metric)
YhatTest = predict(outTrain, Xtest, type = 'raw')
(confusionMatrix(reference = Ytest, data = YhatTest))
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
f13f5a5dc25d7a23bb71e16e2f24bf9d67af0c31
|
b13b7c027d119e97012dcbe972cb68cfcd71d8e5
|
/intron_by_coord.r
|
3205ddf180ade78ce19443a385ba081471210414
|
[] |
no_license
|
dhtc/FISH-probe-design
|
197e716a6a416a70fc4f5cbbf8cf1e9258e51fdc
|
3be9a312e84ee30e4e890b0028b2d57c8a27e127
|
refs/heads/master
| 2022-01-05T10:19:01.596164
| 2018-07-24T09:22:41
| 2018-07-24T09:22:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,481
|
r
|
intron_by_coord.r
|
##############################################################
# Extract introns for a list of genes using exon coordinates #
##############################################################
#if biomaRt and the corresponding genome packages are not downloaded yet
#install.packages('Rcpp')
#source("https://bioconductor.org/biocLite.R")
#biocLite("biomaRt")
#biocLite('BSgenome.Mmusculus.UCSC.mm10')
#biocLite("BSgenome.Hsapiens.NCBI.GRCh38")
#or
#biocLite("BSgenome.Hsapiens.UCSC.hg38")
#the csv file with the genes of interest
#it should contain a column with the ensembl gene ids and the corresponding header should be: Ensembl Gene ID
#the species: m for mouse, h for human
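#e.g. a minimal input file (the ID shown is just an illustration):
#Ensembl Gene ID
#ENSG00000204531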
setwd("/Users/GG/Desktop/Kata/pipeline_python_part/pou5f1")
lst = 'pou5f1.csv'
sp = 'h'
library(biomaRt)
library(GenomicRanges)
library(GenomicFeatures)
library(BSgenome)
library(Biostrings)
if(sp == 'm'){
  library(BSgenome.Mmusculus.UCSC.mm10)
  genome <- Mmusculus # genome object used later by getSeq()
  mart<-useEnsembl("ensembl","mmusculus_gene_ensembl")
}else if(sp == 'h'){
  library(BSgenome.Hsapiens.UCSC.hg38)
  genome <- Hsapiens # genome object used later by getSeq()
  mart<-useEnsembl("ensembl","hsapiens_gene_ensembl")
}
#import the gene list of interest
gene_list=read.delim(lst,sep=',',header = T)
gene_ids = gene_list$Ensembl.Gene.ID
#get the exon coordinates
exoncoord=getBM(attributes = c('ensembl_gene_id','ensembl_exon_id','chromosome_name','exon_chrom_start','exon_chrom_end'),
filters='ensembl_gene_id',values=gene_ids,mart=mart,bmHeader = TRUE)
#sort by chromosome name and exon coordinate
g_ec=exoncoord[order(exoncoord$`Chromosome/scaffold name`),]
e_ec=g_ec[order(g_ec$`Exon region start (bp)`),]
#get the start and end cordinate for the genes
generange=getBM(attributes = c("ensembl_gene_id","chromosome_name","start_position","end_position"),
filters = "ensembl_gene_id",values = gene_ids,mart = mart,bmHeader = T)
#group by exon coordinate - to have overlapping exons
gname=e_ec$`Gene stable ID`[1]
e_strt = e_ec$`Exon region start (bp)`[1]
lst_end = e_ec$`Exon region end (bp)`[1]
for(gene in 1:dim(e_ec)[1]){
#find the exon with the max end coordinate among overlapping ones
e_end=e_ec$`Exon region end (bp)`[gene]
if(gname==e_ec$`Gene stable ID`[gene]){
e_strt =e_ec$`Exon region start (bp)`[gene]
if(e_strt < lst_end){
if(e_end > lst_end){
lst_end = e_end
}
}else{
while(e_strt > lst_end){
#get intron seq with getSeq function by coordinates
iend = e_ec$`Exon region start (bp)`[gene]
      intron_seq=getSeq(genome,paste("chr",e_ec$`Chromosome/scaffold name`[gene],sep = ""),
start=lst_end, end=iend)
write(paste(paste(">",e_ec$`Gene stable ID`[gene],"_chr",e_ec$`Chromosome/scaffold name`[gene],":",
lst_end,"-",iend,sep=""),
as.character(intron_seq),sep="\n"),
file = paste(as.character(gname),'.fa',sep=""),append = T,sep = "\n")
write(paste(paste("chr",e_ec$`Chromosome/scaffold name`[gene],sep=""),lst_end,iend,e_ec$`Gene stable ID`[gene],sep="\t"),
file="bed_version.txt",append=T,sep="\n")
e_strt =e_ec$`Exon region start (bp)`[gene]
lst_end=e_ec$`Exon region end (bp)`[gene]
rm(intron_seq)
}
}
}else{
gname=e_ec$`Gene stable ID`[gene]
e_strt = e_ec$`Exon region start (bp)`[gene]
lst_end = e_ec$`Exon region end (bp)`[gene]
}
}
|
a71c6d419cebc6a5af4e6b1de752edf4719b66e4
|
fbe57536cc2d84e69a5bf799c88fcb784e853558
|
/R/spc.controlviolation.nelson.1984.test5.zone.a.2.outof.3.R
|
626bddbe9f3d665c4ea7b722919234376422c6b3
|
[
"MIT"
] |
permissive
|
burrm/lolcat
|
78edf19886fffc02e922b061ce346fdf0ee2c80f
|
abd3915791d7e63f3827ccb10b1b0895aafd1e38
|
refs/heads/master
| 2023-04-02T11:27:58.636616
| 2023-03-24T02:33:34
| 2023-03-24T02:33:34
| 49,685,593
| 5
| 2
| null | 2016-10-21T05:14:49
| 2016-01-15T00:56:55
|
R
|
UTF-8
|
R
| false
| false
| 1,787
|
r
|
spc.controlviolation.nelson.1984.test5.zone.a.2.outof.3.R
|
spc.controlviolation.nelson.1984.test5.zone.a.2.outof.3 <- function(
chart.series = NA,
center.line = NA,
control.limits.ucl = NA,
zone.a.upper = NA,
zone.ab.upper = NA,
zone.bc.upper = NA,
control.limits.lcl = NA,
zone.a.lower = NA,
zone.ab.lower = NA,
zone.bc.lower = NA,
point.count = 2,
outof = 3,
...
) {
zones.out <- spc.controlviolation.zones.classify(
chart.series = chart.series,
center.line = center.line,
control.limits.ucl = control.limits.ucl,
zone.a.upper = zone.a.upper,
zone.ab.upper = zone.ab.upper,
zone.bc.upper = zone.bc.upper,
control.limits.lcl = control.limits.lcl,
zone.a.lower = zone.a.lower,
zone.ab.lower = zone.ab.lower,
zone.bc.lower = zone.bc.lower,
...
)
ret <- rep(F, length(chart.series))
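  # Zone codes come from spc.controlviolation.zones.classify(); "UU"/"UA" appear to
  # denote beyond-UCL / upper zone A points, with "LL"/"LA" the lower-side analogues.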
criteria.upper <- c("UU","UA")
criteria.lower <- c("LL","LA")
if (length(zones.out) >= point.count) {
if (all(zones.out[1:point.count] %in% criteria.upper) |
all(zones.out[1:point.count] %in% criteria.lower)
) {
ret[1:point.count] <- T
}
if (length(zones.out) >= outof) {
subtract.len <- outof - 1
for (i in outof:length(zones.out)) {
if (zones.out[i] %in% criteria.upper & sum(ifelse(zones.out[(i-subtract.len):i] %in% criteria.upper, 1, 0 )) >= point.count) {
ret[(i-subtract.len):i] <- T
} else if (zones.out[i] %in% criteria.lower & sum(ifelse(zones.out[(i-subtract.len):i] %in% criteria.lower,1,0)) >= point.count) {
ret[(i-subtract.len):i] <- T
}
}
}
}
ret[which(is.na(ret))] <- F
ret
}
|
9f7922e5afbd5bcc8fcaf1544762100880fca4ff
|
5539ea2e76cc320979475135f75fc6a3644fee1f
|
/corr5.R
|
18d4b90909aa9dce3bc14138c38a198456734ead
|
[] |
no_license
|
Georman73/datasciencecoursera
|
e331620f2431d52326e575df3995e105379de4cb
|
8398d9fb1cb1e934227604a1ddbb2ae5d6826923
|
refs/heads/master
| 2021-01-19T17:07:50.034258
| 2017-07-20T22:54:49
| 2017-07-20T22:54:49
| 88,307,184
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 498
|
r
|
corr5.R
|
corr5 <- function(directory, threshold = 0) {
  complete_table <- complete(directory, 1:332)
nobs <- complete_table$nobs
idnums <- complete_table$id[nobs > threshold]
corrdata <- numeric()
fileList <- list.files(path = directory, pattern = ".csv", full.names = TRUE)
j <- 1
for(i in idnums) {
polldata <- read.csv(fileList[i])
corrdata[j] <- cor(polldata$sulfate, polldata$nitrate, use="complete.obs")
j <- j + 1
}
result <- corrdata
return(result)
}
|
89ce3a0bc311e320da2ce8bd94da45dd439f20f9
|
de1c1830440268dd95e74a97a56ca5692f418667
|
/dateTime.R
|
027fee64897c085860c2d3777877209676b2e10f
|
[] |
no_license
|
pennyscalper/cs229
|
fba021ef5b884bcc6c617fa6f07eb63ca0ce2ff8
|
cb82c92f28a446da979eb438210e420d0ce11263
|
refs/heads/master
| 2020-04-11T14:52:46.394484
| 2018-12-15T05:11:56
| 2018-12-15T05:11:56
| 161,871,968
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,342
|
r
|
dateTime.R
|
library(data.table) # these helpers use data.table syntax throughout
dtime.toDate <- function(date) {
if(class(date) == 'Date') {
return(date)
}
dt <- data.table(dateChar=as.character(date))
if(any(dt[, grepl('^(19|20)[0-9]{6}$', dateChar)])) {
dt[, date:=as.Date(dateChar, format='%Y%m%d'), dateChar]$date
} else if(any(dt[, grepl('^(19|20)[0-9]{2}-[0-9]{2}-[0-9]{2}$', dateChar)])) {
dt[, date:=as.Date(dateChar, format='%Y-%m-%d'), dateChar]$date
} else if(any(dt[, grepl('^(19|20)[0-9]{2}/[0-9]{2}/[0-9]{2}$', dateChar)])) {
dt[, date:=as.Date(dateChar, format='%Y/%m/%d'), dateChar]$date
} else if(any(dt[, grepl('^[0-9]{2}-[0-9]{2}-(19|20)[0-9]{2}$', dateChar)])) {
dt[, date:=as.Date(dateChar, format='%m-%d-%Y'), dateChar]$date
} else if(any(dt[, grepl('^[0-9]{2}/[0-9]{2}/(19|20)[0-9]{2}$', dateChar)])) {
dt[, date:=as.Date(dateChar, format='%m/%d/%Y'), dateChar]$date
} else {
stop('unknown date format')
}
}
dtime.dateToStr <- function(date) {
if(class(date) != 'Date') {
date <- dtime.toDate(date)
}
data.table(date=date)[, dateChar := format(date, '%Y%m%d'), date]$dateChar
}
dtime.dateToInt <- function(date) {
if(class(date) != 'Date') {
date <- dtime.toDate(date)
}
data.table(date=date)[, dateInt := as.integer(format(date, '%Y%m%d')), date]$dateInt
}
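# Usage sketch (illustrative values):
# dtime.toDate("20200115")                # Date of 2020-01-15
# dtime.dateToStr(as.Date("2020-01-15"))  # "20200115"
# dtime.dateToInt("2020/01/15")           # 20200115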
|
8ec856728ebf647fd072de21808c84bb5e9b2198
|
abf71d8505eb53c09420c88e5dd2498df3867540
|
/R/GPS_trip.R
|
59ddaa866360d18abe3e07554c1d8c3e70aaf16b
|
[
"MIT"
] |
permissive
|
MiriamLL/sula
|
260762ecd7f0788b1dfbac48a923265785fcf630
|
39a5ba8a0bd5a28151747b5f5586d603c10e20f9
|
refs/heads/main
| 2023-08-04T18:55:55.895322
| 2023-07-21T14:42:09
| 2023-07-21T14:42:09
| 354,821,022
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 524
|
r
|
GPS_trip.R
|
#' Tracks of bobo/piquero/kena Sula dactylatra on Rapa Nui
#'
#' Track data for Sula dactylatra collected with GPS on Rapa Nui
#' Includes 1038 observations of 7 variables
#' Includes data from 1 individual
#'
#' @docType data
#'
#' @usage data(GPS_trip)
#'
#' @format A csv in rda format; contains data from five individuals.
#' The columns are Latitude, Longitude, DateGMT, TimeGMT, ID, tStampt and trip.
#'
#' @keywords datasets
#'
#' @references Lerma et al. (2020)
#'
#'
#' @examples
#' data(GPS_trip)
"GPS_trip"
|
e0f234e5d7c7764be244416909071e932fe3693e
|
841e797ed68081a5c6941ae67b36e8b1b516872b
|
/phaede/server.R
|
b3415aebfd04cf1423353ad09f2de75e6a6bd27f
|
[
"MIT"
] |
permissive
|
ai-cfia/PHAEDE
|
69f6402df34d54ad9e85a35ddedc32605c3052d3
|
ab89cf8136ed457b58b14bb8bf55a00d088ffc2f
|
refs/heads/master
| 2021-01-06T15:05:54.128467
| 2019-12-23T18:53:33
| 2019-12-23T18:53:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,886
|
r
|
server.R
|
library(shiny)
library(fastrtext)
model_fasttext <- load_model("model_fasttext.bin")
percent <- function(x, digits = 2, format = "f", ...) {
paste0(formatC(100 * x, format = format, digits = digits, ...), "%")
}
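# e.g. percent(0.1234) returns "12.34%"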
shinyServer(
function(input, output) {
predict_fasttext <- reactive({
predict(model_fasttext,
sentences = tolower(input$list_title_description))
})
    output$result <- renderUI({
      if(!is.null(input$list_title_description)) {
        predictions <- predict_fasttext()
        shinyjs::enable("download")
        lapply(1:length(input$list_title), function(i) {
          # green for "Low" risk predictions, red otherwise
          risk_color <- if(names(predictions[[i]]) == "Low") "#008000" else "#FF0000"
          tags$div(
            tags$h4(
              tags$a(input$list_title[i],
                     href = input$list_url[i],
                     target="_blank")
            ),
            tags$p(
              HTML(
                paste("<font color=\"", risk_color, "\"><b>",
                      names(predictions[[i]]),
                      " ",
                      percent(predictions[[i]]),
                      "</b></font>", sep = "")
              )
            ),
            tags$p(input$list_description[i]),
            # no horizontal rule after the last entry
            if(i < length(input$list_title)) tags$hr()
          )
        })
      }
    })
output$download <- downloadHandler(
filename = function() {
paste(format(Sys.time(), "%Y%m%d%H%M%S"), ".csv", sep = "")
},
content = function(file) {
predictions <- predict_fasttext()
downloadData <- data.frame("Title" = input$list_title,
"URL" = input$list_url,
"Description" = input$list_description,
"Risk" = factor(sapply(predictions, names)))
write.csv(downloadData, file, row.names = FALSE)
}
)
}
)
|
c24963f6bbae84a4a76356af3692821f5bde85e0
|
5868f3fae67e3504b730089d49ef27ce16cf7994
|
/Machine Learning/Homework 2/NHollingsworth_Homework2.R
|
ad7f4fdd5a667fd80b371dd490035ddc21b40b3c
|
[] |
no_license
|
nhollingsworth09/Spring-2019
|
655bc626a347e838c31f63999e381e5f83d48102
|
dfb48c049428e0656f99b2e3d3ec036b6ccfaa8f
|
refs/heads/master
| 2020-04-18T05:22:54.134774
| 2019-08-26T23:48:47
| 2019-08-26T23:48:47
| 167,276,997
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,032
|
r
|
NHollingsworth_Homework2.R
|
rm(list=ls())
setwd("C:/Users/nholl/Dropbox/SPRING 2019/MSA 8150 - Machine Learning/Homework/HW2")
#install.packages('Metrics')
#install.packages('caret')
library(Metrics)
library(ISLR)
library(boot)
library(MASS)
library(class)
library(caret)
################
## Question 1 ##
################
# Part (a)
# Part (b)
values <- c(0.3057, 0.7227, 1.1566, 2.8622, 1.3588, 0.5377, 0.4336, 0.3426, 3.5784, 2.7694)
keys <- 1:10
data <- data.frame(keys,values)
theta.ml <- (4*length(values))/sum(values)
length(data[,1])
# Boot Function
#######################################################
boot.fn = function(df, index){
return(4*length(df[,1])/sum(df[index,2]))
}
######################################################
set.seed(1)
boot.stats <- boot(data, boot.fn, 50000)
print(boot.stats)
# Part (c)
set.seed(1)
boot.stats2 <- boot(data, boot.fn, 1000)
print(boot.stats2)
################
## Question 3 ##
################
data_heart <- read.csv("HeartData.csv")
#head(data_heart)
#str(data_heart)
train <- data_heart[1:200,]
test <- data_heart[201:297,]
#########
# Part A
#########
################## Linear Regression
features <- colnames(train)
features <- features[features!='num']
glm.fit <- glm(reformulate(features, response = 'num'), data = train, family = 'binomial')
summary(glm.fit)
glm.coef <- summary(glm.fit)$coef
glm.coef[order(glm.coef[,4], decreasing = TRUE),]
#Accuracy
glm.prob <- predict(glm.fit, newdata = test, type = 'response')
#length(glm.prob)
glm.pred <- rep('0', 97)
glm.pred[glm.prob > 0.5] <- '1'
glm.pred
mean(glm.pred == test$num)
accuracy(test$num, glm.pred)
########### LDA ############
lda.fit <- lda(num ~ ., data = train)
lda.fit
#Accuracy
lda.pred <- predict(lda.fit, newdata = test)
lda.class <- lda.pred$class
accuracy(test$num, lda.class)
########## QDA #############
qda.fit <- qda(num~.,data = train)
qda.fit
#Accuracy
qda.class <- predict(qda.fit, newdata = test)$class
accuracy(test$num, qda.class)
########### KNN ############
n <- c(1,5,10)
knn_optimum <- function (train, test, n) {
  models <- data.frame(matrix(ncol = 2, nrow = length(n)))
  names(models) <- c('K-NN', 'Accuracy')
  row <- 1
  for (i in n){
    # exclude the response `num` from the predictors and coerce any factor
    # columns to numeric codes, since class::knn() requires numeric features
    knn.fit <- knn(data.matrix(train[, names(train) != 'num']),
                   data.matrix(test[, names(test) != 'num']),
                   train$num, k = i)
    models[row,1] <- i
    models[row,2] <- accuracy(test$num, knn.fit)
    row <- row + 1
  }
  print(models)
}
set.seed(1)
knn_optimum(train, test, n)
plot(knn_optimum(train, test, n))
abline(v = 5, lty=2)
########## Evaluation #############
###########
# Part B
###########
f <- c("sex", "cp", "fbs", "slope", "exang", "ca", "thal")
to_factor <- function(df, features){
for (i in features){
df[,i] <- factor(df[,i])
}
  df
}
train.cat <- to_factor(train, f)
#str(train.cat)
test.cat <- to_factor(test, f)
#str(test.cat)
################## Linear Regression
features <- colnames(train.cat)
features <- features[features!='num']
glm.fit <- glm(reformulate(features, response = 'num'), data = train.cat, family = 'binomial')
summary(glm.fit)
glm.coef <- summary(glm.fit)$coef
glm.coef[order(glm.coef[,4], decreasing = TRUE),]
#Accuracy
glm.prob <- predict(glm.fit, newdata = test.cat, type = 'response')
#length(glm.prob)
glm.pred <- rep('0', 97)
glm.pred[glm.prob > 0.5] <- '1'
glm.pred
mean(glm.pred == test.cat$num)
accuracy(test.cat$num, glm.pred)
########### LDA ############
lda.fit <- lda(num ~ ., data = train.cat)
lda.fit
#Accuracy
lda.pred <- predict(lda.fit, newdata = test.cat)
lda.class <- lda.pred$class
accuracy(test$num, lda.class)
########## QDA #############
qda.fit <- qda(num~.,data = train.cat)
qda.fit
#Accuracy
qda.class <- predict(qda.fit, newdata = test.cat)$class
accuracy(test.cat$num, qda.class)
########### KNN ############
n <- c(1,5,10)
knn_optimum <- function (train, test, n) {
  models <- data.frame(matrix(ncol = 2, nrow = length(n)))
  names(models) <- c('K-NN', 'Accuracy')
  row <- 1
  for (i in n){
    # exclude the response `num` from the predictors and coerce any factor
    # columns to numeric codes, since class::knn() requires numeric features
    knn.fit <- knn(data.matrix(train[, names(train) != 'num']),
                   data.matrix(test[, names(test) != 'num']),
                   train$num, k = i)
    models[row,1] <- i
    models[row,2] <- accuracy(test$num, knn.fit)
    row <- row + 1
  }
  print(models)
}
set.seed(1)
knn_optimum(train.cat, test.cat, n)
plot(knn_optimum(train.cat, test.cat, n))
abline(v = 5, lty=2)
########## Evaluation #############
f <- c("sex", "cp", "fbs", "slope", "exang", "ca", "thal")
to_factor <- function(df, features){
for (i in features){
df[,i] <- factor(df[,i])
}
  df
}
data.cat <- to_factor(data_heart, f)
#str(data.cat)
##########
# Part C
##########
################## Linear Regression
#- LOOCV
trControl <- trainControl(method = "LOOCV", savePredictions = 'all')
glm.fit.loocv <- train(factor(num)~., data = data.cat, family = 'binomial', method = 'glm', trControl = trControl)
#Accuracy
glm.fit.loocv$results[2]
#- 10-Fold CV
set.seed(1)
trControl <- trainControl(method = "cv", number = 10, savePredictions = 'all')
glm.fit.10 <- train(factor(num)~., data = data.cat, family = 'binomial', method = 'glm', trControl = trControl)
#Accuracy
glm.fit.10$results[2]
########### LDA ############
#- LOOCV
set.seed(1)
trControl <- trainControl(method = "LOOCV", savePredictions = 'all')
lda.fit.loocv <- train(factor(num)~., data = data.cat, method = 'lda', trControl = trControl)
#Accuracy
lda.fit.loocv$results[2]
#- 10-Fold CV
set.seed(1)
trControl <- trainControl(method = "cv", number = 10, savePredictions = 'all')
lda.fit.10 <- train(factor(num)~., data = data.cat, method = 'lda', trControl = trControl)
#Accuracy
lda.fit.10$results[2]
########## QDA #############
#- LOOCV
set.seed(1)
trControl <- trainControl(method = "LOOCV", savePredictions = 'all')
qda.fit.loocv <- train(factor(num)~., data = data.cat, method = 'qda', trControl = trControl)
#Accuracy
qda.fit.loocv$results[2]
#- 10-Fold CV
set.seed(1)
trControl <- trainControl(method = "cv", number = 10, savePredictions = 'all')
qda.fit.10 <- train(factor(num)~., data = data.cat, method = 'qda', trControl = trControl)
#Accuracy
qda.fit.10$results[2]
|
74f32c496cbbabfd7642ee0d8aea4f9a4bd44825
|
1a2d86c73a4c32fba9eb94be677f60b1b049833d
|
/tests/testthat.R
|
3d81a8655bd09bdd87d4e76f8257e3f294c704c8
|
[] |
no_license
|
cran/qpNCA
|
a13fdfd22d3021bea539373ae5354f3ca7436622
|
03f47c29254cd40f71858025e3aef7c9db3fb317
|
refs/heads/master
| 2023-07-12T22:43:17.847268
| 2021-08-16T11:50:02
| 2021-08-16T11:50:02
| 385,804,562
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 58
|
r
|
testthat.R
|
library(testthat)
library(qpNCA)
test_check("qpNCA")
|
61d3ff04e2052c303c8992432c2e1fdca585327e
|
ce16ab16f539400008d7290c3b20438014e15cc4
|
/man/plot_dist_density.Rd
|
1890a62eb5ae24994d26c6431b0b4696dc24c0f5
|
[
"MIT"
] |
permissive
|
antonvsdata/notame
|
5c7f61a7aa1f37cbc0e162c048d0cae3a9fa7cb9
|
6afdf6f0f23a3d172edcd60c28c23be0be59626e
|
refs/heads/main
| 2023-08-17T05:57:38.037628
| 2023-04-02T10:12:36
| 2023-04-02T10:12:36
| 231,045,948
| 11
| 3
|
MIT
| 2023-01-21T12:41:25
| 2019-12-31T07:19:31
|
R
|
UTF-8
|
R
| false
| true
| 1,517
|
rd
|
plot_dist_density.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qc_visualizations.R
\name{plot_dist_density}
\alias{plot_dist_density}
\title{Distance density plot}
\usage{
plot_dist_density(
object,
all_features = FALSE,
dist_method = "euclidean",
center = TRUE,
scale = "uv",
color_scale = getOption("notame.color_scale_dis"),
fill_scale = getOption("notame.fill_scale_dis"),
title = paste("Density plot of", dist_method, "distances between samples"),
subtitle = NULL
)
}
\arguments{
\item{object}{a MetaboSet object}
\item{all_features}{logical, should all features be used? If FALSE (the default), flagged features are removed before visualization.}
\item{dist_method}{method for calculating the distances, passed to dist}
\item{center}{logical, should the data be centered?}
\item{scale}{scaling used, as in pcaMethods::prep. Default is "uv" for unit variance}
\item{color_scale}{a scale for the color of the edge of density curves, as returned by a ggplot function}
\item{fill_scale}{a scale for the fill of the density curves, as returned by a ggplot function}
\item{title}{the plot title}
\item{subtitle}{the plot subtitle}
}
\value{
a ggplot object
}
\description{
Plot density of distances between samples in QC samples and actual samples
}
\examples{
plot_dist_density(merged_sample)
# Drift correction tightens QCs together
plot_dist_density(correct_drift(merged_sample))
}
\seealso{
\code{\link[stats]{dist}}
}
|
e47f6eb673f93dc4c9c5c12b69652d82e02fb739
|
320cbfb68493293c7c37351c3c38a48ac7bcfa2d
|
/IndividualReport/Cluster.R
|
0983d9dbc6ff90d7033dc3ecfc30b6df582d2666
|
[] |
no_license
|
JhPlayGround/crime
|
409c71af33effa8a731eb95a041fcee307f33102
|
71b02efad6ce9b460f4a7c56dcb826ebf6e7e82f
|
refs/heads/master
| 2020-03-14T21:54:19.530704
| 2018-06-07T07:15:53
| 2018-06-07T07:15:53
| 129,665,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,656
|
r
|
Cluster.R
|
# Create a matrix from 100 random normal draws of the x variable
x = matrix(rnorm(100),nrow=5)
dist(x)
# Compute Manhattan and maximum distances
dist(x,method = "manhattan")
dist(x,method = "maximum")
data(iris)
head(iris)
# Cluster the iris data, excluding the 5th column, with k=3
# k=3 because we already know Species has three groups
kmeans.iris = kmeans(iris[,-5],3)
# Round to two decimal places
round(sum(kmeans.iris$withinss),2)
# Check which cluster each observation was assigned to via cluster
kmeans.iris$cluster
# Compare the clustering against the original labels with table()
# original data
iris[,5]
# cluster assignments
kmeans.iris$cluster
# table
table(iris[,5],kmeans.iris$cluster)
# Check the center of each cluster
kmeans.iris$centers
# Guard against a poor random initialization by raising the number of starts to 10
kmeans10.iris = kmeans(iris[,-5],3,nstart = 10)
# Re-check the clustering after the 10 starts
round(sum(kmeans10.iris$withinss),2)
kmeans10.iris$centers
table(iris$Species,kmeans10.iris$cluster)
# Checking the result visually
library(ggplot2)
# pch is the symbol, col the color, cex the size
plot(iris[,1:2],pch=8,col=1:3,cex=2)
# Plot to find the most suitable number of clusters
# check k from 2 to 6
visual = NULL
for(i in 2:6)
{
  set.seed(0723)
  eval(parse(text=paste("result",i,"=kmeans(iris[,-5],",i,");",sep="")))
  eval(parse(text=paste("visual[",i,"]=result",i,"$tot.withinss",sep="")))
}
plot(visual[-1],type="l",ylab="",xlab="",main="Within-cluster variance by number of clusters")
abline(v=3,col="red")
# k=3 fits best!
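# A simpler equivalent of the loop above, without eval/parse (sketch):
# visual <- sapply(2:6, function(k){ set.seed(0723); kmeans(iris[,-5], k)$tot.withinss })
# plot(2:6, visual, type = "l")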
|